From aa43c2158d5ae1dc76cccb08cd57a3ffd32c3825 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Fri, 8 Apr 2011 19:06:30 -0700 Subject: qlogic: Move the QLogic drivers Moves the QLogic drivers into drivers/net/ethernet/qlogic/ and the necessary Kconfig and Makefile changes. CC: Ron Mercer CC: Amit Kumar Salecha CC: Anirban Chakraborty Signed-off-by: Jeff Kirsher Acked-by: Anirban Chakraborty --- drivers/net/Kconfig | 26 - drivers/net/Makefile | 3 - drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/qlogic/Kconfig | 53 + drivers/net/ethernet/qlogic/Makefile | 8 + drivers/net/ethernet/qlogic/netxen/Makefile | 29 + drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 1441 ++++++ .../net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 793 ++++ .../ethernet/qlogic/netxen/netxen_nic_ethtool.c | 835 ++++ .../net/ethernet/qlogic/netxen/netxen_nic_hdr.h | 1050 +++++ drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 1976 ++++++++ drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h | 287 ++ .../net/ethernet/qlogic/netxen/netxen_nic_init.c | 1949 ++++++++ .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 3100 ++++++++++++ drivers/net/ethernet/qlogic/qla3xxx.c | 3970 ++++++++++++++++ drivers/net/ethernet/qlogic/qla3xxx.h | 1189 +++++ drivers/net/ethernet/qlogic/qlcnic/Makefile | 8 + drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 1555 ++++++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 1117 +++++ .../net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 1234 +++++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h | 1023 ++++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 1787 +++++++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 1898 ++++++++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4390 +++++++++++++++++ drivers/net/ethernet/qlogic/qlge/Makefile | 7 + drivers/net/ethernet/qlogic/qlge/qlge.h | 2334 +++++++++ drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 2044 ++++++++ drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 688 +++ drivers/net/ethernet/qlogic/qlge/qlge_main.c | 4987 ++++++++++++++++++++ drivers/net/ethernet/qlogic/qlge/qlge_mpi.c | 1284 +++++ drivers/net/netxen/Makefile | 29 - drivers/net/netxen/netxen_nic.h | 1441 ------ drivers/net/netxen/netxen_nic_ctx.c | 793 ---- drivers/net/netxen/netxen_nic_ethtool.c | 835 ---- drivers/net/netxen/netxen_nic_hdr.h | 1050 ----- drivers/net/netxen/netxen_nic_hw.c | 1976 -------- drivers/net/netxen/netxen_nic_hw.h | 287 -- drivers/net/netxen/netxen_nic_init.c | 1949 -------- drivers/net/netxen/netxen_nic_main.c | 3100 ------------ drivers/net/qla3xxx.c | 3970 ---------------- drivers/net/qla3xxx.h | 1189 ----- drivers/net/qlcnic/Makefile | 8 - drivers/net/qlcnic/qlcnic.h | 1555 ------ drivers/net/qlcnic/qlcnic_ctx.c | 1117 ----- drivers/net/qlcnic/qlcnic_ethtool.c | 1234 ----- drivers/net/qlcnic/qlcnic_hdr.h | 1023 ---- drivers/net/qlcnic/qlcnic_hw.c | 1787 ------- drivers/net/qlcnic/qlcnic_init.c | 1898 -------- drivers/net/qlcnic/qlcnic_main.c | 4390 ----------------- drivers/net/qlge/Makefile | 7 - drivers/net/qlge/qlge.h | 2334 --------- drivers/net/qlge/qlge_dbg.c | 2044 -------- drivers/net/qlge/qlge_ethtool.c | 688 --- drivers/net/qlge/qlge_main.c | 4987 -------------------- drivers/net/qlge/qlge_mpi.c | 1284 ----- 56 files changed, 41038 insertions(+), 41004 deletions(-) create mode 100644 drivers/net/ethernet/qlogic/Kconfig create mode 100644 drivers/net/ethernet/qlogic/Makefile create mode 100644 drivers/net/ethernet/qlogic/netxen/Makefile create mode 100644 
drivers/net/ethernet/qlogic/netxen/netxen_nic.h create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c create mode 100644 drivers/net/ethernet/qlogic/qla3xxx.c create mode 100644 drivers/net/ethernet/qlogic/qla3xxx.h create mode 100644 drivers/net/ethernet/qlogic/qlcnic/Makefile create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c create mode 100644 drivers/net/ethernet/qlogic/qlge/Makefile create mode 100644 drivers/net/ethernet/qlogic/qlge/qlge.h create mode 100644 drivers/net/ethernet/qlogic/qlge/qlge_dbg.c create mode 100644 drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c create mode 100644 drivers/net/ethernet/qlogic/qlge/qlge_main.c create mode 100644 drivers/net/ethernet/qlogic/qlge/qlge_mpi.c delete mode 100644 drivers/net/netxen/Makefile delete mode 100644 drivers/net/netxen/netxen_nic.h delete mode 100644 drivers/net/netxen/netxen_nic_ctx.c delete mode 100644 drivers/net/netxen/netxen_nic_ethtool.c delete mode 100644 drivers/net/netxen/netxen_nic_hdr.h delete mode 100644 drivers/net/netxen/netxen_nic_hw.c delete mode 100644 drivers/net/netxen/netxen_nic_hw.h delete mode 100644 drivers/net/netxen/netxen_nic_init.c delete mode 100644 drivers/net/netxen/netxen_nic_main.c delete mode 100644 drivers/net/qla3xxx.c delete mode 100644 drivers/net/qla3xxx.h delete mode 100644 drivers/net/qlcnic/Makefile delete mode 100644 drivers/net/qlcnic/qlcnic.h delete mode 100644 drivers/net/qlcnic/qlcnic_ctx.c delete mode 100644 drivers/net/qlcnic/qlcnic_ethtool.c delete mode 100644 drivers/net/qlcnic/qlcnic_hdr.h delete mode 100644 drivers/net/qlcnic/qlcnic_hw.c delete mode 100644 drivers/net/qlcnic/qlcnic_init.c delete mode 100644 drivers/net/qlcnic/qlcnic_main.c delete mode 100644 drivers/net/qlge/Makefile delete mode 100644 drivers/net/qlge/qlge.h delete mode 100644 drivers/net/qlge/qlge_dbg.c delete mode 100644 drivers/net/qlge/qlge_ethtool.c delete mode 100644 drivers/net/qlge/qlge_main.c delete mode 100644 drivers/net/qlge/qlge_mpi.c (limited to 'drivers/net') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index e6491169cc6f..de2293d23d55 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1707,15 +1707,6 @@ config XILINX_LL_TEMAC This driver supports the Xilinx 10/100/1000 LocalLink TEMAC core used in Xilinx Spartan and Virtex FPGAs -config QLA3XXX - tristate "QLogic QLA3XXX Network Driver Support" - depends on PCI - help - This driver supports QLogic ISP3XXX gigabit Ethernet cards. - - To compile this driver as a module, choose M here: the module - will be called qla3xxx. 
- config ATL1 tristate "Atheros/Attansic L1 Gigabit Ethernet support" depends on PCI @@ -1954,23 +1945,6 @@ config TEHUTI help Tehuti Networks 10G Ethernet NIC -config QLCNIC - tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support" - depends on PCI - select FW_LOADER - help - This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet - devices. - -config QLGE - tristate "QLogic QLGE 10Gb Ethernet Driver Support" - depends on PCI - help - This driver supports QLogic ISP8XXX 10Gb Ethernet cards. - - To compile this driver as a module, choose M here: the module - will be called qlge. - config BNA tristate "Brocade 1010/1020 10Gb Ethernet Driver support" depends on PCI diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 84b986004385..a58a9f0b7999 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -110,9 +110,6 @@ obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o ll_temac-objs := ll_temac_main.o ll_temac_mdio.o obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o -obj-$(CONFIG_QLA3XXX) += qla3xxx.o -obj-$(CONFIG_QLCNIC) += qlcnic/ -obj-$(CONFIG_QLGE) += qlge/ obj-$(CONFIG_PPP) += ppp_generic.o obj-$(CONFIG_PPP_ASYNC) += ppp_async.o diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index a2fd38562cb3..ab591bb96702 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -17,5 +17,6 @@ source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" source "drivers/net/ethernet/chelsio/Kconfig" source "drivers/net/ethernet/intel/Kconfig" +source "drivers/net/ethernet/qlogic/Kconfig" endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 526527177d13..d8cf120e3322 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ +obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig new file mode 100644 index 000000000000..a7c4424011ec --- /dev/null +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -0,0 +1,53 @@ +# +# QLogic network device configuration +# + +config NET_VENDOR_QLOGIC + bool "QLogic devices" + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about QLogic cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_QLOGIC + +config QLA3XXX + tristate "QLogic QLA3XXX Network Driver Support" + depends on PCI + ---help--- + This driver supports QLogic ISP3XXX gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called qla3xxx. + +config QLCNIC + tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support" + depends on PCI + select FW_LOADER + ---help--- + This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet + devices. + +config QLGE + tristate "QLogic QLGE 10Gb Ethernet Driver Support" + depends on PCI + ---help--- + This driver supports QLogic ISP8XXX 10Gb Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called qlge. 
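+
+# A sketch of how the new vendor guard behaves (example .config values):
+# with CONFIG_NET_VENDOR_QLOGIC=y the entries in this file are offered
+# individually, e.g. CONFIG_QLA3XXX=m and CONFIG_QLGE=m build the
+# qla3xxx.ko and qlge.ko modules; with CONFIG_NET_VENDOR_QLOGIC=n the
+# configurator simply skips every QLogic prompt.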
+ +config NETXEN_NIC + tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC" + depends on PCI + select FW_LOADER + ---help--- + This enables the support for NetXen's Gigabit Ethernet card. + +endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile new file mode 100644 index 000000000000..b2a283d9ae60 --- /dev/null +++ b/drivers/net/ethernet/qlogic/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the QLogic network device drivers. +# + +obj-$(CONFIG_QLA3XXX) += qla3xxx.o +obj-$(CONFIG_QLCNIC) += qlcnic/ +obj-$(CONFIG_QLGE) += qlge/ +obj-$(CONFIG_NETXEN_NIC) += netxen/ diff --git a/drivers/net/ethernet/qlogic/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile new file mode 100644 index 000000000000..861a0590b1f4 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/Makefile @@ -0,0 +1,29 @@ +# Copyright (C) 2003 - 2009 NetXen, Inc. +# Copyright (C) 2009 - QLogic Corporation. +# All rights reserved. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, +# MA 02111-1307, USA. +# +# The full GNU General Public License is included in this distribution +# in the file called "COPYING". +# +# + + +obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o + +netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \ + netxen_nic_ethtool.o netxen_nic_ctx.o diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h new file mode 100644 index 000000000000..196b660e1d91 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -0,0 +1,1441 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ * + */ + +#ifndef _NETXEN_NIC_H_ +#define _NETXEN_NIC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + +#include "netxen_nic_hdr.h" +#include "netxen_nic_hw.h" + +#define _NETXEN_NIC_LINUX_MAJOR 4 +#define _NETXEN_NIC_LINUX_MINOR 0 +#define _NETXEN_NIC_LINUX_SUBVERSION 76 +#define NETXEN_NIC_LINUX_VERSIONID "4.0.76" + +#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) +#define _major(v) (((v) >> 24) & 0xff) +#define _minor(v) (((v) >> 16) & 0xff) +#define _build(v) ((v) & 0xffff) + +/* version in image has weird encoding: + * 7:0 - major + * 15:8 - minor + * 31:16 - build (little endian) + */ +#define NETXEN_DECODE_VERSION(v) \ + NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) + +#define NETXEN_NUM_FLASH_SECTORS (64) +#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024) +#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \ + * NETXEN_FLASH_SECTOR_SIZE) + +#define RCV_DESC_RINGSIZE(rds_ring) \ + (sizeof(struct rcv_desc) * (rds_ring)->num_desc) +#define RCV_BUFF_RINGSIZE(rds_ring) \ + (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc) +#define STATUS_DESC_RINGSIZE(sds_ring) \ + (sizeof(struct status_desc) * (sds_ring)->num_desc) +#define TX_BUFF_RINGSIZE(tx_ring) \ + (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc) +#define TX_DESC_RINGSIZE(tx_ring) \ + (sizeof(struct cmd_desc_type0) * tx_ring->num_desc) + +#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) + +#define NETXEN_RCV_PRODUCER_OFFSET 0 +#define NETXEN_RCV_PEG_DB_ID 2 +#define NETXEN_HOST_DUMMY_DMA_SIZE 1024 +#define FLASH_SUCCESS 0 + +#define ADDR_IN_WINDOW1(off) \ + ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 
1 : 0 + +#define ADDR_IN_RANGE(addr, low, high) \ + (((addr) < (high)) && ((addr) >= (low))) + +/* + * normalize a 64MB crb address to 32MB PCI window + * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1 + */ +#define NETXEN_CRB_NORMAL(reg) \ + ((reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST) + +#define NETXEN_CRB_NORMALIZE(adapter, reg) \ + pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg)) + +#define DB_NORMALIZE(adapter, off) \ + (adapter->ahw.db_base + (off)) + +#define NX_P2_C0 0x24 +#define NX_P2_C1 0x25 +#define NX_P3_A0 0x30 +#define NX_P3_A2 0x30 +#define NX_P3_B0 0x40 +#define NX_P3_B1 0x41 +#define NX_P3_B2 0x42 +#define NX_P3P_A0 0x50 + +#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1) +#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0) +#define NX_IS_REVISION_P3P(REVISION) (REVISION >= NX_P3P_A0) + +#define FIRST_PAGE_GROUP_START 0 +#define FIRST_PAGE_GROUP_END 0x100000 + +#define SECOND_PAGE_GROUP_START 0x6000000 +#define SECOND_PAGE_GROUP_END 0x68BC000 + +#define THIRD_PAGE_GROUP_START 0x70E4000 +#define THIRD_PAGE_GROUP_END 0x8000000 + +#define FIRST_PAGE_GROUP_SIZE FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START +#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START +#define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START + +#define P2_MAX_MTU (8000) +#define P3_MAX_MTU (9600) +#define NX_ETHERMTU 1500 +#define NX_MAX_ETHERHDR 32 /* This contains some padding */ + +#define NX_P2_RX_BUF_MAX_LEN 1760 +#define NX_P3_RX_BUF_MAX_LEN (NX_MAX_ETHERHDR + NX_ETHERMTU) +#define NX_P2_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P2_MAX_MTU) +#define NX_P3_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P3_MAX_MTU) +#define NX_CT_DEFAULT_RX_BUF_LEN 2048 +#define NX_LRO_BUFFER_EXTRA 2048 + +#define NX_RX_LRO_BUFFER_LENGTH (8060) + +/* + * Maximum number of ring contexts + */ +#define MAX_RING_CTX 1 + +/* Opcodes to be used with the commands */ +#define TX_ETHER_PKT 0x01 +#define TX_TCP_PKT 0x02 +#define TX_UDP_PKT 0x03 +#define TX_IP_PKT 0x04 +#define TX_TCP_LSO 0x05 +#define TX_TCP_LSO6 0x06 +#define TX_IPSEC 0x07 +#define TX_IPSEC_CMD 0x0a +#define TX_TCPV6_PKT 0x0b +#define TX_UDPV6_PKT 0x0c + +/* The following opcodes are for internal consumption. */ +#define NETXEN_CONTROL_OP 0x10 +#define PEGNET_REQUEST 0x11 + +#define MAX_NUM_CARDS 4 + +#define NETXEN_MAX_FRAGS_PER_TX 14 +#define MAX_TSO_HEADER_DESC 2 +#define MGMT_CMD_DESC_RESV 4 +#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ + + MGMT_CMD_DESC_RESV) +#define NX_MAX_TX_TIMEOUTS 2 + +/* + * Following are the states of the Phantom. Phantom will set them and + * Host will read to check if the fields are correct. 
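+ *
+ * For example (a sketch of the init-handshake using the defines below):
+ * the host polls this state until it reads PHAN_INITIALIZE_COMPLETE (or
+ * bails out on PHAN_INITIALIZE_FAILED), then writes PHAN_INITIALIZE_ACK
+ * to tell the firmware that the handshake is done.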
+ */
+#define PHAN_INITIALIZE_START		0xff00
+#define PHAN_INITIALIZE_FAILED		0xffff
+#define PHAN_INITIALIZE_COMPLETE	0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK		0xf00f
+
+#define NUM_RCV_DESC_RINGS	3
+#define NUM_STS_DESC_RINGS	4
+
+#define RCV_RING_NORMAL	0
+#define RCV_RING_JUMBO	1
+#define RCV_RING_LRO	2
+
+#define MIN_CMD_DESCRIPTORS		64
+#define MIN_RCV_DESCRIPTORS		64
+#define MIN_JUMBO_DESCRIPTORS		32
+
+#define MAX_CMD_DESCRIPTORS		1024
+#define MAX_RCV_DESCRIPTORS_1G		4096
+#define MAX_RCV_DESCRIPTORS_10G		8192
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G	512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G	1024
+#define MAX_LRO_RCV_DESCRIPTORS		8
+
+#define DEFAULT_RCV_DESCRIPTORS_1G	2048
+#define DEFAULT_RCV_DESCRIPTORS_10G	4096
+
+#define NETXEN_CTX_SIGNATURE		0xdee0
+#define NETXEN_CTX_SIGNATURE_V2		0x0002dee0
+#define NETXEN_CTX_RESET		0xbad0
+#define NETXEN_CTX_D3_RESET		0xacc0
+#define NETXEN_RCV_PRODUCER(ringid)	(ringid)
+
+#define PHAN_PEG_RCV_INITIALIZED	0xff01
+#define PHAN_PEG_RCV_START_INITIALIZE	0xff00
+
+#define get_next_index(index, length)	\
+	(((index) + 1) & ((length) - 1))
+
+#define get_index_range(index, length, count)	\
+	(((index) + (count)) & ((length) - 1))
+
+#define MPORT_SINGLE_FUNCTION_MODE	0x1111
+#define MPORT_MULTI_FUNCTION_MODE	0x2222
+
+#define NX_MAX_PCI_FUNC		8
+
+/*
+ * NetXen host-peg signal message structure
+ *
+ * Bit 0-1  : peg_id => 0x2 for tx and 0x1 for rx
+ * Bit 2    : priv_id => must be 1
+ * Bit 3-17 : count => for doorbell
+ * Bit 18-27: ctx_id => context id
+ * Bit 28-31: opcode
+ */
+
+typedef u32 netxen_ctx_msg;
+
+#define netxen_set_msg_peg_id(config_word, val)	\
+	((config_word) &= ~3, (config_word) |= val & 3)
+#define netxen_set_msg_privid(config_word)	\
+	((config_word) |= 1 << 2)
+#define netxen_set_msg_count(config_word, val)	\
+	((config_word) &= ~(0x7fff<<3), (config_word) |= (val & 0x7fff) << 3)
+#define netxen_set_msg_ctxid(config_word, val)	\
+	((config_word) &= ~(0x3ff<<18), (config_word) |= (val & 0x3ff) << 18)
+#define netxen_set_msg_opcode(config_word, val)	\
+	((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28)
+
+struct netxen_rcv_ring {
+	__le64 addr;
+	__le32 size;
+	__le32 rsrvd;
+};
+
+struct netxen_sts_ring {
+	__le64 addr;
+	__le32 size;
+	__le16 msi_index;
+	__le16 rsvd;
+};
+
+struct netxen_ring_ctx {
+
+	/* one command ring */
+	__le64 cmd_consumer_offset;
+	__le64 cmd_ring_addr;
+	__le32 cmd_ring_size;
+	__le32 rsrvd;
+
+	/* three receive rings */
+	struct netxen_rcv_ring rcv_rings[NUM_RCV_DESC_RINGS];
+
+	__le64 sts_ring_addr;
+	__le32 sts_ring_size;
+
+	__le32 ctx_id;
+
+	__le64 rsrvd_2[3];
+	__le32 sts_ring_count;
+	__le32 rsrvd_3;
+	struct netxen_sts_ring sts_rings[NUM_STS_DESC_RINGS];
+
+} __attribute__ ((aligned(64)));
+
+/*
+ * The following data structures describe the descriptors that will be used.
+ * The added fields tcpHdrSize and ipHdrSize need to be filled in by the
+ * driver only when doing LSO (i.e. for packets above the 1500-byte MTU).
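+ * (These presumably correspond to the tcp_hdr_offset and ip_hdr_offset
+ * fields of cmd_desc_type0 below, which are likewise marked "For LSO
+ * only".)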
+ */
+
+/*
+ * The size of the reference handle has been changed to 16 bits to pass the
+ * MSS field for the LSO packet.
+ */
+
+#define FLAGS_CHECKSUM_ENABLED	0x01
+#define FLAGS_LSO_ENABLED	0x02
+#define FLAGS_IPSEC_SA_ADD	0x04
+#define FLAGS_IPSEC_SA_DELETE	0x08
+#define FLAGS_VLAN_TAGGED	0x10
+#define FLAGS_VLAN_OOB		0x40
+
+#define netxen_set_tx_vlan_tci(cmd_desc, v)	\
+	(cmd_desc)->vlan_TCI = cpu_to_le16(v);
+
+#define netxen_set_cmd_desc_port(cmd_desc, var)	\
+	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define netxen_set_cmd_desc_ctxid(cmd_desc, var)	\
+	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define netxen_set_tx_port(_desc, _port)	\
+	(_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)
+
+#define netxen_set_tx_flags_opcode(_desc, _flags, _opcode)	\
+	(_desc)->flags_opcode =	\
+	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))
+
+#define netxen_set_tx_frags_len(_desc, _frags, _len)	\
+	(_desc)->nfrags__length =	\
+	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8))
+
+struct cmd_desc_type0 {
+	u8 tcp_hdr_offset;	/* For LSO only */
+	u8 ip_hdr_offset;	/* For LSO only */
+	__le16 flags_opcode;	/* 15:13 unused, 12:7 opcode, 6:0 flags */
+	__le32 nfrags__length;	/* 31:8 total len, 7:0 frag count */
+
+	__le64 addr_buffer2;
+
+	__le16 reference_handle;
+	__le16 mss;
+	u8 port_ctxid;		/* 7:4 ctxid 3:0 port */
+	u8 total_hdr_length;	/* LSO only : MAC+IP+TCP Hdr size */
+	__le16 conn_id;		/* IPSec offload only */
+
+	__le64 addr_buffer3;
+	__le64 addr_buffer1;
+
+	__le16 buffer_length[4];
+
+	__le64 addr_buffer4;
+
+	__le32 reserved2;
+	__le16 reserved;
+	__le16 vlan_TCI;
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+	__le16 reference_handle;
+	__le16 reserved;
+	__le32 buffer_length;	/* allocated buffer length (usually 2K) */
+	__le64 addr_buffer;
+};
+
+/* opcode field in status_desc */
+#define NETXEN_NIC_SYN_OFFLOAD	0x03
+#define NETXEN_NIC_RXPKT_DESC	0x04
+#define NETXEN_OLD_RXPKT_DESC	0x3f
+#define NETXEN_NIC_RESPONSE_DESC	0x05
+#define NETXEN_NIC_LRO_DESC	0x12
+
+/* for status field in status_desc */
+#define STATUS_NEED_CKSUM	(1)
+#define STATUS_CKSUM_OK		(2)
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST	(0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)
+
+/* Status descriptor:
+   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+   53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define netxen_get_sts_port(sts_data)	\
+	((sts_data) & 0x0F)
+#define netxen_get_sts_status(sts_data)	\
+	(((sts_data) >> 4) & 0x0F)
+#define netxen_get_sts_type(sts_data)	\
+	(((sts_data) >> 8) & 0x0F)
+#define netxen_get_sts_totallength(sts_data)	\
+	(((sts_data) >> 12) & 0xFFFF)
+#define netxen_get_sts_refhandle(sts_data)	\
+	(((sts_data) >> 28) & 0xFFFF)
+#define netxen_get_sts_prot(sts_data)	\
+	(((sts_data) >> 44) & 0x0F)
+#define netxen_get_sts_pkt_offset(sts_data)	\
+	(((sts_data) >> 48) & 0x1F)
+#define netxen_get_sts_desc_cnt(sts_data)	\
+	(((sts_data) >> 53) & 0x7)
+#define netxen_get_sts_opcode(sts_data)	\
+	(((sts_data) >> 58) & 0x03F)
+
+#define netxen_get_lro_sts_refhandle(sts_data)	\
+	((sts_data) & 0x0FFFF)
+#define netxen_get_lro_sts_length(sts_data)	\
+	(((sts_data) >> 16) & 0x0FFFF)
+#define netxen_get_lro_sts_l2_hdr_offset(sts_data)	\
+	(((sts_data) >> 32) & 0x0FF)
+#define netxen_get_lro_sts_l4_hdr_offset(sts_data)	\
+	(((sts_data) >> 40) & 0x0FF)
+#define netxen_get_lro_sts_timestamp(sts_data)
\ + (((sts_data) >> 48) & 0x1) +#define netxen_get_lro_sts_type(sts_data) \ + (((sts_data) >> 49) & 0x7) +#define netxen_get_lro_sts_push_flag(sts_data) \ + (((sts_data) >> 52) & 0x1) +#define netxen_get_lro_sts_seq_number(sts_data) \ + ((sts_data) & 0x0FFFFFFFF) + + +struct status_desc { + __le64 status_desc_data[2]; +} __attribute__ ((aligned(16))); + +/* UNIFIED ROMIMAGE *************************/ +#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0 +#define NX_UNI_DIR_SECT_BOOTLD 0x6 +#define NX_UNI_DIR_SECT_FW 0x7 + +/*Offsets */ +#define NX_UNI_CHIP_REV_OFF 10 +#define NX_UNI_FLAGS_OFF 11 +#define NX_UNI_BIOS_VERSION_OFF 12 +#define NX_UNI_BOOTLD_IDX_OFF 27 +#define NX_UNI_FIRMWARE_IDX_OFF 29 + +struct uni_table_desc{ + uint32_t findex; + uint32_t num_entries; + uint32_t entry_size; + uint32_t reserved[5]; +}; + +struct uni_data_desc{ + uint32_t findex; + uint32_t size; + uint32_t reserved[5]; +}; + +/* UNIFIED ROMIMAGE *************************/ + +/* The version of the main data structure */ +#define NETXEN_BDINFO_VERSION 1 + +/* Magic number to let user know flash is programmed */ +#define NETXEN_BDINFO_MAGIC 0x12345678 + +/* Max number of Gig ports on a Phantom board */ +#define NETXEN_MAX_PORTS 4 + +#define NETXEN_BRDTYPE_P1_BD 0x0000 +#define NETXEN_BRDTYPE_P1_SB 0x0001 +#define NETXEN_BRDTYPE_P1_SMAX 0x0002 +#define NETXEN_BRDTYPE_P1_SOCK 0x0003 + +#define NETXEN_BRDTYPE_P2_SOCK_31 0x0008 +#define NETXEN_BRDTYPE_P2_SOCK_35 0x0009 +#define NETXEN_BRDTYPE_P2_SB35_4G 0x000a +#define NETXEN_BRDTYPE_P2_SB31_10G 0x000b +#define NETXEN_BRDTYPE_P2_SB31_2G 0x000c + +#define NETXEN_BRDTYPE_P2_SB31_10G_IMEZ 0x000d +#define NETXEN_BRDTYPE_P2_SB31_10G_HMEZ 0x000e +#define NETXEN_BRDTYPE_P2_SB31_10G_CX4 0x000f + +#define NETXEN_BRDTYPE_P3_REF_QG 0x0021 +#define NETXEN_BRDTYPE_P3_HMEZ 0x0022 +#define NETXEN_BRDTYPE_P3_10G_CX4_LP 0x0023 +#define NETXEN_BRDTYPE_P3_4_GB 0x0024 +#define NETXEN_BRDTYPE_P3_IMEZ 0x0025 +#define NETXEN_BRDTYPE_P3_10G_SFP_PLUS 0x0026 +#define NETXEN_BRDTYPE_P3_10000_BASE_T 0x0027 +#define NETXEN_BRDTYPE_P3_XG_LOM 0x0028 +#define NETXEN_BRDTYPE_P3_4_GB_MM 0x0029 +#define NETXEN_BRDTYPE_P3_10G_SFP_CT 0x002a +#define NETXEN_BRDTYPE_P3_10G_SFP_QT 0x002b +#define NETXEN_BRDTYPE_P3_10G_CX4 0x0031 +#define NETXEN_BRDTYPE_P3_10G_XFP 0x0032 +#define NETXEN_BRDTYPE_P3_10G_TP 0x0080 + +/* Flash memory map */ +#define NETXEN_CRBINIT_START 0 /* crbinit section */ +#define NETXEN_BRDCFG_START 0x4000 /* board config */ +#define NETXEN_INITCODE_START 0x6000 /* pegtune code */ +#define NETXEN_BOOTLD_START 0x10000 /* bootld */ +#define NETXEN_IMAGE_START 0x43000 /* compressed image */ +#define NETXEN_SECONDARY_START 0x200000 /* backup images */ +#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ +#define NETXEN_USER_START 0x3E8000 /* Firmare info */ +#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ +#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ + +#define NX_OLD_MAC_ADDR_OFFSET (NETXEN_USER_START) +#define NX_FW_VERSION_OFFSET (NETXEN_USER_START+0x408) +#define NX_FW_SIZE_OFFSET (NETXEN_USER_START+0x40c) +#define NX_FW_MAC_ADDR_OFFSET (NETXEN_USER_START+0x418) +#define NX_FW_SERIAL_NUM_OFFSET (NETXEN_USER_START+0x81c) +#define NX_BIOS_VERSION_OFFSET (NETXEN_USER_START+0x83c) + +#define NX_HDR_VERSION_OFFSET (NETXEN_BRDCFG_START) +#define NX_BRDTYPE_OFFSET (NETXEN_BRDCFG_START+0x8) +#define NX_FW_MAGIC_OFFSET (NETXEN_BRDCFG_START+0x128) + +#define NX_FW_MIN_SIZE (0x3fffff) +#define NX_P2_MN_ROMIMAGE 0 +#define NX_P3_CT_ROMIMAGE 1 +#define NX_P3_MN_ROMIMAGE 2 
+#define NX_UNIFIED_ROMIMAGE 3 +#define NX_FLASH_ROMIMAGE 4 +#define NX_UNKNOWN_ROMIMAGE 0xff + +#define NX_P2_MN_ROMIMAGE_NAME "nxromimg.bin" +#define NX_P3_CT_ROMIMAGE_NAME "nx3fwct.bin" +#define NX_P3_MN_ROMIMAGE_NAME "nx3fwmn.bin" +#define NX_UNIFIED_ROMIMAGE_NAME "phanfw.bin" +#define NX_FLASH_ROMIMAGE_NAME "flash" + +extern char netxen_nic_driver_name[]; + +/* Number of status descriptors to handle per interrupt */ +#define MAX_STATUS_HANDLE (64) + +/* + * netxen_skb_frag{} is to contain mapping info for each SG list. This + * has to be freed when DMA is complete. This is part of netxen_tx_buffer{}. + */ +struct netxen_skb_frag { + u64 dma; + u64 length; +}; + +struct netxen_recv_crb { + u32 crb_rcv_producer[NUM_RCV_DESC_RINGS]; + u32 crb_sts_consumer[NUM_STS_DESC_RINGS]; + u32 sw_int_mask[NUM_STS_DESC_RINGS]; +}; + +/* Following defines are for the state of the buffers */ +#define NETXEN_BUFFER_FREE 0 +#define NETXEN_BUFFER_BUSY 1 + +/* + * There will be one netxen_buffer per skb packet. These will be + * used to save the dma info for pci_unmap_page() + */ +struct netxen_cmd_buffer { + struct sk_buff *skb; + struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1]; + u32 frag_count; +}; + +/* In rx_buffer, we do not need multiple fragments as is a single buffer */ +struct netxen_rx_buffer { + struct list_head list; + struct sk_buff *skb; + u64 dma; + u16 ref_handle; + u16 state; +}; + +/* Board types */ +#define NETXEN_NIC_GBE 0x01 +#define NETXEN_NIC_XGBE 0x02 + +/* + * One hardware_context{} per adapter + * contains interrupt info as well shared hardware info. + */ +struct netxen_hardware_context { + void __iomem *pci_base0; + void __iomem *pci_base1; + void __iomem *pci_base2; + void __iomem *db_base; + void __iomem *ocm_win_crb; + + unsigned long db_len; + unsigned long pci_len0; + + u32 ocm_win; + u32 crb_win; + + rwlock_t crb_lock; + spinlock_t mem_lock; + + u8 cut_through; + u8 revision_id; + u8 pci_func; + u8 linkup; + u16 port_type; + u16 board_type; +}; + +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 + +struct netxen_adapter_stats { + u64 xmitcalled; + u64 xmitfinished; + u64 rxdropped; + u64 txdropped; + u64 csummed; + u64 rx_pkts; + u64 lro_pkts; + u64 rxbytes; + u64 txbytes; +}; + +/* + * Rcv Descriptor Context. One such per Rcv Descriptor. There may + * be one Rcv Descriptor for normal packets, one for jumbo and may be others. + */ +struct nx_host_rds_ring { + u32 producer; + u32 num_desc; + u32 dma_size; + u32 skb_size; + u32 flags; + void __iomem *crb_rcv_producer; + struct rcv_desc *desc_head; + struct netxen_rx_buffer *rx_buf_arr; + struct list_head free_list; + spinlock_t lock; + dma_addr_t phys_addr; +}; + +struct nx_host_sds_ring { + u32 consumer; + u32 num_desc; + void __iomem *crb_sts_consumer; + void __iomem *crb_intr_mask; + + struct status_desc *desc_head; + struct netxen_adapter *adapter; + struct napi_struct napi; + struct list_head free_list[NUM_RCV_DESC_RINGS]; + + int irq; + + dma_addr_t phys_addr; + char name[IFNAMSIZ+4]; +}; + +struct nx_host_tx_ring { + u32 producer; + __le32 *hw_consumer; + u32 sw_consumer; + void __iomem *crb_cmd_producer; + void __iomem *crb_cmd_consumer; + u32 num_desc; + + struct netdev_queue *txq; + + struct netxen_cmd_buffer *cmd_buf_arr; + struct cmd_desc_type0 *desc_head; + dma_addr_t phys_addr; +}; + +/* + * Receive context. There is one such structure per instance of the + * receive processing. Any state information that is relevant to + * the receive, and is must be in this structure. 
The global data may be + * present elsewhere. + */ +struct netxen_recv_context { + u32 state; + u16 context_id; + u16 virt_port; + + struct nx_host_rds_ring *rds_rings; + struct nx_host_sds_ring *sds_rings; + + struct netxen_ring_ctx *hwctx; + dma_addr_t phys_addr; +}; + +/* New HW context creation */ + +#define NX_OS_CRB_RETRY_COUNT 4000 +#define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \ + (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16)) + +#define NX_CDRP_CLEAR 0x00000000 +#define NX_CDRP_CMD_BIT 0x80000000 + +/* + * All responses must have the NX_CDRP_CMD_BIT cleared + * in the crb NX_CDRP_CRB_OFFSET. + */ +#define NX_CDRP_FORM_RSP(rsp) (rsp) +#define NX_CDRP_IS_RSP(rsp) (((rsp) & NX_CDRP_CMD_BIT) == 0) + +#define NX_CDRP_RSP_OK 0x00000001 +#define NX_CDRP_RSP_FAIL 0x00000002 +#define NX_CDRP_RSP_TIMEOUT 0x00000003 + +/* + * All commands must have the NX_CDRP_CMD_BIT set in + * the crb NX_CDRP_CRB_OFFSET. + */ +#define NX_CDRP_FORM_CMD(cmd) (NX_CDRP_CMD_BIT | (cmd)) +#define NX_CDRP_IS_CMD(cmd) (((cmd) & NX_CDRP_CMD_BIT) != 0) + +#define NX_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001 +#define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002 +#define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003 +#define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004 +#define NX_CDRP_CMD_READ_MAX_RX_CTX 0x00000005 +#define NX_CDRP_CMD_READ_MAX_TX_CTX 0x00000006 +#define NX_CDRP_CMD_CREATE_RX_CTX 0x00000007 +#define NX_CDRP_CMD_DESTROY_RX_CTX 0x00000008 +#define NX_CDRP_CMD_CREATE_TX_CTX 0x00000009 +#define NX_CDRP_CMD_DESTROY_TX_CTX 0x0000000a +#define NX_CDRP_CMD_SETUP_STATISTICS 0x0000000e +#define NX_CDRP_CMD_GET_STATISTICS 0x0000000f +#define NX_CDRP_CMD_DELETE_STATISTICS 0x00000010 +#define NX_CDRP_CMD_SET_MTU 0x00000012 +#define NX_CDRP_CMD_READ_PHY 0x00000013 +#define NX_CDRP_CMD_WRITE_PHY 0x00000014 +#define NX_CDRP_CMD_READ_HW_REG 0x00000015 +#define NX_CDRP_CMD_GET_FLOW_CTL 0x00000016 +#define NX_CDRP_CMD_SET_FLOW_CTL 0x00000017 +#define NX_CDRP_CMD_READ_MAX_MTU 0x00000018 +#define NX_CDRP_CMD_READ_MAX_LRO 0x00000019 +#define NX_CDRP_CMD_CONFIGURE_TOE 0x0000001a +#define NX_CDRP_CMD_FUNC_ATTRIB 0x0000001b +#define NX_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c +#define NX_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d +#define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e +#define NX_CDRP_CMD_CONFIG_GBE_PORT 0x0000001f +#define NX_CDRP_CMD_MAX 0x00000020 + +#define NX_RCODE_SUCCESS 0 +#define NX_RCODE_NO_HOST_MEM 1 +#define NX_RCODE_NO_HOST_RESOURCE 2 +#define NX_RCODE_NO_CARD_CRB 3 +#define NX_RCODE_NO_CARD_MEM 4 +#define NX_RCODE_NO_CARD_RESOURCE 5 +#define NX_RCODE_INVALID_ARGS 6 +#define NX_RCODE_INVALID_ACTION 7 +#define NX_RCODE_INVALID_STATE 8 +#define NX_RCODE_NOT_SUPPORTED 9 +#define NX_RCODE_NOT_PERMITTED 10 +#define NX_RCODE_NOT_READY 11 +#define NX_RCODE_DOES_NOT_EXIST 12 +#define NX_RCODE_ALREADY_EXISTS 13 +#define NX_RCODE_BAD_SIGNATURE 14 +#define NX_RCODE_CMD_NOT_IMPL 15 +#define NX_RCODE_CMD_INVALID 16 +#define NX_RCODE_TIMEOUT 17 +#define NX_RCODE_CMD_FAILED 18 +#define NX_RCODE_MAX_EXCEEDED 19 +#define NX_RCODE_MAX 20 + +#define NX_DESTROY_CTX_RESET 0 +#define NX_DESTROY_CTX_D3_RESET 1 +#define NX_DESTROY_CTX_MAX 2 + +/* + * Capabilities + */ +#define NX_CAP_BIT(class, bit) (1 << bit) +#define NX_CAP0_LEGACY_CONTEXT NX_CAP_BIT(0, 0) +#define NX_CAP0_MULTI_CONTEXT NX_CAP_BIT(0, 1) +#define NX_CAP0_LEGACY_MN NX_CAP_BIT(0, 2) +#define NX_CAP0_LEGACY_MS NX_CAP_BIT(0, 3) +#define NX_CAP0_CUT_THROUGH NX_CAP_BIT(0, 4) +#define NX_CAP0_LRO NX_CAP_BIT(0, 5) +#define NX_CAP0_LSO NX_CAP_BIT(0, 
6) +#define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7) +#define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8) +#define NX_CAP0_HW_LRO NX_CAP_BIT(0, 10) + +/* + * Context state + */ +#define NX_HOST_CTX_STATE_FREED 0 +#define NX_HOST_CTX_STATE_ALLOCATED 1 +#define NX_HOST_CTX_STATE_ACTIVE 2 +#define NX_HOST_CTX_STATE_DISABLED 3 +#define NX_HOST_CTX_STATE_QUIESCED 4 +#define NX_HOST_CTX_STATE_MAX 5 + +/* + * Rx context + */ + +typedef struct { + __le64 host_phys_addr; /* Ring base addr */ + __le32 ring_size; /* Ring entries */ + __le16 msi_index; + __le16 rsvd; /* Padding */ +} nx_hostrq_sds_ring_t; + +typedef struct { + __le64 host_phys_addr; /* Ring base addr */ + __le64 buff_size; /* Packet buffer size */ + __le32 ring_size; /* Ring entries */ + __le32 ring_kind; /* Class of ring */ +} nx_hostrq_rds_ring_t; + +typedef struct { + __le64 host_rsp_dma_addr; /* Response dma'd here */ + __le32 capabilities[4]; /* Flag bit vector */ + __le32 host_int_crb_mode; /* Interrupt crb usage */ + __le32 host_rds_crb_mode; /* RDS crb usage */ + /* These ring offsets are relative to data[0] below */ + __le32 rds_ring_offset; /* Offset to RDS config */ + __le32 sds_ring_offset; /* Offset to SDS config */ + __le16 num_rds_rings; /* Count of RDS rings */ + __le16 num_sds_rings; /* Count of SDS rings */ + __le16 rsvd1; /* Padding */ + __le16 rsvd2; /* Padding */ + u8 reserved[128]; /* reserve space for future expansion*/ + /* MUST BE 64-bit aligned. + The following is packed: + - N hostrq_rds_rings + - N hostrq_sds_rings */ + char data[0]; +} nx_hostrq_rx_ctx_t; + +typedef struct { + __le32 host_producer_crb; /* Crb to use */ + __le32 rsvd1; /* Padding */ +} nx_cardrsp_rds_ring_t; + +typedef struct { + __le32 host_consumer_crb; /* Crb to use */ + __le32 interrupt_crb; /* Crb to use */ +} nx_cardrsp_sds_ring_t; + +typedef struct { + /* These ring offsets are relative to data[0] below */ + __le32 rds_ring_offset; /* Offset to RDS config */ + __le32 sds_ring_offset; /* Offset to SDS config */ + __le32 host_ctx_state; /* Starting State */ + __le32 num_fn_per_port; /* How many PCI fn share the port */ + __le16 num_rds_rings; /* Count of RDS rings */ + __le16 num_sds_rings; /* Count of SDS rings */ + __le16 context_id; /* Handle for context */ + u8 phys_port; /* Physical id of port */ + u8 virt_port; /* Virtual/Logical id of port */ + u8 reserved[128]; /* save space for future expansion */ + /* MUST BE 64-bit aligned. 
+ The following is packed: + - N cardrsp_rds_rings + - N cardrs_sds_rings */ + char data[0]; +} nx_cardrsp_rx_ctx_t; + +#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \ + (sizeof(HOSTRQ_RX) + \ + (rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) + \ + (sds_rings)*(sizeof(nx_hostrq_sds_ring_t))) + +#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \ + (sizeof(CARDRSP_RX) + \ + (rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) + \ + (sds_rings)*(sizeof(nx_cardrsp_sds_ring_t))) + +/* + * Tx context + */ + +typedef struct { + __le64 host_phys_addr; /* Ring base addr */ + __le32 ring_size; /* Ring entries */ + __le32 rsvd; /* Padding */ +} nx_hostrq_cds_ring_t; + +typedef struct { + __le64 host_rsp_dma_addr; /* Response dma'd here */ + __le64 cmd_cons_dma_addr; /* */ + __le64 dummy_dma_addr; /* */ + __le32 capabilities[4]; /* Flag bit vector */ + __le32 host_int_crb_mode; /* Interrupt crb usage */ + __le32 rsvd1; /* Padding */ + __le16 rsvd2; /* Padding */ + __le16 interrupt_ctl; + __le16 msi_index; + __le16 rsvd3; /* Padding */ + nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */ + u8 reserved[128]; /* future expansion */ +} nx_hostrq_tx_ctx_t; + +typedef struct { + __le32 host_producer_crb; /* Crb to use */ + __le32 interrupt_crb; /* Crb to use */ +} nx_cardrsp_cds_ring_t; + +typedef struct { + __le32 host_ctx_state; /* Starting state */ + __le16 context_id; /* Handle for context */ + u8 phys_port; /* Physical id of port */ + u8 virt_port; /* Virtual/Logical id of port */ + nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */ + u8 reserved[128]; /* future expansion */ +} nx_cardrsp_tx_ctx_t; + +#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX)) +#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX)) + +/* CRB */ + +#define NX_HOST_RDS_CRB_MODE_UNIQUE 0 +#define NX_HOST_RDS_CRB_MODE_SHARED 1 +#define NX_HOST_RDS_CRB_MODE_CUSTOM 2 +#define NX_HOST_RDS_CRB_MODE_MAX 3 + +#define NX_HOST_INT_CRB_MODE_UNIQUE 0 +#define NX_HOST_INT_CRB_MODE_SHARED 1 +#define NX_HOST_INT_CRB_MODE_NORX 2 +#define NX_HOST_INT_CRB_MODE_NOTX 3 +#define NX_HOST_INT_CRB_MODE_NORXTX 4 + + +/* MAC */ + +#define MC_COUNT_P2 16 +#define MC_COUNT_P3 38 + +#define NETXEN_MAC_NOOP 0 +#define NETXEN_MAC_ADD 1 +#define NETXEN_MAC_DEL 2 + +typedef struct nx_mac_list_s { + struct list_head list; + uint8_t mac_addr[ETH_ALEN+2]; +} nx_mac_list_t; + +struct nx_vlan_ip_list { + struct list_head list; + u32 ip_addr; +}; + +/* + * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is + * adjusted based on configured MTU. 
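+ * As a concrete reading of the defaults below: receive interrupts are
+ * coalesced up to 256 packets or 3 us, and transmit completions up to
+ * 64 packets or 4 us.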
+ */ +#define NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US 3 +#define NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS 256 +#define NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS 64 +#define NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US 4 + +#define NETXEN_NIC_INTR_DEFAULT 0x04 + +typedef union { + struct { + uint16_t rx_packets; + uint16_t rx_time_us; + uint16_t tx_packets; + uint16_t tx_time_us; + } data; + uint64_t word; +} nx_nic_intr_coalesce_data_t; + +typedef struct { + uint16_t stats_time_us; + uint16_t rate_sample_time; + uint16_t flags; + uint16_t rsvd_1; + uint32_t low_threshold; + uint32_t high_threshold; + nx_nic_intr_coalesce_data_t normal; + nx_nic_intr_coalesce_data_t low; + nx_nic_intr_coalesce_data_t high; + nx_nic_intr_coalesce_data_t irq; +} nx_nic_intr_coalesce_t; + +#define NX_HOST_REQUEST 0x13 +#define NX_NIC_REQUEST 0x14 + +#define NX_MAC_EVENT 0x1 + +#define NX_IP_UP 2 +#define NX_IP_DOWN 3 + +/* + * Driver --> Firmware + */ +#define NX_NIC_H2C_OPCODE_START 0 +#define NX_NIC_H2C_OPCODE_CONFIG_RSS 1 +#define NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL 2 +#define NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3 +#define NX_NIC_H2C_OPCODE_CONFIG_LED 4 +#define NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5 +#define NX_NIC_H2C_OPCODE_CONFIG_L2_MAC 6 +#define NX_NIC_H2C_OPCODE_LRO_REQUEST 7 +#define NX_NIC_H2C_OPCODE_GET_SNMP_STATS 8 +#define NX_NIC_H2C_OPCODE_PROXY_START_REQUEST 9 +#define NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST 10 +#define NX_NIC_H2C_OPCODE_PROXY_SET_MTU 11 +#define NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12 +#define NX_NIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13 +#define NX_NIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14 +#define NX_NIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15 +#define NX_NIC_H2C_OPCODE_GET_NET_STATS 16 +#define NX_NIC_H2C_OPCODE_PROXY_UPDATE_P2V 17 +#define NX_NIC_H2C_OPCODE_CONFIG_IPADDR 18 +#define NX_NIC_H2C_OPCODE_CONFIG_LOOPBACK 19 +#define NX_NIC_H2C_OPCODE_PROXY_STOP_DONE 20 +#define NX_NIC_H2C_OPCODE_GET_LINKEVENT 21 +#define NX_NIC_C2C_OPCODE 22 +#define NX_NIC_H2C_OPCODE_CONFIG_BRIDGING 23 +#define NX_NIC_H2C_OPCODE_CONFIG_HW_LRO 24 +#define NX_NIC_H2C_OPCODE_LAST 25 + +/* + * Firmware --> Driver + */ + +#define NX_NIC_C2H_OPCODE_START 128 +#define NX_NIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129 +#define NX_NIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130 +#define NX_NIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131 +#define NX_NIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132 +#define NX_NIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133 +#define NX_NIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134 +#define NX_NIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135 +#define NX_NIC_C2H_OPCODE_GET_SNMP_STATS 136 +#define NX_NIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137 +#define NX_NIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138 +#define NX_NIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139 +#define NX_NIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140 +#define NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141 +#define NX_NIC_C2H_OPCODE_LAST 142 + +#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ +#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ +#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ + +#define NX_NIC_LRO_REQUEST_FIRST 0 +#define NX_NIC_LRO_REQUEST_ADD_FLOW 1 +#define NX_NIC_LRO_REQUEST_DELETE_FLOW 2 +#define NX_NIC_LRO_REQUEST_TIMER 3 +#define NX_NIC_LRO_REQUEST_CLEANUP 4 +#define NX_NIC_LRO_REQUEST_ADD_FLOW_SCHEDULED 5 +#define NX_TOE_LRO_REQUEST_ADD_FLOW 6 +#define NX_TOE_LRO_REQUEST_ADD_FLOW_RESPONSE 7 +#define NX_TOE_LRO_REQUEST_DELETE_FLOW 8 +#define NX_TOE_LRO_REQUEST_DELETE_FLOW_RESPONSE 9 +#define 
NX_TOE_LRO_REQUEST_TIMER 10 +#define NX_NIC_LRO_REQUEST_LAST 11 + +#define NX_FW_CAPABILITY_LINK_NOTIFICATION (1 << 5) +#define NX_FW_CAPABILITY_SWITCHING (1 << 6) +#define NX_FW_CAPABILITY_PEXQ (1 << 7) +#define NX_FW_CAPABILITY_BDG (1 << 8) +#define NX_FW_CAPABILITY_FVLANTX (1 << 9) +#define NX_FW_CAPABILITY_HW_LRO (1 << 10) +#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11) + +/* module types */ +#define LINKEVENT_MODULE_NOT_PRESENT 1 +#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2 +#define LINKEVENT_MODULE_OPTICAL_SRLR 3 +#define LINKEVENT_MODULE_OPTICAL_LRM 4 +#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5 +#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6 +#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7 +#define LINKEVENT_MODULE_TWINAX 8 + +#define LINKSPEED_10GBPS 10000 +#define LINKSPEED_1GBPS 1000 +#define LINKSPEED_100MBPS 100 +#define LINKSPEED_10MBPS 10 + +#define LINKSPEED_ENCODED_10MBPS 0 +#define LINKSPEED_ENCODED_100MBPS 1 +#define LINKSPEED_ENCODED_1GBPS 2 + +#define LINKEVENT_AUTONEG_DISABLED 0 +#define LINKEVENT_AUTONEG_ENABLED 1 + +#define LINKEVENT_HALF_DUPLEX 0 +#define LINKEVENT_FULL_DUPLEX 1 + +#define LINKEVENT_LINKSPEED_MBPS 0 +#define LINKEVENT_LINKSPEED_ENCODED 1 + +#define AUTO_FW_RESET_ENABLED 0xEF10AF12 +#define AUTO_FW_RESET_DISABLED 0xDCBAAF12 + +/* firmware response header: + * 63:58 - message type + * 57:56 - owner + * 55:53 - desc count + * 52:48 - reserved + * 47:40 - completion id + * 39:32 - opcode + * 31:16 - error code + * 15:00 - reserved + */ +#define netxen_get_nic_msgtype(msg_hdr) \ + ((msg_hdr >> 58) & 0x3F) +#define netxen_get_nic_msg_compid(msg_hdr) \ + ((msg_hdr >> 40) & 0xFF) +#define netxen_get_nic_msg_opcode(msg_hdr) \ + ((msg_hdr >> 32) & 0xFF) +#define netxen_get_nic_msg_errcode(msg_hdr) \ + ((msg_hdr >> 16) & 0xFFFF) + +typedef struct { + union { + struct { + u64 hdr; + u64 body[7]; + }; + u64 words[8]; + }; +} nx_fw_msg_t; + +typedef struct { + __le64 qhdr; + __le64 req_hdr; + __le64 words[6]; +} nx_nic_req_t; + +typedef struct { + u8 op; + u8 tag; + u8 mac_addr[6]; +} nx_mac_req_t; + +#define MAX_PENDING_DESC_BLOCK_SIZE 64 + +#define NETXEN_NIC_MSI_ENABLED 0x02 +#define NETXEN_NIC_MSIX_ENABLED 0x04 +#define NETXEN_NIC_LRO_ENABLED 0x08 +#define NETXEN_NIC_LRO_DISABLED 0x00 +#define NETXEN_NIC_BRIDGE_ENABLED 0X10 +#define NETXEN_NIC_DIAG_ENABLED 0x20 +#define NETXEN_IS_MSI_FAMILY(adapter) \ + ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) + +#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS +#define NETXEN_MSIX_TBL_SPACE 8192 +#define NETXEN_PCI_REG_MSIX_TBL 0x44 + +#define NETXEN_DB_MAPSIZE_BYTES 0x1000 + +#define NETXEN_NETDEV_WEIGHT 128 +#define NETXEN_ADAPTER_UP_MAGIC 777 +#define NETXEN_NIC_PEG_TUNE 0 + +#define __NX_FW_ATTACHED 0 +#define __NX_DEV_UP 1 +#define __NX_RESETTING 2 + +struct netxen_dummy_dma { + void *addr; + dma_addr_t phys_addr; +}; + +struct netxen_adapter { + struct netxen_hardware_context ahw; + + struct net_device *netdev; + struct pci_dev *pdev; + struct list_head mac_list; + struct list_head vlan_ip_list; + + spinlock_t tx_clean_lock; + + u16 num_txd; + u16 num_rxd; + u16 num_jumbo_rxd; + u16 num_lro_rxd; + + u8 max_rds_rings; + u8 max_sds_rings; + u8 driver_mismatch; + u8 msix_supported; + u8 __pad; + u8 pci_using_dac; + u8 portnum; + u8 physical_port; + + u8 mc_enabled; + u8 max_mc_count; + u8 rss_supported; + u8 link_changed; + u8 fw_wait_cnt; + u8 fw_fail_cnt; + u8 tx_timeo_cnt; + u8 need_fw_reset; + + u8 has_link_events; + u8 fw_type; + u16 tx_context_id; + u16 mtu; + u16 is_up; 
+ + u16 link_speed; + u16 link_duplex; + u16 link_autoneg; + u16 module_type; + + u32 capabilities; + u32 flags; + u32 irq; + u32 temp; + + u32 int_vec_bit; + u32 heartbit; + + u8 mac_addr[ETH_ALEN]; + + struct netxen_adapter_stats stats; + + struct netxen_recv_context recv_ctx; + struct nx_host_tx_ring *tx_ring; + + int (*macaddr_set) (struct netxen_adapter *, u8 *); + int (*set_mtu) (struct netxen_adapter *, int); + int (*set_promisc) (struct netxen_adapter *, u32); + void (*set_multi) (struct net_device *); + int (*phy_read) (struct netxen_adapter *, u32 reg, u32 *); + int (*phy_write) (struct netxen_adapter *, u32 reg, u32 val); + int (*init_port) (struct netxen_adapter *, int); + int (*stop_port) (struct netxen_adapter *); + + u32 (*crb_read)(struct netxen_adapter *, ulong); + int (*crb_write)(struct netxen_adapter *, ulong, u32); + + int (*pci_mem_read)(struct netxen_adapter *, u64, u64 *); + int (*pci_mem_write)(struct netxen_adapter *, u64, u64); + + int (*pci_set_window)(struct netxen_adapter *, u64, u32 *); + + u32 (*io_read)(struct netxen_adapter *, void __iomem *); + void (*io_write)(struct netxen_adapter *, void __iomem *, u32); + + void __iomem *tgt_mask_reg; + void __iomem *pci_int_reg; + void __iomem *tgt_status_reg; + void __iomem *crb_int_state_reg; + void __iomem *isr_int_vec; + + struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER]; + + struct netxen_dummy_dma dummy_dma; + + struct delayed_work fw_work; + + struct work_struct tx_timeout_task; + + nx_nic_intr_coalesce_t coal; + + unsigned long state; + __le32 file_prd_off; /*File fw product offset*/ + u32 fw_version; + const struct firmware *fw; +}; + +int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val); +int nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val); + +#define NXRD32(adapter, off) \ + (adapter->crb_read(adapter, off)) +#define NXWR32(adapter, off, val) \ + (adapter->crb_write(adapter, off, val)) +#define NXRDIO(adapter, addr) \ + (adapter->io_read(adapter, addr)) +#define NXWRIO(adapter, addr, val) \ + (adapter->io_write(adapter, addr, val)) + +int netxen_pcie_sem_lock(struct netxen_adapter *, int, u32); +void netxen_pcie_sem_unlock(struct netxen_adapter *, int); + +#define netxen_rom_lock(a) \ + netxen_pcie_sem_lock((a), 2, NETXEN_ROM_LOCK_ID) +#define netxen_rom_unlock(a) \ + netxen_pcie_sem_unlock((a), 2) +#define netxen_phy_lock(a) \ + netxen_pcie_sem_lock((a), 3, NETXEN_PHY_LOCK_ID) +#define netxen_phy_unlock(a) \ + netxen_pcie_sem_unlock((a), 3) +#define netxen_api_lock(a) \ + netxen_pcie_sem_lock((a), 5, 0) +#define netxen_api_unlock(a) \ + netxen_pcie_sem_unlock((a), 5) +#define netxen_sw_lock(a) \ + netxen_pcie_sem_lock((a), 6, 0) +#define netxen_sw_unlock(a) \ + netxen_pcie_sem_unlock((a), 6) +#define crb_win_lock(a) \ + netxen_pcie_sem_lock((a), 7, NETXEN_CRB_WIN_LOCK_ID) +#define crb_win_unlock(a) \ + netxen_pcie_sem_unlock((a), 7) + +int netxen_nic_get_board_info(struct netxen_adapter *adapter); +int netxen_nic_wol_supported(struct netxen_adapter *adapter); + +/* Functions from netxen_nic_init.c */ +int netxen_init_dummy_dma(struct netxen_adapter *adapter); +void netxen_free_dummy_dma(struct netxen_adapter *adapter); + +int netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter); +int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val); +int netxen_load_firmware(struct netxen_adapter *adapter); +int netxen_need_fw_reset(struct netxen_adapter *adapter); +void netxen_request_firmware(struct netxen_adapter *adapter); +void 
netxen_release_firmware(struct netxen_adapter *adapter); +int netxen_pinit_from_rom(struct netxen_adapter *adapter); + +int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); +int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, + u8 *bytes, size_t size); +int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr, + u8 *bytes, size_t size); +int netxen_flash_unlock(struct netxen_adapter *adapter); +int netxen_backup_crbinit(struct netxen_adapter *adapter); +int netxen_flash_erase_secondary(struct netxen_adapter *adapter); +int netxen_flash_erase_primary(struct netxen_adapter *adapter); +void netxen_halt_pegs(struct netxen_adapter *adapter); + +int netxen_rom_se(struct netxen_adapter *adapter, int addr); + +int netxen_alloc_sw_resources(struct netxen_adapter *adapter); +void netxen_free_sw_resources(struct netxen_adapter *adapter); + +void netxen_setup_hwops(struct netxen_adapter *adapter); +void __iomem *netxen_get_ioaddr(struct netxen_adapter *, u32); + +int netxen_alloc_hw_resources(struct netxen_adapter *adapter); +void netxen_free_hw_resources(struct netxen_adapter *adapter); + +void netxen_release_rx_buffers(struct netxen_adapter *adapter); +void netxen_release_tx_buffers(struct netxen_adapter *adapter); + +int netxen_init_firmware(struct netxen_adapter *adapter); +void netxen_nic_clear_stats(struct netxen_adapter *adapter); +void netxen_watchdog_task(struct work_struct *work); +void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, + struct nx_host_rds_ring *rds_ring); +int netxen_process_cmd_ring(struct netxen_adapter *adapter); +int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max); + +void netxen_p3_free_mac_list(struct netxen_adapter *adapter); +int netxen_config_intr_coalesce(struct netxen_adapter *adapter); +int netxen_config_rss(struct netxen_adapter *adapter, int enable); +int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd); +int netxen_linkevent_request(struct netxen_adapter *adapter, int enable); +void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup); +void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *); +void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64); + +int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter, + u32 speed, u32 duplex, u32 autoneg); +int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); +int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); +int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable); +int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable); +int netxen_send_lro_cleanup(struct netxen_adapter *adapter); + +void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, + struct nx_host_tx_ring *tx_ring); + +/* Functions from netxen_nic_main.c */ +int netxen_nic_reset_context(struct netxen_adapter *); + +/* + * NetXen Board information + */ + +#define NETXEN_MAX_SHORT_NAME 32 +struct netxen_brdinfo { + int brdtype; /* type of board */ + long ports; /* max no of physical ports */ + char short_name[NETXEN_MAX_SHORT_NAME]; +}; + +static const struct netxen_brdinfo netxen_boards[] = { + {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"}, + {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"}, + {NETXEN_BRDTYPE_P2_SB31_10G_IMEZ, 2, "XGb IMEZ"}, + {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"}, + {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"}, + {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"}, + {NETXEN_BRDTYPE_P3_REF_QG, 4, "Reference Quad Gig 
"}, + {NETXEN_BRDTYPE_P3_HMEZ, 2, "Dual XGb HMEZ"}, + {NETXEN_BRDTYPE_P3_10G_CX4_LP, 2, "Dual XGb CX4 LP"}, + {NETXEN_BRDTYPE_P3_4_GB, 4, "Quad Gig LP"}, + {NETXEN_BRDTYPE_P3_IMEZ, 2, "Dual XGb IMEZ"}, + {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, + {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, + {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, + {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"}, + {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"}, + {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"}, + {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, + {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} +}; + +#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) + +static inline void get_brd_name_by_type(u32 type, char *name) +{ + int i, found = 0; + for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { + if (netxen_boards[i].brdtype == type) { + strcpy(name, netxen_boards[i].short_name); + found = 1; + break; + } + + } + if (!found) + name = "Unknown"; +} + +static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring) +{ + smp_mb(); + return find_diff_among(tx_ring->producer, + tx_ring->sw_consumer, tx_ring->num_desc); + +} + +int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac); +int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac); +extern void netxen_change_ringparam(struct netxen_adapter *adapter); +extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, + int *valp); + +extern const struct ethtool_ops netxen_nic_ethtool_ops; + +#endif /* __NETXEN_NIC_H_ */ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c new file mode 100644 index 000000000000..a925392abd6f --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -0,0 +1,793 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ * + */ + +#include "netxen_nic_hw.h" +#include "netxen_nic.h" + +#define NXHAL_VERSION 1 + +static u32 +netxen_poll_rsp(struct netxen_adapter *adapter) +{ + u32 rsp = NX_CDRP_RSP_OK; + int timeout = 0; + + do { + /* give at least 1ms for firmware to respond */ + msleep(1); + + if (++timeout > NX_OS_CRB_RETRY_COUNT) + return NX_CDRP_RSP_TIMEOUT; + + rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET); + } while (!NX_CDRP_IS_RSP(rsp)); + + return rsp; +} + +static u32 +netxen_issue_cmd(struct netxen_adapter *adapter, + u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd) +{ + u32 rsp; + u32 signature = 0; + u32 rcode = NX_RCODE_SUCCESS; + + signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version); + + /* Acquire semaphore before accessing CRB */ + if (netxen_api_lock(adapter)) + return NX_RCODE_TIMEOUT; + + NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature); + + NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1); + + NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2); + + NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3); + + NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd)); + + rsp = netxen_poll_rsp(adapter); + + if (rsp == NX_CDRP_RSP_TIMEOUT) { + printk(KERN_ERR "%s: card response timeout.\n", + netxen_nic_driver_name); + + rcode = NX_RCODE_TIMEOUT; + } else if (rsp == NX_CDRP_RSP_FAIL) { + rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET); + + printk(KERN_ERR "%s: failed card response, code: 0x%x\n", + netxen_nic_driver_name, rcode); + } + + /* Release semaphore */ + netxen_api_unlock(adapter); + + return rcode; +} + +int +nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) +{ + u32 rcode = NX_RCODE_SUCCESS; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) + rcode = netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + recv_ctx->context_id, + mtu, + 0, + NX_CDRP_CMD_SET_MTU); + + if (rcode != NX_RCODE_SUCCESS) + return -EIO; + + return 0; +} + +int +nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter, + u32 speed, u32 duplex, u32 autoneg) +{ + + return netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + speed, + duplex, + autoneg, + NX_CDRP_CMD_CONFIG_GBE_PORT); + +} + +static int +nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter) +{ + void *addr; + nx_hostrq_rx_ctx_t *prq; + nx_cardrsp_rx_ctx_t *prsp; + nx_hostrq_rds_ring_t *prq_rds; + nx_hostrq_sds_ring_t *prq_sds; + nx_cardrsp_rds_ring_t *prsp_rds; + nx_cardrsp_sds_ring_t *prsp_sds; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + + dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; + u64 phys_addr; + + int i, nrds_rings, nsds_rings; + size_t rq_size, rsp_size; + u32 cap, reg, val; + + int err; + + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + nrds_rings = adapter->max_rds_rings; + nsds_rings = adapter->max_sds_rings; + + rq_size = + SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings); + rsp_size = + SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings); + + addr = pci_alloc_consistent(adapter->pdev, + rq_size, &hostrq_phys_addr); + if (addr == NULL) + return -ENOMEM; + prq = addr; + + addr = pci_alloc_consistent(adapter->pdev, + rsp_size, &cardrsp_phys_addr); + if (addr == NULL) { + err = -ENOMEM; + goto out_free_rq; + } + prsp = addr; + + prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); + + cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN); + cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS); + + prq->capabilities[0] = cpu_to_le32(cap); + prq->host_int_crb_mode =
cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); + prq->host_rds_crb_mode = + cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE); + + prq->num_rds_rings = cpu_to_le16(nrds_rings); + prq->num_sds_rings = cpu_to_le16(nsds_rings); + prq->rds_ring_offset = cpu_to_le32(0); + + val = le32_to_cpu(prq->rds_ring_offset) + + (sizeof(nx_hostrq_rds_ring_t) * nrds_rings); + prq->sds_ring_offset = cpu_to_le32(val); + + prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + + le32_to_cpu(prq->rds_ring_offset)); + + for (i = 0; i < nrds_rings; i++) { + + rds_ring = &recv_ctx->rds_rings[i]; + + prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); + prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); + prq_rds[i].ring_kind = cpu_to_le32(i); + prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); + } + + prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + + le32_to_cpu(prq->sds_ring_offset)); + + for (i = 0; i < nsds_rings; i++) { + + sds_ring = &recv_ctx->sds_rings[i]; + + prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); + prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); + prq_sds[i].msi_index = cpu_to_le16(i); + } + + phys_addr = hostrq_phys_addr; + err = netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + (u32)(phys_addr >> 32), + (u32)(phys_addr & 0xffffffff), + rq_size, + NX_CDRP_CMD_CREATE_RX_CTX); + if (err) { + printk(KERN_WARNING + "Failed to create rx ctx in firmware%d\n", err); + goto out_free_rsp; + } + + + prsp_rds = ((nx_cardrsp_rds_ring_t *) + &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); + + for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { + rds_ring = &recv_ctx->rds_rings[i]; + + reg = le32_to_cpu(prsp_rds[i].host_producer_crb); + rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter, + NETXEN_NIC_REG(reg - 0x200)); + } + + prsp_sds = ((nx_cardrsp_sds_ring_t *) + &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); + + for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { + sds_ring = &recv_ctx->sds_rings[i]; + + reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); + sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter, + NETXEN_NIC_REG(reg - 0x200)); + + reg = le32_to_cpu(prsp_sds[i].interrupt_crb); + sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter, + NETXEN_NIC_REG(reg - 0x200)); + } + + recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); + recv_ctx->context_id = le16_to_cpu(prsp->context_id); + recv_ctx->virt_port = prsp->virt_port; + +out_free_rsp: + pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr); +out_free_rq: + pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr); + return err; +} + +static void +nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + if (netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + recv_ctx->context_id, + NX_DESTROY_CTX_RESET, + 0, + NX_CDRP_CMD_DESTROY_RX_CTX)) { + + printk(KERN_WARNING + "%s: Failed to destroy rx ctx in firmware\n", + netxen_nic_driver_name); + } +} + +static int +nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter) +{ + nx_hostrq_tx_ctx_t *prq; + nx_hostrq_cds_ring_t *prq_cds; + nx_cardrsp_tx_ctx_t *prsp; + void *rq_addr, *rsp_addr; + size_t rq_size, rsp_size; + u32 temp; + int err = 0; + u64 offset, phys_addr; + dma_addr_t rq_phys_addr, rsp_phys_addr; + struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t); + rq_addr = pci_alloc_consistent(adapter->pdev, + rq_size, 
&rq_phys_addr); + if (!rq_addr) + return -ENOMEM; + + rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t); + rsp_addr = pci_alloc_consistent(adapter->pdev, + rsp_size, &rsp_phys_addr); + if (!rsp_addr) { + err = -ENOMEM; + goto out_free_rq; + } + + memset(rq_addr, 0, rq_size); + prq = rq_addr; + + memset(rsp_addr, 0, rsp_size); + prsp = rsp_addr; + + prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); + + temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO); + prq->capabilities[0] = cpu_to_le32(temp); + + prq->host_int_crb_mode = + cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); + + prq->interrupt_ctl = 0; + prq->msi_index = 0; + + prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr); + + offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx); + prq->cmd_cons_dma_addr = cpu_to_le64(offset); + + prq_cds = &prq->cds_ring; + + prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); + prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); + + phys_addr = rq_phys_addr; + err = netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + (u32)(phys_addr >> 32), + ((u32)phys_addr & 0xffffffff), + rq_size, + NX_CDRP_CMD_CREATE_TX_CTX); + + if (err == NX_RCODE_SUCCESS) { + temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); + tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, + NETXEN_NIC_REG(temp - 0x200)); +#if 0 + adapter->tx_state = + le32_to_cpu(prsp->host_ctx_state); +#endif + adapter->tx_context_id = + le16_to_cpu(prsp->context_id); + } else { + printk(KERN_WARNING + "Failed to create tx ctx in firmware%d\n", err); + err = -EIO; + } + + pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr); + +out_free_rq: + pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr); + + return err; +} + +static void +nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter) +{ + if (netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + adapter->tx_context_id, + NX_DESTROY_CTX_RESET, + 0, + NX_CDRP_CMD_DESTROY_TX_CTX)) { + + printk(KERN_WARNING + "%s: Failed to destroy tx ctx in firmware\n", + netxen_nic_driver_name); + } +} + +int +nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val) +{ + u32 rcode; + + rcode = netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + reg, + 0, + 0, + NX_CDRP_CMD_READ_PHY); + + if (rcode != NX_RCODE_SUCCESS) + return -EIO; + + return NXRD32(adapter, NX_ARG1_CRB_OFFSET); +} + +int +nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val) +{ + u32 rcode; + + rcode = netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + reg, + val, + 0, + NX_CDRP_CMD_WRITE_PHY); + + if (rcode != NX_RCODE_SUCCESS) + return -EIO; + + return 0; +} + +static u64 ctx_addr_sig_regs[][3] = { + {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)}, + {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)}, + {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)}, + {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)} +}; + +#define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0]) +#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2]) +#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1]) + +#define lower32(x) ((u32)((x) & 0xffffffff)) +#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff)) + +static struct netxen_recv_crb recv_crb_registers[] = { + /* Instance 0 */ + { + /* crb_rcv_producer: */ + { + NETXEN_NIC_REG(0x100), + /* Jumbo frames */ + 
NETXEN_NIC_REG(0x110), + /* LRO */ + NETXEN_NIC_REG(0x120) + }, + /* crb_sts_consumer: */ + { + NETXEN_NIC_REG(0x138), + NETXEN_NIC_REG_2(0x000), + NETXEN_NIC_REG_2(0x004), + NETXEN_NIC_REG_2(0x008), + }, + /* sw_int_mask */ + { + CRB_SW_INT_MASK_0, + NETXEN_NIC_REG_2(0x044), + NETXEN_NIC_REG_2(0x048), + NETXEN_NIC_REG_2(0x04c), + }, + }, + /* Instance 1 */ + { + /* crb_rcv_producer: */ + { + NETXEN_NIC_REG(0x144), + /* Jumbo frames */ + NETXEN_NIC_REG(0x154), + /* LRO */ + NETXEN_NIC_REG(0x164) + }, + /* crb_sts_consumer: */ + { + NETXEN_NIC_REG(0x17c), + NETXEN_NIC_REG_2(0x020), + NETXEN_NIC_REG_2(0x024), + NETXEN_NIC_REG_2(0x028), + }, + /* sw_int_mask */ + { + CRB_SW_INT_MASK_1, + NETXEN_NIC_REG_2(0x064), + NETXEN_NIC_REG_2(0x068), + NETXEN_NIC_REG_2(0x06c), + }, + }, + /* Instance 2 */ + { + /* crb_rcv_producer: */ + { + NETXEN_NIC_REG(0x1d8), + /* Jumbo frames */ + NETXEN_NIC_REG(0x1f8), + /* LRO */ + NETXEN_NIC_REG(0x208) + }, + /* crb_sts_consumer: */ + { + NETXEN_NIC_REG(0x220), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + }, + /* sw_int_mask */ + { + CRB_SW_INT_MASK_2, + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + }, + }, + /* Instance 3 */ + { + /* crb_rcv_producer: */ + { + NETXEN_NIC_REG(0x22c), + /* Jumbo frames */ + NETXEN_NIC_REG(0x23c), + /* LRO */ + NETXEN_NIC_REG(0x24c) + }, + /* crb_sts_consumer: */ + { + NETXEN_NIC_REG(0x264), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + }, + /* sw_int_mask */ + { + CRB_SW_INT_MASK_3, + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + }, + }, +}; + +static int +netxen_init_old_ctx(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct nx_host_tx_ring *tx_ring; + int ring; + int port = adapter->portnum; + struct netxen_ring_ctx *hwctx; + u32 signature; + + tx_ring = adapter->tx_ring; + recv_ctx = &adapter->recv_ctx; + hwctx = recv_ctx->hwctx; + + hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr); + hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc); + + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + + hwctx->rcv_rings[ring].addr = + cpu_to_le64(rds_ring->phys_addr); + hwctx->rcv_rings[ring].size = + cpu_to_le32(rds_ring->num_desc); + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + if (ring == 0) { + hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr); + hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc); + } + hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr); + hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc); + hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring); + } + hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings); + + signature = (adapter->max_sds_rings > 1) ? 
+ NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE; + + NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port), + lower32(recv_ctx->phys_addr)); + NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port), + upper32(recv_ctx->phys_addr)); + NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), + signature | port); + return 0; +} + +int netxen_alloc_hw_resources(struct netxen_adapter *adapter) +{ + void *addr; + int err = 0; + int ring; + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct nx_host_tx_ring *tx_ring; + + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int port = adapter->portnum; + + recv_ctx = &adapter->recv_ctx; + tx_ring = adapter->tx_ring; + + addr = pci_alloc_consistent(pdev, + sizeof(struct netxen_ring_ctx) + sizeof(uint32_t), + &recv_ctx->phys_addr); + if (addr == NULL) { + dev_err(&pdev->dev, "failed to allocate hw context\n"); + return -ENOMEM; + } + + memset(addr, 0, sizeof(struct netxen_ring_ctx)); + recv_ctx->hwctx = addr; + recv_ctx->hwctx->ctx_id = cpu_to_le32(port); + recv_ctx->hwctx->cmd_consumer_offset = + cpu_to_le64(recv_ctx->phys_addr + + sizeof(struct netxen_ring_ctx)); + tx_ring->hw_consumer = + (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx)); + + /* cmd desc ring */ + addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring), + &tx_ring->phys_addr); + + if (addr == NULL) { + dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n", + netdev->name); + err = -ENOMEM; + goto err_out_free; + } + + tx_ring->desc_head = addr; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + addr = pci_alloc_consistent(adapter->pdev, + RCV_DESC_RINGSIZE(rds_ring), + &rds_ring->phys_addr); + if (addr == NULL) { + dev_err(&pdev->dev, + "%s: failed to allocate rds ring [%d]\n", + netdev->name, ring); + err = -ENOMEM; + goto err_out_free; + } + rds_ring->desc_head = addr; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + rds_ring->crb_rcv_producer = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].crb_rcv_producer[ring]); + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + addr = pci_alloc_consistent(adapter->pdev, + STATUS_DESC_RINGSIZE(sds_ring), + &sds_ring->phys_addr); + if (addr == NULL) { + dev_err(&pdev->dev, + "%s: failed to allocate sds ring [%d]\n", + netdev->name, ring); + err = -ENOMEM; + goto err_out_free; + } + sds_ring->desc_head = addr; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + sds_ring->crb_sts_consumer = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].crb_sts_consumer[ring]); + + sds_ring->crb_intr_mask = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].sw_int_mask[ring]); + } + } + + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state)) + goto done; + err = nx_fw_cmd_create_rx_ctx(adapter); + if (err) + goto err_out_free; + err = nx_fw_cmd_create_tx_ctx(adapter); + if (err) + goto err_out_free; + } else { + err = netxen_init_old_ctx(adapter); + if (err) + goto err_out_free; + } + +done: + return 0; + +err_out_free: + netxen_free_hw_resources(adapter); + return err; +} + +void netxen_free_hw_resources(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct nx_host_tx_ring *tx_ring; + int ring; + + int port = adapter->portnum; + + if 
(!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state)) + goto done; + + nx_fw_cmd_destroy_rx_ctx(adapter); + nx_fw_cmd_destroy_tx_ctx(adapter); + } else { + netxen_api_lock(adapter); + NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), + NETXEN_CTX_D3_RESET | port); + netxen_api_unlock(adapter); + } + + /* Allow dma queues to drain after context reset */ + msleep(20); + +done: + recv_ctx = &adapter->recv_ctx; + + if (recv_ctx->hwctx != NULL) { + pci_free_consistent(adapter->pdev, + sizeof(struct netxen_ring_ctx) + + sizeof(uint32_t), + recv_ctx->hwctx, + recv_ctx->phys_addr); + recv_ctx->hwctx = NULL; + } + + tx_ring = adapter->tx_ring; + if (tx_ring->desc_head != NULL) { + pci_free_consistent(adapter->pdev, + TX_DESC_RINGSIZE(tx_ring), + tx_ring->desc_head, tx_ring->phys_addr); + tx_ring->desc_head = NULL; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + + if (rds_ring->desc_head != NULL) { + pci_free_consistent(adapter->pdev, + RCV_DESC_RINGSIZE(rds_ring), + rds_ring->desc_head, + rds_ring->phys_addr); + rds_ring->desc_head = NULL; + } + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + if (sds_ring->desc_head != NULL) { + pci_free_consistent(adapter->pdev, + STATUS_DESC_RINGSIZE(sds_ring), + sds_ring->desc_head, + sds_ring->phys_addr); + sds_ring->desc_head = NULL; + } + } +} + diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c new file mode 100644 index 000000000000..b34fb74d07e3 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -0,0 +1,835 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ * + */ + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> + +#include "netxen_nic.h" +#include "netxen_nic_hw.h" + +struct netxen_nic_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define NETXEN_NIC_STAT(m) sizeof(((struct netxen_adapter *)0)->m), \ + offsetof(struct netxen_adapter, m) + +#define NETXEN_NIC_PORT_WINDOW 0x10000 +#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF + +static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = { + {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)}, + {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)}, + {"rx_dropped", NETXEN_NIC_STAT(stats.rxdropped)}, + {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)}, + {"csummed", NETXEN_NIC_STAT(stats.csummed)}, + {"rx_pkts", NETXEN_NIC_STAT(stats.rx_pkts)}, + {"lro_pkts", NETXEN_NIC_STAT(stats.lro_pkts)}, + {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)}, + {"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)}, +}; + +#define NETXEN_NIC_STATS_LEN ARRAY_SIZE(netxen_nic_gstrings_stats) + +static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = { + "Register_Test_on_offline", + "Link_Test_on_offline" +}; + +#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test) + +#define NETXEN_NIC_REGS_COUNT 30 +#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32)) +#define NETXEN_MAX_EEPROM_LEN 1024 + +static int netxen_nic_get_eeprom_len(struct net_device *dev) +{ + return NETXEN_FLASH_TOTAL_SIZE; +} + +static void +netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 fw_major = 0; + u32 fw_minor = 0; + u32 fw_build = 0; + + strncpy(drvinfo->driver, netxen_nic_driver_name, 32); + strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32); + fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); + fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); + fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); + sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); + + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->regdump_len = NETXEN_NIC_REGS_LEN; + drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev); +} + +static int +netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + int check_sfp_module = 0; + + /* read which mode */ + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + ecmd->advertising = (ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + + ecmd->port = PORT_TP; + + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + ecmd->duplex = adapter->link_duplex; + ecmd->autoneg = adapter->link_autoneg; + + } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + u32 val; + + val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); + if (val == NETXEN_PORT_MODE_802_3_AP) { + ecmd->supported = SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_1000baseT_Full; + } else { + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full; + } + + if (netif_running(dev) && adapter->has_link_events) { + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + ecmd->autoneg = adapter->link_autoneg; + ecmd->duplex = adapter->link_duplex; + goto skip; + } + + ecmd->port = PORT_TP; + + if
(NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + u16 pcifn = adapter->ahw.pci_func; + + val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn)); + ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ * + P3_LINK_SPEED_VAL(pcifn, val)); + } else + ethtool_cmd_speed_set(ecmd, SPEED_10000); + + ecmd->duplex = DUPLEX_FULL; + ecmd->autoneg = AUTONEG_DISABLE; + } else + return -EIO; + +skip: + ecmd->phy_address = adapter->physical_port; + ecmd->transceiver = XCVR_EXTERNAL; + + switch (adapter->ahw.board_type) { + case NETXEN_BRDTYPE_P2_SB35_4G: + case NETXEN_BRDTYPE_P2_SB31_2G: + case NETXEN_BRDTYPE_P3_REF_QG: + case NETXEN_BRDTYPE_P3_4_GB: + case NETXEN_BRDTYPE_P3_4_GB_MM: + + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + case NETXEN_BRDTYPE_P2_SB31_10G_CX4: + case NETXEN_BRDTYPE_P3_10G_CX4: + case NETXEN_BRDTYPE_P3_10G_CX4_LP: + case NETXEN_BRDTYPE_P3_10000_BASE_T: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + ecmd->autoneg = (adapter->ahw.board_type == + NETXEN_BRDTYPE_P2_SB31_10G_CX4) ? + (AUTONEG_DISABLE) : (adapter->link_autoneg); + break; + case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: + case NETXEN_BRDTYPE_P3_IMEZ: + case NETXEN_BRDTYPE_P3_XG_LOM: + case NETXEN_BRDTYPE_P3_HMEZ: + ecmd->supported |= SUPPORTED_MII; + ecmd->advertising |= ADVERTISED_MII; + ecmd->port = PORT_MII; + ecmd->autoneg = AUTONEG_DISABLE; + break; + case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: + case NETXEN_BRDTYPE_P3_10G_SFP_CT: + case NETXEN_BRDTYPE_P3_10G_SFP_QT: + ecmd->advertising |= ADVERTISED_TP; + ecmd->supported |= SUPPORTED_TP; + check_sfp_module = netif_running(dev) && + adapter->has_link_events; + case NETXEN_BRDTYPE_P2_SB31_10G: + case NETXEN_BRDTYPE_P3_10G_XFP: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + ecmd->autoneg = AUTONEG_DISABLE; + break; + case NETXEN_BRDTYPE_P3_10G_TP: + if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + ecmd->advertising |= + (ADVERTISED_FIBRE | ADVERTISED_TP); + ecmd->port = PORT_FIBRE; + check_sfp_module = netif_running(dev) && + adapter->has_link_events; + } else { + ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg); + ecmd->advertising |= + (ADVERTISED_TP | ADVERTISED_Autoneg); + ecmd->port = PORT_TP; + } + break; + default: + printk(KERN_ERR "netxen-nic: Unsupported board model %d\n", + adapter->ahw.board_type); + return -EIO; + } + + if (check_sfp_module) { + switch (adapter->module_type) { + case LINKEVENT_MODULE_OPTICAL_UNKNOWN: + case LINKEVENT_MODULE_OPTICAL_SRLR: + case LINKEVENT_MODULE_OPTICAL_LRM: + case LINKEVENT_MODULE_OPTICAL_SFP_1G: + ecmd->port = PORT_FIBRE; + break; + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: + case LINKEVENT_MODULE_TWINAX: + ecmd->port = PORT_TP; + break; + default: + ecmd->port = -1; + } + } + + return 0; +} + +static int +netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(ecmd); + int ret; + + if (adapter->ahw.port_type != NETXEN_NIC_GBE) + return -EOPNOTSUPP; + + if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG)) + return -EOPNOTSUPP; + + ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex, + ecmd->autoneg); + if (ret == NX_RCODE_NOT_SUPPORTED) + return -EOPNOTSUPP; + else if (ret) + return -EIO; + + 
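+ /* Firmware accepted the new settings: cache them and, if the interface is up, restart it below so they take effect. */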
adapter->link_speed = speed; + adapter->link_duplex = ecmd->duplex; + adapter->link_autoneg = ecmd->autoneg; + + if (!netif_running(dev)) + return 0; + + dev->netdev_ops->ndo_stop(dev); + return dev->netdev_ops->ndo_open(dev); +} + +static int netxen_nic_get_regs_len(struct net_device *dev) +{ + return NETXEN_NIC_REGS_LEN; +} + +static void +netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + struct nx_host_sds_ring *sds_ring; + u32 *regs_buff = p; + int ring, i = 0; + int port = adapter->physical_port; + + memset(p, 0, NETXEN_NIC_REGS_LEN); + + regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | + (adapter->pdev)->device; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return; + + regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE); + regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE); + regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1); + regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg); + regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE); + regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); + regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); + regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2); + + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c); + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c); + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c); + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c); + i += 2; + + regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3); + regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); + + } else { + i++; + + regs_buff[i++] = NXRD32(adapter, + NETXEN_NIU_XGE_CONFIG_0+(0x10000*port)); + regs_buff[i++] = NXRD32(adapter, + NETXEN_NIU_XGE_CONFIG_1+(0x10000*port)); + + regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE); + regs_buff[i++] = NXRDIO(adapter, + adapter->tx_ring->crb_cmd_consumer); + } + + regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer); + + regs_buff[i++] = NXRDIO(adapter, + recv_ctx->rds_rings[0].crb_rcv_producer); + regs_buff[i++] = NXRDIO(adapter, + recv_ctx->rds_rings[1].crb_rcv_producer); + + regs_buff[i++] = adapter->max_sds_rings; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &(recv_ctx->sds_rings[ring]); + regs_buff[i++] = NXRDIO(adapter, + sds_ring->crb_sts_consumer); + } +} + +static u32 netxen_nic_test_link(struct net_device *dev) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 val, port; + + port = adapter->physical_port; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + val = NXRD32(adapter, CRB_XG_STATE_P3); + val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); + return (val == XG_LINK_UP_P3) ? 0 : 1; + } else { + val = NXRD32(adapter, CRB_XG_STATE); + val = (val >> port*8) & 0xff; + return (val == XG_LINK_UP) ? 
0 : 1; + } +} + +static int +netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 * bytes) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + int offset; + int ret; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = (adapter->pdev)->vendor | + ((adapter->pdev)->device << 16); + offset = eeprom->offset; + + ret = netxen_rom_fast_read_words(adapter, offset, bytes, + eeprom->len); + if (ret < 0) + return ret; + + return 0; +} + +static void +netxen_nic_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + + ring->rx_pending = adapter->num_rxd; + ring->rx_jumbo_pending = adapter->num_jumbo_rxd; + ring->rx_jumbo_pending += adapter->num_lro_rxd; + ring->tx_pending = adapter->num_txd; + + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G; + ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G; + } else { + ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G; + ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G; + } + + ring->tx_max_pending = MAX_CMD_DESCRIPTORS; + + ring->rx_mini_max_pending = 0; + ring->rx_mini_pending = 0; +} + +static u32 +netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) +{ + u32 num_desc; + num_desc = max(val, min); + num_desc = min(num_desc, max); + num_desc = roundup_pow_of_two(num_desc); + + if (val != num_desc) { + printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n", + netxen_nic_driver_name, r_name, num_desc, val); + } + + return num_desc; +} + +static int +netxen_nic_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G; + u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G; + u16 num_rxd, num_jumbo_rxd, num_txd; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return -EOPNOTSUPP; + + if (ring->rx_mini_pending) + return -EOPNOTSUPP; + + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + max_rcv_desc = MAX_RCV_DESCRIPTORS_1G; + max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G; + } + + num_rxd = netxen_validate_ringparam(ring->rx_pending, + MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx"); + + num_jumbo_rxd = netxen_validate_ringparam(ring->rx_jumbo_pending, + MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo"); + + num_txd = netxen_validate_ringparam(ring->tx_pending, + MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx"); + + if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd && + num_jumbo_rxd == adapter->num_jumbo_rxd) + return 0; + + adapter->num_rxd = num_rxd; + adapter->num_jumbo_rxd = num_jumbo_rxd; + adapter->num_txd = num_txd; + + return netxen_nic_reset_context(adapter); +} + +static void +netxen_nic_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + __u32 val; + int port = adapter->physical_port; + + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) + return; + /* get flow control settings */ + val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); + pause->rx_pause = netxen_gb_get_rx_flowctl(val); + val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); + switch (port) { + case 0: + pause->tx_pause = !(netxen_gb_get_gb0_mask(val)); + break; + case 1: + pause->tx_pause = !(netxen_gb_get_gb1_mask(val)); + break; + case 2: + pause->tx_pause = !(netxen_gb_get_gb2_mask(val)); + break; + case 3: + default: + 
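+ /* port 3, or any unexpected port value, uses the gb3 pause mask */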
pause->tx_pause = !(netxen_gb_get_gb3_mask(val)); + break; + } + } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) + return; + pause->rx_pause = 1; + val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); + if (port == 0) + pause->tx_pause = !(netxen_xg_get_xg0_mask(val)); + else + pause->tx_pause = !(netxen_xg_get_xg1_mask(val)); + } else { + printk(KERN_ERR"%s: Unknown board type: %x\n", + netxen_nic_driver_name, adapter->ahw.port_type); + } +} + +static int +netxen_nic_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + __u32 val; + int port = adapter->physical_port; + /* read mode */ + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) + return -EIO; + /* set flow control */ + val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); + + if (pause->rx_pause) + netxen_gb_rx_flowctl(val); + else + netxen_gb_unset_rx_flowctl(val); + + NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), + val); + /* set autoneg */ + val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); + switch (port) { + case 0: + if (pause->tx_pause) + netxen_gb_unset_gb0_mask(val); + else + netxen_gb_set_gb0_mask(val); + break; + case 1: + if (pause->tx_pause) + netxen_gb_unset_gb1_mask(val); + else + netxen_gb_set_gb1_mask(val); + break; + case 2: + if (pause->tx_pause) + netxen_gb_unset_gb2_mask(val); + else + netxen_gb_set_gb2_mask(val); + break; + case 3: + default: + if (pause->tx_pause) + netxen_gb_unset_gb3_mask(val); + else + netxen_gb_set_gb3_mask(val); + break; + } + NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val); + } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) + return -EIO; + val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); + if (port == 0) { + if (pause->tx_pause) + netxen_xg_unset_xg0_mask(val); + else + netxen_xg_set_xg0_mask(val); + } else { + if (pause->tx_pause) + netxen_xg_unset_xg1_mask(val); + else + netxen_xg_set_xg1_mask(val); + } + NXWR32(adapter, NETXEN_NIU_XG_PAUSE_CTL, val); + } else { + printk(KERN_ERR "%s: Unknown board type: %x\n", + netxen_nic_driver_name, + adapter->ahw.port_type); + } + return 0; +} + +static int netxen_nic_reg_test(struct net_device *dev) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 data_read, data_written; + + data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0)); + if ((data_read & 0xffff) != adapter->pdev->vendor) + return 1; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return 0; + + data_written = (u32)0xa5a5a5a5; + + NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written); + data_read = NXRD32(adapter, CRB_SCRATCHPAD_TEST); + if (data_written != data_read) + return 1; + + return 0; +} + +static int netxen_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return NETXEN_NIC_TEST_LEN; + case ETH_SS_STATS: + return NETXEN_NIC_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void +netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, + u64 * data) +{ + memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN); + if ((data[0] = netxen_nic_reg_test(dev))) + eth_test->flags |= ETH_TEST_FL_FAILED; + /* link test */ + if ((data[1] = (u64) netxen_nic_test_link(dev))) + eth_test->flags |= ETH_TEST_FL_FAILED; +} + +static void +netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ + int index; + + switch (stringset) { + case 
ETH_SS_TEST: + memcpy(data, *netxen_nic_gstrings_test, + NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { + memcpy(data + index * ETH_GSTRING_LEN, + netxen_nic_gstrings_stats[index].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static void +netxen_nic_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 * data) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + int index; + + for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { + char *p = + (char *)adapter + + netxen_nic_gstrings_stats[index].stat_offset; + data[index] = + (netxen_nic_gstrings_stats[index].sizeof_stat == + sizeof(u64)) ? *(u64 *) p : *(u32 *) p; + } +} + +static void +netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 wol_cfg = 0; + + wol->supported = 0; + wol->wolopts = 0; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); + if (wol_cfg & (1UL << adapter->portnum)) + wol->supported |= WAKE_MAGIC; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); + if (wol_cfg & (1UL << adapter->portnum)) + wol->wolopts |= WAKE_MAGIC; +} + +static int +netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 wol_cfg = 0; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return -EOPNOTSUPP; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); + if (!(wol_cfg & (1 << adapter->portnum))) + return -EOPNOTSUPP; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); + if (wol->wolopts & WAKE_MAGIC) + wol_cfg |= 1UL << adapter->portnum; + else + wol_cfg &= ~(1UL << adapter->portnum); + NXWR32(adapter, NETXEN_WOL_CONFIG, wol_cfg); + + return 0; +} + +/* + * Set the coalescing parameters. Currently only normal is supported. + * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the + * firmware coalescing to default. + */ +static int netxen_set_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethcoal) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return -EINVAL; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return -EINVAL; + + /* + * Return Error if unsupported values or + * unsupported parameters are set. 
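+ * Everything other than {rx,tx}_coalesce_usecs and {rx,tx}_max_coalesced_frames must be zero, and those four values must fit in 16 bits.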
+ */ + if (ethcoal->rx_coalesce_usecs > 0xffff || + ethcoal->rx_max_coalesced_frames > 0xffff || + ethcoal->tx_coalesce_usecs > 0xffff || + ethcoal->tx_max_coalesced_frames > 0xffff || + ethcoal->rx_coalesce_usecs_irq || + ethcoal->rx_max_coalesced_frames_irq || + ethcoal->tx_coalesce_usecs_irq || + ethcoal->tx_max_coalesced_frames_irq || + ethcoal->stats_block_coalesce_usecs || + ethcoal->use_adaptive_rx_coalesce || + ethcoal->use_adaptive_tx_coalesce || + ethcoal->pkt_rate_low || + ethcoal->rx_coalesce_usecs_low || + ethcoal->rx_max_coalesced_frames_low || + ethcoal->tx_coalesce_usecs_low || + ethcoal->tx_max_coalesced_frames_low || + ethcoal->pkt_rate_high || + ethcoal->rx_coalesce_usecs_high || + ethcoal->rx_max_coalesced_frames_high || + ethcoal->tx_coalesce_usecs_high || + ethcoal->tx_max_coalesced_frames_high) + return -EINVAL; + + if (!ethcoal->rx_coalesce_usecs || + !ethcoal->rx_max_coalesced_frames) { + adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; + adapter->coal.normal.data.rx_time_us = + NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; + adapter->coal.normal.data.rx_packets = + NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; + } else { + adapter->coal.flags = 0; + adapter->coal.normal.data.rx_time_us = + ethcoal->rx_coalesce_usecs; + adapter->coal.normal.data.rx_packets = + ethcoal->rx_max_coalesced_frames; + } + adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs; + adapter->coal.normal.data.tx_packets = + ethcoal->tx_max_coalesced_frames; + + netxen_config_intr_coalesce(adapter); + + return 0; +} + +static int netxen_get_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethcoal) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return -EINVAL; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return -EINVAL; + + ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us; + ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us; + ethcoal->rx_max_coalesced_frames = + adapter->coal.normal.data.rx_packets; + ethcoal->tx_max_coalesced_frames = + adapter->coal.normal.data.tx_packets; + + return 0; +} + +const struct ethtool_ops netxen_nic_ethtool_ops = { + .get_settings = netxen_nic_get_settings, + .set_settings = netxen_nic_set_settings, + .get_drvinfo = netxen_nic_get_drvinfo, + .get_regs_len = netxen_nic_get_regs_len, + .get_regs = netxen_nic_get_regs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = netxen_nic_get_eeprom_len, + .get_eeprom = netxen_nic_get_eeprom, + .get_ringparam = netxen_nic_get_ringparam, + .set_ringparam = netxen_nic_set_ringparam, + .get_pauseparam = netxen_nic_get_pauseparam, + .set_pauseparam = netxen_nic_set_pauseparam, + .get_wol = netxen_nic_get_wol, + .set_wol = netxen_nic_set_wol, + .self_test = netxen_nic_diag_test, + .get_strings = netxen_nic_get_strings, + .get_ethtool_stats = netxen_nic_get_ethtool_stats, + .get_sset_count = netxen_get_sset_count, + .get_coalesce = netxen_get_intr_coalesce, + .set_coalesce = netxen_set_intr_coalesce, +}; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h new file mode 100644 index 000000000000..dc1967c1f312 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h @@ -0,0 +1,1050 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". + * + */ + +#ifndef __NETXEN_NIC_HDR_H_ +#define __NETXEN_NIC_HDR_H_ + +#include <linux/module.h> +#include <linux/kernel.h> + +/* + * The basic unit of access when reading/writing control registers. + */ + +typedef __le32 netxen_crbword_t; /* single word in CRB space */ + +enum { + NETXEN_HW_H0_CH_HUB_ADR = 0x05, + NETXEN_HW_H1_CH_HUB_ADR = 0x0E, + NETXEN_HW_H2_CH_HUB_ADR = 0x03, + NETXEN_HW_H3_CH_HUB_ADR = 0x01, + NETXEN_HW_H4_CH_HUB_ADR = 0x06, + NETXEN_HW_H5_CH_HUB_ADR = 0x07, + NETXEN_HW_H6_CH_HUB_ADR = 0x08 +}; + +/* Hub 0 */ +enum { + NETXEN_HW_MN_CRB_AGT_ADR = 0x15, + NETXEN_HW_MS_CRB_AGT_ADR = 0x25 +}; + +/* Hub 1 */ +enum { + NETXEN_HW_PS_CRB_AGT_ADR = 0x73, + NETXEN_HW_SS_CRB_AGT_ADR = 0x20, + NETXEN_HW_RPMX3_CRB_AGT_ADR = 0x0b, + NETXEN_HW_QMS_CRB_AGT_ADR = 0x00, + NETXEN_HW_SQGS0_CRB_AGT_ADR = 0x01, + NETXEN_HW_SQGS1_CRB_AGT_ADR = 0x02, + NETXEN_HW_SQGS2_CRB_AGT_ADR = 0x03, + NETXEN_HW_SQGS3_CRB_AGT_ADR = 0x04, + NETXEN_HW_C2C0_CRB_AGT_ADR = 0x58, + NETXEN_HW_C2C1_CRB_AGT_ADR = 0x59, + NETXEN_HW_C2C2_CRB_AGT_ADR = 0x5a, + NETXEN_HW_RPMX2_CRB_AGT_ADR = 0x0a, + NETXEN_HW_RPMX4_CRB_AGT_ADR = 0x0c, + NETXEN_HW_RPMX7_CRB_AGT_ADR = 0x0f, + NETXEN_HW_RPMX9_CRB_AGT_ADR = 0x12, + NETXEN_HW_SMB_CRB_AGT_ADR = 0x18 +}; + +/* Hub 2 */ +enum { + NETXEN_HW_NIU_CRB_AGT_ADR = 0x31, + NETXEN_HW_I2C0_CRB_AGT_ADR = 0x19, + NETXEN_HW_I2C1_CRB_AGT_ADR = 0x29, + + NETXEN_HW_SN_CRB_AGT_ADR = 0x10, + NETXEN_HW_I2Q_CRB_AGT_ADR = 0x20, + NETXEN_HW_LPC_CRB_AGT_ADR = 0x22, + NETXEN_HW_ROMUSB_CRB_AGT_ADR = 0x21, + NETXEN_HW_QM_CRB_AGT_ADR = 0x66, + NETXEN_HW_SQG0_CRB_AGT_ADR = 0x60, + NETXEN_HW_SQG1_CRB_AGT_ADR = 0x61, + NETXEN_HW_SQG2_CRB_AGT_ADR = 0x62, + NETXEN_HW_SQG3_CRB_AGT_ADR = 0x63, + NETXEN_HW_RPMX1_CRB_AGT_ADR = 0x09, + NETXEN_HW_RPMX5_CRB_AGT_ADR = 0x0d, + NETXEN_HW_RPMX6_CRB_AGT_ADR = 0x0e, + NETXEN_HW_RPMX8_CRB_AGT_ADR = 0x11 +}; + +/* Hub 3 */ +enum { + NETXEN_HW_PH_CRB_AGT_ADR = 0x1A, + NETXEN_HW_SRE_CRB_AGT_ADR = 0x50, + NETXEN_HW_EG_CRB_AGT_ADR = 0x51, + NETXEN_HW_RPMX0_CRB_AGT_ADR = 0x08 +}; + +/* Hub 4 */ +enum { + NETXEN_HW_PEGN0_CRB_AGT_ADR = 0x40, + NETXEN_HW_PEGN1_CRB_AGT_ADR, + NETXEN_HW_PEGN2_CRB_AGT_ADR, + NETXEN_HW_PEGN3_CRB_AGT_ADR, + NETXEN_HW_PEGNI_CRB_AGT_ADR, + NETXEN_HW_PEGND_CRB_AGT_ADR, + NETXEN_HW_PEGNC_CRB_AGT_ADR, + NETXEN_HW_PEGR0_CRB_AGT_ADR, + NETXEN_HW_PEGR1_CRB_AGT_ADR, + NETXEN_HW_PEGR2_CRB_AGT_ADR, + NETXEN_HW_PEGR3_CRB_AGT_ADR, + NETXEN_HW_PEGN4_CRB_AGT_ADR +}; + +/* Hub 5 */ +enum { + NETXEN_HW_PEGS0_CRB_AGT_ADR = 0x40, + NETXEN_HW_PEGS1_CRB_AGT_ADR, + NETXEN_HW_PEGS2_CRB_AGT_ADR, + NETXEN_HW_PEGS3_CRB_AGT_ADR, + NETXEN_HW_PEGSI_CRB_AGT_ADR, + NETXEN_HW_PEGSD_CRB_AGT_ADR, + NETXEN_HW_PEGSC_CRB_AGT_ADR +}; + +/* Hub 6 */ +enum { + NETXEN_HW_CAS0_CRB_AGT_ADR = 0x46,
+ NETXEN_HW_CAS1_CRB_AGT_ADR = 0x47, + NETXEN_HW_CAS2_CRB_AGT_ADR = 0x48, + NETXEN_HW_CAS3_CRB_AGT_ADR = 0x49, + NETXEN_HW_NCM_CRB_AGT_ADR = 0x16, + NETXEN_HW_TMR_CRB_AGT_ADR = 0x17, + NETXEN_HW_XDMA_CRB_AGT_ADR = 0x05, + NETXEN_HW_OCM0_CRB_AGT_ADR = 0x06, + NETXEN_HW_OCM1_CRB_AGT_ADR = 0x07 +}; + +/* Floaters - non existent modules */ +#define NETXEN_HW_EFC_RPMX0_CRB_AGT_ADR 0x67 + +/* This field defines PCI/X adr [25:20] of agents on the CRB */ +enum { + NETXEN_HW_PX_MAP_CRB_PH = 0, + NETXEN_HW_PX_MAP_CRB_PS, + NETXEN_HW_PX_MAP_CRB_MN, + NETXEN_HW_PX_MAP_CRB_MS, + NETXEN_HW_PX_MAP_CRB_PGR1, + NETXEN_HW_PX_MAP_CRB_SRE, + NETXEN_HW_PX_MAP_CRB_NIU, + NETXEN_HW_PX_MAP_CRB_QMN, + NETXEN_HW_PX_MAP_CRB_SQN0, + NETXEN_HW_PX_MAP_CRB_SQN1, + NETXEN_HW_PX_MAP_CRB_SQN2, + NETXEN_HW_PX_MAP_CRB_SQN3, + NETXEN_HW_PX_MAP_CRB_QMS, + NETXEN_HW_PX_MAP_CRB_SQS0, + NETXEN_HW_PX_MAP_CRB_SQS1, + NETXEN_HW_PX_MAP_CRB_SQS2, + NETXEN_HW_PX_MAP_CRB_SQS3, + NETXEN_HW_PX_MAP_CRB_PGN0, + NETXEN_HW_PX_MAP_CRB_PGN1, + NETXEN_HW_PX_MAP_CRB_PGN2, + NETXEN_HW_PX_MAP_CRB_PGN3, + NETXEN_HW_PX_MAP_CRB_PGND, + NETXEN_HW_PX_MAP_CRB_PGNI, + NETXEN_HW_PX_MAP_CRB_PGS0, + NETXEN_HW_PX_MAP_CRB_PGS1, + NETXEN_HW_PX_MAP_CRB_PGS2, + NETXEN_HW_PX_MAP_CRB_PGS3, + NETXEN_HW_PX_MAP_CRB_PGSD, + NETXEN_HW_PX_MAP_CRB_PGSI, + NETXEN_HW_PX_MAP_CRB_SN, + NETXEN_HW_PX_MAP_CRB_PGR2, + NETXEN_HW_PX_MAP_CRB_EG, + NETXEN_HW_PX_MAP_CRB_PH2, + NETXEN_HW_PX_MAP_CRB_PS2, + NETXEN_HW_PX_MAP_CRB_CAM, + NETXEN_HW_PX_MAP_CRB_CAS0, + NETXEN_HW_PX_MAP_CRB_CAS1, + NETXEN_HW_PX_MAP_CRB_CAS2, + NETXEN_HW_PX_MAP_CRB_C2C0, + NETXEN_HW_PX_MAP_CRB_C2C1, + NETXEN_HW_PX_MAP_CRB_TIMR, + NETXEN_HW_PX_MAP_CRB_PGR3, + NETXEN_HW_PX_MAP_CRB_RPMX1, + NETXEN_HW_PX_MAP_CRB_RPMX2, + NETXEN_HW_PX_MAP_CRB_RPMX3, + NETXEN_HW_PX_MAP_CRB_RPMX4, + NETXEN_HW_PX_MAP_CRB_RPMX5, + NETXEN_HW_PX_MAP_CRB_RPMX6, + NETXEN_HW_PX_MAP_CRB_RPMX7, + NETXEN_HW_PX_MAP_CRB_XDMA, + NETXEN_HW_PX_MAP_CRB_I2Q, + NETXEN_HW_PX_MAP_CRB_ROMUSB, + NETXEN_HW_PX_MAP_CRB_CAS3, + NETXEN_HW_PX_MAP_CRB_RPMX0, + NETXEN_HW_PX_MAP_CRB_RPMX8, + NETXEN_HW_PX_MAP_CRB_RPMX9, + NETXEN_HW_PX_MAP_CRB_OCM0, + NETXEN_HW_PX_MAP_CRB_OCM1, + NETXEN_HW_PX_MAP_CRB_SMB, + NETXEN_HW_PX_MAP_CRB_I2C0, + NETXEN_HW_PX_MAP_CRB_I2C1, + NETXEN_HW_PX_MAP_CRB_LPC, + NETXEN_HW_PX_MAP_CRB_PGNC, + NETXEN_HW_PX_MAP_CRB_PGR0 +}; + +/* This field defines CRB adr [31:20] of the agents */ + +#define NETXEN_HW_CRB_HUB_AGT_ADR_MN \ + ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PH \ + ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_PH_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_MS \ + ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MS_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_PS \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_PS_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SS \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SS_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_QMS \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_QMS_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS0 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS1 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS2 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS3 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS3_CRB_AGT_ADR) +#define 
NETXEN_HW_CRB_HUB_AGT_ADR_C2C0 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C1 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX4_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX7_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX9_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SMB \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SMB_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_NIU \ + ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_NIU_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C0 \ + ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C1 \ + ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C1_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_SRE \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SRE_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_EG \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_EG_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_QMN \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_QM_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN0 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN1 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN2 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN3 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX5_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX6_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX8_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS0 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS1 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS2 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS3 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS3_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNI \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNI_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGND \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGND_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN0 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN1 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN2 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN4 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN4_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \ + 
((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR1 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR2 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR3 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR3_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSI \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSI_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSD \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSD_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS0 \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS1 \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS2 \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS3 \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSC \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSC_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAM \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_NCM_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_TIMR \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_TMR_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_XDMA \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_XDMA_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SN \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_SN_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_I2Q \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_I2Q_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_ROMUSB_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM0 \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM1 \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR) + +#define NETXEN_SRE_MISC (NETXEN_CRB_SRE + 0x0002c) +#define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034) +#define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014) +#define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000) +#define NETXEN_SRE_L2RE_CTL (NETXEN_CRB_SRE + 0x05000) +#define NETXEN_SRE_BUF_CTL (NETXEN_CRB_SRE + 0x01000) + +#define NETXEN_DMA_BASE(U) (NETXEN_CRB_PCIX_MD + 0x20000 + ((U)<<16)) +#define NETXEN_DMA_COMMAND(U) (NETXEN_DMA_BASE(U) + 0x00008) + +#define NETXEN_I2Q_CLR_PCI_HI (NETXEN_CRB_I2Q + 0x00034) + +#define PEG_NETWORK_BASE(N) (NETXEN_CRB_PEG_NET_0 + (((N)&3) << 20)) +#define CRB_REG_EX_PC 0x3c + +#define ROMUSB_GLB (NETXEN_CRB_ROMUSB + 0x00000) +#define ROMUSB_ROM (NETXEN_CRB_ROMUSB + 0x10000) + +#define NETXEN_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) +#define NETXEN_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) +#define NETXEN_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c) +#define NETXEN_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) +#define NETXEN_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044) +#define NETXEN_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) +#define NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8) + +#define NETXEN_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) + +#define NETXEN_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) +#define NETXEN_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) +#define 
NETXEN_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) +#define NETXEN_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) +#define NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) +#define NETXEN_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) + +/* Lock IDs for ROM lock */ +#define ROM_LOCK_DRIVER 0x0d417340 + +/****************************************************************************** +* +* Definitions specific to M25P flash +* +******************************************************************************* +* Instructions +*/ +#define M25P_INSTR_WREN 0x06 +#define M25P_INSTR_WRDI 0x04 +#define M25P_INSTR_RDID 0x9f +#define M25P_INSTR_RDSR 0x05 +#define M25P_INSTR_WRSR 0x01 +#define M25P_INSTR_READ 0x03 +#define M25P_INSTR_FAST_READ 0x0b +#define M25P_INSTR_PP 0x02 +#define M25P_INSTR_SE 0xd8 +#define M25P_INSTR_BE 0xc7 +#define M25P_INSTR_DP 0xb9 +#define M25P_INSTR_RES 0xab + +/* all are 1MB windows */ + +#define NETXEN_PCI_CRB_WINDOWSIZE 0x00100000 +#define NETXEN_PCI_CRB_WINDOW(A) \ + (NETXEN_PCI_CRBSPACE + (A)*NETXEN_PCI_CRB_WINDOWSIZE) + +#define NETXEN_CRB_NIU NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_NIU) +#define NETXEN_CRB_SRE NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SRE) +#define NETXEN_CRB_ROMUSB \ + NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) +#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) +#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0) +#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB) +#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) + +#define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH) +#define NETXEN_CRB_PCIX_HOST2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH2) +#define NETXEN_CRB_PEG_NET_0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN0) +#define NETXEN_CRB_PEG_NET_1 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN1) +#define NETXEN_CRB_PEG_NET_2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN2) +#define NETXEN_CRB_PEG_NET_3 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN3) +#define NETXEN_CRB_PEG_NET_4 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SQS2) +#define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND) +#define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI) +#define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN) +#define NETXEN_CRB_QDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SN) + +#define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS) +#define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD + +#define ISR_INT_VECTOR (NETXEN_PCIX_PS_REG(PCIX_INT_VECTOR)) +#define ISR_INT_MASK (NETXEN_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_MASK_SLOW (NETXEN_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_TARGET_STATUS (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS)) +#define ISR_INT_TARGET_MASK (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK)) +#define ISR_INT_TARGET_STATUS_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F1)) +#define ISR_INT_TARGET_MASK_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F1)) +#define ISR_INT_TARGET_STATUS_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F2)) +#define ISR_INT_TARGET_MASK_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) +#define ISR_INT_TARGET_STATUS_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) +#define ISR_INT_TARGET_MASK_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) +#define ISR_INT_TARGET_STATUS_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F4)) +#define ISR_INT_TARGET_MASK_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F4)) +#define ISR_INT_TARGET_STATUS_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F5)) +#define 
ISR_INT_TARGET_MASK_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F5)) +#define ISR_INT_TARGET_STATUS_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F6)) +#define ISR_INT_TARGET_MASK_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F6)) +#define ISR_INT_TARGET_STATUS_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) +#define ISR_INT_TARGET_MASK_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) + +#define NETXEN_PCI_MAPSIZE 128 +#define NETXEN_PCI_DDR_NET (0x00000000UL) +#define NETXEN_PCI_QDR_NET (0x04000000UL) +#define NETXEN_PCI_DIRECT_CRB (0x04400000UL) +#define NETXEN_PCI_CAMQM (0x04800000UL) +#define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL) +#define NETXEN_PCI_OCM0 (0x05000000UL) +#define NETXEN_PCI_OCM0_MAX (0x050fffffUL) +#define NETXEN_PCI_OCM1 (0x05100000UL) +#define NETXEN_PCI_OCM1_MAX (0x051fffffUL) +#define NETXEN_PCI_CRBSPACE (0x06000000UL) +#define NETXEN_PCI_128MB_SIZE (0x08000000UL) +#define NETXEN_PCI_32MB_SIZE (0x02000000UL) +#define NETXEN_PCI_2MB_SIZE (0x00200000UL) + +#define NETXEN_PCI_MN_2M (0) +#define NETXEN_PCI_MS_2M (0x80000) +#define NETXEN_PCI_OCM0_2M (0x000c0000UL) +#define NETXEN_PCI_CAMQM_2M_BASE (0x000ff800UL) +#define NETXEN_PCI_CAMQM_2M_END (0x04800800UL) + +#define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM) + +#define NETXEN_ADDR_DDR_NET (0x0000000000000000ULL) +#define NETXEN_ADDR_DDR_NET_MAX (0x000000000fffffffULL) +#define NETXEN_ADDR_OCM0 (0x0000000200000000ULL) +#define NETXEN_ADDR_OCM0_MAX (0x00000002000fffffULL) +#define NETXEN_ADDR_OCM1 (0x0000000200400000ULL) +#define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL) +#define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL) +#define NETXEN_ADDR_QDR_NET_MAX_P2 (0x00000003003fffffULL) +#define NETXEN_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL) + +/* + * Register offsets for MN + */ +#define NETXEN_MIU_CONTROL (0x000) +#define NETXEN_MIU_MN_CONTROL (NETXEN_CRB_DDR_NET+NETXEN_MIU_CONTROL) + + /* 200ms delay in each loop */ +#define NETXEN_NIU_PHY_WAITLEN 200000 + /* 10 seconds before we give up */ +#define NETXEN_NIU_PHY_WAITMAX 50 +#define NETXEN_NIU_MAX_GBE_PORTS 4 +#define NETXEN_NIU_MAX_XG_PORTS 2 + +#define NETXEN_NIU_MODE (NETXEN_CRB_NIU + 0x00000) + +#define NETXEN_NIU_XG_SINGLE_TERM (NETXEN_CRB_NIU + 0x00004) +#define NETXEN_NIU_XG_DRIVE_HI (NETXEN_CRB_NIU + 0x00008) +#define NETXEN_NIU_XG_DRIVE_LO (NETXEN_CRB_NIU + 0x0000c) +#define NETXEN_NIU_XG_DTX (NETXEN_CRB_NIU + 0x00010) +#define NETXEN_NIU_XG_DEQ (NETXEN_CRB_NIU + 0x00014) +#define NETXEN_NIU_XG_WORD_ALIGN (NETXEN_CRB_NIU + 0x00018) +#define NETXEN_NIU_XG_RESET (NETXEN_CRB_NIU + 0x0001c) +#define NETXEN_NIU_XG_POWER_DOWN (NETXEN_CRB_NIU + 0x00020) +#define NETXEN_NIU_XG_RESET_PLL (NETXEN_CRB_NIU + 0x00024) +#define NETXEN_NIU_XG_SERDES_LOOPBACK (NETXEN_CRB_NIU + 0x00028) +#define NETXEN_NIU_XG_DO_BYTE_ALIGN (NETXEN_CRB_NIU + 0x0002c) +#define NETXEN_NIU_XG_TX_ENABLE (NETXEN_CRB_NIU + 0x00030) +#define NETXEN_NIU_XG_RX_ENABLE (NETXEN_CRB_NIU + 0x00034) +#define NETXEN_NIU_XG_STATUS (NETXEN_CRB_NIU + 0x00038) +#define NETXEN_NIU_XG_PAUSE_THRESHOLD (NETXEN_CRB_NIU + 0x0003c) +#define NETXEN_NIU_INT_MASK (NETXEN_CRB_NIU + 0x00040) +#define NETXEN_NIU_ACTIVE_INT (NETXEN_CRB_NIU + 0x00044) +#define NETXEN_NIU_MASKABLE_INT (NETXEN_CRB_NIU + 0x00048) + +#define NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER (NETXEN_CRB_NIU + 0x0004c) + +#define NETXEN_NIU_GB_SERDES_RESET (NETXEN_CRB_NIU + 0x00050) +#define NETXEN_NIU_GB0_GMII_MODE (NETXEN_CRB_NIU + 0x00054) +#define NETXEN_NIU_GB0_MII_MODE (NETXEN_CRB_NIU + 0x00058) +#define NETXEN_NIU_GB1_GMII_MODE (NETXEN_CRB_NIU + 0x0005c) 
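The NIU register map above, and the per-port NETXEN_NIU_GB_* accessors that follow, are banked per interface: each GbE MAC instance occupies its own 0x10000-byte stride inside the NIU CRB window, so the macros take a port index I and add I*0x10000 to a fixed offset. The two wait constants encode a 10-second budget as NETXEN_NIU_PHY_WAITMAX (50) polls of NETXEN_NIU_PHY_WAITLEN (200000 us, i.e. 200 ms) each. A minimal sketch of how a caller might combine them, in the style of this driver; NXRD32() is the register accessor used later in this patch, while netxen_wait_gbe_link() and link_ready() are hypothetical names introduced only for illustration and are not part of the commit:

    static int netxen_wait_gbe_link(struct netxen_adapter *adapter, int port)
    {
    	u32 status;
    	int i;

    	if (port >= NETXEN_NIU_MAX_GBE_PORTS)
    		return -EINVAL;

    	for (i = 0; i < NETXEN_NIU_PHY_WAITMAX; i++) {
    		/* each GbE MAC sits in its own 0x10000 CRB bank */
    		status = NXRD32(adapter, NETXEN_NIU_GB_MII_MGMT_STATUS(port));
    		if (link_ready(status))		/* hypothetical predicate */
    			return 0;
    		msleep(NETXEN_NIU_PHY_WAITLEN / 1000);	/* 200 ms per loop */
    	}

    	return -ETIMEDOUT;	/* 50 * 200 ms = 10 s budget exhausted */
    }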
+#define NETXEN_NIU_GB1_MII_MODE (NETXEN_CRB_NIU + 0x00060) +#define NETXEN_NIU_GB2_GMII_MODE (NETXEN_CRB_NIU + 0x00064) +#define NETXEN_NIU_GB2_MII_MODE (NETXEN_CRB_NIU + 0x00068) +#define NETXEN_NIU_GB3_GMII_MODE (NETXEN_CRB_NIU + 0x0006c) +#define NETXEN_NIU_GB3_MII_MODE (NETXEN_CRB_NIU + 0x00070) +#define NETXEN_NIU_REMOTE_LOOPBACK (NETXEN_CRB_NIU + 0x00074) +#define NETXEN_NIU_GB0_HALF_DUPLEX (NETXEN_CRB_NIU + 0x00078) +#define NETXEN_NIU_GB1_HALF_DUPLEX (NETXEN_CRB_NIU + 0x0007c) +#define NETXEN_NIU_RESET_SYS_FIFOS (NETXEN_CRB_NIU + 0x00088) +#define NETXEN_NIU_GB_CRC_DROP (NETXEN_CRB_NIU + 0x0008c) +#define NETXEN_NIU_GB_DROP_WRONGADDR (NETXEN_CRB_NIU + 0x00090) +#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094) +#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098) +#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc) +#define NETXEN_NIU_FRAME_COUNT_SELECT (NETXEN_CRB_NIU + 0x000ac) +#define NETXEN_NIU_FRAME_COUNT (NETXEN_CRB_NIU + 0x000b0) +#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128) +#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c) + +#define NETXEN_NIU_FULL_LEVEL_XG (NETXEN_CRB_NIU + 0x00450) + +#define NETXEN_NIU_XG1_RESET (NETXEN_CRB_NIU + 0x0011c) +#define NETXEN_NIU_XG1_POWER_DOWN (NETXEN_CRB_NIU + 0x00120) +#define NETXEN_NIU_XG1_RESET_PLL (NETXEN_CRB_NIU + 0x00124) + +#define NETXEN_MAC_ADDR_CNTL_REG (NETXEN_CRB_NIU + 0x1000) + +#define NETXEN_MULTICAST_ADDR_HI_0 (NETXEN_CRB_NIU + 0x1010) +#define NETXEN_MULTICAST_ADDR_HI_1 (NETXEN_CRB_NIU + 0x1014) +#define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018) +#define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c) + +#define NETXEN_UNICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1080) +#define NETXEN_MULTICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1100) + +#define NETXEN_NIU_GB_MAC_CONFIG_0(I) \ + (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000) +#define NETXEN_NIU_GB_MAC_CONFIG_1(I) \ + (NETXEN_CRB_NIU + 0x30004 + (I)*0x10000) +#define NETXEN_NIU_GB_MAC_IPG_IFG(I) \ + (NETXEN_CRB_NIU + 0x30008 + (I)*0x10000) +#define NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I) \ + (NETXEN_CRB_NIU + 0x3000c + (I)*0x10000) +#define NETXEN_NIU_GB_MAX_FRAME_SIZE(I) \ + (NETXEN_CRB_NIU + 0x30010 + (I)*0x10000) +#define NETXEN_NIU_GB_TEST_REG(I) \ + (NETXEN_CRB_NIU + 0x3001c + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_CONFIG(I) \ + (NETXEN_CRB_NIU + 0x30020 + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_COMMAND(I) \ + (NETXEN_CRB_NIU + 0x30024 + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_ADDR(I) \ + (NETXEN_CRB_NIU + 0x30028 + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_CTRL(I) \ + (NETXEN_CRB_NIU + 0x3002c + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_STATUS(I) \ + (NETXEN_CRB_NIU + 0x30030 + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_INDICATE(I) \ + (NETXEN_CRB_NIU + 0x30034 + (I)*0x10000) +#define NETXEN_NIU_GB_INTERFACE_CTRL(I) \ + (NETXEN_CRB_NIU + 0x30038 + (I)*0x10000) +#define NETXEN_NIU_GB_INTERFACE_STATUS(I) \ + (NETXEN_CRB_NIU + 0x3003c + (I)*0x10000) +#define NETXEN_NIU_GB_STATION_ADDR_0(I) \ + (NETXEN_CRB_NIU + 0x30040 + (I)*0x10000) +#define NETXEN_NIU_GB_STATION_ADDR_1(I) \ + (NETXEN_CRB_NIU + 0x30044 + (I)*0x10000) + +#define NETXEN_NIU_XGE_CONFIG_0 (NETXEN_CRB_NIU + 0x70000) +#define NETXEN_NIU_XGE_CONFIG_1 (NETXEN_CRB_NIU + 0x70004) +#define NETXEN_NIU_XGE_IPG (NETXEN_CRB_NIU + 0x70008) +#define NETXEN_NIU_XGE_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x7000c) +#define NETXEN_NIU_XGE_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x70010) +#define NETXEN_NIU_XGE_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x70014) +#define 
NETXEN_NIU_XGE_STATUS (NETXEN_CRB_NIU + 0x70018) +#define NETXEN_NIU_XGE_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x7001c) +#define NETXEN_NIU_XGE_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x70020) +#define NETXEN_NIU_XGE_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x70024) +#define NETXEN_NIU_XGE_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x70028) +#define NETXEN_NIU_XGE_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x7002c) +#define NETXEN_NIU_XGE_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x70030) +#define NETXEN_NIU_XGE_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x70034) +#define NETXEN_NIU_XGE_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x70038) +#define NETXEN_NIU_XGE_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x7003c) +#define NETXEN_NIU_XGE_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x70040) +#define NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70044) +#define NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70048) +#define NETXEN_NIU_XGE_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x7004c) +#define NETXEN_NIU_XGE_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x70050) +#define NETXEN_NIU_XGE_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x70054) +#define NETXEN_NIU_XGE_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x70058) +#define NETXEN_NIU_XG1_CONFIG_0 (NETXEN_CRB_NIU + 0x80000) +#define NETXEN_NIU_XG1_CONFIG_1 (NETXEN_CRB_NIU + 0x80004) +#define NETXEN_NIU_XG1_IPG (NETXEN_CRB_NIU + 0x80008) +#define NETXEN_NIU_XG1_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x8000c) +#define NETXEN_NIU_XG1_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x80010) +#define NETXEN_NIU_XG1_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x80014) +#define NETXEN_NIU_XG1_STATUS (NETXEN_CRB_NIU + 0x80018) +#define NETXEN_NIU_XG1_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x8001c) +#define NETXEN_NIU_XG1_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x80020) +#define NETXEN_NIU_XG1_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x80024) +#define NETXEN_NIU_XG1_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x80028) +#define NETXEN_NIU_XG1_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x8002c) +#define NETXEN_NIU_XG1_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x80030) +#define NETXEN_NIU_XG1_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x80034) +#define NETXEN_NIU_XG1_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x80038) +#define NETXEN_NIU_XG1_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x8003c) +#define NETXEN_NIU_XG1_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x80040) +#define NETXEN_NIU_XG1_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80044) +#define NETXEN_NIU_XG1_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80048) +#define NETXEN_NIU_XG1_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x8004c) +#define NETXEN_NIU_XG1_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x80050) +#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054) +#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058) + +/* P3 802.3ap */ +#define NETXEN_NIU_AP_MAC_CONFIG_0(I) (NETXEN_CRB_NIU+0xa0000+(I)*0x10000) +#define NETXEN_NIU_AP_MAC_CONFIG_1(I) (NETXEN_CRB_NIU+0xa0004+(I)*0x10000) +#define NETXEN_NIU_AP_MAC_IPG_IFG(I) (NETXEN_CRB_NIU+0xa0008+(I)*0x10000) +#define NETXEN_NIU_AP_HALF_DUPLEX_CTRL(I) (NETXEN_CRB_NIU+0xa000c+(I)*0x10000) +#define NETXEN_NIU_AP_MAX_FRAME_SIZE(I) (NETXEN_CRB_NIU+0xa0010+(I)*0x10000) +#define NETXEN_NIU_AP_TEST_REG(I) (NETXEN_CRB_NIU+0xa001c+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_CONFIG(I) (NETXEN_CRB_NIU+0xa0020+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_COMMAND(I) (NETXEN_CRB_NIU+0xa0024+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_ADDR(I) (NETXEN_CRB_NIU+0xa0028+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_CTRL(I) (NETXEN_CRB_NIU+0xa002c+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_STATUS(I) (NETXEN_CRB_NIU+0xa0030+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_INDICATE(I) 
(NETXEN_CRB_NIU+0xa0034+(I)*0x10000) +#define NETXEN_NIU_AP_INTERFACE_CTRL(I) (NETXEN_CRB_NIU+0xa0038+(I)*0x10000) +#define NETXEN_NIU_AP_INTERFACE_STATUS(I) (NETXEN_CRB_NIU+0xa003c+(I)*0x10000) +#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000) +#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000) + + +#define TEST_AGT_CTRL (0x00) + +#define TA_CTL_START 1 +#define TA_CTL_ENABLE 2 +#define TA_CTL_WRITE 4 +#define TA_CTL_BUSY 8 + +/* + * Register offsets for MN + */ +#define MIU_TEST_AGT_BASE (0x90) + +#define MIU_TEST_AGT_ADDR_LO (0x04) +#define MIU_TEST_AGT_ADDR_HI (0x08) +#define MIU_TEST_AGT_WRDATA_LO (0x10) +#define MIU_TEST_AGT_WRDATA_HI (0x14) +#define MIU_TEST_AGT_RDDATA_LO (0x18) +#define MIU_TEST_AGT_RDDATA_HI (0x1c) + +#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 +#define MIU_TEST_AGT_UPPER_ADDR(off) (0) + +/* + * Register offsets for MS + */ +#define SIU_TEST_AGT_BASE (0x60) + +#define SIU_TEST_AGT_ADDR_LO (0x04) +#define SIU_TEST_AGT_ADDR_HI (0x18) +#define SIU_TEST_AGT_WRDATA_LO (0x08) +#define SIU_TEST_AGT_WRDATA_HI (0x0c) +#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i))) +#define SIU_TEST_AGT_RDDATA_LO (0x10) +#define SIU_TEST_AGT_RDDATA_HI (0x14) +#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i))) + +#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 +#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) + +/* XG Link status */ +#define XG_LINK_UP 0x10 +#define XG_LINK_DOWN 0x20 + +#define XG_LINK_UP_P3 0x01 +#define XG_LINK_DOWN_P3 0x02 +#define XG_LINK_STATE_P3_MASK 0xf +#define XG_LINK_STATE_P3(pcifn,val) \ + (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) + +#define P3_LINK_SPEED_MHZ 100 +#define P3_LINK_SPEED_MASK 0xff +#define P3_LINK_SPEED_REG(pcifn) \ + (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) +#define P3_LINK_SPEED_VAL(pcifn, reg) \ + (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK) + +#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) +#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) +#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) +#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154)) +#define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158)) +#define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100)) +#define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120)) +#define NETXEN_CRB_WIN_LOCK_ID (NETXEN_CAM_RAM(0x124)) + +#define NIC_CRB_BASE (NETXEN_CAM_RAM(0x200)) +#define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700)) +#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X)) +#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X)) + +#define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18)) +#define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c)) +#define NX_ARG2_CRB_OFFSET (NETXEN_NIC_REG(0x20)) +#define NX_ARG3_CRB_OFFSET (NETXEN_NIC_REG(0x24)) +#define NX_SIGN_CRB_OFFSET (NETXEN_NIC_REG(0x28)) + +#define CRB_HOST_DUMMY_BUF_ADDR_HI (NETXEN_NIC_REG(0x3c)) +#define CRB_HOST_DUMMY_BUF_ADDR_LO (NETXEN_NIC_REG(0x40)) + +#define CRB_CMDPEG_STATE (NETXEN_NIC_REG(0x50)) +#define CRB_RCVPEG_STATE (NETXEN_NIC_REG(0x13c)) + +#define CRB_XG_STATE (NETXEN_NIC_REG(0x94)) +#define CRB_XG_STATE_P3 (NETXEN_NIC_REG(0x98)) +#define CRB_PF_LINK_SPEED_1 (NETXEN_NIC_REG(0xe8)) +#define CRB_PF_LINK_SPEED_2 (NETXEN_NIC_REG(0xec)) + +#define CRB_MPORT_MODE (NETXEN_NIC_REG(0xc4)) +#define CRB_DMA_SHIFT (NETXEN_NIC_REG(0xcc)) +#define CRB_INT_VECTOR (NETXEN_NIC_REG(0xd4)) + +#define CRB_CMD_PRODUCER_OFFSET (NETXEN_NIC_REG(0x08)) +#define CRB_CMD_CONSUMER_OFFSET (NETXEN_NIC_REG(0x0c)) +#define CRB_CMD_PRODUCER_OFFSET_1 (NETXEN_NIC_REG(0x1ac)) +#define CRB_CMD_CONSUMER_OFFSET_1 
(NETXEN_NIC_REG(0x1b0)) +#define CRB_CMD_PRODUCER_OFFSET_2 (NETXEN_NIC_REG(0x1b8)) +#define CRB_CMD_CONSUMER_OFFSET_2 (NETXEN_NIC_REG(0x1bc)) +#define CRB_CMD_PRODUCER_OFFSET_3 (NETXEN_NIC_REG(0x1d0)) +#define CRB_CMD_CONSUMER_OFFSET_3 (NETXEN_NIC_REG(0x1d4)) +#define CRB_TEMP_STATE (NETXEN_NIC_REG(0x1b4)) + +#define CRB_V2P_0 (NETXEN_NIC_REG(0x290)) +#define CRB_V2P(port) (CRB_V2P_0+((port)*4)) +#define CRB_DRIVER_VERSION (NETXEN_NIC_REG(0x2a0)) + +#define CRB_SW_INT_MASK_0 (NETXEN_NIC_REG(0x1d8)) +#define CRB_SW_INT_MASK_1 (NETXEN_NIC_REG(0x1e0)) +#define CRB_SW_INT_MASK_2 (NETXEN_NIC_REG(0x1e4)) +#define CRB_SW_INT_MASK_3 (NETXEN_NIC_REG(0x1e8)) + +#define CRB_FW_CAPABILITIES_1 (NETXEN_CAM_RAM(0x128)) +#define CRB_MAC_BLOCK_START (NETXEN_CAM_RAM(0x1c0)) + +/* + * capabilities register, can be used to selectively enable/disable features + * for backward compatibility + */ +#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8) +#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270) + +#define INTR_SCHEME_PERPORT 0x1 +#define MSI_MODE_MULTIFUNC 0x1 + +/* used for ethtool tests */ +#define CRB_SCRATCHPAD_TEST NETXEN_NIC_REG(0x280) + +/* + * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address + * which can be read by the Phantom host to get producer/consumer indexes from + * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following + * registers will be used for the addresses of the ring's shared memory + * on the Phantom. + */ + +#define nx_get_temp_val(x) ((x) >> 16) +#define nx_get_temp_state(x) ((x) & 0xffff) +#define nx_encode_temp(val, state) (((val) << 16) | (state)) + +/* + * Temperature control. + */ +enum { + NX_TEMP_NORMAL = 0x1, /* Normal operating range */ + NX_TEMP_WARN, /* Sound alert, temperature getting high */ + NX_TEMP_PANIC /* Fatal error, hardware has shut down. */ +}; + +/* Lock IDs for PHY lock */ +#define PHY_LOCK_DRIVER 0x44524956 + +/* Used for PS PCI Memory access */ +#define PCIX_PS_OP_ADDR_LO (0x10000) +/* via CRB (PS side only) */ +#define PCIX_PS_OP_ADDR_HI (0x10004) + +#define PCIX_INT_VECTOR (0x10100) +#define PCIX_INT_MASK (0x10104) + +#define PCIX_CRB_WINDOW (0x10210) +#define PCIX_CRB_WINDOW_F0 (0x10210) +#define PCIX_CRB_WINDOW_F1 (0x10230) +#define PCIX_CRB_WINDOW_F2 (0x10250) +#define PCIX_CRB_WINDOW_F3 (0x10270) +#define PCIX_CRB_WINDOW_F4 (0x102ac) +#define PCIX_CRB_WINDOW_F5 (0x102bc) +#define PCIX_CRB_WINDOW_F6 (0x102cc) +#define PCIX_CRB_WINDOW_F7 (0x102dc) +#define PCIE_CRB_WINDOW_REG(func) (((func) < 4) ? \ + (PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\ + (PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4)))) + +#define PCIX_MN_WINDOW (0x10200) +#define PCIX_MN_WINDOW_F0 (0x10200) +#define PCIX_MN_WINDOW_F1 (0x10220) +#define PCIX_MN_WINDOW_F2 (0x10240) +#define PCIX_MN_WINDOW_F3 (0x10260) +#define PCIX_MN_WINDOW_F4 (0x102a0) +#define PCIX_MN_WINDOW_F5 (0x102b0) +#define PCIX_MN_WINDOW_F6 (0x102c0) +#define PCIX_MN_WINDOW_F7 (0x102d0) +#define PCIE_MN_WINDOW_REG(func) (((func) < 4) ? \ + (PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\ + (PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4)))) + +#define PCIX_SN_WINDOW (0x10208) +#define PCIX_SN_WINDOW_F0 (0x10208) +#define PCIX_SN_WINDOW_F1 (0x10228) +#define PCIX_SN_WINDOW_F2 (0x10248) +#define PCIX_SN_WINDOW_F3 (0x10268) +#define PCIX_SN_WINDOW_F4 (0x102a8) +#define PCIX_SN_WINDOW_F5 (0x102b8) +#define PCIX_SN_WINDOW_F6 (0x102c8) +#define PCIX_SN_WINDOW_F7 (0x102d8) +#define PCIE_SN_WINDOW_REG(func) (((func) < 4) ? 
\ + (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\ + (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4)))) + +#define PCIX_OCM_WINDOW (0x10800) +#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func)) + +#define PCIX_TARGET_STATUS (0x10118) +#define PCIX_TARGET_STATUS_F1 (0x10160) +#define PCIX_TARGET_STATUS_F2 (0x10164) +#define PCIX_TARGET_STATUS_F3 (0x10168) +#define PCIX_TARGET_STATUS_F4 (0x10360) +#define PCIX_TARGET_STATUS_F5 (0x10364) +#define PCIX_TARGET_STATUS_F6 (0x10368) +#define PCIX_TARGET_STATUS_F7 (0x1036c) + +#define PCIX_TARGET_MASK (0x10128) +#define PCIX_TARGET_MASK_F1 (0x10170) +#define PCIX_TARGET_MASK_F2 (0x10174) +#define PCIX_TARGET_MASK_F3 (0x10178) +#define PCIX_TARGET_MASK_F4 (0x10370) +#define PCIX_TARGET_MASK_F5 (0x10374) +#define PCIX_TARGET_MASK_F6 (0x10378) +#define PCIX_TARGET_MASK_F7 (0x1037c) + +#define PCIX_MSI_F0 (0x13000) +#define PCIX_MSI_F1 (0x13004) +#define PCIX_MSI_F2 (0x13008) +#define PCIX_MSI_F3 (0x1300c) +#define PCIX_MSI_F4 (0x13010) +#define PCIX_MSI_F5 (0x13014) +#define PCIX_MSI_F6 (0x13018) +#define PCIX_MSI_F7 (0x1301c) +#define PCIX_MSI_F(i) (0x13000+((i)*4)) + +#define PCIX_PS_MEM_SPACE (0x90000) + +#define NETXEN_PCIX_PH_REG(reg) (NETXEN_CRB_PCIE + (reg)) +#define NETXEN_PCIX_PS_REG(reg) (NETXEN_CRB_PCIX_MD + (reg)) + +#define NETXEN_PCIE_REG(reg) (NETXEN_CRB_PCIE + (reg)) + +#define PCIE_MAX_DMA_XFER_SIZE (0x1404c) + +#define PCIE_DCR 0x00d8 + +#define PCIE_SEM0_LOCK (0x1c000) +#define PCIE_SEM0_UNLOCK (0x1c004) +#define PCIE_SEM1_LOCK (0x1c008) +#define PCIE_SEM1_UNLOCK (0x1c00c) +#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */ +#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */ +#define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */ +#define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */ +#define PCIE_SEM4_LOCK (0x1c020) +#define PCIE_SEM4_UNLOCK (0x1c024) +#define PCIE_SEM5_LOCK (0x1c028) /* API lock */ +#define PCIE_SEM5_UNLOCK (0x1c02c) /* API unlock */ +#define PCIE_SEM6_LOCK (0x1c030) /* sw lock */ +#define PCIE_SEM6_UNLOCK (0x1c034) /* sw unlock */ +#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */ +#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/ +#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N)) +#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N)) + +#define PCIE_SETUP_FUNCTION (0x12040) +#define PCIE_SETUP_FUNCTION2 (0x12048) +#define PCIE_MISCCFG_RC (0x1206c) +#define PCIE_TGT_SPLIT_CHICKEN (0x12080) +#define PCIE_CHICKEN3 (0x120c8) + +#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC)) +#define PCIE_MAX_MASTER_SPLIT (0x14048) + +#define NETXEN_PORT_MODE_NONE 0 +#define NETXEN_PORT_MODE_XG 1 +#define NETXEN_PORT_MODE_GB 2 +#define NETXEN_PORT_MODE_802_3_AP 3 +#define NETXEN_PORT_MODE_AUTO_NEG 4 +#define NETXEN_PORT_MODE_AUTO_NEG_1G 5 +#define NETXEN_PORT_MODE_AUTO_NEG_XG 6 +#define NETXEN_PORT_MODE_ADDR (NETXEN_CAM_RAM(0x24)) +#define NETXEN_WOL_PORT_MODE (NETXEN_CAM_RAM(0x198)) + +#define NETXEN_WOL_CONFIG_NV (NETXEN_CAM_RAM(0x184)) +#define NETXEN_WOL_CONFIG (NETXEN_CAM_RAM(0x188)) + +#define NX_PEG_TUNE_MN_PRESENT 0x1 +#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c)) + +#define NETXEN_DMA_WATCHDOG_CTRL (NETXEN_CAM_RAM(0x14)) +#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0)) +#define NETXEN_PEG_HALT_STATUS1 (NETXEN_CAM_RAM(0xa8)) +#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac)) +#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138)) +#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140)) + +/* Device State */ +#define NX_DEV_COLD 1 +#define NX_DEV_INITALIZING 2 +#define NX_DEV_READY 3 +#define 
NX_DEV_NEED_RESET 4
+#define NX_DEV_NEED_QUISCENT 5
+#define NX_DEV_NEED_AER 6
+#define NX_DEV_FAILED 7
+
+#define NX_RCODE_DRIVER_INFO 0x20000000
+#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000
+#define NX_RCODE_FATAL_ERROR 0x80000000
+#define NX_FWERROR_PEGNUM(code) ((code) & 0xff)
+#define NX_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
+
+#define FW_POLL_DELAY (2 * HZ)
+#define FW_FAIL_THRESH 3
+#define FW_POLL_THRESH 10
+
+#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct netxen_legacy_intr_set {
+	uint32_t int_vec_bit;
+	uint32_t tgt_status_reg;
+	uint32_t tgt_mask_reg;
+	uint32_t pci_int_reg;
+};
+
+#define NX_LEGACY_INTR_CONFIG \
+{ \
+	{ \
+		.int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+		.tgt_status_reg = ISR_INT_TARGET_STATUS, \
+		.tgt_mask_reg = ISR_INT_TARGET_MASK, \
+		.pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+	\
+	{ \
+		.int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+		.tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+		.tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+		.pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+	\
+	{ \
+		.int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+		.tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+		.tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+		.pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+	\
+	{ \
+		.int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+		.tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+		.tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+		.pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+	\
+	{ \
+		.int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+		.tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+		.tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+		.pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+	\
+	{ \
+		.int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+		.tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+		.tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+		.pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+	\
+	{ \
+		.int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+		.tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+		.tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+		.pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+	\
+	{ \
+		.int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+		.tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+		.tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+		.pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
+
+#endif /* __NETXEN_NIC_HDR_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
new file mode 100644
index 000000000000..3f89e57cae50
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -0,0 +1,1976 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/slab.h>
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+#include <net/ip.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define MS_WIN(addr) (addr & 0x0ffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M (0x1e0000UL)
+
+static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
+		void __iomem *addr, u32 data);
+static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
+		void __iomem *addr);
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+	return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+	writel(((u32) (val)), (addr));
+	writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
+	((adapter)->ahw.pci_base0 + (off))
+#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
+	((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START)
+#define PCI_OFFSET_THIRD_RANGE(adapter, off) \
+	((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START)
+
+static void __iomem *pci_base_offset(struct netxen_adapter *adapter,
+		unsigned long off)
+{
+	if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
+		return PCI_OFFSET_FIRST_RANGE(adapter, off);
+
+	if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END))
+		return PCI_OFFSET_SECOND_RANGE(adapter, off);
+
+	if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END))
+		return PCI_OFFSET_THIRD_RANGE(adapter, off);
+
+	return NULL;
+}
+
+static crb_128M_2M_block_map_t
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+	{{{0, 0, 0, 0} } },		/* 0: PCI */
+	{{{1, 0x0100000, 0x0102000, 0x120000},	/* 1: PCIE */
+	{1, 0x0110000, 0x0120000, 0x130000},
+	{1, 0x0120000, 0x0122000, 0x124000},
+	{1, 0x0130000, 0x0132000, 0x126000},
+	{1, 0x0140000, 0x0142000, 0x128000},
+	{1, 0x0150000, 0x0152000, 0x12a000},
+	{1, 0x0160000, 0x0170000, 0x110000},
+	{1, 0x0170000, 0x0172000, 0x12e000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{1, 0x01e0000, 0x01e0800, 0x122000},
+	{0, 0x0000000, 0x0000000, 0x000000} } },
+	{{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+	{{{0, 0, 0, 0} } },	/* 3: */
+	{{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+	{{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+	{{{1, 0x0800000, 0x0802000, 0x170000},	/* 8: SQM0 */
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+
{0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x08f0000, 0x08f2000, 0x172000} } }, + {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x09f0000, 0x09f2000, 0x176000} } }, + {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0af0000, 0x0af2000, 0x17a000} } }, + {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, + {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ + {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ + {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ + {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ + {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ + {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ + {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ + {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ + {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ + {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ + {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ + {{{0, 0, 0, 0} } }, /* 23: */ + {{{0, 0, 0, 0} } }, /* 24: */ + {{{0, 0, 0, 0} } }, /* 25: */ + {{{0, 0, 0, 0} } }, /* 26: */ + {{{0, 0, 0, 0} } }, /* 27: */ + {{{0, 0, 0, 0} } }, /* 28: */ + {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ + {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ + {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ + {{{0} } }, /* 32: PCI */ + {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ + {1, 0x2110000, 0x2120000, 0x130000}, + {1, 0x2120000, 0x2122000, 0x124000}, + {1, 0x2130000, 0x2132000, 0x126000}, + {1, 
0x2140000, 0x2142000, 0x128000}, + {1, 0x2150000, 0x2152000, 0x12a000}, + {1, 0x2160000, 0x2170000, 0x110000}, + {1, 0x2170000, 0x2172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ + {{{0} } }, /* 35: */ + {{{0} } }, /* 36: */ + {{{0} } }, /* 37: */ + {{{0} } }, /* 38: */ + {{{0} } }, /* 39: */ + {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ + {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ + {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ + {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ + {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ + {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ + {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ + {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ + {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ + {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ + {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ + {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ + {{{0} } }, /* 52: */ + {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ + {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ + {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ + {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ + {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ + {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ + {{{0} } }, /* 59: I2C0 */ + {{{0} } }, /* 60: I2C1 */ + {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ + {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ + {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ +}; + +/* + * top 12 bits of crb internal address (hub, agent) + */ +static unsigned crb_hub_agt[64] = +{ + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_PS, + NETXEN_HW_CRB_HUB_AGT_ADR_MN, + NETXEN_HW_CRB_HUB_AGT_ADR_MS, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_SRE, + NETXEN_HW_CRB_HUB_AGT_ADR_NIU, + NETXEN_HW_CRB_HUB_AGT_ADR_QMN, + NETXEN_HW_CRB_HUB_AGT_ADR_SQN0, + NETXEN_HW_CRB_HUB_AGT_ADR_SQN1, + NETXEN_HW_CRB_HUB_AGT_ADR_SQN2, + NETXEN_HW_CRB_HUB_AGT_ADR_SQN3, + NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, + NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, + NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN4, + NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN0, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN1, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN2, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN3, + NETXEN_HW_CRB_HUB_AGT_ADR_PGND, + NETXEN_HW_CRB_HUB_AGT_ADR_PGNI, + NETXEN_HW_CRB_HUB_AGT_ADR_PGS0, + NETXEN_HW_CRB_HUB_AGT_ADR_PGS1, + NETXEN_HW_CRB_HUB_AGT_ADR_PGS2, + NETXEN_HW_CRB_HUB_AGT_ADR_PGS3, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_PGSI, + NETXEN_HW_CRB_HUB_AGT_ADR_SN, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_EG, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_PS, + NETXEN_HW_CRB_HUB_AGT_ADR_CAM, + 0, + 0, + 0, + 0, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7, + NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, + NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, + NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, + 0, + 
NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9, + NETXEN_HW_CRB_HUB_AGT_ADR_OCM0, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_SMB, + NETXEN_HW_CRB_HUB_AGT_ADR_I2C0, + NETXEN_HW_CRB_HUB_AGT_ADR_I2C1, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_PGNC, + 0, +}; + +/* PCI Windowing for DDR regions. */ + +#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ + +#define NETXEN_PCIE_SEM_TIMEOUT 10000 + +static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); + +int +netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg) +{ + int done = 0, timeout = 0; + + while (!done) { + done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_LOCK(sem))); + if (done == 1) + break; + if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT) + return -EIO; + msleep(1); + } + + if (id_reg) + NXWR32(adapter, id_reg, adapter->portnum); + + return 0; +} + +void +netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem) +{ + NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); +} + +static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) +{ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447); + NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5); + } + + return 0; +} + +/* Disable an XG interface */ +static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) +{ + __u32 mac_cfg; + u32 port = adapter->physical_port; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return 0; + + if (port > NETXEN_NIU_MAX_XG_PORTS) + return -EINVAL; + + mac_cfg = 0; + if (NXWR32(adapter, + NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg)) + return -EIO; + return 0; +} + +#define NETXEN_UNICAST_ADDR(port, index) \ + (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8)) +#define NETXEN_MCAST_ADDR(port, index) \ + (NETXEN_MULTICAST_ADDR_BASE+(port*0x80)+(index*8)) +#define MAC_HI(addr) \ + ((addr[2] << 16) | (addr[1] << 8) | (addr[0])) +#define MAC_LO(addr) \ + ((addr[5] << 16) | (addr[4] << 8) | (addr[3])) + +static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) +{ + u32 mac_cfg; + u32 cnt = 0; + __u32 reg = 0x0200; + u32 port = adapter->physical_port; + u16 board_type = adapter->ahw.board_type; + + if (port > NETXEN_NIU_MAX_XG_PORTS) + return -EINVAL; + + mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port)); + mac_cfg &= ~0x4; + NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); + + if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) || + (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ)) + reg = (0x20 << port); + + NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg); + + mdelay(10); + + while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20) + mdelay(10); + + if (cnt < 20) { + + reg = NXRD32(adapter, + NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port)); + + if (mode == NETXEN_NIU_PROMISC_MODE) + reg = (reg | 0x2000UL); + else + reg = (reg & ~0x2000UL); + + if (mode == NETXEN_NIU_ALLMULTI_MODE) + reg = (reg | 0x1000UL); + else + reg = (reg & ~0x1000UL); + + NXWR32(adapter, + NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg); + } + + mac_cfg |= 0x4; + NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); + + return 0; +} + +static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) +{ + u32 mac_hi, mac_lo; + u32 reg_hi, reg_lo; + + u8 phy = adapter->physical_port; + + if (phy >= NETXEN_NIU_MAX_XG_PORTS) + return -EINVAL; + + mac_lo = ((u32)addr[0] << 16) | 
((u32)addr[1] << 24); + mac_hi = addr[2] | ((u32)addr[3] << 8) | + ((u32)addr[4] << 16) | ((u32)addr[5] << 24); + + reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy); + reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy); + + /* write twice to flush */ + if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) + return -EIO; + if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) + return -EIO; + + return 0; +} + +static int +netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter) +{ + u32 val = 0; + u16 port = adapter->physical_port; + u8 *addr = adapter->mac_addr; + + if (adapter->mc_enabled) + return 0; + + val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); + val |= (1UL << (28+port)); + NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); + + /* add broadcast addr to filter */ + val = 0xffffff; + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); + + /* add station addr to filter */ + val = MAC_HI(addr); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val); + val = MAC_LO(addr); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val); + + adapter->mc_enabled = 1; + return 0; +} + +static int +netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter) +{ + u32 val = 0; + u16 port = adapter->physical_port; + u8 *addr = adapter->mac_addr; + + if (!adapter->mc_enabled) + return 0; + + val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); + val &= ~(1UL << (28+port)); + NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); + + val = MAC_HI(addr); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); + val = MAC_LO(addr); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); + + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0); + + adapter->mc_enabled = 0; + return 0; +} + +static int +netxen_nic_set_mcast_addr(struct netxen_adapter *adapter, + int index, u8 *addr) +{ + u32 hi = 0, lo = 0; + u16 port = adapter->physical_port; + + lo = MAC_LO(addr); + hi = MAC_HI(addr); + + NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi); + NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo); + + return 0; +} + +static void netxen_p2_nic_set_multi(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct netdev_hw_addr *ha; + u8 null_addr[6]; + int i; + + memset(null_addr, 0, 6); + + if (netdev->flags & IFF_PROMISC) { + + adapter->set_promisc(adapter, + NETXEN_NIU_PROMISC_MODE); + + /* Full promiscuous mode */ + netxen_nic_disable_mcast_filter(adapter); + + return; + } + + if (netdev_mc_empty(netdev)) { + adapter->set_promisc(adapter, + NETXEN_NIU_NON_PROMISC_MODE); + netxen_nic_disable_mcast_filter(adapter); + return; + } + + adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE); + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > adapter->max_mc_count) { + netxen_nic_disable_mcast_filter(adapter); + return; + } + + netxen_nic_enable_mcast_filter(adapter); + + i = 0; + netdev_for_each_mc_addr(ha, netdev) + netxen_nic_set_mcast_addr(adapter, i++, ha->addr); + + /* Clear out remaining addresses */ + while (i < adapter->max_mc_count) + netxen_nic_set_mcast_addr(adapter, i++, null_addr); +} + +static int +netxen_send_cmd_descs(struct netxen_adapter *adapter, + struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) +{ + u32 i, producer, consumer; + struct netxen_cmd_buffer *pbuf; + struct cmd_desc_type0 *cmd_desc; + struct nx_host_tx_ring *tx_ring; + + i = 0; + + if (adapter->is_up != 
NETXEN_ADAPTER_UP_MAGIC) + return -EIO; + + tx_ring = adapter->tx_ring; + __netif_tx_lock_bh(tx_ring->txq); + + producer = tx_ring->producer; + consumer = tx_ring->sw_consumer; + + if (nr_desc >= netxen_tx_avail(tx_ring)) { + netif_tx_stop_queue(tx_ring->txq); + smp_mb(); + if (netxen_tx_avail(tx_ring) > nr_desc) { + if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_tx_wake_queue(tx_ring->txq); + } else { + __netif_tx_unlock_bh(tx_ring->txq); + return -EBUSY; + } + } + + do { + cmd_desc = &cmd_desc_arr[i]; + + pbuf = &tx_ring->cmd_buf_arr[producer]; + pbuf->skb = NULL; + pbuf->frag_count = 0; + + memcpy(&tx_ring->desc_head[producer], + &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); + + producer = get_next_index(producer, tx_ring->num_desc); + i++; + + } while (i != nr_desc); + + tx_ring->producer = producer; + + netxen_nic_update_cmd_producer(adapter, tx_ring); + + __netif_tx_unlock_bh(tx_ring->txq); + + return 0; +} + +static int +nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op) +{ + nx_nic_req_t req; + nx_mac_req_t *mac_req; + u64 word; + + memset(&req, 0, sizeof(nx_nic_req_t)); + req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23); + + word = NX_MAC_EVENT | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + mac_req = (nx_mac_req_t *)&req.words[0]; + mac_req->op = op; + memcpy(mac_req->mac_addr, addr, 6); + + return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); +} + +static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, + const u8 *addr, struct list_head *del_list) +{ + struct list_head *head; + nx_mac_list_t *cur; + + /* look up if already exists */ + list_for_each(head, del_list) { + cur = list_entry(head, nx_mac_list_t, list); + + if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) { + list_move_tail(head, &adapter->mac_list); + return 0; + } + } + + cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC); + if (cur == NULL) { + printk(KERN_ERR "%s: failed to add mac address filter\n", + adapter->netdev->name); + return -ENOMEM; + } + memcpy(cur->mac_addr, addr, ETH_ALEN); + list_add_tail(&cur->list, &adapter->mac_list); + return nx_p3_sre_macaddr_change(adapter, + cur->mac_addr, NETXEN_MAC_ADD); +} + +static void netxen_p3_nic_set_multi(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct netdev_hw_addr *ha; + static const u8 bcast_addr[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + u32 mode = VPORT_MISS_MODE_DROP; + LIST_HEAD(del_list); + struct list_head *head; + nx_mac_list_t *cur; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return; + + list_splice_tail_init(&adapter->mac_list, &del_list); + + nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list); + nx_p3_nic_add_mac(adapter, bcast_addr, &del_list); + + if (netdev->flags & IFF_PROMISC) { + mode = VPORT_MISS_MODE_ACCEPT_ALL; + goto send_fw_cmd; + } + + if ((netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > adapter->max_mc_count)) { + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + goto send_fw_cmd; + } + + if (!netdev_mc_empty(netdev)) { + netdev_for_each_mc_addr(ha, netdev) + nx_p3_nic_add_mac(adapter, ha->addr, &del_list); + } + +send_fw_cmd: + adapter->set_promisc(adapter, mode); + head = &del_list; + while (!list_empty(head)) { + cur = list_entry(head->next, nx_mac_list_t, list); + + nx_p3_sre_macaddr_change(adapter, + cur->mac_addr, NETXEN_MAC_DEL); + list_del(&cur->list); + kfree(cur); + } +} + +static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) +{ + nx_nic_req_t 
req; + u64 word; + + memset(&req, 0, sizeof(nx_nic_req_t)); + + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE | + ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(mode); + + return netxen_send_cmd_descs(adapter, + (struct cmd_desc_type0 *)&req, 1); +} + +void netxen_p3_free_mac_list(struct netxen_adapter *adapter) +{ + nx_mac_list_t *cur; + struct list_head *head = &adapter->mac_list; + + while (!list_empty(head)) { + cur = list_entry(head->next, nx_mac_list_t, list); + nx_p3_sre_macaddr_change(adapter, + cur->mac_addr, NETXEN_MAC_DEL); + list_del(&cur->list); + kfree(cur); + } +} + +static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) +{ + /* assuming caller has already copied new addr to netdev */ + netxen_p3_nic_set_multi(adapter->netdev); + return 0; +} + +#define NETXEN_CONFIG_INTR_COALESCE 3 + +/* + * Send the interrupt coalescing parameter set by ethtool to the card. + */ +int netxen_config_intr_coalesce(struct netxen_adapter *adapter) +{ + nx_nic_req_t req; + u64 word[6]; + int rv, i; + + memset(&req, 0, sizeof(nx_nic_req_t)); + memset(word, 0, sizeof(word)); + + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word[0]); + + memcpy(&word[0], &adapter->coal, sizeof(adapter->coal)); + for (i = 0; i < 6; i++) + req.words[i] = cpu_to_le64(word[i]); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "ERROR. Could not send " + "interrupt coalescing parameters\n"); + } + + return rv; +} + +int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable) +{ + nx_nic_req_t req; + u64 word; + int rv = 0; + + if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) + return 0; + + memset(&req, 0, sizeof(nx_nic_req_t)); + + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(enable); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "ERROR. Could not send " + "configure hw lro request\n"); + } + + return rv; +} + +int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable) +{ + nx_nic_req_t req; + u64 word; + int rv = 0; + + if (!!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED) == enable) + return rv; + + memset(&req, 0, sizeof(nx_nic_req_t)); + + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_CONFIG_BRIDGING | + ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(enable); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "ERROR. 
Could not send "
+				"configure bridge mode request\n");
+	}
+
+	adapter->flags ^= NETXEN_NIC_BRIDGE_ENABLED;
+
+	return rv;
+}
+
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+int netxen_config_rss(struct netxen_adapter *adapter, int enable)
+{
+	nx_nic_req_t req;
+	u64 word;
+	int i, rv;
+
+	static const u64 key[] = {
+		0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+		0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+		0x255b0ec26d5a56daULL
+	};
+
+
+	memset(&req, 0, sizeof(nx_nic_req_t));
+	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+	word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
+
+	/*
+	 * RSS request:
+	 * bits 3-0: hash_method
+	 *      5-4: hash_type_ipv4
+	 *      7-6: hash_type_ipv6
+	 *        8: enable
+	 *        9: use indirection table
+	 *    47-10: reserved
+	 *    63-48: indirection table mask
+	 */
+	word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+		((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+		((u64)(enable & 0x1) << 8) |
+		((0x7ULL) << 48);
+	req.words[0] = cpu_to_le64(word);
+	for (i = 0; i < ARRAY_SIZE(key); i++)
+		req.words[i+1] = cpu_to_le64(key[i]);
+
+
+	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+	if (rv != 0) {
+		printk(KERN_ERR "%s: could not configure RSS\n",
+				adapter->netdev->name);
+	}
+
+	return rv;
+}
+
+int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd)
+{
+	nx_nic_req_t req;
+	u64 word;
+	int rv;
+
+	memset(&req, 0, sizeof(nx_nic_req_t));
+	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+	word = NX_NIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
+
+	req.words[0] = cpu_to_le64(cmd);
+	req.words[1] = cpu_to_le64(ip);
+
+	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+	if (rv != 0) {
+		printk(KERN_ERR "%s: could not notify %s IP 0x%x request\n",
+				adapter->netdev->name,
+				(cmd == NX_IP_UP) ?
"Add" : "Remove", ip); + } + return rv; +} + +int netxen_linkevent_request(struct netxen_adapter *adapter, int enable) +{ + nx_nic_req_t req; + u64 word; + int rv; + + memset(&req, 0, sizeof(nx_nic_req_t)); + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + req.words[0] = cpu_to_le64(enable | (enable << 8)); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "%s: could not configure link notification\n", + adapter->netdev->name); + } + + return rv; +} + +int netxen_send_lro_cleanup(struct netxen_adapter *adapter) +{ + nx_nic_req_t req; + u64 word; + int rv; + + if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) + return 0; + + memset(&req, 0, sizeof(nx_nic_req_t)); + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_LRO_REQUEST | + ((u64)adapter->portnum << 16) | + ((u64)NX_NIC_LRO_REQUEST_CLEANUP << 56) ; + + req.req_hdr = cpu_to_le64(word); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "%s: could not cleanup lro flows\n", + adapter->netdev->name); + } + return rv; +} + +/* + * netxen_nic_change_mtu - Change the Maximum Transfer Unit + * @returns 0 on success, negative on failure + */ + +#define MTU_FUDGE_FACTOR 100 + +int netxen_nic_change_mtu(struct net_device *netdev, int mtu) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + int max_mtu; + int rc = 0; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + max_mtu = P3_MAX_MTU; + else + max_mtu = P2_MAX_MTU; + + if (mtu > max_mtu) { + printk(KERN_ERR "%s: mtu > %d bytes unsupported\n", + netdev->name, max_mtu); + return -EINVAL; + } + + if (adapter->set_mtu) + rc = adapter->set_mtu(adapter, mtu); + + if (!rc) + netdev->mtu = mtu; + + return rc; +} + +static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, + int size, __le32 * buf) +{ + int i, v, addr; + __le32 *ptr32; + + addr = base; + ptr32 = buf; + for (i = 0; i < size / sizeof(u32); i++) { + if (netxen_rom_fast_read(adapter, addr, &v) == -1) + return -1; + *ptr32 = cpu_to_le32(v); + ptr32++; + addr += sizeof(u32); + } + if ((char *)buf + size > (char *)ptr32) { + __le32 local; + if (netxen_rom_fast_read(adapter, addr, &v) == -1) + return -1; + local = cpu_to_le32(v); + memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); + } + + return 0; +} + +int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac) +{ + __le32 *pmac = (__le32 *) mac; + u32 offset; + + offset = NX_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64)); + + if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) + return -1; + + if (*mac == cpu_to_le64(~0ULL)) { + + offset = NX_OLD_MAC_ADDR_OFFSET + + (adapter->portnum * sizeof(u64)); + + if (netxen_get_flash_block(adapter, + offset, sizeof(u64), pmac) == -1) + return -1; + + if (*mac == cpu_to_le64(~0ULL)) + return -1; + } + return 0; +} + +int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac) +{ + uint32_t crbaddr, mac_hi, mac_lo; + int pci_func = adapter->ahw.pci_func; + + crbaddr = CRB_MAC_BLOCK_START + + (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); + + mac_lo = NXRD32(adapter, crbaddr); + mac_hi = NXRD32(adapter, crbaddr+4); + + if (pci_func & 1) + *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16)); + else + *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32)); + + return 0; +} + +/* + * Changes the CRB window to the 
specified window. + */ +static void +netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter, + u32 window) +{ + void __iomem *offset; + int count = 10; + u8 func = adapter->ahw.pci_func; + + if (adapter->ahw.crb_win == window) + return; + + offset = PCI_OFFSET_SECOND_RANGE(adapter, + NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func))); + + writel(window, offset); + do { + if (window == readl(offset)) + break; + + if (printk_ratelimit()) + dev_warn(&adapter->pdev->dev, + "failed to set CRB window to %d\n", + (window == NETXEN_WINDOW_ONE)); + udelay(1); + + } while (--count > 0); + + if (count > 0) + adapter->ahw.crb_win = window; +} + +/* + * Returns < 0 if off is not valid, + * 1 if window access is needed. 'off' is set to offset from + * CRB space in 128M pci map + * 0 if no window access is needed. 'off' is set to 2M addr + * In: 'off' is offset from base in 128M pci map + */ +static int +netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, + ulong off, void __iomem **addr) +{ + crb_128M_2M_sub_block_map_t *m; + + + if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE)) + return -EINVAL; + + off -= NETXEN_PCI_CRBSPACE; + + /* + * Try direct map + */ + m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; + + if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { + *addr = adapter->ahw.pci_base0 + m->start_2M + + (off - m->start_128M); + return 0; + } + + /* + * Not in direct map, use crb window + */ + *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + + (off & MASK(16)); + return 1; +} + +/* + * In: 'off' is offset from CRB space in 128M pci map + * Out: 'off' is 2M pci map addr + * side effect: lock crb window + */ +static void +netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off) +{ + u32 window; + void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M; + + off -= NETXEN_PCI_CRBSPACE; + + window = CRB_HI(off); + + writel(window, addr); + if (readl(addr) != window) { + if (printk_ratelimit()) + dev_warn(&adapter->pdev->dev, + "failed to set CRB window to %d off 0x%lx\n", + window, off); + } +} + +static void __iomem * +netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter, + ulong win_off, void __iomem **mem_ptr) +{ + ulong off = win_off; + void __iomem *addr; + resource_size_t mem_base; + + if (ADDR_IN_WINDOW1(win_off)) + off = NETXEN_CRB_NORMAL(win_off); + + addr = pci_base_offset(adapter, off); + if (addr) + return addr; + + if (adapter->ahw.pci_len0 == 0) + off -= NETXEN_PCI_CRBSPACE; + + mem_base = pci_resource_start(adapter->pdev, 0); + *mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE); + if (*mem_ptr) + addr = *mem_ptr + (off & (PAGE_SIZE - 1)); + + return addr; +} + +static int +netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data) +{ + unsigned long flags; + void __iomem *addr, *mem_ptr = NULL; + + addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); + if (!addr) + return -EIO; + + if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ + netxen_nic_io_write_128M(adapter, addr, data); + } else { /* Window 0 */ + write_lock_irqsave(&adapter->ahw.crb_lock, flags); + netxen_nic_pci_set_crbwindow_128M(adapter, 0); + writel(data, addr); + netxen_nic_pci_set_crbwindow_128M(adapter, + NETXEN_WINDOW_ONE); + write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); + } + + if (mem_ptr) + iounmap(mem_ptr); + + return 0; +} + +static u32 +netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off) +{ + unsigned long flags; + void __iomem *addr, *mem_ptr = NULL; + 
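+ /*
+  * Read side of the 128M indirect access path; it mirrors
+  * netxen_nic_hw_write_wx_128M() above: window-1 offsets are read
+  * under the CRB read lock, window-0 offsets require switching the
+  * CRB window under the write lock first.
+  */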
u32 data; + + addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); + if (!addr) + return -EIO; + + if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ + data = netxen_nic_io_read_128M(adapter, addr); + } else { /* Window 0 */ + write_lock_irqsave(&adapter->ahw.crb_lock, flags); + netxen_nic_pci_set_crbwindow_128M(adapter, 0); + data = readl(addr); + netxen_nic_pci_set_crbwindow_128M(adapter, + NETXEN_WINDOW_ONE); + write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); + } + + if (mem_ptr) + iounmap(mem_ptr); + + return data; +} + +static int +netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data) +{ + unsigned long flags; + int rv; + void __iomem *addr = NULL; + + rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); + + if (rv == 0) { + writel(data, addr); + return 0; + } + + if (rv > 0) { + /* indirect access */ + write_lock_irqsave(&adapter->ahw.crb_lock, flags); + crb_win_lock(adapter); + netxen_nic_pci_set_crbwindow_2M(adapter, off); + writel(data, addr); + crb_win_unlock(adapter); + write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); + return 0; + } + + dev_err(&adapter->pdev->dev, + "%s: invalid offset: 0x%016lx\n", __func__, off); + dump_stack(); + return -EIO; +} + +static u32 +netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off) +{ + unsigned long flags; + int rv; + u32 data; + void __iomem *addr = NULL; + + rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); + + if (rv == 0) + return readl(addr); + + if (rv > 0) { + /* indirect access */ + write_lock_irqsave(&adapter->ahw.crb_lock, flags); + crb_win_lock(adapter); + netxen_nic_pci_set_crbwindow_2M(adapter, off); + data = readl(addr); + crb_win_unlock(adapter); + write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); + return data; + } + + dev_err(&adapter->pdev->dev, + "%s: invalid offset: 0x%016lx\n", __func__, off); + dump_stack(); + return -1; +} + +/* window 1 registers only */ +static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, + void __iomem *addr, u32 data) +{ + read_lock(&adapter->ahw.crb_lock); + writel(data, addr); + read_unlock(&adapter->ahw.crb_lock); +} + +static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, + void __iomem *addr) +{ + u32 val; + + read_lock(&adapter->ahw.crb_lock); + val = readl(addr); + read_unlock(&adapter->ahw.crb_lock); + + return val; +} + +static void netxen_nic_io_write_2M(struct netxen_adapter *adapter, + void __iomem *addr, u32 data) +{ + writel(data, addr); +} + +static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter, + void __iomem *addr) +{ + return readl(addr); +} + +void __iomem * +netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset) +{ + void __iomem *addr = NULL; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if ((offset < NETXEN_CRB_PCIX_HOST2) && + (offset > NETXEN_CRB_PCIX_HOST)) + addr = PCI_OFFSET_SECOND_RANGE(adapter, offset); + else + addr = NETXEN_CRB_NORMALIZE(adapter, offset); + } else { + WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter, + offset, &addr)); + } + + return addr; +} + +static int +netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter, + u64 addr, u32 *start) +{ + if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { + *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0); + return 0; + } else if (ADDR_IN_RANGE(addr, + NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { + *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1); + return 0; + } + + return -EIO; +} + +static int +netxen_nic_pci_set_window_2M(struct netxen_adapter 
*adapter, + u64 addr, u32 *start) +{ + u32 window; + + window = OCM_WIN(addr); + + writel(window, adapter->ahw.ocm_win_crb); + /* read back to flush */ + readl(adapter->ahw.ocm_win_crb); + + adapter->ahw.ocm_win = window; + *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); + return 0; +} + +static int +netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off, + u64 *data, int op) +{ + void __iomem *addr, *mem_ptr = NULL; + resource_size_t mem_base; + int ret; + u32 start; + + spin_lock(&adapter->ahw.mem_lock); + + ret = adapter->pci_set_window(adapter, off, &start); + if (ret != 0) + goto unlock; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + addr = adapter->ahw.pci_base0 + start; + } else { + addr = pci_base_offset(adapter, start); + if (addr) + goto noremap; + + mem_base = pci_resource_start(adapter->pdev, 0) + + (start & PAGE_MASK); + mem_ptr = ioremap(mem_base, PAGE_SIZE); + if (mem_ptr == NULL) { + ret = -EIO; + goto unlock; + } + + addr = mem_ptr + (start & (PAGE_SIZE-1)); + } +noremap: + if (op == 0) /* read */ + *data = readq(addr); + else /* write */ + writeq(*data, addr); + +unlock: + spin_unlock(&adapter->ahw.mem_lock); + + if (mem_ptr) + iounmap(mem_ptr); + return ret; +} + +void +netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data) +{ + void __iomem *addr = adapter->ahw.pci_base0 + + NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); + + spin_lock(&adapter->ahw.mem_lock); + *data = readq(addr); + spin_unlock(&adapter->ahw.mem_lock); +} + +void +netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data) +{ + void __iomem *addr = adapter->ahw.pci_base0 + + NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); + + spin_lock(&adapter->ahw.mem_lock); + writeq(data, addr); + spin_unlock(&adapter->ahw.mem_lock); +} + +#define MAX_CTL_CHECK 1000 + +static int +netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter, + u64 off, u64 data) +{ + int j, ret; + u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P2 has different SIU and MIU test agent base addr */ + if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, + NETXEN_ADDR_QDR_NET_MAX_P2)) { + mem_crb = pci_base_offset(adapter, + NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); + addr_hi = SIU_TEST_AGT_ADDR_HI; + data_lo = SIU_TEST_AGT_WRDATA_LO; + data_hi = SIU_TEST_AGT_WRDATA_HI; + off_lo = off & SIU_TEST_AGT_ADDR_MASK; + off_hi = SIU_TEST_AGT_UPPER_ADDR(off); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { + mem_crb = pci_base_offset(adapter, + NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); + addr_hi = MIU_TEST_AGT_ADDR_HI; + data_lo = MIU_TEST_AGT_WRDATA_LO; + data_hi = MIU_TEST_AGT_WRDATA_HI; + off_lo = off & MIU_TEST_AGT_ADDR_MASK; + off_hi = 0; + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || + ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { + if (adapter->ahw.pci_len0 != 0) { + return netxen_nic_pci_mem_access_direct(adapter, + off, &data, 1); + } + } + + return -EIO; + +correct: + spin_lock(&adapter->ahw.mem_lock); + netxen_nic_pci_set_crbwindow_128M(adapter, 0); + + writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(off_hi, (mem_crb + addr_hi)); + writel(data & 0xffffffff, (mem_crb + data_lo)); + writel((data >> 32) & 0xffffffff, (mem_crb + data_hi)); + writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE | 
TA_CTL_WRITE), + (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl((mem_crb + TEST_AGT_CTRL)); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to write through agent\n"); + ret = -EIO; + } else + ret = 0; + + netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); + spin_unlock(&adapter->ahw.mem_lock); + return ret; +} + +static int +netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter, + u64 off, u64 *data) +{ + int j, ret; + u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; + u64 val; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P2 has different SIU and MIU test agent base addr */ + if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, + NETXEN_ADDR_QDR_NET_MAX_P2)) { + mem_crb = pci_base_offset(adapter, + NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); + addr_hi = SIU_TEST_AGT_ADDR_HI; + data_lo = SIU_TEST_AGT_RDDATA_LO; + data_hi = SIU_TEST_AGT_RDDATA_HI; + off_lo = off & SIU_TEST_AGT_ADDR_MASK; + off_hi = SIU_TEST_AGT_UPPER_ADDR(off); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { + mem_crb = pci_base_offset(adapter, + NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); + addr_hi = MIU_TEST_AGT_ADDR_HI; + data_lo = MIU_TEST_AGT_RDDATA_LO; + data_hi = MIU_TEST_AGT_RDDATA_HI; + off_lo = off & MIU_TEST_AGT_ADDR_MASK; + off_hi = 0; + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || + ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { + if (adapter->ahw.pci_len0 != 0) { + return netxen_nic_pci_mem_access_direct(adapter, + off, data, 0); + } + } + + return -EIO; + +correct: + spin_lock(&adapter->ahw.mem_lock); + netxen_nic_pci_set_crbwindow_128M(adapter, 0); + + writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(off_hi, (mem_crb + addr_hi)); + writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to read through agent\n"); + ret = -EIO; + } else { + + temp = readl(mem_crb + data_hi); + val = ((u64)temp << 32); + val |= readl(mem_crb + data_lo); + *data = val; + ret = 0; + } + + netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); + spin_unlock(&adapter->ahw.mem_lock); + + return ret; +} + +static int +netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, + u64 off, u64 data) +{ + int j, ret; + u32 temp, off8; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P3 onward, test agent base for MIU and SIU is same */ + if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, + NETXEN_ADDR_QDR_NET_MAX_P3)) { + mem_crb = netxen_get_ioaddr(adapter, + NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { + mem_crb = netxen_get_ioaddr(adapter, + NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) + return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1); + + return -EIO; + +correct: + off8 = off & 0xfffffff8; + + spin_lock(&adapter->ahw.mem_lock); + + writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(0, (mem_crb + 
MIU_TEST_AGT_ADDR_HI)); + + writel(data & 0xffffffff, + mem_crb + MIU_TEST_AGT_WRDATA_LO); + writel((data >> 32) & 0xffffffff, + mem_crb + MIU_TEST_AGT_WRDATA_HI); + + writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), + (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to write through agent\n"); + ret = -EIO; + } else + ret = 0; + + spin_unlock(&adapter->ahw.mem_lock); + + return ret; +} + +static int +netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter, + u64 off, u64 *data) +{ + int j, ret; + u32 temp, off8; + u64 val; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P3 onward, test agent base for MIU and SIU is same */ + if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, + NETXEN_ADDR_QDR_NET_MAX_P3)) { + mem_crb = netxen_get_ioaddr(adapter, + NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { + mem_crb = netxen_get_ioaddr(adapter, + NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { + return netxen_nic_pci_mem_access_direct(adapter, + off, data, 0); + } + + return -EIO; + +correct: + off8 = off & 0xfffffff8; + + spin_lock(&adapter->ahw.mem_lock); + + writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); + writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to read through agent\n"); + ret = -EIO; + } else { + val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32; + val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO); + *data = val; + ret = 0; + } + + spin_unlock(&adapter->ahw.mem_lock); + + return ret; +} + +void +netxen_setup_hwops(struct netxen_adapter *adapter) +{ + adapter->init_port = netxen_niu_xg_init_port; + adapter->stop_port = netxen_niu_disable_xg_port; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + adapter->crb_read = netxen_nic_hw_read_wx_128M, + adapter->crb_write = netxen_nic_hw_write_wx_128M, + adapter->pci_set_window = netxen_nic_pci_set_window_128M, + adapter->pci_mem_read = netxen_nic_pci_mem_read_128M, + adapter->pci_mem_write = netxen_nic_pci_mem_write_128M, + adapter->io_read = netxen_nic_io_read_128M, + adapter->io_write = netxen_nic_io_write_128M, + + adapter->macaddr_set = netxen_p2_nic_set_mac_addr; + adapter->set_multi = netxen_p2_nic_set_multi; + adapter->set_mtu = netxen_nic_set_mtu_xgb; + adapter->set_promisc = netxen_p2_nic_set_promisc; + + } else { + adapter->crb_read = netxen_nic_hw_read_wx_2M, + adapter->crb_write = netxen_nic_hw_write_wx_2M, + adapter->pci_set_window = netxen_nic_pci_set_window_2M, + adapter->pci_mem_read = netxen_nic_pci_mem_read_2M, + adapter->pci_mem_write = netxen_nic_pci_mem_write_2M, + adapter->io_read = netxen_nic_io_read_2M, + adapter->io_write = netxen_nic_io_write_2M, + + adapter->set_mtu = nx_fw_cmd_set_mtu; + adapter->set_promisc = netxen_p3_nic_set_promisc; + adapter->macaddr_set = 
netxen_p3_nic_set_mac_addr; + adapter->set_multi = netxen_p3_nic_set_multi; + + adapter->phy_read = nx_fw_cmd_query_phy; + adapter->phy_write = nx_fw_cmd_set_phy; + } +} + +int netxen_nic_get_board_info(struct netxen_adapter *adapter) +{ + int offset, board_type, magic; + struct pci_dev *pdev = adapter->pdev; + + offset = NX_FW_MAGIC_OFFSET; + if (netxen_rom_fast_read(adapter, offset, &magic)) + return -EIO; + + if (magic != NETXEN_BDINFO_MAGIC) { + dev_err(&pdev->dev, "invalid board config, magic=%08x\n", + magic); + return -EIO; + } + + offset = NX_BRDTYPE_OFFSET; + if (netxen_rom_fast_read(adapter, offset, &board_type)) + return -EIO; + + if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) { + u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I); + if ((gpio & 0x8000) == 0) + board_type = NETXEN_BRDTYPE_P3_10G_TP; + } + + adapter->ahw.board_type = board_type; + + switch (board_type) { + case NETXEN_BRDTYPE_P2_SB35_4G: + adapter->ahw.port_type = NETXEN_NIC_GBE; + break; + case NETXEN_BRDTYPE_P2_SB31_10G: + case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_CX4: + case NETXEN_BRDTYPE_P3_HMEZ: + case NETXEN_BRDTYPE_P3_XG_LOM: + case NETXEN_BRDTYPE_P3_10G_CX4: + case NETXEN_BRDTYPE_P3_10G_CX4_LP: + case NETXEN_BRDTYPE_P3_IMEZ: + case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: + case NETXEN_BRDTYPE_P3_10G_SFP_CT: + case NETXEN_BRDTYPE_P3_10G_SFP_QT: + case NETXEN_BRDTYPE_P3_10G_XFP: + case NETXEN_BRDTYPE_P3_10000_BASE_T: + adapter->ahw.port_type = NETXEN_NIC_XGBE; + break; + case NETXEN_BRDTYPE_P1_BD: + case NETXEN_BRDTYPE_P1_SB: + case NETXEN_BRDTYPE_P1_SMAX: + case NETXEN_BRDTYPE_P1_SOCK: + case NETXEN_BRDTYPE_P3_REF_QG: + case NETXEN_BRDTYPE_P3_4_GB: + case NETXEN_BRDTYPE_P3_4_GB_MM: + adapter->ahw.port_type = NETXEN_NIC_GBE; + break; + case NETXEN_BRDTYPE_P3_10G_TP: + adapter->ahw.port_type = (adapter->portnum < 2) ? 
+ NETXEN_NIC_XGBE : NETXEN_NIC_GBE; + break; + default: + dev_err(&pdev->dev, "unknown board type %x\n", board_type); + adapter->ahw.port_type = NETXEN_NIC_XGBE; + break; + } + + return 0; +} + +/* NIU access sections */ +static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) +{ + new_mtu += MTU_FUDGE_FACTOR; + if (adapter->physical_port == 0) + NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu); + else + NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu); + return 0; +} + +void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) +{ + __u32 status; + __u32 autoneg; + __u32 port_mode; + + if (!netif_carrier_ok(adapter->netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = -1; + adapter->link_autoneg = AUTONEG_ENABLE; + return; + } + + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); + if (port_mode == NETXEN_PORT_MODE_802_3_AP) { + adapter->link_speed = SPEED_1000; + adapter->link_duplex = DUPLEX_FULL; + adapter->link_autoneg = AUTONEG_DISABLE; + return; + } + + if (adapter->phy_read && + adapter->phy_read(adapter, + NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, + &status) == 0) { + if (netxen_get_phy_link(status)) { + switch (netxen_get_phy_speed(status)) { + case 0: + adapter->link_speed = SPEED_10; + break; + case 1: + adapter->link_speed = SPEED_100; + break; + case 2: + adapter->link_speed = SPEED_1000; + break; + default: + adapter->link_speed = 0; + break; + } + switch (netxen_get_phy_duplex(status)) { + case 0: + adapter->link_duplex = DUPLEX_HALF; + break; + case 1: + adapter->link_duplex = DUPLEX_FULL; + break; + default: + adapter->link_duplex = -1; + break; + } + if (adapter->phy_read && + adapter->phy_read(adapter, + NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, + &autoneg) != 0) + adapter->link_autoneg = autoneg; + } else + goto link_down; + } else { + link_down: + adapter->link_speed = 0; + adapter->link_duplex = -1; + } + } +} + +int +netxen_nic_wol_supported(struct netxen_adapter *adapter) +{ + u32 wol_cfg; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); + if (wol_cfg & (1UL << adapter->portnum)) { + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); + if (wol_cfg & (1 << adapter->portnum)) + return 1; + } + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h new file mode 100644 index 000000000000..e2c5b6f2df03 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h @@ -0,0 +1,287 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. 
+ * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". + * + */ + +#ifndef __NETXEN_NIC_HW_H_ +#define __NETXEN_NIC_HW_H_ + +/* Hardware memory size of 128 meg */ +#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024) + +struct netxen_adapter; + +#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20) + +void netxen_nic_set_link_parameters(struct netxen_adapter *adapter); + +/* Nibble or Byte mode for phy interface (GbE mode only) */ + +#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1) + +/* + * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3) + * + * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable + * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream + * Bit 2 : enable_rx => 1:enable frame recv, 0:disable + * Bit 3 : rx_synced => R/O: recv enable synched to recv stream + * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable + * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore + * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal + * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op + * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op + * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op + * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op + * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op + */ + +#define netxen_gb_tx_flowctl(config_word) \ + ((config_word) |= 1 << 4) +#define netxen_gb_rx_flowctl(config_word) \ + ((config_word) |= 1 << 5) +#define netxen_gb_tx_reset_pb(config_word) \ + ((config_word) |= 1 << 16) +#define netxen_gb_rx_reset_pb(config_word) \ + ((config_word) |= 1 << 17) +#define netxen_gb_tx_reset_mac(config_word) \ + ((config_word) |= 1 << 18) +#define netxen_gb_rx_reset_mac(config_word) \ + ((config_word) |= 1 << 19) + +#define netxen_gb_unset_tx_flowctl(config_word) \ + ((config_word) &= ~(1 << 4)) +#define netxen_gb_unset_rx_flowctl(config_word) \ + ((config_word) &= ~(1 << 5)) + +#define netxen_gb_get_tx_synced(config_word) \ + _netxen_crb_get_bit((config_word), 1) +#define netxen_gb_get_rx_synced(config_word) \ + _netxen_crb_get_bit((config_word), 3) +#define netxen_gb_get_tx_flowctl(config_word) \ + _netxen_crb_get_bit((config_word), 4) +#define netxen_gb_get_rx_flowctl(config_word) \ + _netxen_crb_get_bit((config_word), 5) +#define netxen_gb_get_soft_reset(config_word) \ + _netxen_crb_get_bit((config_word), 31) + +#define netxen_gb_get_stationaddress_low(config_word) ((config_word) >> 16) + +#define netxen_gb_set_mii_mgmt_clockselect(config_word, val) \ + ((config_word) |= ((val) & 0x07)) +#define netxen_gb_mii_mgmt_reset(config_word) \ + ((config_word) |= 1 << 31) +#define netxen_gb_mii_mgmt_unset(config_word) \ + ((config_word) &= ~(1 << 31)) + +/* + * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3) + * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op + * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op + */ + +#define netxen_gb_mii_mgmt_set_read_cycle(config_word) \ + ((config_word) |= 1 << 0) +#define netxen_gb_mii_mgmt_reg_addr(config_word, val) \ + ((config_word) |= ((val) & 0x1F)) +#define netxen_gb_mii_mgmt_phy_addr(config_word, val) \ + ((config_word) |= (((val) & 0x1F) << 8)) + +/* + * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3) + * Read-only register. 
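+ * Typical use (a sketch, not a documented sequence): compose a command
+ * word with netxen_gb_mii_mgmt_set_read_cycle()/_reg_addr()/_phy_addr()
+ * above, write it out, then poll busy/notvalid here until the read
+ * result becomes valid.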
+ * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle
+ * Bit 1 : scanning => 1:scan operation in progress, 0:idle
+ * Bit 2 : notvalid => 1:mgmt result data not yet valid, 0:idle
+ */
+#define netxen_get_gb_mii_mgmt_busy(config_word) \
+ _netxen_crb_get_bit(config_word, 0)
+#define netxen_get_gb_mii_mgmt_scanning(config_word) \
+ _netxen_crb_get_bit(config_word, 1)
+#define netxen_get_gb_mii_mgmt_notvalid(config_word) \
+ _netxen_crb_get_bit(config_word, 2)
+/*
+ * NIU XG Pause Ctl Register
+ *
+ * Bit 0 : xg0_mask => 1:disable tx pause frames
+ * Bit 1 : xg0_request => 1:request single pause frame
+ * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
+ * Bit 3 : xg1_mask => 1:disable tx pause frames
+ * Bit 4 : xg1_request => 1:request single pause frame
+ * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define netxen_xg_set_xg0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_xg_set_xg1_mask(config_word) \
+ ((config_word) |= 1 << 3)
+
+#define netxen_xg_get_xg0_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 0)
+#define netxen_xg_get_xg1_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 3)
+
+#define netxen_xg_unset_xg0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define netxen_xg_unset_xg1_mask(config_word) \
+ ((config_word) &= ~(1 << 3))
+
+/*
+ * NIU GB Pause Ctl Register (mask bits per GbE port, as used by the
+ * gb0-gb3 macros below)
+ *
+ * Bit 0 : gb0_mask => 1:disable tx pause frames
+ * Bit 2 : gb1_mask => 1:disable tx pause frames
+ * Bit 4 : gb2_mask => 1:disable tx pause frames
+ * Bit 6 : gb3_mask => 1:disable tx pause frames
+ */
+#define netxen_gb_set_gb0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_gb_set_gb1_mask(config_word) \
+ ((config_word) |= 1 << 2)
+#define netxen_gb_set_gb2_mask(config_word) \
+ ((config_word) |= 1 << 4)
+#define netxen_gb_set_gb3_mask(config_word) \
+ ((config_word) |= 1 << 6)
+
+#define netxen_gb_get_gb0_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 0)
+#define netxen_gb_get_gb1_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 2)
+#define netxen_gb_get_gb2_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 4)
+#define netxen_gb_get_gb3_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 6)
+
+#define netxen_gb_unset_gb0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define netxen_gb_unset_gb1_mask(config_word) \
+ ((config_word) &= ~(1 << 2))
+#define netxen_gb_unset_gb2_mask(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define netxen_gb_unset_gb3_mask(config_word) \
+ ((config_word) &= ~(1 << 6))
+
+
+/*
+ * PHY-Specific MII control/status registers.
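+ * Addresses 0-15 are the standard IEEE 802.3 clause 22 MII register
+ * set; 16 and up are vendor-specific extensions of the attached PHY.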
+ */ +#define NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL 0 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS 1 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 2 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 3 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG 4 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART 5 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE 6 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT 7 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE 8 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL 9 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS 10 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS 15 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL 16 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE 18 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS 19 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE 20 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT 21 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL 24 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE 25 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET 26 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE 27 + +/* + * PHY-Specific Status Register (reg 17). + * + * Bit 0 : jabber => 1:jabber detected, 0:not + * Bit 1 : polarity => 1:polarity reversed, 0:normal + * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled + * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled + * Bit 4 : energydetect => 1:sleep, 0:active + * Bit 5 : downshift => 1:downshift, 0:no downshift + * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover) + * Bits 7-9 : cablelen => not valid in 10Mb/s mode + * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m + * Bit 10 : link => 1:link up, 0:link down + * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet + * Bit 12 : pagercvd => 1:page received, 0:page not received + * Bit 13 : duplex => 1:full duplex, 0:half duplex + * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd + */ + +#define netxen_get_phy_speed(config_word) (((config_word) >> 14) & 0x03) + +#define netxen_set_phy_speed(config_word, val) \ + ((config_word) |= ((val & 0x03) << 14)) +#define netxen_set_phy_duplex(config_word) \ + ((config_word) |= 1 << 13) +#define netxen_clear_phy_duplex(config_word) \ + ((config_word) &= ~(1 << 13)) + +#define netxen_get_phy_link(config_word) \ + _netxen_crb_get_bit(config_word, 10) +#define netxen_get_phy_duplex(config_word) \ + _netxen_crb_get_bit(config_word, 13) + +/* + * NIU Mode Register. 
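+ * (Of the three enable bits below, only the GbE bit is decoded
+ * by a macro in this header.)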
+ * Bit 0 : enable FibreChannel + * Bit 1 : enable 10/100/1000 Ethernet + * Bit 2 : enable 10Gb Ethernet + */ + +#define netxen_get_niu_enable_ge(config_word) \ + _netxen_crb_get_bit(config_word, 1) + +#define NETXEN_NIU_NON_PROMISC_MODE 0 +#define NETXEN_NIU_PROMISC_MODE 1 +#define NETXEN_NIU_ALLMULTI_MODE 2 + +/* + * NIU XG MAC Config Register + * + * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable + * Bit 2 : rx_enable => 1:enable frame recv, 0:disable + * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op + * Bit 27: xaui_framer_reset + * Bit 28: xaui_rx_reset + * Bit 29: xaui_tx_reset + * Bit 30: xg_ingress_afifo_reset + * Bit 31: xg_egress_afifo_reset + */ + +#define netxen_xg_soft_reset(config_word) \ + ((config_word) |= 1 << 4) + +typedef struct { + unsigned valid; + unsigned start_128M; + unsigned end_128M; + unsigned start_2M; +} crb_128M_2M_sub_block_map_t; + +typedef struct { + crb_128M_2M_sub_block_map_t sub_block[16]; +} crb_128M_2M_block_map_t; + +#endif /* __NETXEN_NIC_HW_H_ */ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c new file mode 100644 index 000000000000..d6c6357de6aa --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -0,0 +1,1949 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ * + */ + +#include +#include +#include +#include +#include "netxen_nic.h" +#include "netxen_nic_hw.h" + +struct crb_addr_pair { + u32 addr; + u32 data; +}; + +#define NETXEN_MAX_CRB_XFORM 60 +static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM]; +#define NETXEN_ADDR_ERROR (0xffffffff) + +#define crb_addr_transform(name) \ + crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \ + NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20 + +#define NETXEN_NIC_XDMA_RESET 0x8000ff + +static void +netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, + struct nx_host_rds_ring *rds_ring); +static int netxen_p3_has_mn(struct netxen_adapter *adapter); + +static void crb_addr_transform_setup(void) +{ + crb_addr_transform(XDMA); + crb_addr_transform(TIMR); + crb_addr_transform(SRE); + crb_addr_transform(SQN3); + crb_addr_transform(SQN2); + crb_addr_transform(SQN1); + crb_addr_transform(SQN0); + crb_addr_transform(SQS3); + crb_addr_transform(SQS2); + crb_addr_transform(SQS1); + crb_addr_transform(SQS0); + crb_addr_transform(RPMX7); + crb_addr_transform(RPMX6); + crb_addr_transform(RPMX5); + crb_addr_transform(RPMX4); + crb_addr_transform(RPMX3); + crb_addr_transform(RPMX2); + crb_addr_transform(RPMX1); + crb_addr_transform(RPMX0); + crb_addr_transform(ROMUSB); + crb_addr_transform(SN); + crb_addr_transform(QMN); + crb_addr_transform(QMS); + crb_addr_transform(PGNI); + crb_addr_transform(PGND); + crb_addr_transform(PGN3); + crb_addr_transform(PGN2); + crb_addr_transform(PGN1); + crb_addr_transform(PGN0); + crb_addr_transform(PGSI); + crb_addr_transform(PGSD); + crb_addr_transform(PGS3); + crb_addr_transform(PGS2); + crb_addr_transform(PGS1); + crb_addr_transform(PGS0); + crb_addr_transform(PS); + crb_addr_transform(PH); + crb_addr_transform(NIU); + crb_addr_transform(I2Q); + crb_addr_transform(EG); + crb_addr_transform(MN); + crb_addr_transform(MS); + crb_addr_transform(CAS2); + crb_addr_transform(CAS1); + crb_addr_transform(CAS0); + crb_addr_transform(CAM); + crb_addr_transform(C2C1); + crb_addr_transform(C2C0); + crb_addr_transform(SMB); + crb_addr_transform(OCM0); + crb_addr_transform(I2C0); +} + +void netxen_release_rx_buffers(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct netxen_rx_buffer *rx_buf; + int i, ring; + + recv_ctx = &adapter->recv_ctx; + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + for (i = 0; i < rds_ring->num_desc; ++i) { + rx_buf = &(rds_ring->rx_buf_arr[i]); + if (rx_buf->state == NETXEN_BUFFER_FREE) + continue; + pci_unmap_single(adapter->pdev, + rx_buf->dma, + rds_ring->dma_size, + PCI_DMA_FROMDEVICE); + if (rx_buf->skb != NULL) + dev_kfree_skb_any(rx_buf->skb); + } + } +} + +void netxen_release_tx_buffers(struct netxen_adapter *adapter) +{ + struct netxen_cmd_buffer *cmd_buf; + struct netxen_skb_frag *buffrag; + int i, j; + struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + + cmd_buf = tx_ring->cmd_buf_arr; + for (i = 0; i < tx_ring->num_desc; i++) { + buffrag = cmd_buf->frag_array; + if (buffrag->dma) { + pci_unmap_single(adapter->pdev, buffrag->dma, + buffrag->length, PCI_DMA_TODEVICE); + buffrag->dma = 0ULL; + } + for (j = 0; j < cmd_buf->frag_count; j++) { + buffrag++; + if (buffrag->dma) { + pci_unmap_page(adapter->pdev, buffrag->dma, + buffrag->length, + PCI_DMA_TODEVICE); + buffrag->dma = 0ULL; + } + } + if (cmd_buf->skb) { + dev_kfree_skb_any(cmd_buf->skb); + cmd_buf->skb = NULL; + } + cmd_buf++; + } +} + +void netxen_free_sw_resources(struct 
netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_tx_ring *tx_ring; + int ring; + + recv_ctx = &adapter->recv_ctx; + + if (recv_ctx->rds_rings == NULL) + goto skip_rds; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + vfree(rds_ring->rx_buf_arr); + rds_ring->rx_buf_arr = NULL; + } + kfree(recv_ctx->rds_rings); + +skip_rds: + if (adapter->tx_ring == NULL) + return; + + tx_ring = adapter->tx_ring; + vfree(tx_ring->cmd_buf_arr); + kfree(tx_ring); + adapter->tx_ring = NULL; +} + +int netxen_alloc_sw_resources(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct nx_host_tx_ring *tx_ring; + struct netxen_rx_buffer *rx_buf; + int ring, i, size; + + struct netxen_cmd_buffer *cmd_buf_arr; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + size = sizeof(struct nx_host_tx_ring); + tx_ring = kzalloc(size, GFP_KERNEL); + if (tx_ring == NULL) { + dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n", + netdev->name); + return -ENOMEM; + } + adapter->tx_ring = tx_ring; + + tx_ring->num_desc = adapter->num_txd; + tx_ring->txq = netdev_get_tx_queue(netdev, 0); + + cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); + if (cmd_buf_arr == NULL) { + dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n", + netdev->name); + goto err_out; + } + tx_ring->cmd_buf_arr = cmd_buf_arr; + + recv_ctx = &adapter->recv_ctx; + + size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring); + rds_ring = kzalloc(size, GFP_KERNEL); + if (rds_ring == NULL) { + dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n", + netdev->name); + goto err_out; + } + recv_ctx->rds_rings = rds_ring; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + switch (ring) { + case RCV_RING_NORMAL: + rds_ring->num_desc = adapter->num_rxd; + if (adapter->ahw.cut_through) { + rds_ring->dma_size = + NX_CT_DEFAULT_RX_BUF_LEN; + rds_ring->skb_size = + NX_CT_DEFAULT_RX_BUF_LEN; + } else { + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + rds_ring->dma_size = + NX_P3_RX_BUF_MAX_LEN; + else + rds_ring->dma_size = + NX_P2_RX_BUF_MAX_LEN; + rds_ring->skb_size = + rds_ring->dma_size + NET_IP_ALIGN; + } + break; + + case RCV_RING_JUMBO: + rds_ring->num_desc = adapter->num_jumbo_rxd; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + rds_ring->dma_size = + NX_P3_RX_JUMBO_BUF_MAX_LEN; + else + rds_ring->dma_size = + NX_P2_RX_JUMBO_BUF_MAX_LEN; + + if (adapter->capabilities & NX_CAP0_HW_LRO) + rds_ring->dma_size += NX_LRO_BUFFER_EXTRA; + + rds_ring->skb_size = + rds_ring->dma_size + NET_IP_ALIGN; + break; + + case RCV_RING_LRO: + rds_ring->num_desc = adapter->num_lro_rxd; + rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH; + rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; + break; + + } + rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); + if (rds_ring->rx_buf_arr == NULL) { + printk(KERN_ERR "%s: Failed to allocate " + "rx buffer ring %d\n", + netdev->name, ring); + /* free whatever was already allocated */ + goto err_out; + } + INIT_LIST_HEAD(&rds_ring->free_list); + /* + * Now go through all of them, set reference handles + * and put them in the queues. 
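+ * Each buffer's ref_handle is simply its index in rx_buf_arr, so a
+ * handle returned by the hardware in a receive descriptor maps back
+ * to its buffer in O(1).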
+ */ + rx_buf = rds_ring->rx_buf_arr; + for (i = 0; i < rds_ring->num_desc; i++) { + list_add_tail(&rx_buf->list, + &rds_ring->free_list); + rx_buf->ref_handle = i; + rx_buf->state = NETXEN_BUFFER_FREE; + rx_buf++; + } + spin_lock_init(&rds_ring->lock); + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + sds_ring->irq = adapter->msix_entries[ring].vector; + sds_ring->adapter = adapter; + sds_ring->num_desc = adapter->num_rxd; + + for (i = 0; i < NUM_RCV_DESC_RINGS; i++) + INIT_LIST_HEAD(&sds_ring->free_list[i]); + } + + return 0; + +err_out: + netxen_free_sw_resources(adapter); + return -ENOMEM; +} + +/* + * netxen_decode_crb_addr(0 - utility to translate from internal Phantom CRB + * address to external PCI CRB address. + */ +static u32 netxen_decode_crb_addr(u32 addr) +{ + int i; + u32 base_addr, offset, pci_base; + + crb_addr_transform_setup(); + + pci_base = NETXEN_ADDR_ERROR; + base_addr = addr & 0xfff00000; + offset = addr & 0x000fffff; + + for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) { + if (crb_addr_xform[i] == base_addr) { + pci_base = i << 20; + break; + } + } + if (pci_base == NETXEN_ADDR_ERROR) + return pci_base; + else + return pci_base + offset; +} + +#define NETXEN_MAX_ROM_WAIT_USEC 100 + +static int netxen_wait_rom_done(struct netxen_adapter *adapter) +{ + long timeout = 0; + long done = 0; + + cond_resched(); + + while (done == 0) { + done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS); + done &= 2; + if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) { + dev_err(&adapter->pdev->dev, + "Timeout reached waiting for rom done"); + return -EIO; + } + udelay(1); + } + return 0; +} + +static int do_rom_fast_read(struct netxen_adapter *adapter, + int addr, int *valp) +{ + NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); + NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); + NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3); + NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb); + if (netxen_wait_rom_done(adapter)) { + printk("Error waiting for rom done\n"); + return -EIO; + } + /* reset abyte_cnt and dummy_byte_cnt */ + NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); + udelay(10); + NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); + + *valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA); + return 0; +} + +static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr, + u8 *bytes, size_t size) +{ + int addridx; + int ret = 0; + + for (addridx = addr; addridx < (addr + size); addridx += 4) { + int v; + ret = do_rom_fast_read(adapter, addridx, &v); + if (ret != 0) + break; + *(__le32 *)bytes = cpu_to_le32(v); + bytes += 4; + } + + return ret; +} + +int +netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, + u8 *bytes, size_t size) +{ + int ret; + + ret = netxen_rom_lock(adapter); + if (ret < 0) + return ret; + + ret = do_rom_fast_read_words(adapter, addr, bytes, size); + + netxen_rom_unlock(adapter); + return ret; +} + +int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp) +{ + int ret; + + if (netxen_rom_lock(adapter) != 0) + return -EIO; + + ret = do_rom_fast_read(adapter, addr, valp); + netxen_rom_unlock(adapter); + return ret; +} + +#define NETXEN_BOARDTYPE 0x4008 +#define NETXEN_BOARDNUM 0x400c +#define NETXEN_CHIPNUM 0x4010 + +int netxen_pinit_from_rom(struct netxen_adapter *adapter) +{ + int addr, val; + int i, n, init_delay = 0; + struct crb_addr_pair *buf; + unsigned offset; + u32 off; + + /* resetall */ + netxen_rom_lock(adapter); + NXWR32(adapter, 
NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff); + netxen_rom_unlock(adapter); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (netxen_rom_fast_read(adapter, 0, &n) != 0 || + (n != 0xcafecafe) || + netxen_rom_fast_read(adapter, 4, &n) != 0) { + printk(KERN_ERR "%s: ERROR Reading crb_init area: " + "n: %08x\n", netxen_nic_driver_name, n); + return -EIO; + } + offset = n & 0xffffU; + n = (n >> 16) & 0xffffU; + } else { + if (netxen_rom_fast_read(adapter, 0, &n) != 0 || + !(n & 0x80000000)) { + printk(KERN_ERR "%s: ERROR Reading crb_init area: " + "n: %08x\n", netxen_nic_driver_name, n); + return -EIO; + } + offset = 1; + n &= ~0x80000000; + } + + if (n >= 1024) { + printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not" + " initialized.\n", __func__, n); + return -EIO; + } + + buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); + if (buf == NULL) { + printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n", + netxen_nic_driver_name); + return -ENOMEM; + } + + for (i = 0; i < n; i++) { + if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || + netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { + kfree(buf); + return -EIO; + } + + buf[i].addr = addr; + buf[i].data = val; + + } + + for (i = 0; i < n; i++) { + + off = netxen_decode_crb_addr(buf[i].addr); + if (off == NETXEN_ADDR_ERROR) { + printk(KERN_ERR"CRB init value out of range %x\n", + buf[i].addr); + continue; + } + off += NETXEN_PCI_CRBSPACE; + + if (off & 1) + continue; + + /* skipping cold reboot MAGIC */ + if (off == NETXEN_CAM_RAM(0x1fc)) + continue; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (off == (NETXEN_CRB_I2C0 + 0x1c)) + continue; + /* do not reset PCI */ + if (off == (ROMUSB_GLB + 0xbc)) + continue; + if (off == (ROMUSB_GLB + 0xa8)) + continue; + if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ + continue; + if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ + continue; + if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ + continue; + if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET) + continue; + if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) && + !NX_IS_REVISION_P3P(adapter->ahw.revision_id)) + buf[i].data = 0x1020; + /* skip the function enable register */ + if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION)) + continue; + if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2)) + continue; + if ((off & 0x0ff00000) == NETXEN_CRB_SMB) + continue; + } + + init_delay = 1; + /* After writing this register, HW needs time for CRB */ + /* to quiet down (else crb_window returns 0xffffffff) */ + if (off == NETXEN_ROMUSB_GLB_SW_RESET) { + init_delay = 1000; + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + /* hold xdma in reset also */ + buf[i].data = NETXEN_NIC_XDMA_RESET; + buf[i].data = 0x8000ff; + } + } + + NXWR32(adapter, off, buf[i].data); + + msleep(init_delay); + } + kfree(buf); + + /* disable_peg_cache_all */ + + /* unreset_net_cache */ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); + NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f)); + } + + /* p2dn replyCount */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e); + /* disable_peg_cache 0 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8); + /* disable_peg_cache 1 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8); + + /* peg_clr_all */ + + /* peg_clr 0 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0); + NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0); + /* peg_clr 1 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0); + NXWR32(adapter, NETXEN_CRB_PEG_NET_1 
+ 0xc, 0); + /* peg_clr 2 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0); + NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0); + /* peg_clr 3 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0); + NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0); + return 0; +} + +static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section) +{ + uint32_t i; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + __le32 entries = cpu_to_le32(directory->num_entries); + + for (i = 0; i < entries; i++) { + + __le32 offs = cpu_to_le32(directory->findex) + + (i * cpu_to_le32(directory->entry_size)); + __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); + + if (tab_type == section) + return (struct uni_table_desc *) &unirom[offs]; + } + + return NULL; +} + +#define QLCNIC_FILEHEADER_SIZE (14 * 4) + +static int +netxen_nic_validate_header(struct netxen_adapter *adapter) + { + const u8 *unirom = adapter->fw->data; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + u32 fw_file_size = adapter->fw->size; + u32 tab_size; + __le32 entries; + __le32 entry_size; + + if (fw_file_size < QLCNIC_FILEHEADER_SIZE) + return -EINVAL; + + entries = cpu_to_le32(directory->num_entries); + entry_size = cpu_to_le32(directory->entry_size); + tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); + + if (fw_file_size < tab_size) + return -EINVAL; + + return 0; +} + +static int +netxen_nic_validate_bootld(struct netxen_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + NX_UNI_BOOTLD_IDX_OFF)); + u32 offs; + u32 tab_size; + u32 data_size; + + tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD); + + if (!tab_desc) + return -EINVAL; + + tab_size = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx)); + descr = (struct uni_data_desc *)&unirom[offs]; + + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + +static int +netxen_nic_validate_fw(struct netxen_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + NX_UNI_FIRMWARE_IDX_OFF)); + u32 offs; + u32 tab_size; + u32 data_size; + + tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW); + + if (!tab_desc) + return -EINVAL; + + tab_size = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx)); + descr = (struct uni_data_desc *)&unirom[offs]; + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + + +static int +netxen_nic_validate_product_offs(struct netxen_adapter *adapter) +{ + struct uni_table_desc *ptab_descr; + const u8 *unirom = adapter->fw->data; + int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ? 
+ 1 : netxen_p3_has_mn(adapter); + __le32 entries; + __le32 entry_size; + u32 tab_size; + u32 i; + + ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL); + if (ptab_descr == NULL) + return -EINVAL; + + entries = cpu_to_le32(ptab_descr->num_entries); + entry_size = cpu_to_le32(ptab_descr->entry_size); + tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); + + if (adapter->fw->size < tab_size) + return -EINVAL; + +nomn: + for (i = 0; i < entries; i++) { + + __le32 flags, file_chiprev, offs; + u8 chiprev = adapter->ahw.revision_id; + uint32_t flagbit; + + offs = cpu_to_le32(ptab_descr->findex) + + (i * cpu_to_le32(ptab_descr->entry_size)); + flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF)); + file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + + NX_UNI_CHIP_REV_OFF)); + + flagbit = mn_present ? 1 : 2; + + if ((chiprev == file_chiprev) && + ((1ULL << flagbit) & flags)) { + adapter->file_prd_off = offs; + return 0; + } + } + + if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + mn_present = 0; + goto nomn; + } + + return -EINVAL; +} + +static int +netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter) +{ + if (netxen_nic_validate_header(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: header validation failed\n"); + return -EINVAL; + } + + if (netxen_nic_validate_product_offs(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: product validation failed\n"); + return -EINVAL; + } + + if (netxen_nic_validate_bootld(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: bootld validation failed\n"); + return -EINVAL; + } + + if (netxen_nic_validate_fw(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: firmware validation failed\n"); + return -EINVAL; + } + + return 0; +} + +static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter, + u32 section, u32 idx_offset) +{ + const u8 *unirom = adapter->fw->data; + int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + idx_offset)); + struct uni_table_desc *tab_desc; + __le32 offs; + + tab_desc = nx_get_table_desc(unirom, section); + + if (tab_desc == NULL) + return NULL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * idx); + + return (struct uni_data_desc *)&unirom[offs]; +} + +static u8 * +nx_get_bootld_offs(struct netxen_adapter *adapter) +{ + u32 offs = NETXEN_BOOTLD_START; + + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) + offs = cpu_to_le32((nx_get_data_desc(adapter, + NX_UNI_DIR_SECT_BOOTLD, + NX_UNI_BOOTLD_IDX_OFF))->findex); + + return (u8 *)&adapter->fw->data[offs]; +} + +static u8 * +nx_get_fw_offs(struct netxen_adapter *adapter) +{ + u32 offs = NETXEN_IMAGE_START; + + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) + offs = cpu_to_le32((nx_get_data_desc(adapter, + NX_UNI_DIR_SECT_FW, + NX_UNI_FIRMWARE_IDX_OFF))->findex); + + return (u8 *)&adapter->fw->data[offs]; +} + +static __le32 +nx_get_fw_size(struct netxen_adapter *adapter) +{ + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) + return cpu_to_le32((nx_get_data_desc(adapter, + NX_UNI_DIR_SECT_FW, + NX_UNI_FIRMWARE_IDX_OFF))->size); + else + return cpu_to_le32( + *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]); +} + +static __le32 +nx_get_fw_version(struct netxen_adapter *adapter) +{ + struct uni_data_desc *fw_data_desc; + const struct firmware *fw = adapter->fw; + __le32 major, minor, sub; + const u8 *ver_str; + int i, ret = 0; + + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { + + fw_data_desc = nx_get_data_desc(adapter, 
+ NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF); + ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) + + cpu_to_le32(fw_data_desc->size) - 17; + + for (i = 0; i < 12; i++) { + if (!strncmp(&ver_str[i], "REV=", 4)) { + ret = sscanf(&ver_str[i+4], "%u.%u.%u ", + &major, &minor, &sub); + break; + } + } + + if (ret != 3) + return 0; + + return major + (minor << 8) + (sub << 16); + + } else + return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]); +} + +static __le32 +nx_get_bios_version(struct netxen_adapter *adapter) +{ + const struct firmware *fw = adapter->fw; + __le32 bios_ver, prd_off = adapter->file_prd_off; + + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { + bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) + + NX_UNI_BIOS_VERSION_OFF)); + return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + + (bios_ver >> 24); + } else + return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); + +} + +int +netxen_need_fw_reset(struct netxen_adapter *adapter) +{ + u32 count, old_count; + u32 val, version, major, minor, build; + int i, timeout; + u8 fw_type; + + /* NX2031 firmware doesn't support heartbit */ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 1; + + if (adapter->need_fw_reset) + return 1; + + /* last attempt had failed */ + if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) + return 1; + + old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); + + for (i = 0; i < 10; i++) { + + timeout = msleep_interruptible(200); + if (timeout) { + NXWR32(adapter, CRB_CMDPEG_STATE, + PHAN_INITIALIZE_FAILED); + return -EINTR; + } + + count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); + if (count != old_count) + break; + } + + /* firmware is dead */ + if (count == old_count) + return 1; + + /* check if we have got newer or different file firmware */ + if (adapter->fw) { + + val = nx_get_fw_version(adapter); + + version = NETXEN_DECODE_VERSION(val); + + major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); + minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); + build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); + + if (version > NETXEN_VERSION_CODE(major, minor, build)) + return 1; + + if (version == NETXEN_VERSION_CODE(major, minor, build) && + adapter->fw_type != NX_UNIFIED_ROMIMAGE) { + + val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL); + fw_type = (val & 0x4) ? + NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE; + + if (adapter->fw_type != fw_type) + return 1; + } + } + + return 0; +} + +#define NETXEN_MIN_P3_FW_SUPP NETXEN_VERSION_CODE(4, 0, 505) + +int +netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter) +{ + u32 flash_fw_ver, min_fw_ver; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + if (netxen_rom_fast_read(adapter, + NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) { + dev_err(&adapter->pdev->dev, "Unable to read flash fw" + "version\n"); + return -EIO; + } + + flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver); + min_fw_ver = NETXEN_MIN_P3_FW_SUPP; + if (flash_fw_ver >= min_fw_ver) + return 0; + + dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported" + "[4.0.505]. 
Please update firmware on flash\n", + _major(flash_fw_ver), _minor(flash_fw_ver), + _build(flash_fw_ver)); + return -EINVAL; +} + +static char *fw_name[] = { + NX_P2_MN_ROMIMAGE_NAME, + NX_P3_CT_ROMIMAGE_NAME, + NX_P3_MN_ROMIMAGE_NAME, + NX_UNIFIED_ROMIMAGE_NAME, + NX_FLASH_ROMIMAGE_NAME, +}; + +int +netxen_load_firmware(struct netxen_adapter *adapter) +{ + u64 *ptr64; + u32 i, flashaddr, size; + const struct firmware *fw = adapter->fw; + struct pci_dev *pdev = adapter->pdev; + + dev_info(&pdev->dev, "loading firmware from %s\n", + fw_name[adapter->fw_type]); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1); + + if (fw) { + __le64 data; + + size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; + + ptr64 = (u64 *)nx_get_bootld_offs(adapter); + flashaddr = NETXEN_BOOTLD_START; + + for (i = 0; i < size; i++) { + data = cpu_to_le64(ptr64[i]); + + if (adapter->pci_mem_write(adapter, flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + + size = (__force u32)nx_get_fw_size(adapter) / 8; + + ptr64 = (u64 *)nx_get_fw_offs(adapter); + flashaddr = NETXEN_IMAGE_START; + + for (i = 0; i < size; i++) { + data = cpu_to_le64(ptr64[i]); + + if (adapter->pci_mem_write(adapter, + flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + + size = (__force u32)nx_get_fw_size(adapter) % 8; + if (size) { + data = cpu_to_le64(ptr64[i]); + + if (adapter->pci_mem_write(adapter, + flashaddr, data)) + return -EIO; + } + + } else { + u64 data; + u32 hi, lo; + + size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; + flashaddr = NETXEN_BOOTLD_START; + + for (i = 0; i < size; i++) { + if (netxen_rom_fast_read(adapter, + flashaddr, (int *)&lo) != 0) + return -EIO; + if (netxen_rom_fast_read(adapter, + flashaddr + 4, (int *)&hi) != 0) + return -EIO; + + /* hi, lo are already in host endian byteorder */ + data = (((u64)hi << 32) | lo); + + if (adapter->pci_mem_write(adapter, + flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + } + msleep(1); + + if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { + NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020); + NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e); + } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d); + else { + NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff); + NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0); + } + + return 0; +} + +static int +netxen_validate_firmware(struct netxen_adapter *adapter) +{ + __le32 val; + __le32 flash_fw_ver; + u32 file_fw_ver, min_ver, bios; + struct pci_dev *pdev = adapter->pdev; + const struct firmware *fw = adapter->fw; + u8 fw_type = adapter->fw_type; + u32 crbinit_fix_fw; + + if (fw_type == NX_UNIFIED_ROMIMAGE) { + if (netxen_nic_validate_unified_romimage(adapter)) + return -EINVAL; + } else { + val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); + if ((__force u32)val != NETXEN_BDINFO_MAGIC) + return -EINVAL; + + if (fw->size < NX_FW_MIN_SIZE) + return -EINVAL; + } + + val = nx_get_fw_version(adapter); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + min_ver = NETXEN_MIN_P3_FW_SUPP; + else + min_ver = NETXEN_VERSION_CODE(3, 4, 216); + + file_fw_ver = NETXEN_DECODE_VERSION(val); + + if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) || + (file_fw_ver < min_ver)) { + dev_err(&pdev->dev, + "%s: firmware version %d.%d.%d unsupported\n", + fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver), + _build(file_fw_ver)); + return -EINVAL; + } + + val = nx_get_bios_version(adapter); 
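The version macros this code leans on -- NETXEN_VERSION_CODE, NETXEN_DECODE_VERSION, _major()/_minor()/_build() -- are defined in netxen_nic.h and are not part of this hunk. A minimal sketch of the assumed packing, so the comparisons read clearly: the raw flash dword keeps the major number in its low byte and the build number in the high half, and the decode step repacks it so that plain integer comparison orders releases.

	/* Sketch only; the authoritative definitions live in netxen_nic.h. */
	#define NETXEN_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
	#define NETXEN_DECODE_VERSION(v) \
		NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
	/* e.g. 4.0.505 encodes above 3.4.216, which is what the
	 * minimum-version checks in this file rely on. */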
+ netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); + if ((__force u32)val != bios) { + dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", + fw_name[fw_type]); + return -EINVAL; + } + + if (netxen_rom_fast_read(adapter, + NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) { + dev_err(&pdev->dev, "Unable to read flash fw version\n"); + return -EIO; + } + flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver); + + /* New fw from file is not allowed, if fw on flash is < 4.0.554 */ + crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554); + if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw && + NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + dev_err(&pdev->dev, "Incompatibility detected between driver " + "and firmware version on flash. This configuration " + "is not recommended. Please update the firmware on " + "flash immediately\n"); + return -EINVAL; + } + + /* check if flashed firmware is newer only for no-mn and P2 case*/ + if (!netxen_p3_has_mn(adapter) || + NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if (flash_fw_ver > file_fw_ver) { + dev_info(&pdev->dev, "%s: firmware is older than flash\n", + fw_name[fw_type]); + return -EINVAL; + } + } + + NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); + return 0; +} + +static void +nx_get_next_fwtype(struct netxen_adapter *adapter) +{ + u8 fw_type; + + switch (adapter->fw_type) { + case NX_UNKNOWN_ROMIMAGE: + fw_type = NX_UNIFIED_ROMIMAGE; + break; + + case NX_UNIFIED_ROMIMAGE: + if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) + fw_type = NX_FLASH_ROMIMAGE; + else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + fw_type = NX_P2_MN_ROMIMAGE; + else if (netxen_p3_has_mn(adapter)) + fw_type = NX_P3_MN_ROMIMAGE; + else + fw_type = NX_P3_CT_ROMIMAGE; + break; + + case NX_P3_MN_ROMIMAGE: + fw_type = NX_P3_CT_ROMIMAGE; + break; + + case NX_P2_MN_ROMIMAGE: + case NX_P3_CT_ROMIMAGE: + default: + fw_type = NX_FLASH_ROMIMAGE; + break; + } + + adapter->fw_type = fw_type; +} + +static int +netxen_p3_has_mn(struct netxen_adapter *adapter) +{ + u32 capability, flashed_ver; + capability = 0; + + /* NX2031 always had MN */ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 1; + + netxen_rom_fast_read(adapter, + NX_FW_VERSION_OFFSET, (int *)&flashed_ver); + flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); + + if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) { + + capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY); + if (capability & NX_PEG_TUNE_MN_PRESENT) + return 1; + } + return 0; +} + +void netxen_request_firmware(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int rc = 0; + + adapter->fw_type = NX_UNKNOWN_ROMIMAGE; + +next: + nx_get_next_fwtype(adapter); + + if (adapter->fw_type == NX_FLASH_ROMIMAGE) { + adapter->fw = NULL; + } else { + rc = request_firmware(&adapter->fw, + fw_name[adapter->fw_type], &pdev->dev); + if (rc != 0) + goto next; + + rc = netxen_validate_firmware(adapter); + if (rc != 0) { + release_firmware(adapter->fw); + msleep(1); + goto next; + } + } +} + + +void +netxen_release_firmware(struct netxen_adapter *adapter) +{ + if (adapter->fw) + release_firmware(adapter->fw); + adapter->fw = NULL; +} + +int netxen_init_dummy_dma(struct netxen_adapter *adapter) +{ + u64 addr; + u32 hi, lo; + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev, + NETXEN_HOST_DUMMY_DMA_SIZE, + &adapter->dummy_dma.phys_addr); + if (adapter->dummy_dma.addr == NULL) { + dev_err(&adapter->pdev->dev, + 
"ERROR: Could not allocate dummy DMA memory\n"); + return -ENOMEM; + } + + addr = (uint64_t) adapter->dummy_dma.phys_addr; + hi = (addr >> 32) & 0xffffffff; + lo = addr & 0xffffffff; + + NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi); + NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo); + + return 0; +} + +/* + * NetXen DMA watchdog control: + * + * Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive + * Bit 1 : disable_request => 1 req disable dma watchdog + * Bit 2 : enable_request => 1 req enable dma watchdog + * Bit 3-31 : unused + */ +void netxen_free_dummy_dma(struct netxen_adapter *adapter) +{ + int i = 100; + u32 ctrl; + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return; + + if (!adapter->dummy_dma.addr) + return; + + ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); + if ((ctrl & 0x1) != 0) { + NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2)); + + while ((ctrl & 0x1) != 0) { + + msleep(50); + + ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); + + if (--i == 0) + break; + } + } + + if (i) { + pci_free_consistent(adapter->pdev, + NETXEN_HOST_DUMMY_DMA_SIZE, + adapter->dummy_dma.addr, + adapter->dummy_dma.phys_addr); + adapter->dummy_dma.addr = NULL; + } else + dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n"); +} + +int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) +{ + u32 val = 0; + int retries = 60; + + if (pegtune_val) + return 0; + + do { + val = NXRD32(adapter, CRB_CMDPEG_STATE); + + switch (val) { + case PHAN_INITIALIZE_COMPLETE: + case PHAN_INITIALIZE_ACK: + return 0; + case PHAN_INITIALIZE_FAILED: + goto out_err; + default: + break; + } + + msleep(500); + + } while (--retries); + + NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); + +out_err: + dev_warn(&adapter->pdev->dev, "firmware init failed\n"); + return -EIO; +} + +static int +netxen_receive_peg_ready(struct netxen_adapter *adapter) +{ + u32 val = 0; + int retries = 2000; + + do { + val = NXRD32(adapter, CRB_RCVPEG_STATE); + + if (val == PHAN_PEG_RCV_INITIALIZED) + return 0; + + msleep(10); + + } while (--retries); + + if (!retries) { + printk(KERN_ERR "Receive Peg initialization not " + "complete, state: 0x%x.\n", val); + return -EIO; + } + + return 0; +} + +int netxen_init_firmware(struct netxen_adapter *adapter) +{ + int err; + + err = netxen_receive_peg_ready(adapter); + if (err) + return err; + + NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT); + NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE); + NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC); + + return err; +} + +static void +netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg) +{ + u32 cable_OUI; + u16 cable_len; + u16 link_speed; + u8 link_status, module, duplex, autoneg; + struct net_device *netdev = adapter->netdev; + + adapter->has_link_events = 1; + + cable_OUI = msg->body[1] & 0xffffffff; + cable_len = (msg->body[1] >> 32) & 0xffff; + link_speed = (msg->body[1] >> 48) & 0xffff; + + link_status = msg->body[2] & 0xff; + duplex = (msg->body[2] >> 16) & 0xff; + autoneg = (msg->body[2] >> 24) & 0xff; + + module = (msg->body[2] >> 8) & 0xff; + if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) { + printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n", + netdev->name, cable_OUI, cable_len); + } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) { + printk(KERN_INFO "%s: unsupported cable length 
%d\n", + netdev->name, cable_len); + } + + netxen_advert_link_change(adapter, link_status); + + /* update link parameters */ + if (duplex == LINKEVENT_FULL_DUPLEX) + adapter->link_duplex = DUPLEX_FULL; + else + adapter->link_duplex = DUPLEX_HALF; + adapter->module_type = module; + adapter->link_autoneg = autoneg; + adapter->link_speed = link_speed; +} + +static void +netxen_handle_fw_message(int desc_cnt, int index, + struct nx_host_sds_ring *sds_ring) +{ + nx_fw_msg_t msg; + struct status_desc *desc; + int i = 0, opcode; + + while (desc_cnt > 0 && i < 8) { + desc = &sds_ring->desc_head[index]; + msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); + msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); + + index = get_next_index(index, sds_ring->num_desc); + desc_cnt--; + } + + opcode = netxen_get_nic_msg_opcode(msg.body[0]); + switch (opcode) { + case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: + netxen_handle_linkevent(sds_ring->adapter, &msg); + break; + default: + break; + } +} + +static int +netxen_alloc_rx_skb(struct netxen_adapter *adapter, + struct nx_host_rds_ring *rds_ring, + struct netxen_rx_buffer *buffer) +{ + struct sk_buff *skb; + dma_addr_t dma; + struct pci_dev *pdev = adapter->pdev; + + buffer->skb = dev_alloc_skb(rds_ring->skb_size); + if (!buffer->skb) + return 1; + + skb = buffer->skb; + + if (!adapter->ahw.cut_through) + skb_reserve(skb, 2); + + dma = pci_map_single(pdev, skb->data, + rds_ring->dma_size, PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(pdev, dma)) { + dev_kfree_skb_any(skb); + buffer->skb = NULL; + return 1; + } + + buffer->skb = skb; + buffer->dma = dma; + buffer->state = NETXEN_BUFFER_BUSY; + + return 0; +} + +static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter, + struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum) +{ + struct netxen_rx_buffer *buffer; + struct sk_buff *skb; + + buffer = &rds_ring->rx_buf_arr[index]; + + pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, + PCI_DMA_FROMDEVICE); + + skb = buffer->skb; + if (!skb) + goto no_skb; + + if (likely((adapter->netdev->features & NETIF_F_RXCSUM) + && cksum == STATUS_CKSUM_OK)) { + adapter->stats.csummed++; + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else + skb->ip_summed = CHECKSUM_NONE; + + skb->dev = adapter->netdev; + + buffer->skb = NULL; +no_skb: + buffer->state = NETXEN_BUFFER_FREE; + return skb; +} + +static struct netxen_rx_buffer * +netxen_process_rcv(struct netxen_adapter *adapter, + struct nx_host_sds_ring *sds_ring, + int ring, u64 sts_data0) +{ + struct net_device *netdev = adapter->netdev; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + struct netxen_rx_buffer *buffer; + struct sk_buff *skb; + struct nx_host_rds_ring *rds_ring; + int index, length, cksum, pkt_offset; + + if (unlikely(ring >= adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = netxen_get_sts_refhandle(sts_data0); + if (unlikely(index >= rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + + length = netxen_get_sts_totallength(sts_data0); + cksum = netxen_get_sts_status(sts_data0); + pkt_offset = netxen_get_sts_pkt_offset(sts_data0); + + skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum); + if (!skb) + return buffer; + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + + if (pkt_offset) + skb_pull(skb, pkt_offset); + + skb->protocol = eth_type_trans(skb, netdev); + + napi_gro_receive(&sds_ring->napi, skb); + + 
adapter->stats.rx_pkts++;
+	adapter->stats.rxbytes += length;
+
+	return buffer;
+}
+
+#define TCP_HDR_SIZE 20
+#define TCP_TS_OPTION_SIZE 12
+#define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)
+
+static struct netxen_rx_buffer *
+netxen_process_lro(struct netxen_adapter *adapter,
+		struct nx_host_sds_ring *sds_ring,
+		int ring, u64 sts_data0, u64 sts_data1)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+	struct netxen_rx_buffer *buffer;
+	struct sk_buff *skb;
+	struct nx_host_rds_ring *rds_ring;
+	struct iphdr *iph;
+	struct tcphdr *th;
+	bool push, timestamp;
+	int l2_hdr_offset, l4_hdr_offset;
+	int index;
+	u16 lro_length, length, data_offset;
+	u32 seq_number;
+	u8 vhdr_len = 0;	/* stays zero for untagged frames */
+
+	if (unlikely(ring >= adapter->max_rds_rings))
+		return NULL;
+
+	rds_ring = &recv_ctx->rds_rings[ring];
+
+	index = netxen_get_lro_sts_refhandle(sts_data0);
+	if (unlikely(index >= rds_ring->num_desc))
+		return NULL;
+
+	buffer = &rds_ring->rx_buf_arr[index];
+
+	timestamp = netxen_get_lro_sts_timestamp(sts_data0);
+	lro_length = netxen_get_lro_sts_length(sts_data0);
+	l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
+	l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
+	push = netxen_get_lro_sts_push_flag(sts_data0);
+	seq_number = netxen_get_lro_sts_seq_number(sts_data1);
+
+	skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+	if (!skb)
+		return buffer;
+
+	if (timestamp)
+		data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
+	else
+		data_offset = l4_hdr_offset + TCP_HDR_SIZE;
+
+	skb_put(skb, lro_length + data_offset);
+
+	skb_pull(skb, l2_hdr_offset);
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	if (skb->protocol == htons(ETH_P_8021Q))
+		vhdr_len = VLAN_HLEN;
+	iph = (struct iphdr *)(skb->data + vhdr_len);
+	th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
+
+	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+	iph->tot_len = htons(length);
+	iph->check = 0;
+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+	th->psh = push;
+	th->seq = htonl(seq_number);
+
+	length = skb->len;
+
+	netif_receive_skb(skb);
+
+	adapter->stats.lro_pkts++;
+	adapter->stats.rxbytes += length;
+
+	return buffer;
+}
+
+#define netxen_merge_rx_buffers(list, head) \
+	do { list_splice_tail_init(list, head); } while (0)
+
+int
+netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
+{
+	struct netxen_adapter *adapter = sds_ring->adapter;
+
+	struct list_head *cur;
+
+	struct status_desc *desc;
+	struct netxen_rx_buffer *rxbuf;
+
+	u32 consumer = sds_ring->consumer;
+
+	int count = 0;
+	u64 sts_data0, sts_data1;
+	int opcode, ring = 0, desc_cnt;
+
+	while (count < max) {
+		desc = &sds_ring->desc_head[consumer];
+		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+		if (!(sts_data0 & STATUS_OWNER_HOST))
+			break;
+
+		desc_cnt = netxen_get_sts_desc_cnt(sts_data0);
+
+		opcode = netxen_get_sts_opcode(sts_data0);
+
+		switch (opcode) {
+		case NETXEN_NIC_RXPKT_DESC:
+		case NETXEN_OLD_RXPKT_DESC:
+		case NETXEN_NIC_SYN_OFFLOAD:
+			ring = netxen_get_sts_type(sts_data0);
+			rxbuf = netxen_process_rcv(adapter, sds_ring,
+					ring, sts_data0);
+			break;
+		case NETXEN_NIC_LRO_DESC:
+			ring = netxen_get_lro_sts_type(sts_data0);
+			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+			rxbuf = netxen_process_lro(adapter, sds_ring,
+					ring, sts_data0, sts_data1);
+			break;
+		case NETXEN_NIC_RESPONSE_DESC:
+			netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
+			/* fall through */
+		default:
+			goto skip;
+		}
+
+
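The netxen_merge_rx_buffers() wrapper defined above is a thin alias for list_splice_tail_init(), which moves every entry of its first list onto the tail of the second and reinitializes the first list to empty. That re-initialization is what lets the per-ring free list on the sds ring be refilled and spliced again on the next pass:

	LIST_HEAD(staging);	/* plays the role of sds_ring->free_list[ring] */
	/* ... consumed rx buffers are collected onto staging ... */
	spin_lock(&rds_ring->lock);
	list_splice_tail_init(&staging, &rds_ring->free_list);
	spin_unlock(&rds_ring->lock);
	/* staging is empty again and safe to reuse immediately */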
WARN_ON(desc_cnt > 1); + + if (rxbuf) + list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); + +skip: + for (; desc_cnt > 0; desc_cnt--) { + desc = &sds_ring->desc_head[consumer]; + desc->status_desc_data[0] = + cpu_to_le64(STATUS_OWNER_PHANTOM); + consumer = get_next_index(consumer, sds_ring->num_desc); + } + count++; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + struct nx_host_rds_ring *rds_ring = + &adapter->recv_ctx.rds_rings[ring]; + + if (!list_empty(&sds_ring->free_list[ring])) { + list_for_each(cur, &sds_ring->free_list[ring]) { + rxbuf = list_entry(cur, + struct netxen_rx_buffer, list); + netxen_alloc_rx_skb(adapter, rds_ring, rxbuf); + } + spin_lock(&rds_ring->lock); + netxen_merge_rx_buffers(&sds_ring->free_list[ring], + &rds_ring->free_list); + spin_unlock(&rds_ring->lock); + } + + netxen_post_rx_buffers_nodb(adapter, rds_ring); + } + + if (count) { + sds_ring->consumer = consumer; + NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer); + } + + return count; +} + +/* Process Command status ring */ +int netxen_process_cmd_ring(struct netxen_adapter *adapter) +{ + u32 sw_consumer, hw_consumer; + int count = 0, i; + struct netxen_cmd_buffer *buffer; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + struct netxen_skb_frag *frag; + int done = 0; + struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + + if (!spin_trylock(&adapter->tx_clean_lock)) + return 1; + + sw_consumer = tx_ring->sw_consumer; + hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); + + while (sw_consumer != hw_consumer) { + buffer = &tx_ring->cmd_buf_arr[sw_consumer]; + if (buffer->skb) { + frag = &buffer->frag_array[0]; + pci_unmap_single(pdev, frag->dma, frag->length, + PCI_DMA_TODEVICE); + frag->dma = 0ULL; + for (i = 1; i < buffer->frag_count; i++) { + frag++; /* Get the next frag */ + pci_unmap_page(pdev, frag->dma, frag->length, + PCI_DMA_TODEVICE); + frag->dma = 0ULL; + } + + adapter->stats.xmitfinished++; + dev_kfree_skb_any(buffer->skb); + buffer->skb = NULL; + } + + sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); + if (++count >= MAX_STATUS_HANDLE) + break; + } + + if (count && netif_running(netdev)) { + tx_ring->sw_consumer = sw_consumer; + + smp_mb(); + + if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) + if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_wake_queue(netdev); + adapter->tx_timeo_cnt = 0; + } + /* + * If everything is freed up to consumer then check if the ring is full + * If the ring is full then check if more needs to be freed and + * schedule the call back again. + * + * This happens when there are 2 CPUs. One could be freeing and the + * other filling it. If the ring is full when we get out of here and + * the card has already interrupted the host then the host can miss the + * interrupt. + * + * There is still a possible race condition and the host could miss an + * interrupt. The card has to take care of this. 
+ */ + hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); + done = (sw_consumer == hw_consumer); + spin_unlock(&adapter->tx_clean_lock); + + return done; +} + +void +netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, + struct nx_host_rds_ring *rds_ring) +{ + struct rcv_desc *pdesc; + struct netxen_rx_buffer *buffer; + int producer, count = 0; + netxen_ctx_msg msg = 0; + struct list_head *head; + + producer = rds_ring->producer; + + head = &rds_ring->free_list; + while (!list_empty(head)) { + + buffer = list_entry(head->next, struct netxen_rx_buffer, list); + + if (!buffer->skb) { + if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) + break; + } + + count++; + list_del(&buffer->list); + + /* make a rcv descriptor */ + pdesc = &rds_ring->desc_head[producer]; + pdesc->addr_buffer = cpu_to_le64(buffer->dma); + pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); + pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); + + producer = get_next_index(producer, rds_ring->num_desc); + } + + if (count) { + rds_ring->producer = producer; + NXWRIO(adapter, rds_ring->crb_rcv_producer, + (producer-1) & (rds_ring->num_desc-1)); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + /* + * Write a doorbell msg to tell phanmon of change in + * receive ring producer + * Only for firmware version < 4.0.0 + */ + netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID); + netxen_set_msg_privid(msg); + netxen_set_msg_count(msg, + ((producer - 1) & + (rds_ring->num_desc - 1))); + netxen_set_msg_ctxid(msg, adapter->portnum); + netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); + NXWRIO(adapter, DB_NORMALIZE(adapter, + NETXEN_RCV_PRODUCER_OFFSET), msg); + } + } +} + +static void +netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, + struct nx_host_rds_ring *rds_ring) +{ + struct rcv_desc *pdesc; + struct netxen_rx_buffer *buffer; + int producer, count = 0; + struct list_head *head; + + if (!spin_trylock(&rds_ring->lock)) + return; + + producer = rds_ring->producer; + + head = &rds_ring->free_list; + while (!list_empty(head)) { + + buffer = list_entry(head->next, struct netxen_rx_buffer, list); + + if (!buffer->skb) { + if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) + break; + } + + count++; + list_del(&buffer->list); + + /* make a rcv descriptor */ + pdesc = &rds_ring->desc_head[producer]; + pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); + pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); + pdesc->addr_buffer = cpu_to_le64(buffer->dma); + + producer = get_next_index(producer, rds_ring->num_desc); + } + + if (count) { + rds_ring->producer = producer; + NXWRIO(adapter, rds_ring->crb_rcv_producer, + (producer - 1) & (rds_ring->num_desc - 1)); + } + spin_unlock(&rds_ring->lock); +} + +void netxen_nic_clear_stats(struct netxen_adapter *adapter) +{ + memset(&adapter->stats, 0, sizeof(adapter->stats)); +} + diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c new file mode 100644 index 000000000000..8c7fc32d781f --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -0,0 +1,3100 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include "netxen_nic_hw.h"
+
+#include "netxen_nic.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+
+MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
+
+char netxen_nic_driver_name[] = "netxen_nic";
+static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
+	NETXEN_NIC_LINUX_VERSIONID;
+
+static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+
+static int use_msi_x = 1;
+
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+
+static int __devinit netxen_nic_probe(struct pci_dev *pdev,
+		const struct pci_device_id *ent);
+static void __devexit netxen_nic_remove(struct pci_dev *pdev);
+static int netxen_nic_open(struct net_device *netdev);
+static int netxen_nic_close(struct net_device *netdev);
+static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
+		struct net_device *);
+static void netxen_tx_timeout(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
+static void netxen_fw_poll_work(struct work_struct *work);
+static void netxen_schedule_work(struct netxen_adapter *adapter,
+		work_func_t func, int delay);
+static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
+static int netxen_nic_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev);
+#endif
+
+static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_create_diag_entries(struct netxen_adapter *adapter);
+static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
+
+static int nx_dev_request_aer(struct netxen_adapter *adapter);
+static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
+static int netxen_can_start_firmware(struct netxen_adapter *adapter);
+
+static irqreturn_t netxen_intr(int irq, void *data);
+static irqreturn_t netxen_msi_intr(int irq, void *data);
+static irqreturn_t netxen_msix_intr(int irq, void *data);
+
+static void netxen_free_vlan_ip_list(struct netxen_adapter *);
+static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
+		struct rtnl_link_stats64 *stats);
+static int netxen_nic_set_mac(struct net_device *netdev, void *p);
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+	{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
+	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+static
DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = { + ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), + ENTRY(PCI_DEVICE_ID_NX2031_10GCX4), + ENTRY(PCI_DEVICE_ID_NX2031_4GCU), + ENTRY(PCI_DEVICE_ID_NX2031_IMEZ), + ENTRY(PCI_DEVICE_ID_NX2031_HMEZ), + ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT), + ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2), + ENTRY(PCI_DEVICE_ID_NX3031), + {0,} +}; + +MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); + +static uint32_t crb_cmd_producer[4] = { + CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1, + CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3 +}; + +void +netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, + struct nx_host_tx_ring *tx_ring) +{ + NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer); +} + +static uint32_t crb_cmd_consumer[4] = { + CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1, + CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3 +}; + +static inline void +netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter, + struct nx_host_tx_ring *tx_ring) +{ + NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer); +} + +static uint32_t msi_tgt_status[8] = { + ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, + ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, + ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, + ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 +}; + +static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; + +static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring) +{ + struct netxen_adapter *adapter = sds_ring->adapter; + + NXWRIO(adapter, sds_ring->crb_intr_mask, 0); +} + +static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring) +{ + struct netxen_adapter *adapter = sds_ring->adapter; + + NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1); + + if (!NETXEN_IS_MSI_FAMILY(adapter)) + NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff); +} + +static int +netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count) +{ + int size = sizeof(struct nx_host_sds_ring) * count; + + recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); + + return recv_ctx->sds_rings == NULL; +} + +static void +netxen_free_sds_rings(struct netxen_recv_context *recv_ctx) +{ + if (recv_ctx->sds_rings != NULL) + kfree(recv_ctx->sds_rings); + + recv_ctx->sds_rings = NULL; +} + +static int +netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) + return -ENOMEM; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netif_napi_add(netdev, &sds_ring->napi, + netxen_nic_poll, NETXEN_NETDEV_WEIGHT); + } + + return 0; +} + +static void +netxen_napi_del(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netif_napi_del(&sds_ring->napi); + } + + netxen_free_sds_rings(&adapter->recv_ctx); +} + +static void +netxen_napi_enable(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + napi_enable(&sds_ring->napi); + netxen_nic_enable_int(sds_ring); + } +} + +static void 
+netxen_napi_disable(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netxen_nic_disable_int(sds_ring); + napi_synchronize(&sds_ring->napi); + napi_disable(&sds_ring->napi); + } +} + +static int nx_set_dma_mask(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + uint64_t mask, cmask; + + adapter->pci_using_dac = 0; + + mask = DMA_BIT_MASK(32); + cmask = DMA_BIT_MASK(32); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { +#ifndef CONFIG_IA64 + mask = DMA_BIT_MASK(35); +#endif + } else { + mask = DMA_BIT_MASK(39); + cmask = mask; + } + + if (pci_set_dma_mask(pdev, mask) == 0 && + pci_set_consistent_dma_mask(pdev, cmask) == 0) { + adapter->pci_using_dac = 1; + return 0; + } + + return -EIO; +} + +/* Update addressable range if firmware supports it */ +static int +nx_update_dma_mask(struct netxen_adapter *adapter) +{ + int change, shift, err; + uint64_t mask, old_mask, old_cmask; + struct pci_dev *pdev = adapter->pdev; + + change = 0; + + shift = NXRD32(adapter, CRB_DMA_SHIFT); + if (shift > 32) + return 0; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9)) + change = 1; + else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4)) + change = 1; + + if (change) { + old_mask = pdev->dma_mask; + old_cmask = pdev->dev.coherent_dma_mask; + + mask = DMA_BIT_MASK(32+shift); + + err = pci_set_dma_mask(pdev, mask); + if (err) + goto err_out; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + + err = pci_set_consistent_dma_mask(pdev, mask); + if (err) + goto err_out; + } + dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift); + } + + return 0; + +err_out: + pci_set_dma_mask(pdev, old_mask); + pci_set_consistent_dma_mask(pdev, old_cmask); + return err; +} + +static int +netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot) +{ + u32 val, timeout; + + if (first_boot == 0x55555555) { + /* This is the first boot after power up */ + NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + /* PCI bus master workaround */ + first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4)); + if (!(first_boot & 0x4)) { + first_boot |= 0x4; + NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot); + NXRD32(adapter, NETXEN_PCIE_REG(0x4)); + } + + /* This is the first boot after power up */ + first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); + if (first_boot != 0x80000f) { + /* clear the register for future unloads/loads */ + NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0); + return -EIO; + } + + /* Start P2 boot loader */ + val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE); + NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1); + timeout = 0; + do { + msleep(1); + val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); + + if (++timeout > 5000) + return -EIO; + + } while (val == NETXEN_BDINFO_MAGIC); + } + return 0; +} + +static void netxen_set_port_mode(struct netxen_adapter *adapter) +{ + u32 val, data; + + val = adapter->ahw.board_type; + if ((val == NETXEN_BRDTYPE_P3_HMEZ) || + (val == NETXEN_BRDTYPE_P3_XG_LOM)) { + if (port_mode == NETXEN_PORT_MODE_802_3_AP) { + data = NETXEN_PORT_MODE_802_3_AP; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } else if (port_mode == NETXEN_PORT_MODE_XG) { + data = NETXEN_PORT_MODE_XG; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } else if 
(port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) { + data = NETXEN_PORT_MODE_AUTO_NEG_1G; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) { + data = NETXEN_PORT_MODE_AUTO_NEG_XG; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } else { + data = NETXEN_PORT_MODE_AUTO_NEG; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } + + if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) && + (wol_port_mode != NETXEN_PORT_MODE_XG) && + (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) && + (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) { + wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG; + } + NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode); + } +} + +static void netxen_set_msix_bit(struct pci_dev *pdev, int enable) +{ + u32 control; + int pos; + + pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (pos) { + pci_read_config_dword(pdev, pos, &control); + if (enable) + control |= PCI_MSIX_FLAGS_ENABLE; + else + control = 0; + pci_write_config_dword(pdev, pos, control); + } +} + +static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count) +{ + int i; + + for (i = 0; i < count; i++) + adapter->msix_entries[i].entry = i; +} + +static int +netxen_read_mac_addr(struct netxen_adapter *adapter) +{ + int i; + unsigned char *p; + u64 mac_addr; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) + return -EIO; + } else { + if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0) + return -EIO; + } + + p = (unsigned char *)&mac_addr; + for (i = 0; i < 6; i++) + netdev->dev_addr[i] = *(p + 5 - i); + + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); + memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); + + /* set station address */ + + if (!is_valid_ether_addr(netdev->perm_addr)) + dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); + + return 0; +} + +static int netxen_nic_set_mac(struct net_device *netdev, void *p) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + if (netif_running(netdev)) { + netif_device_detach(netdev); + netxen_napi_disable(adapter); + } + + memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + adapter->macaddr_set(adapter, addr->sa_data); + + if (netif_running(netdev)) { + netif_device_attach(netdev); + netxen_napi_enable(adapter); + } + return 0; +} + +static void netxen_set_multicast_list(struct net_device *dev) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + + adapter->set_multi(dev); +} + +static u32 netxen_fix_features(struct net_device *dev, u32 features) +{ + if (!(features & NETIF_F_RXCSUM)) { + netdev_info(dev, "disabling LRO as RXCSUM is off\n"); + + features &= ~NETIF_F_LRO; + } + + return features; +} + +static int netxen_set_features(struct net_device *dev, u32 features) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + int hw_lro; + + if (!((dev->features ^ features) & NETIF_F_LRO)) + return 0; + + hw_lro = (features & NETIF_F_LRO) ? 
NETXEN_NIC_LRO_ENABLED + : NETXEN_NIC_LRO_DISABLED; + + if (netxen_config_hw_lro(adapter, hw_lro)) + return -EIO; + + if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter)) + return -EIO; + + return 0; +} + +static const struct net_device_ops netxen_netdev_ops = { + .ndo_open = netxen_nic_open, + .ndo_stop = netxen_nic_close, + .ndo_start_xmit = netxen_nic_xmit_frame, + .ndo_get_stats64 = netxen_nic_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = netxen_set_multicast_list, + .ndo_set_mac_address = netxen_nic_set_mac, + .ndo_change_mtu = netxen_nic_change_mtu, + .ndo_tx_timeout = netxen_tx_timeout, + .ndo_fix_features = netxen_fix_features, + .ndo_set_features = netxen_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = netxen_nic_poll_controller, +#endif +}; + +static void +netxen_setup_intr(struct netxen_adapter *adapter) +{ + struct netxen_legacy_intr_set *legacy_intrp; + struct pci_dev *pdev = adapter->pdev; + int err, num_msix; + + if (adapter->rss_supported) { + num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? + MSIX_ENTRIES_PER_ADAPTER : 2; + } else + num_msix = 1; + + adapter->max_sds_rings = 1; + + adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); + + if (adapter->ahw.revision_id >= NX_P3_B0) + legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; + else + legacy_intrp = &legacy_intr[0]; + + adapter->int_vec_bit = legacy_intrp->int_vec_bit; + adapter->tgt_status_reg = netxen_get_ioaddr(adapter, + legacy_intrp->tgt_status_reg); + adapter->tgt_mask_reg = netxen_get_ioaddr(adapter, + legacy_intrp->tgt_mask_reg); + adapter->pci_int_reg = netxen_get_ioaddr(adapter, + legacy_intrp->pci_int_reg); + adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR); + + if (adapter->ahw.revision_id >= NX_P3_B1) + adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, + ISR_INT_STATE_REG); + else + adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, + CRB_INT_VECTOR); + + netxen_set_msix_bit(pdev, 0); + + if (adapter->msix_supported) { + + netxen_init_msix_entries(adapter, num_msix); + err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); + if (err == 0) { + adapter->flags |= NETXEN_NIC_MSIX_ENABLED; + netxen_set_msix_bit(pdev, 1); + + if (adapter->rss_supported) + adapter->max_sds_rings = num_msix; + + dev_info(&pdev->dev, "using msi-x interrupts\n"); + return; + } + + if (err > 0) + pci_disable_msix(pdev); + + /* fall through for msi */ + } + + if (use_msi && !pci_enable_msi(pdev)) { + adapter->flags |= NETXEN_NIC_MSI_ENABLED; + adapter->tgt_status_reg = netxen_get_ioaddr(adapter, + msi_tgt_status[adapter->ahw.pci_func]); + dev_info(&pdev->dev, "using msi interrupts\n"); + adapter->msix_entries[0].vector = pdev->irq; + return; + } + + dev_info(&pdev->dev, "using legacy interrupts\n"); + adapter->msix_entries[0].vector = pdev->irq; +} + +static void +netxen_teardown_intr(struct netxen_adapter *adapter) +{ + if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) + pci_disable_msix(adapter->pdev); + if (adapter->flags & NETXEN_NIC_MSI_ENABLED) + pci_disable_msi(adapter->pdev); +} + +static void +netxen_cleanup_pci_map(struct netxen_adapter *adapter) +{ + if (adapter->ahw.db_base != NULL) + iounmap(adapter->ahw.db_base); + if (adapter->ahw.pci_base0 != NULL) + iounmap(adapter->ahw.pci_base0); + if (adapter->ahw.pci_base1 != NULL) + iounmap(adapter->ahw.pci_base1); + if (adapter->ahw.pci_base2 != NULL) + iounmap(adapter->ahw.pci_base2); +} + +static int +netxen_setup_pci_map(struct 
netxen_adapter *adapter) +{ + void __iomem *db_ptr = NULL; + + resource_size_t mem_base, db_base; + unsigned long mem_len, db_len = 0; + + struct pci_dev *pdev = adapter->pdev; + int pci_func = adapter->ahw.pci_func; + struct netxen_hardware_context *ahw = &adapter->ahw; + + int err = 0; + + /* + * Set the CRB window to invalid. If any register in window 0 is + * accessed it should set the window to 0 and then reset it to 1. + */ + adapter->ahw.crb_win = -1; + adapter->ahw.ocm_win = -1; + + /* remap phys address */ + mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ + mem_len = pci_resource_len(pdev, 0); + + /* 128 Meg of memory */ + if (mem_len == NETXEN_PCI_128MB_SIZE) { + + ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); + ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, + SECOND_PAGE_GROUP_SIZE); + ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, + THIRD_PAGE_GROUP_SIZE); + if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL || + ahw->pci_base2 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + err = -EIO; + goto err_out; + } + + ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE; + + } else if (mem_len == NETXEN_PCI_32MB_SIZE) { + + ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); + ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - + SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); + if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + err = -EIO; + goto err_out; + } + + } else if (mem_len == NETXEN_PCI_2MB_SIZE) { + + ahw->pci_base0 = pci_ioremap_bar(pdev, 0); + if (ahw->pci_base0 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + return -EIO; + } + ahw->pci_len0 = mem_len; + } else { + return -EIO; + } + + netxen_setup_hwops(adapter); + + dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); + + if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { + adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, + NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); + + } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, + NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func))); + } + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + goto skip_doorbell; + + db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ + db_len = pci_resource_len(pdev, 4); + + if (db_len == 0) { + printk(KERN_ERR "%s: doorbell is disabled\n", + netxen_nic_driver_name); + err = -EIO; + goto err_out; + } + + db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES); + if (!db_ptr) { + printk(KERN_ERR "%s: Failed to allocate doorbell map.", + netxen_nic_driver_name); + err = -EIO; + goto err_out; + } + +skip_doorbell: + adapter->ahw.db_base = db_ptr; + adapter->ahw.db_len = db_len; + return 0; + +err_out: + netxen_cleanup_pci_map(adapter); + return err; +} + +static void +netxen_check_options(struct netxen_adapter *adapter) +{ + u32 fw_major, fw_minor, fw_build; + char brd_name[NETXEN_MAX_SHORT_NAME]; + char serial_num[32]; + int i, offset, val; + int *ptr32; + struct pci_dev *pdev = adapter->pdev; + + adapter->driver_mismatch = 0; + + ptr32 = (int *)&serial_num; + offset = NX_FW_SERIAL_NUM_OFFSET; + for (i = 0; i < 8; i++) { + if (netxen_rom_fast_read(adapter, offset, &val) == -1) { + dev_err(&pdev->dev, "error reading board info\n"); + adapter->driver_mismatch = 1; + return; + } + ptr32[i] = cpu_to_le32(val); + offset += sizeof(u32); + } + + fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); + fw_minor = 
NXRD32(adapter, NETXEN_FW_VERSION_MINOR); + fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); + + adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build); + + if (adapter->portnum == 0) { + get_brd_name_by_type(adapter->ahw.board_type, brd_name); + + pr_info("%s: %s Board S/N %s Chip rev 0x%x\n", + module_name(THIS_MODULE), + brd_name, serial_num, adapter->ahw.revision_id); + } + + if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) { + adapter->driver_mismatch = 1; + dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n", + fw_major, fw_minor, fw_build); + return; + } + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + i = NXRD32(adapter, NETXEN_SRE_MISC); + adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0; + } + + dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n", + fw_major, fw_minor, fw_build, + adapter->ahw.cut_through ? "cut-through" : "legacy"); + + if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222)) + adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1); + + if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + } + + adapter->msix_supported = 0; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + adapter->msix_supported = !!use_msi_x; + adapter->rss_supported = !!use_msi_x; + } else { + u32 flashed_ver = 0; + netxen_rom_fast_read(adapter, + NX_FW_VERSION_OFFSET, (int *)&flashed_ver); + flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); + + if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) { + switch (adapter->ahw.board_type) { + case NETXEN_BRDTYPE_P2_SB31_10G: + case NETXEN_BRDTYPE_P2_SB31_10G_CX4: + adapter->msix_supported = !!use_msi_x; + adapter->rss_supported = !!use_msi_x; + break; + default: + break; + } + } + } + + adapter->num_txd = MAX_CMD_DESCRIPTORS; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS; + adapter->max_rds_rings = 3; + } else { + adapter->num_lro_rxd = 0; + adapter->max_rds_rings = 2; + } +} + +static int +netxen_start_firmware(struct netxen_adapter *adapter) +{ + int val, err, first_boot; + struct pci_dev *pdev = adapter->pdev; + + /* required for NX2031 dummy dma */ + err = nx_set_dma_mask(adapter); + if (err) + return err; + + if (!netxen_can_start_firmware(adapter)) + goto wait_init; + + first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); + + err = netxen_check_hw_init(adapter, first_boot); + if (err) { + dev_err(&pdev->dev, "error in init HW init sequence\n"); + return err; + } + + netxen_request_firmware(adapter); + + err = netxen_need_fw_reset(adapter); + if (err < 0) + goto err_out; + if (err == 0) + goto wait_init; + + if (first_boot != 0x55555555) { + NXWR32(adapter, CRB_CMDPEG_STATE, 0); + netxen_pinit_from_rom(adapter); + msleep(1); + } + + NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555); + NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0); + NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netxen_set_port_mode(adapter); + + err = netxen_load_firmware(adapter); + if (err) + goto err_out; + + netxen_release_firmware(adapter); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + + /* Initialize multicast addr pool owners */ + val = 0x7654; + if (adapter->ahw.port_type == NETXEN_NIC_XGBE) + val |= 0x0f000000; + NXWR32(adapter, 
NETXEN_MAC_ADDR_CNTL_REG, val); + + } + + err = netxen_init_dummy_dma(adapter); + if (err) + goto err_out; + + /* + * Tell the hardware our version number. + */ + val = (_NETXEN_NIC_LINUX_MAJOR << 16) + | ((_NETXEN_NIC_LINUX_MINOR << 8)) + | (_NETXEN_NIC_LINUX_SUBVERSION); + NXWR32(adapter, CRB_DRIVER_VERSION, val); + +wait_init: + /* Handshake with the card before we register the devices. */ + err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); + if (err) { + netxen_free_dummy_dma(adapter); + goto err_out; + } + + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY); + + nx_update_dma_mask(adapter); + + netxen_check_options(adapter); + + adapter->need_fw_reset = 0; + + /* fall through and release firmware */ + +err_out: + netxen_release_firmware(adapter); + return err; +} + +static int +netxen_nic_request_irq(struct netxen_adapter *adapter) +{ + irq_handler_t handler; + struct nx_host_sds_ring *sds_ring; + int err, ring; + + unsigned long flags = 0; + struct net_device *netdev = adapter->netdev; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) + handler = netxen_msix_intr; + else if (adapter->flags & NETXEN_NIC_MSI_ENABLED) + handler = netxen_msi_intr; + else { + flags |= IRQF_SHARED; + handler = netxen_intr; + } + adapter->irq = netdev->irq; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + sprintf(sds_ring->name, "%s[%d]", netdev->name, ring); + err = request_irq(sds_ring->irq, handler, + flags, sds_ring->name, sds_ring); + if (err) + return err; + } + + return 0; +} + +static void +netxen_nic_free_irq(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + free_irq(sds_ring->irq, sds_ring); + } +} + +static void +netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter) +{ + adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; + adapter->coal.normal.data.rx_time_us = + NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; + adapter->coal.normal.data.rx_packets = + NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; + adapter->coal.normal.data.tx_time_us = + NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US; + adapter->coal.normal.data.tx_packets = + NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS; +} + +/* with rtnl_lock */ +static int +__netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) +{ + int err; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return -EIO; + + err = adapter->init_port(adapter, adapter->physical_port); + if (err) { + printk(KERN_ERR "%s: Failed to initialize port %d\n", + netxen_nic_driver_name, adapter->portnum); + return err; + } + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + adapter->macaddr_set(adapter, adapter->mac_addr); + + adapter->set_multi(netdev); + adapter->set_mtu(adapter, netdev->mtu); + + adapter->ahw.linkup = 0; + + if (adapter->max_sds_rings > 1) + netxen_config_rss(adapter, 1); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netxen_config_intr_coalesce(adapter); + + if (netdev->features & NETIF_F_LRO) + netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED); + + netxen_napi_enable(adapter); + + if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) + netxen_linkevent_request(adapter, 1); + else + netxen_nic_set_link_parameters(adapter); + + set_bit(__NX_DEV_UP, &adapter->state); + return 0; +} + +/* Usage: During resume and firmware 
recovery module.*/ + +static inline int +netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) +{ + int err = 0; + + rtnl_lock(); + if (netif_running(netdev)) + err = __netxen_nic_up(adapter, netdev); + rtnl_unlock(); + + return err; +} + +/* with rtnl_lock */ +static void +__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) +{ + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return; + + if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state)) + return; + + smp_mb(); + spin_lock(&adapter->tx_clean_lock); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) + netxen_linkevent_request(adapter, 0); + + if (adapter->stop_port) + adapter->stop_port(adapter); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netxen_p3_free_mac_list(adapter); + + adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE); + + netxen_napi_disable(adapter); + + netxen_release_tx_buffers(adapter); + spin_unlock(&adapter->tx_clean_lock); +} + +/* Usage: During suspend and firmware recovery module */ + +static inline void +netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) +{ + rtnl_lock(); + if (netif_running(netdev)) + __netxen_nic_down(adapter, netdev); + rtnl_unlock(); + +} + +static int +netxen_nic_attach(struct netxen_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err, ring; + struct nx_host_rds_ring *rds_ring; + struct nx_host_tx_ring *tx_ring; + + if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) + return 0; + + err = netxen_init_firmware(adapter); + if (err) + return err; + + err = netxen_napi_add(adapter, netdev); + if (err) + return err; + + err = netxen_alloc_sw_resources(adapter); + if (err) { + printk(KERN_ERR "%s: Error in setting sw resources\n", + netdev->name); + return err; + } + + err = netxen_alloc_hw_resources(adapter); + if (err) { + printk(KERN_ERR "%s: Error in setting hw resources\n", + netdev->name); + goto err_out_free_sw; + } + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + tx_ring = adapter->tx_ring; + tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, + crb_cmd_producer[adapter->portnum]); + tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter, + crb_cmd_consumer[adapter->portnum]); + + tx_ring->producer = 0; + tx_ring->sw_consumer = 0; + + netxen_nic_update_cmd_producer(adapter, tx_ring); + netxen_nic_update_cmd_consumer(adapter, tx_ring); + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx.rds_rings[ring]; + netxen_post_rx_buffers(adapter, ring, rds_ring); + } + + err = netxen_nic_request_irq(adapter); + if (err) { + dev_err(&pdev->dev, "%s: failed to setup interrupt\n", + netdev->name); + goto err_out_free_rxbuf; + } + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netxen_nic_init_coalesce_defaults(adapter); + + netxen_create_sysfs_entries(adapter); + + adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; + return 0; + +err_out_free_rxbuf: + netxen_release_rx_buffers(adapter); + netxen_free_hw_resources(adapter); +err_out_free_sw: + netxen_free_sw_resources(adapter); + return err; +} + +static void +netxen_nic_detach(struct netxen_adapter *adapter) +{ + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return; + + netxen_remove_sysfs_entries(adapter); + + netxen_free_hw_resources(adapter); + netxen_release_rx_buffers(adapter); + netxen_nic_free_irq(adapter); + netxen_napi_del(adapter); + netxen_free_sw_resources(adapter); + + 
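The double-underscore names above follow the usual kernel convention: __netxen_nic_up() and __netxen_nic_down() assume the caller already holds the RTNL mutex, while the plain-named wrappers take it themselves. A generic sketch of the pattern (the names here are placeholders, not driver API):

	static void __example_down(struct net_device *dev)
	{
		ASSERT_RTNL();	/* caller must hold rtnl_lock */
		/* ... quiesce the device ... */
	}

	static void example_down(struct net_device *dev)
	{
		rtnl_lock();
		__example_down(dev);
		rtnl_unlock();
	}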
adapter->is_up = 0; +} + +int +netxen_nic_reset_context(struct netxen_adapter *adapter) +{ + int err = 0; + struct net_device *netdev = adapter->netdev; + + if (test_and_set_bit(__NX_RESETTING, &adapter->state)) + return -EBUSY; + + if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __netxen_nic_down(adapter, netdev); + + netxen_nic_detach(adapter); + + if (netif_running(netdev)) { + err = netxen_nic_attach(adapter); + if (!err) + err = __netxen_nic_up(adapter, netdev); + + if (err) + goto done; + } + + netif_device_attach(netdev); + } + +done: + clear_bit(__NX_RESETTING, &adapter->state); + return err; +} + +static int +netxen_setup_netdev(struct netxen_adapter *adapter, + struct net_device *netdev) +{ + int err = 0; + struct pci_dev *pdev = adapter->pdev; + + adapter->mc_enabled = 0; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + adapter->max_mc_count = 38; + else + adapter->max_mc_count = 16; + + netdev->netdev_ops = &netxen_netdev_ops; + netdev->watchdog_timeo = 5*HZ; + + netxen_nic_change_mtu(netdev, netdev->mtu); + + SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); + + netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + + netdev->vlan_features |= netdev->hw_features; + + if (adapter->pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) + netdev->hw_features |= NETIF_F_HW_VLAN_TX; + + if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) + netdev->hw_features |= NETIF_F_LRO; + + netdev->features |= netdev->hw_features; + + netdev->irq = adapter->msix_entries[0].vector; + + INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); + + if (netxen_read_mac_addr(adapter)) + dev_warn(&pdev->dev, "failed to read mac addr\n"); + + netif_carrier_off(netdev); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "failed to register net device\n"); + return err; + } + + return 0; +} + +#ifdef CONFIG_PCIEAER +static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *root = pdev->bus->self; + u32 aer_pos; + + if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM && + adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) + return; + + if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT) + return; + + aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR); + if (!aer_pos) + return; + + pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff); +} +#endif + +static int __devinit +netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev = NULL; + struct netxen_adapter *adapter = NULL; + int i = 0, err; + int pci_func_id = PCI_FUNC(pdev->devfn); + uint8_t revision_id; + u32 val; + + if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { + pr_warning("%s: chip revisions between 0x%x-0x%x " + "will not be enabled.\n", + module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); + return -ENODEV; + } + + if ((err = pci_enable_device(pdev))) + return err; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, netxen_nic_driver_name))) + goto err_out_disable_pdev; + + if (NX_IS_REVISION_P3(pdev->revision)) + pci_enable_pcie_error_reporting(pdev); + + 
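pci_enable_pcie_error_reporting() above only arms AER reporting; recovery itself is driven through struct pci_error_handlers callbacks, which netxen_io_error_detected() and netxen_io_slot_reset() further down provide. The registration falls past the end of this excerpt, so the following shows only the expected wiring, not verbatim patch content:

	/* Assumed wiring; the real definition is outside this excerpt. */
	static struct pci_error_handlers netxen_err_handler = {
		.error_detected	= netxen_io_error_detected,
		.slot_reset	= netxen_io_slot_reset,
	};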
pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct netxen_adapter)); + if(!netdev) { + dev_err(&pdev->dev, "failed to allocate net_device\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ahw.pci_func = pci_func_id; + + revision_id = pdev->revision; + adapter->ahw.revision_id = revision_id; + + rwlock_init(&adapter->ahw.crb_lock); + spin_lock_init(&adapter->ahw.mem_lock); + + spin_lock_init(&adapter->tx_clean_lock); + INIT_LIST_HEAD(&adapter->mac_list); + INIT_LIST_HEAD(&adapter->vlan_ip_list); + + err = netxen_setup_pci_map(adapter); + if (err) + goto err_out_free_netdev; + + /* This will be reset for mezz cards */ + adapter->portnum = pci_func_id; + + err = netxen_nic_get_board_info(adapter); + if (err) { + dev_err(&pdev->dev, "Error getting board config info.\n"); + goto err_out_iounmap; + } + +#ifdef CONFIG_PCIEAER + netxen_mask_aer_correctable(adapter); +#endif + + /* Mezz cards have PCI function 0,2,3 enabled */ + switch (adapter->ahw.board_type) { + case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: + if (pci_func_id >= 2) + adapter->portnum = pci_func_id - 2; + break; + default: + break; + } + + err = netxen_check_flash_fw_compatibility(adapter); + if (err) + goto err_out_iounmap; + + if (adapter->portnum == 0) { + val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + if (val != 0xffffffff && val != 0) { + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); + adapter->need_fw_reset = 1; + } + } + + err = netxen_start_firmware(adapter); + if (err) + goto err_out_decr_ref; + + /* + * See if the firmware gave us a virtual-physical port mapping. + */ + adapter->physical_port = adapter->portnum; + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + i = NXRD32(adapter, CRB_V2P(adapter->portnum)); + if (i != 0x55555555) + adapter->physical_port = i; + } + + netxen_nic_clear_stats(adapter); + + netxen_setup_intr(adapter); + + err = netxen_setup_netdev(adapter, netdev); + if (err) + goto err_out_disable_msi; + + pci_set_drvdata(pdev, adapter); + + netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); + + switch (adapter->ahw.port_type) { + case NETXEN_NIC_GBE: + dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", + adapter->netdev->name); + break; + case NETXEN_NIC_XGBE: + dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", + adapter->netdev->name); + break; + } + + netxen_create_diag_entries(adapter); + + return 0; + +err_out_disable_msi: + netxen_teardown_intr(adapter); + + netxen_free_dummy_dma(adapter); + +err_out_decr_ref: + nx_decr_dev_ref_cnt(adapter); + +err_out_iounmap: + netxen_cleanup_pci_map(adapter); + +err_out_free_netdev: + free_netdev(netdev); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable_pdev: + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + return err; +} + +static void __devexit netxen_nic_remove(struct pci_dev *pdev) +{ + struct netxen_adapter *adapter; + struct net_device *netdev; + + adapter = pci_get_drvdata(pdev); + if (adapter == NULL) + return; + + netdev = adapter->netdev; + + netxen_cancel_fw_work(adapter); + + unregister_netdev(netdev); + + cancel_work_sync(&adapter->tx_timeout_task); + + netxen_free_vlan_ip_list(adapter); + netxen_nic_detach(adapter); + + nx_decr_dev_ref_cnt(adapter); + + if (adapter->portnum == 0) + netxen_free_dummy_dma(adapter); + + clear_bit(__NX_RESETTING, &adapter->state); + + netxen_teardown_intr(adapter); + + 
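+	/* Remove the diag sysfs entries created at probe time before unmapping the BARs. */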
netxen_remove_diag_entries(adapter); + + netxen_cleanup_pci_map(adapter); + + netxen_release_firmware(adapter); + + if (NX_IS_REVISION_P3(pdev->revision)) + pci_disable_pcie_error_reporting(pdev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + free_netdev(netdev); +} + +static void netxen_nic_detach_func(struct netxen_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + + netxen_cancel_fw_work(adapter); + + if (netif_running(netdev)) + netxen_nic_down(adapter, netdev); + + cancel_work_sync(&adapter->tx_timeout_task); + + netxen_nic_detach(adapter); + + if (adapter->portnum == 0) + netxen_free_dummy_dma(adapter); + + nx_decr_dev_ref_cnt(adapter); + + clear_bit(__NX_RESETTING, &adapter->state); +} + +static int netxen_nic_attach_func(struct pci_dev *pdev) +{ + struct netxen_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_power_state(pdev, PCI_D0); + pci_set_master(pdev); + pci_restore_state(pdev); + + adapter->ahw.crb_win = -1; + adapter->ahw.ocm_win = -1; + + err = netxen_start_firmware(adapter); + if (err) { + dev_err(&pdev->dev, "failed to start firmware\n"); + return err; + } + + if (netif_running(netdev)) { + err = netxen_nic_attach(adapter); + if (err) + goto err_out; + + err = netxen_nic_up(adapter, netdev); + if (err) + goto err_out_detach; + + netxen_restore_indev_addr(netdev, NETDEV_UP); + } + + netif_device_attach(netdev); + netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); + return 0; + +err_out_detach: + netxen_nic_detach(adapter); +err_out: + nx_decr_dev_ref_cnt(adapter); + return err; +} + +static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct netxen_adapter *adapter = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (nx_dev_request_aer(adapter)) + return PCI_ERS_RESULT_RECOVERED; + + netxen_nic_detach_func(adapter); + + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev) +{ + int err = 0; + + err = netxen_nic_attach_func(pdev); + + return err ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; +} + +static void netxen_io_resume(struct pci_dev *pdev) +{ + pci_cleanup_aer_uncorrect_error_status(pdev); +} + +static void netxen_nic_shutdown(struct pci_dev *pdev) +{ + struct netxen_adapter *adapter = pci_get_drvdata(pdev); + + netxen_nic_detach_func(adapter); + + if (pci_save_state(pdev)) + return; + + if (netxen_nic_wol_supported(adapter)) { + pci_enable_wake(pdev, PCI_D3cold, 1); + pci_enable_wake(pdev, PCI_D3hot, 1); + } + + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +static int +netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct netxen_adapter *adapter = pci_get_drvdata(pdev); + int retval; + + netxen_nic_detach_func(adapter); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + if (netxen_nic_wol_supported(adapter)) { + pci_enable_wake(pdev, PCI_D3cold, 1); + pci_enable_wake(pdev, PCI_D3hot, 1); + } + + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int +netxen_nic_resume(struct pci_dev *pdev) +{ + return netxen_nic_attach_func(pdev); +} +#endif + +static int netxen_nic_open(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (adapter->driver_mismatch) + return -EIO; + + err = netxen_nic_attach(adapter); + if (err) + return err; + + err = __netxen_nic_up(adapter, netdev); + if (err) + goto err_out; + + netif_start_queue(netdev); + + return 0; + +err_out: + netxen_nic_detach(adapter); + return err; +} + +/* + * netxen_nic_close - Disables a network interface entry point + */ +static int netxen_nic_close(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + __netxen_nic_down(adapter, netdev); + return 0; +} + +static void +netxen_tso_check(struct net_device *netdev, + struct nx_host_tx_ring *tx_ring, + struct cmd_desc_type0 *first_desc, + struct sk_buff *skb) +{ + u8 opcode = TX_ETHER_PKT; + __be16 protocol = skb->protocol; + u16 flags = 0, vid = 0; + u32 producer; + int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; + struct cmd_desc_type0 *hwdesc; + struct vlan_ethhdr *vh; + + if (protocol == cpu_to_be16(ETH_P_8021Q)) { + + vh = (struct vlan_ethhdr *)skb->data; + protocol = vh->h_vlan_encapsulated_proto; + flags = FLAGS_VLAN_TAGGED; + + } else if (vlan_tx_tag_present(skb)) { + + flags = FLAGS_VLAN_OOB; + vid = vlan_tx_tag_get(skb); + netxen_set_tx_vlan_tci(first_desc, vid); + vlan_oob = 1; + } + + if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && + skb_shinfo(skb)->gso_size > 0) { + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + first_desc->total_hdr_length = hdr_len; + if (vlan_oob) { + first_desc->total_hdr_length += VLAN_HLEN; + first_desc->tcp_hdr_offset = VLAN_HLEN; + first_desc->ip_hdr_offset = VLAN_HLEN; + /* Only in case of TSO on vlan device */ + flags |= FLAGS_VLAN_TAGGED; + } + + opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ? 
+ TX_TCP_LSO6 : TX_TCP_LSO; + tso = 1; + + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 l4proto; + + if (protocol == cpu_to_be16(ETH_P_IP)) { + l4proto = ip_hdr(skb)->protocol; + + if (l4proto == IPPROTO_TCP) + opcode = TX_TCP_PKT; + else if(l4proto == IPPROTO_UDP) + opcode = TX_UDP_PKT; + } else if (protocol == cpu_to_be16(ETH_P_IPV6)) { + l4proto = ipv6_hdr(skb)->nexthdr; + + if (l4proto == IPPROTO_TCP) + opcode = TX_TCPV6_PKT; + else if(l4proto == IPPROTO_UDP) + opcode = TX_UDPV6_PKT; + } + } + + first_desc->tcp_hdr_offset += skb_transport_offset(skb); + first_desc->ip_hdr_offset += skb_network_offset(skb); + netxen_set_tx_flags_opcode(first_desc, flags, opcode); + + if (!tso) + return; + + /* For LSO, we need to copy the MAC/IP/TCP headers into + * the descriptor ring + */ + producer = tx_ring->producer; + copied = 0; + offset = 2; + + if (vlan_oob) { + /* Create a TSO vlan header template for firmware */ + + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + + copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, + hdr_len + VLAN_HLEN); + + vh = (struct vlan_ethhdr *)((char *)hwdesc + 2); + skb_copy_from_linear_data(skb, vh, 12); + vh->h_vlan_proto = htons(ETH_P_8021Q); + vh->h_vlan_TCI = htons(vid); + skb_copy_from_linear_data_offset(skb, 12, + (char *)vh + 16, copy_len - 16); + + copied = copy_len - VLAN_HLEN; + offset = 0; + + producer = get_next_index(producer, tx_ring->num_desc); + } + + while (copied < hdr_len) { + + copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, + (hdr_len - copied)); + + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + + skb_copy_from_linear_data_offset(skb, copied, + (char *)hwdesc + offset, copy_len); + + copied += copy_len; + offset = 0; + + producer = get_next_index(producer, tx_ring->num_desc); + } + + tx_ring->producer = producer; + barrier(); +} + +static int +netxen_map_tx_skb(struct pci_dev *pdev, + struct sk_buff *skb, struct netxen_cmd_buffer *pbuf) +{ + struct netxen_skb_frag *nf; + struct skb_frag_struct *frag; + int i, nr_frags; + dma_addr_t map; + + nr_frags = skb_shinfo(skb)->nr_frags; + nf = &pbuf->frag_array[0]; + + map = pci_map_single(pdev, skb->data, + skb_headlen(skb), PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, map)) + goto out_err; + + nf->dma = map; + nf->length = skb_headlen(skb); + + for (i = 0; i < nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + nf = &pbuf->frag_array[i+1]; + + map = pci_map_page(pdev, frag->page, frag->page_offset, + frag->size, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, map)) + goto unwind; + + nf->dma = map; + nf->length = frag->size; + } + + return 0; + +unwind: + while (--i >= 0) { + nf = &pbuf->frag_array[i+1]; + pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + } + + nf = &pbuf->frag_array[0]; + pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + +out_err: + return -ENOMEM; +} + +static inline void +netxen_clear_cmddesc(u64 *desc) +{ + desc[0] = 0ULL; + desc[2] = 0ULL; +} + +static netdev_tx_t +netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + struct netxen_cmd_buffer *pbuf; + struct netxen_skb_frag *buffrag; + struct cmd_desc_type0 *hwdesc, *first_desc; + struct pci_dev *pdev; + int i, k; + int delta = 0; + struct skb_frag_struct *frag; + + u32 producer; + int frag_count, no_of_desc; + u32 num_txd = tx_ring->num_desc; 
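+	/* The linear portion of the skb counts as one fragment on top of the paged frags. */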
+ + frag_count = skb_shinfo(skb)->nr_frags + 1; + + /* 14 frags supported for normal packet and + * 32 frags supported for TSO packet + */ + if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) { + + for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) { + frag = &skb_shinfo(skb)->frags[i]; + delta += frag->size; + } + + if (!__pskb_pull_tail(skb, delta)) + goto drop_packet; + + frag_count = 1 + skb_shinfo(skb)->nr_frags; + } + /* 4 fragments per cmd des */ + no_of_desc = (frag_count + 3) >> 2; + + if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) { + netif_stop_queue(netdev); + smp_mb(); + if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_start_queue(netdev); + else + return NETDEV_TX_BUSY; + } + + producer = tx_ring->producer; + pbuf = &tx_ring->cmd_buf_arr[producer]; + + pdev = adapter->pdev; + + if (netxen_map_tx_skb(pdev, skb, pbuf)) + goto drop_packet; + + pbuf->skb = skb; + pbuf->frag_count = frag_count; + + first_desc = hwdesc = &tx_ring->desc_head[producer]; + netxen_clear_cmddesc((u64 *)hwdesc); + + netxen_set_tx_frags_len(first_desc, frag_count, skb->len); + netxen_set_tx_port(first_desc, adapter->portnum); + + for (i = 0; i < frag_count; i++) { + + k = i % 4; + + if ((k == 0) && (i > 0)) { + /* move to next desc.*/ + producer = get_next_index(producer, num_txd); + hwdesc = &tx_ring->desc_head[producer]; + netxen_clear_cmddesc((u64 *)hwdesc); + tx_ring->cmd_buf_arr[producer].skb = NULL; + } + + buffrag = &pbuf->frag_array[i]; + + hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); + switch (k) { + case 0: + hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); + break; + case 1: + hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); + break; + case 2: + hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma); + break; + case 3: + hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); + break; + } + } + + tx_ring->producer = get_next_index(producer, num_txd); + + netxen_tso_check(netdev, tx_ring, first_desc, skb); + + adapter->stats.txbytes += skb->len; + adapter->stats.xmitcalled++; + + netxen_nic_update_cmd_producer(adapter, tx_ring); + + return NETDEV_TX_OK; + +drop_packet: + adapter->stats.txdropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static int netxen_nic_check_temp(struct netxen_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + uint32_t temp, temp_state, temp_val; + int rv = 0; + + temp = NXRD32(adapter, CRB_TEMP_STATE); + + temp_state = nx_get_temp_state(temp); + temp_val = nx_get_temp_val(temp); + + if (temp_state == NX_TEMP_PANIC) { + printk(KERN_ALERT + "%s: Device temperature %d degrees C exceeds" + " maximum allowed. Hardware has been shut down.\n", + netdev->name, temp_val); + rv = 1; + } else if (temp_state == NX_TEMP_WARN) { + if (adapter->temp == NX_TEMP_NORMAL) { + printk(KERN_ALERT + "%s: Device temperature %d degrees C " + "exceeds operating range." 
+ " Immediate action needed.\n", + netdev->name, temp_val); + } + } else { + if (adapter->temp == NX_TEMP_WARN) { + printk(KERN_INFO + "%s: Device temperature is now %d degrees C" + " in normal range.\n", netdev->name, + temp_val); + } + } + adapter->temp = temp_state; + return rv; +} + +void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->ahw.linkup && !linkup) { + printk(KERN_INFO "%s: %s NIC Link is down\n", + netxen_nic_driver_name, netdev->name); + adapter->ahw.linkup = 0; + if (netif_running(netdev)) { + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + adapter->link_changed = !adapter->has_link_events; + } else if (!adapter->ahw.linkup && linkup) { + printk(KERN_INFO "%s: %s NIC Link is up\n", + netxen_nic_driver_name, netdev->name); + adapter->ahw.linkup = 1; + if (netif_running(netdev)) { + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + adapter->link_changed = !adapter->has_link_events; + } +} + +static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) +{ + u32 val, port, linkup; + + port = adapter->physical_port; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + val = NXRD32(adapter, CRB_XG_STATE_P3); + val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); + linkup = (val == XG_LINK_UP_P3); + } else { + val = NXRD32(adapter, CRB_XG_STATE); + val = (val >> port*8) & 0xff; + linkup = (val == XG_LINK_UP); + } + + netxen_advert_link_change(adapter, linkup); +} + +static void netxen_tx_timeout(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + if (test_bit(__NX_RESETTING, &adapter->state)) + return; + + dev_err(&netdev->dev, "transmit timeout, resetting.\n"); + schedule_work(&adapter->tx_timeout_task); +} + +static void netxen_tx_timeout_task(struct work_struct *work) +{ + struct netxen_adapter *adapter = + container_of(work, struct netxen_adapter, tx_timeout_task); + + if (!netif_running(adapter->netdev)) + return; + + if (test_and_set_bit(__NX_RESETTING, &adapter->state)) + return; + + if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS) + goto request_reset; + + rtnl_lock(); + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + /* try to scrub interrupt */ + netxen_napi_disable(adapter); + + netxen_napi_enable(adapter); + + netif_wake_queue(adapter->netdev); + + clear_bit(__NX_RESETTING, &adapter->state); + } else { + clear_bit(__NX_RESETTING, &adapter->state); + if (netxen_nic_reset_context(adapter)) { + rtnl_unlock(); + goto request_reset; + } + } + adapter->netdev->trans_start = jiffies; + rtnl_unlock(); + return; + +request_reset: + adapter->need_fw_reset = 1; + clear_bit(__NX_RESETTING, &adapter->state); +} + +static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; + stats->tx_packets = adapter->stats.xmitfinished; + stats->rx_bytes = adapter->stats.rxbytes; + stats->tx_bytes = adapter->stats.txbytes; + stats->rx_dropped = adapter->stats.rxdropped; + stats->tx_dropped = adapter->stats.txdropped; + + return stats; +} + +static irqreturn_t netxen_intr(int irq, void *data) +{ + struct nx_host_sds_ring *sds_ring = data; + struct netxen_adapter *adapter = sds_ring->adapter; + u32 status = 0; + + status = readl(adapter->isr_int_vec); + + if (!(status & adapter->int_vec_bit)) + return IRQ_NONE; + + if 
(NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + /* check interrupt state machine, to be sure */ + status = readl(adapter->crb_int_state_reg); + if (!ISR_LEGACY_INT_TRIGGERED(status)) + return IRQ_NONE; + + } else { + unsigned long our_int = 0; + + our_int = readl(adapter->crb_int_state_reg); + + /* not our interrupt */ + if (!test_and_clear_bit((7 + adapter->portnum), &our_int)) + return IRQ_NONE; + + /* claim interrupt */ + writel((our_int & 0xffffffff), adapter->crb_int_state_reg); + + /* clear interrupt */ + netxen_nic_disable_int(sds_ring); + } + + writel(0xffffffff, adapter->tgt_status_reg); + /* read twice to ensure write is flushed */ + readl(adapter->isr_int_vec); + readl(adapter->isr_int_vec); + + napi_schedule(&sds_ring->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t netxen_msi_intr(int irq, void *data) +{ + struct nx_host_sds_ring *sds_ring = data; + struct netxen_adapter *adapter = sds_ring->adapter; + + /* clear interrupt */ + writel(0xffffffff, adapter->tgt_status_reg); + + napi_schedule(&sds_ring->napi); + return IRQ_HANDLED; +} + +static irqreturn_t netxen_msix_intr(int irq, void *data) +{ + struct nx_host_sds_ring *sds_ring = data; + + napi_schedule(&sds_ring->napi); + return IRQ_HANDLED; +} + +static int netxen_nic_poll(struct napi_struct *napi, int budget) +{ + struct nx_host_sds_ring *sds_ring = + container_of(napi, struct nx_host_sds_ring, napi); + + struct netxen_adapter *adapter = sds_ring->adapter; + + int tx_complete; + int work_done; + + tx_complete = netxen_process_cmd_ring(adapter); + + work_done = netxen_process_rcv_ring(sds_ring, budget); + + if ((work_done < budget) && tx_complete) { + napi_complete(&sds_ring->napi); + if (test_bit(__NX_DEV_UP, &adapter->state)) + netxen_nic_enable_int(sds_ring); + } + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void netxen_nic_poll_controller(struct net_device *netdev) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_adapter *adapter = netdev_priv(netdev); + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + disable_irq(adapter->irq); + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netxen_intr(adapter->irq, sds_ring); + } + enable_irq(adapter->irq); +} +#endif + +static int +nx_incr_dev_ref_cnt(struct netxen_adapter *adapter) +{ + int count; + if (netxen_api_lock(adapter)) + return -EIO; + + count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); + + netxen_api_unlock(adapter); + return count; +} + +static int +nx_decr_dev_ref_cnt(struct netxen_adapter *adapter) +{ + int count; + if (netxen_api_lock(adapter)) + return -EIO; + + count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + WARN_ON(count == 0); + + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count); + + if (count == 0) + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD); + + netxen_api_unlock(adapter); + return count; +} + +static int +nx_dev_request_aer(struct netxen_adapter *adapter) +{ + u32 state; + int ret = -EINVAL; + + if (netxen_api_lock(adapter)) + return ret; + + state = NXRD32(adapter, NX_CRB_DEV_STATE); + + if (state == NX_DEV_NEED_AER) + ret = 0; + else if (state == NX_DEV_READY) { + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER); + ret = 0; + } + + netxen_api_unlock(adapter); + return ret; +} + +static int +nx_dev_request_reset(struct netxen_adapter *adapter) +{ + u32 state; + int ret = -EINVAL; + + if (netxen_api_lock(adapter)) + return ret; + + state = NXRD32(adapter, NX_CRB_DEV_STATE); + + if (state 
== NX_DEV_NEED_RESET) + ret = 0; + else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) { + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET); + ret = 0; + } + + netxen_api_unlock(adapter); + + return ret; +} + +static int +netxen_can_start_firmware(struct netxen_adapter *adapter) +{ + int count; + int can_start = 0; + + if (netxen_api_lock(adapter)) + return 0; + + count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + + if ((count < 0) || (count >= NX_MAX_PCI_FUNC)) + count = 0; + + if (count == 0) { + can_start = 1; + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING); + } + + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); + + netxen_api_unlock(adapter); + + return can_start; +} + +static void +netxen_schedule_work(struct netxen_adapter *adapter, + work_func_t func, int delay) +{ + INIT_DELAYED_WORK(&adapter->fw_work, func); + schedule_delayed_work(&adapter->fw_work, delay); +} + +static void +netxen_cancel_fw_work(struct netxen_adapter *adapter) +{ + while (test_and_set_bit(__NX_RESETTING, &adapter->state)) + msleep(10); + + cancel_delayed_work_sync(&adapter->fw_work); +} + +static void +netxen_attach_work(struct work_struct *work) +{ + struct netxen_adapter *adapter = container_of(work, + struct netxen_adapter, fw_work.work); + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (netif_running(netdev)) { + err = netxen_nic_attach(adapter); + if (err) + goto done; + + err = netxen_nic_up(adapter, netdev); + if (err) { + netxen_nic_detach(adapter); + goto done; + } + + netxen_restore_indev_addr(netdev, NETDEV_UP); + } + + netif_device_attach(netdev); + +done: + adapter->fw_fail_cnt = 0; + clear_bit(__NX_RESETTING, &adapter->state); + netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); +} + +static void +netxen_fwinit_work(struct work_struct *work) +{ + struct netxen_adapter *adapter = container_of(work, + struct netxen_adapter, fw_work.work); + int dev_state; + + dev_state = NXRD32(adapter, NX_CRB_DEV_STATE); + + switch (dev_state) { + case NX_DEV_COLD: + case NX_DEV_READY: + if (!netxen_start_firmware(adapter)) { + netxen_schedule_work(adapter, netxen_attach_work, 0); + return; + } + break; + + case NX_DEV_NEED_RESET: + case NX_DEV_INITALIZING: + if (++adapter->fw_wait_cnt < FW_POLL_THRESH) { + netxen_schedule_work(adapter, + netxen_fwinit_work, 2 * FW_POLL_DELAY); + return; + } + + case NX_DEV_FAILED: + default: + nx_incr_dev_ref_cnt(adapter); + break; + } + + clear_bit(__NX_RESETTING, &adapter->state); +} + +static void +netxen_detach_work(struct work_struct *work) +{ + struct netxen_adapter *adapter = container_of(work, + struct netxen_adapter, fw_work.work); + struct net_device *netdev = adapter->netdev; + int ref_cnt, delay; + u32 status; + + netif_device_detach(netdev); + + netxen_nic_down(adapter, netdev); + + rtnl_lock(); + netxen_nic_detach(adapter); + rtnl_unlock(); + + status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); + + if (status & NX_RCODE_FATAL_ERROR) + goto err_ret; + + if (adapter->temp == NX_TEMP_PANIC) + goto err_ret; + + ref_cnt = nx_decr_dev_ref_cnt(adapter); + + if (ref_cnt == -EIO) + goto err_ret; + + delay = (ref_cnt == 0) ? 
0 : (2 * FW_POLL_DELAY); + + adapter->fw_wait_cnt = 0; + netxen_schedule_work(adapter, netxen_fwinit_work, delay); + + return; + +err_ret: + clear_bit(__NX_RESETTING, &adapter->state); +} + +static int +netxen_check_health(struct netxen_adapter *adapter) +{ + u32 state, heartbit; + struct net_device *netdev = adapter->netdev; + + state = NXRD32(adapter, NX_CRB_DEV_STATE); + if (state == NX_DEV_NEED_AER) + return 0; + + if (netxen_nic_check_temp(adapter)) + goto detach; + + if (adapter->need_fw_reset) { + if (nx_dev_request_reset(adapter)) + return 0; + goto detach; + } + + /* NX_DEV_NEED_RESET, this state can be marked in two cases + * 1. Tx timeout 2. Fw hang + * Send request to destroy context in case of tx timeout only + * and doesn't required in case of Fw hang + */ + if (state == NX_DEV_NEED_RESET) { + adapter->need_fw_reset = 1; + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + goto detach; + } + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); + if (heartbit != adapter->heartbit) { + adapter->heartbit = heartbit; + adapter->fw_fail_cnt = 0; + if (adapter->need_fw_reset) + goto detach; + return 0; + } + + if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) + return 0; + + if (nx_dev_request_reset(adapter)) + return 0; + + clear_bit(__NX_FW_ATTACHED, &adapter->state); + + dev_info(&netdev->dev, "firmware hang detected\n"); + +detach: + if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && + !test_and_set_bit(__NX_RESETTING, &adapter->state)) + netxen_schedule_work(adapter, netxen_detach_work, 0); + return 1; +} + +static void +netxen_fw_poll_work(struct work_struct *work) +{ + struct netxen_adapter *adapter = container_of(work, + struct netxen_adapter, fw_work.work); + + if (test_bit(__NX_RESETTING, &adapter->state)) + goto reschedule; + + if (test_bit(__NX_DEV_UP, &adapter->state)) { + if (!adapter->has_link_events) { + + netxen_nic_handle_phy_intr(adapter); + + if (adapter->link_changed) + netxen_nic_set_link_parameters(adapter); + } + } + + if (netxen_check_health(adapter)) + return; + +reschedule: + netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); +} + +static ssize_t +netxen_store_bridged_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct net_device *net = to_net_dev(dev); + struct netxen_adapter *adapter = netdev_priv(net); + unsigned long new; + int ret = -EINVAL; + + if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG)) + goto err_out; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + goto err_out; + + if (strict_strtoul(buf, 2, &new)) + goto err_out; + + if (!netxen_config_bridged_mode(adapter, !!new)) + ret = len; + +err_out: + return ret; +} + +static ssize_t +netxen_show_bridged_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *net = to_net_dev(dev); + struct netxen_adapter *adapter; + int bridged_mode = 0; + + adapter = netdev_priv(net); + + if (adapter->capabilities & NX_FW_CAPABILITY_BDG) + bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED); + + return sprintf(buf, "%d\n", bridged_mode); +} + +static struct device_attribute dev_attr_bridged_mode = { + .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, + .show = netxen_show_bridged_mode, + .store = netxen_store_bridged_mode, +}; + +static ssize_t +netxen_store_diag_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct netxen_adapter *adapter = dev_get_drvdata(dev); + unsigned long 
new; + + if (strict_strtoul(buf, 2, &new)) + return -EINVAL; + + if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) + adapter->flags ^= NETXEN_NIC_DIAG_ENABLED; + + return len; +} + +static ssize_t +netxen_show_diag_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct netxen_adapter *adapter = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", + !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)); +} + +static struct device_attribute dev_attr_diag_mode = { + .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, + .show = netxen_show_diag_mode, + .store = netxen_store_diag_mode, +}; + +static int +netxen_sysfs_validate_crb(struct netxen_adapter *adapter, + loff_t offset, size_t size) +{ + size_t crb_size = 4; + + if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) + return -EIO; + + if (offset < NETXEN_PCI_CRBSPACE) { + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return -EINVAL; + + if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, + NETXEN_PCI_CAMQM_2M_END)) + crb_size = 8; + else + return -EINVAL; + } + + if ((size != crb_size) || (offset & (crb_size-1))) + return -EINVAL; + + return 0; +} + +static ssize_t +netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct netxen_adapter *adapter = dev_get_drvdata(dev); + u32 data; + u64 qmdata; + int ret; + + ret = netxen_sysfs_validate_crb(adapter, offset, size); + if (ret != 0) + return ret; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && + ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, + NETXEN_PCI_CAMQM_2M_END)) { + netxen_pci_camqm_read_2M(adapter, offset, &qmdata); + memcpy(buf, &qmdata, size); + } else { + data = NXRD32(adapter, offset); + memcpy(buf, &data, size); + } + + return size; +} + +static ssize_t +netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct netxen_adapter *adapter = dev_get_drvdata(dev); + u32 data; + u64 qmdata; + int ret; + + ret = netxen_sysfs_validate_crb(adapter, offset, size); + if (ret != 0) + return ret; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && + ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, + NETXEN_PCI_CAMQM_2M_END)) { + memcpy(&qmdata, buf, size); + netxen_pci_camqm_write_2M(adapter, offset, qmdata); + } else { + memcpy(&data, buf, size); + NXWR32(adapter, offset, data); + } + + return size; +} + +static int +netxen_sysfs_validate_mem(struct netxen_adapter *adapter, + loff_t offset, size_t size) +{ + if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) + return -EIO; + + if ((size != 8) || (offset & 0x7)) + return -EIO; + + return 0; +} + +static ssize_t +netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct netxen_adapter *adapter = dev_get_drvdata(dev); + u64 data; + int ret; + + ret = netxen_sysfs_validate_mem(adapter, offset, size); + if (ret != 0) + return ret; + + if (adapter->pci_mem_read(adapter, offset, &data)) + return -EIO; + + memcpy(buf, &data, size); + + return size; +} + +static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct 
netxen_adapter *adapter = dev_get_drvdata(dev); + u64 data; + int ret; + + ret = netxen_sysfs_validate_mem(adapter, offset, size); + if (ret != 0) + return ret; + + memcpy(&data, buf, size); + + if (adapter->pci_mem_write(adapter, offset, data)) + return -EIO; + + return size; +} + + +static struct bin_attribute bin_attr_crb = { + .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = netxen_sysfs_read_crb, + .write = netxen_sysfs_write_crb, +}; + +static struct bin_attribute bin_attr_mem = { + .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = netxen_sysfs_read_mem, + .write = netxen_sysfs_write_mem, +}; + + +static void +netxen_create_sysfs_entries(struct netxen_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct device *dev = &netdev->dev; + + if (adapter->capabilities & NX_FW_CAPABILITY_BDG) { + /* bridged_mode control */ + if (device_create_file(dev, &dev_attr_bridged_mode)) { + dev_warn(&netdev->dev, + "failed to create bridged_mode sysfs entry\n"); + } + } +} + +static void +netxen_remove_sysfs_entries(struct netxen_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct device *dev = &netdev->dev; + + if (adapter->capabilities & NX_FW_CAPABILITY_BDG) + device_remove_file(dev, &dev_attr_bridged_mode); +} + +static void +netxen_create_diag_entries(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct device *dev; + + dev = &pdev->dev; + if (device_create_file(dev, &dev_attr_diag_mode)) + dev_info(dev, "failed to create diag_mode sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_crb)) + dev_info(dev, "failed to create crb sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_mem)) + dev_info(dev, "failed to create mem sysfs entry\n"); +} + + +static void +netxen_remove_diag_entries(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct device *dev = &pdev->dev; + + device_remove_file(dev, &dev_attr_diag_mode); + device_remove_bin_file(dev, &bin_attr_crb); + device_remove_bin_file(dev, &bin_attr_mem); +} + +#ifdef CONFIG_INET + +#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops) + +static int +netxen_destip_supported(struct netxen_adapter *adapter) +{ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + if (adapter->ahw.cut_through) + return 0; + + return 1; +} + +static void +netxen_free_vlan_ip_list(struct netxen_adapter *adapter) +{ + struct nx_vlan_ip_list *cur; + struct list_head *head = &adapter->vlan_ip_list; + + while (!list_empty(head)) { + cur = list_entry(head->next, struct nx_vlan_ip_list, list); + netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); + list_del(&cur->list); + kfree(cur); + } + +} +static void +netxen_list_config_vlan_ip(struct netxen_adapter *adapter, + struct in_ifaddr *ifa, unsigned long event) +{ + struct net_device *dev; + struct nx_vlan_ip_list *cur, *tmp_cur; + struct list_head *head; + + dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; + + if (dev == NULL) + return; + + if (!is_vlan_dev(dev)) + return; + + switch (event) { + case NX_IP_UP: + list_for_each(head, &adapter->vlan_ip_list) { + cur = list_entry(head, struct nx_vlan_ip_list, list); + + if (cur->ip_addr == ifa->ifa_address) + return; + } + + cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC); + if (cur == NULL) { + printk(KERN_ERR "%s: failed to add vlan ip to list\n", + adapter->netdev->name); + return; + } + + cur->ip_addr = ifa->ifa_address; + list_add_tail(&cur->list, &adapter->vlan_ip_list); + break; + case NX_IP_DOWN: + list_for_each_entry_safe(cur, tmp_cur, + &adapter->vlan_ip_list, list) { + if (cur->ip_addr == ifa->ifa_address) { + list_del(&cur->list); + kfree(cur); + break; + } + } + } +} +static void +netxen_config_indev_addr(struct netxen_adapter *adapter, + struct net_device *dev, unsigned long event) +{ + struct in_device *indev; + + if (!netxen_destip_supported(adapter)) + return; + + indev = in_dev_get(dev); + if (!indev) + return; + + for_ifa(indev) { + switch (event) { + case NETDEV_UP: + netxen_config_ipaddr(adapter, + ifa->ifa_address, NX_IP_UP); + netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); + break; + case NETDEV_DOWN: + netxen_config_ipaddr(adapter, + ifa->ifa_address, NX_IP_DOWN); + netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN); + break; + default: + break; + } + } endfor_ifa(indev); + + in_dev_put(indev); +} + +static void +netxen_restore_indev_addr(struct net_device *netdev, unsigned long event) + +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct nx_vlan_ip_list *pos, *tmp_pos; + unsigned long ip_event; + + ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; + netxen_config_indev_addr(adapter, netdev, event); + + list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) { + netxen_config_ipaddr(adapter, pos->ip_addr, ip_event); + } +} + +static int netxen_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct netxen_adapter *adapter; + struct net_device *dev = (struct net_device *)ptr; + struct net_device *orig_dev = dev; + +recheck: + if (dev == NULL) + goto done; + + if (dev->priv_flags & IFF_802_1Q_VLAN) { + dev = vlan_dev_real_dev(dev); + goto recheck; + } + + if (!is_netxen_netdev(dev)) + goto done; + + adapter = netdev_priv(dev); + + if (!adapter) + goto done; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + goto done; + + netxen_config_indev_addr(adapter, orig_dev, event); +done: + return NOTIFY_DONE; +} + +static int +netxen_inetaddr_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct netxen_adapter *adapter; + struct net_device *dev; + + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + + dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; + +recheck: + if (dev == NULL) + goto done; + + if (dev->priv_flags & IFF_802_1Q_VLAN) { + dev = vlan_dev_real_dev(dev); + goto recheck; + } + + if (!is_netxen_netdev(dev)) + goto done; + + adapter = netdev_priv(dev); + + if (!adapter || !netxen_destip_supported(adapter)) + goto done; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + goto done; + + switch (event) { + case NETDEV_UP: + netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); + netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); + break; + case NETDEV_DOWN: + netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN); + netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN); + break; + default: + break; + } + +done: + return NOTIFY_DONE; +} + +static struct notifier_block netxen_netdev_cb = { + .notifier_call = netxen_netdev_event, +}; + +static struct notifier_block netxen_inetaddr_cb = { + .notifier_call = netxen_inetaddr_event, +}; +#else +static void +netxen_restore_indev_addr(struct net_device *dev, unsigned long event) +{ } +static void +netxen_free_vlan_ip_list(struct netxen_adapter *adapter) +{ } +#endif + +static struct pci_error_handlers netxen_err_handler = { + .error_detected = netxen_io_error_detected, + .slot_reset = netxen_io_slot_reset, + .resume = netxen_io_resume, +}; + +static struct pci_driver netxen_driver = { + .name = netxen_nic_driver_name, + .id_table = netxen_pci_tbl, + .probe = netxen_nic_probe, + .remove = __devexit_p(netxen_nic_remove), +#ifdef CONFIG_PM + .suspend = netxen_nic_suspend, + .resume = netxen_nic_resume, +#endif + .shutdown = netxen_nic_shutdown, + .err_handler = &netxen_err_handler +}; + +static int __init netxen_init_module(void) +{ + printk(KERN_INFO "%s\n", netxen_nic_driver_string); + +#ifdef CONFIG_INET + register_netdevice_notifier(&netxen_netdev_cb); + register_inetaddr_notifier(&netxen_inetaddr_cb); +#endif + return pci_register_driver(&netxen_driver); +} + +module_init(netxen_init_module); + +static void __exit netxen_exit_module(void) +{ + pci_unregister_driver(&netxen_driver); + +#ifdef CONFIG_INET + unregister_inetaddr_notifier(&netxen_inetaddr_cb); + unregister_netdevice_notifier(&netxen_netdev_cb); +#endif +} + +module_exit(netxen_exit_module); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c new file mode 100644 index 000000000000..ccde8061afa8 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -0,0 +1,3970 @@ +/* + * QLogic QLA3xxx NIC HBA Driver + * Copyright (c) 2003-2006 QLogic Corporation + * + * See LICENSE.qla3xxx for copyright and licensing details. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qla3xxx.h" + +#define DRV_NAME "qla3xxx" +#define DRV_STRING "QLogic ISP3XXX Network Driver" +#define DRV_VERSION "v2.03.00-k5" + +static const char ql3xxx_driver_name[] = DRV_NAME; +static const char ql3xxx_driver_version[] = DRV_VERSION; + +#define TIMED_OUT_MSG \ +"Timed out waiting for management port to get free before issuing command\n" + +MODULE_AUTHOR("QLogic Corporation"); +MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static const u32 default_msg + = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK + | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static int msi; +module_param(msi, int, 0); +MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); + +static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); + +/* + * These are the known PHY's which are used + */ +enum PHY_DEVICE_TYPE { + PHY_TYPE_UNKNOWN = 0, + PHY_VITESSE_VSC8211, + PHY_AGERE_ET1011C, + MAX_PHY_DEV_TYPES +}; + +struct PHY_DEVICE_INFO { + const enum PHY_DEVICE_TYPE phyDevice; + const u32 phyIdOUI; + const u16 phyIdModel; + const char *name; +}; + +static const struct PHY_DEVICE_INFO PHY_DEVICES[] = { + {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, + {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, + {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, +}; + + +/* + * Caller must take hw_lock. + */ +static int ql_sem_spinlock(struct ql3_adapter *qdev, + u32 sem_mask, u32 sem_bits) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + unsigned int seconds = 3; + + do { + writel((sem_mask | sem_bits), + &port_regs->CommonRegs.semaphoreReg); + value = readl(&port_regs->CommonRegs.semaphoreReg); + if ((value & (sem_mask >> 16)) == sem_bits) + return 0; + ssleep(1); + } while (--seconds); + return -1; +} + +static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); + readl(&port_regs->CommonRegs.semaphoreReg); +} + +static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); + value = readl(&port_regs->CommonRegs.semaphoreReg); + return ((value & (sem_mask >> 16)) == sem_bits); +} + +/* + * Caller holds hw_lock. 
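+ * ql_wait_for_drvr_lock() polls the driver semaphore for up to ten seconds.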
+ */ +static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) +{ + int i = 0; + + while (i < 10) { + if (i) + ssleep(1); + + if (ql_sem_lock(qdev, + QL_DRVR_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) + * 2) << 1)) { + netdev_printk(KERN_DEBUG, qdev->ndev, + "driver lock acquired\n"); + return 1; + } + } + + netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); + return 0; +} + +static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + writel(((ISP_CONTROL_NP_MASK << 16) | page), + &port_regs->CommonRegs.ispControlStatus); + readl(&port_regs->CommonRegs.ispControlStatus); + qdev->current_page = page; +} + +static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + u32 value; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + value = readl(reg); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + return value; +} + +static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + return readl(reg); +} + +static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + u32 value; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + if (qdev->current_page != 0) + ql_set_register_page(qdev, 0); + value = readl(reg); + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return value; +} + +static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + if (qdev->current_page != 0) + ql_set_register_page(qdev, 0); + return readl(reg); +} + +static void ql_write_common_reg_l(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + writel(value, reg); + readl(reg); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); +} + +static void ql_write_common_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + writel(value, reg); + readl(reg); +} + +static void ql_write_nvram_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + writel(value, reg); + readl(reg); + udelay(1); +} + +static void ql_write_page0_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + if (qdev->current_page != 0) + ql_set_register_page(qdev, 0); + writel(value, reg); + readl(reg); +} + +/* + * Caller holds hw_lock. Only called during init. + */ +static void ql_write_page1_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + if (qdev->current_page != 1) + ql_set_register_page(qdev, 1); + writel(value, reg); + readl(reg); +} + +/* + * Caller holds hw_lock. Only called during init. 
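+ * Switches to register page 2 before the write if it is not already current.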
+ */ +static void ql_write_page2_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + if (qdev->current_page != 2) + ql_set_register_page(qdev, 2); + writel(value, reg); + readl(reg); +} + +static void ql_disable_interrupts(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, + (ISP_IMR_ENABLE_INT << 16)); + +} + +static void ql_enable_interrupts(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, + ((0xff << 16) | ISP_IMR_ENABLE_INT)); + +} + +static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, + struct ql_rcv_buf_cb *lrg_buf_cb) +{ + dma_addr_t map; + int err; + lrg_buf_cb->next = NULL; + + if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ + qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb; + } else { + qdev->lrg_buf_free_tail->next = lrg_buf_cb; + qdev->lrg_buf_free_tail = lrg_buf_cb; + } + + if (!lrg_buf_cb->skb) { + lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, + qdev->lrg_buffer_len); + if (unlikely(!lrg_buf_cb->skb)) { + netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n"); + qdev->lrg_buf_skb_check++; + } else { + /* + * We save some space to copy the ethhdr from first + * buffer + */ + skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); + map = pci_map_single(qdev->pdev, + lrg_buf_cb->skb->data, + qdev->lrg_buffer_len - + QL_HEADER_SPACE, + PCI_DMA_FROMDEVICE); + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); + dev_kfree_skb(lrg_buf_cb->skb); + lrg_buf_cb->skb = NULL; + + qdev->lrg_buf_skb_check++; + return; + } + + lrg_buf_cb->buf_phy_addr_low = + cpu_to_le32(LS_64BITS(map)); + lrg_buf_cb->buf_phy_addr_high = + cpu_to_le32(MS_64BITS(map)); + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); + dma_unmap_len_set(lrg_buf_cb, maplen, + qdev->lrg_buffer_len - + QL_HEADER_SPACE); + } + } + + qdev->lrg_buf_free_count++; +} + +static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter + *qdev) +{ + struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; + + if (lrg_buf_cb != NULL) { + qdev->lrg_buf_free_head = lrg_buf_cb->next; + if (qdev->lrg_buf_free_head == NULL) + qdev->lrg_buf_free_tail = NULL; + qdev->lrg_buf_free_count--; + } + + return lrg_buf_cb; +} + +static u32 addrBits = EEPROM_NO_ADDR_BITS; +static u32 dataBits = EEPROM_NO_DATA_BITS; + +static void fm93c56a_deselect(struct ql3_adapter *qdev); +static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, + unsigned short *value); + +/* + * Caller holds hw_lock. + */ +static void fm93c56a_select(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + + qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; + ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); + ql_write_nvram_reg(qdev, spir, + ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); +} + +/* + * Caller holds hw_lock. 
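+ * Bit-bangs the start bit, command and address onto the FM93C56A serial interface.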
+ */ +static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) +{ + int i; + u32 mask; + u32 dataBit; + u32 previousBit; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + + /* Clock in a zero, then do the start bit */ + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); + + mask = 1 << (FM93C56A_CMD_BITS - 1); + /* Force the previous data bit to be different */ + previousBit = 0xffff; + for (i = 0; i < FM93C56A_CMD_BITS; i++) { + dataBit = (cmd & mask) + ? AUBURN_EEPROM_DO_1 + : AUBURN_EEPROM_DO_0; + if (previousBit != dataBit) { + /* If the bit changed, change the DO state to match */ + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit)); + previousBit = dataBit; + } + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_RISE)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_FALL)); + cmd = cmd << 1; + } + + mask = 1 << (addrBits - 1); + /* Force the previous data bit to be different */ + previousBit = 0xffff; + for (i = 0; i < addrBits; i++) { + dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 + : AUBURN_EEPROM_DO_0; + if (previousBit != dataBit) { + /* + * If the bit changed, then change the DO state to + * match + */ + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit)); + previousBit = dataBit; + } + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_RISE)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_FALL)); + eepromAddr = eepromAddr << 1; + } +} + +/* + * Caller holds hw_lock. + */ +static void fm93c56a_deselect(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + + qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; + ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); +} + +/* + * Caller holds hw_lock. + */ +static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) +{ + int i; + u32 data = 0; + u32 dataBit; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + + /* Read the data bits */ + /* The first bit is a dummy. Clock right over it. */ + for (i = 0; i < dataBits; i++) { + ql_write_nvram_reg(qdev, spir, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_CLK_RISE); + ql_write_nvram_reg(qdev, spir, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_CLK_FALL); + dataBit = (ql_read_common_reg(qdev, spir) & + AUBURN_EEPROM_DI_1) ? 1 : 0; + data = (data << 1) | dataBit; + } + *value = (u16)data; +} + +/* + * Caller holds hw_lock. 
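+ * eeprom_readword() chains select, READ command, data clock-in and deselect.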
+ */ +static void eeprom_readword(struct ql3_adapter *qdev, + u32 eepromAddr, unsigned short *value) +{ + fm93c56a_select(qdev); + fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); + fm93c56a_datain(qdev, value); + fm93c56a_deselect(qdev); +} + +static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) +{ + __le16 *p = (__le16 *)ndev->dev_addr; + p[0] = cpu_to_le16(addr[0]); + p[1] = cpu_to_le16(addr[1]); + p[2] = cpu_to_le16(addr[2]); +} + +static int ql_get_nvram_params(struct ql3_adapter *qdev) +{ + u16 *pEEPROMData; + u16 checksum = 0; + u32 index; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + pEEPROMData = (u16 *)&qdev->nvram_data; + qdev->eeprom_cmd_data = 0; + if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 10)) { + pr_err("%s: Failed ql_sem_spinlock()\n", __func__); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return -1; + } + + for (index = 0; index < EEPROM_SIZE; index++) { + eeprom_readword(qdev, index, pEEPROMData); + checksum += *pEEPROMData; + pEEPROMData++; + } + ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); + + if (checksum != 0) { + netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", + checksum); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return -1; + } + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return checksum; +} + +static const u32 PHYAddr[2] = { + PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS +}; + +static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 temp; + int count = 1000; + + while (count) { + temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); + if (!(temp & MAC_MII_STATUS_BSY)) + return 0; + udelay(10); + count--; + } + return -1; +} + +static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 scanControl; + + if (qdev->numPorts > 1) { + /* Auto scan will cycle through multiple ports */ + scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; + } else { + scanControl = MAC_MII_CONTROL_SC; + } + + /* + * Scan register 1 of PHY/PETBI, + * Set up to scan both devices + * The autoscan starts from the first register, completes + * the last one before rolling over to the first + */ + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + PHYAddr[0] | MII_SCAN_REGISTER); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (scanControl) | + ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); +} + +static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) +{ + u8 ret; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + /* See if scan mode is enabled before we turn it off */ + if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & + (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { + /* Scan is enabled */ + ret = 1; + } else { + /* Scan is disabled */ + ret = 0; + } + + /* + * When disabling scan mode you must first change the MII register + * address + */ + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + PHYAddr[0] | MII_SCAN_REGISTER); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | + MAC_MII_CONTROL_RC) << 16)); + + return ret; +} + +static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, + u16 regAddr, u16 value, u32 phyAddr) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u8 scanWasEnabled; + + 
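+	/* Pause MII auto-scan so the management interface is free for this access. */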
scanWasEnabled = ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + phyAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); + + /* Wait for write to complete 9/10/04 SJP */ + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + if (scanWasEnabled) + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, + u16 *value, u32 phyAddr) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u8 scanWasEnabled; + u32 temp; + + scanWasEnabled = ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + phyAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16)); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); + + /* Wait for the read to complete */ + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); + *value = (u16) temp; + + if (scanWasEnabled) + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + qdev->PHYAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); + + /* Wait for write to complete. 
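The BSY polling in ql_wait_for_mii_ready() gives the MII management interface a fixed budget: 1000 probes spaced 10 microseconds apart, roughly 10 ms, before the access is abandoned. A self-contained sketch of that pattern; read_status() and sleep_us() are stand-ins for ql_read_page0_reg() and udelay(), and the stub clears BSY after a few probes just so the demo terminates:

#include <stdio.h>
#include <stdint.h>

#define MII_BSY 0x01

static int probes;

static uint32_t read_status(void)
{
	return ++probes < 5 ? MII_BSY : 0;	/* stub: busy for 4 probes */
}

static void sleep_us(unsigned int us)
{
	(void)us;				/* no-op in this sketch */
}

static int mii_wait_ready(void)
{
	int count = 1000;			/* 1000 * 10us ~= 10ms budget */

	while (count--) {
		if (!(read_status() & MII_BSY))
			return 0;
		sleep_us(10);
	}
	return -1;				/* still busy after ~10ms */
}

int main(void)
{
	printf("mii_wait_ready() = %d after %d probes\n",
	       mii_wait_ready(), probes);
	return 0;
}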
*/ + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) +{ + u32 temp; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + qdev->PHYAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16)); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); + + /* Wait for the read to complete */ + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); + *value = (u16) temp; + + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static void ql_petbi_reset(struct ql3_adapter *qdev) +{ + ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET); +} + +static void ql_petbi_start_neg(struct ql3_adapter *qdev) +{ + u16 reg; + + /* Enable Auto-negotiation sense */ + ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg); + reg |= PETBI_TBI_AUTO_SENSE; + ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg); + + ql_mii_write_reg(qdev, PETBI_NEG_ADVER, + PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX); + + ql_mii_write_reg(qdev, PETBI_CONTROL_REG, + PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | + PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000); + +} + +static void ql_petbi_reset_ex(struct ql3_adapter *qdev) +{ + ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET, + PHYAddr[qdev->mac_index]); +} + +static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) +{ + u16 reg; + + /* Enable Auto-negotiation sense */ + ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, + PHYAddr[qdev->mac_index]); + reg |= PETBI_TBI_AUTO_SENSE; + ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, + PHYAddr[qdev->mac_index]); + + ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, + PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, + PHYAddr[qdev->mac_index]); + + ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, + PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | + PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000, + PHYAddr[qdev->mac_index]); +} + +static void ql_petbi_init(struct ql3_adapter *qdev) +{ + ql_petbi_reset(qdev); + ql_petbi_start_neg(qdev); +} + +static void ql_petbi_init_ex(struct ql3_adapter *qdev) +{ + ql_petbi_reset_ex(qdev); + ql_petbi_start_neg_ex(qdev); +} + +static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) +{ + u16 reg; + + if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0) + return 0; + + return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE; +} + +static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) +{ + netdev_info(qdev->ndev, "enabling Agere specific PHY\n"); + /* power down device bit 11 = 1 */ + ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); + /* enable diagnostic mode bit 2 = 1 */ + ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr); + /* 1000MB amplitude adjust (see Agere errata) */ + ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr); + /* 1000MB amplitude adjust (see Agere errata) */ + ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr); + /* 100MB amplitude adjust (see Agere errata) */ + ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr); + /* 100MB amplitude adjust (see Agere errata) */ + 
ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr); + /* 10MB amplitude adjust (see Agere errata) */ + ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr); + /* 10MB amplitude adjust (see Agere errata) */ + ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr); + /* point to hidden reg 0x2806 */ + ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); + /* Write new PHYAD w/bit 5 set */ + ql_mii_write_reg_ex(qdev, 0x11, + 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); + /* + * Disable diagnostic mode bit 2 = 0 + * Power up device bit 11 = 0 + * Link up (on) and activity (blink) + */ + ql_mii_write_reg(qdev, 0x12, 0x840a); + ql_mii_write_reg(qdev, 0x00, 0x1140); + ql_mii_write_reg(qdev, 0x1c, 0xfaf0); +} + +static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, + u16 phyIdReg0, u16 phyIdReg1) +{ + enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN; + u32 oui; + u16 model; + int i; + + if (phyIdReg0 == 0xffff) + return result; + + if (phyIdReg1 == 0xffff) + return result; + + /* oui is split between two registers */ + oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); + + model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; + + /* Scan table for this PHY */ + for (i = 0; i < MAX_PHY_DEV_TYPES; i++) { + if ((oui == PHY_DEVICES[i].phyIdOUI) && + (model == PHY_DEVICES[i].phyIdModel)) { + netdev_info(qdev->ndev, "Phy: %s\n", + PHY_DEVICES[i].name); + result = PHY_DEVICES[i].phyDevice; + break; + } + } + + return result; +} + +static int ql_phy_get_speed(struct ql3_adapter *qdev) +{ + u16 reg; + + switch (qdev->phyType) { + case PHY_AGERE_ET1011C: { + if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0) + return 0; + + reg = (reg >> 8) & 3; + break; + } + default: + if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) + return 0; + + reg = (((reg & 0x18) >> 3) & 3); + } + + switch (reg) { + case 2: + return SPEED_1000; + case 1: + return SPEED_100; + case 0: + return SPEED_10; + default: + return -1; + } +} + +static int ql_is_full_dup(struct ql3_adapter *qdev) +{ + u16 reg; + + switch (qdev->phyType) { + case PHY_AGERE_ET1011C: { + if (ql_mii_read_reg(qdev, 0x1A, &reg)) + return 0; + + return ((reg & 0x0080) && (reg & 0x1000)) != 0; + } + case PHY_VITESSE_VSC8211: + default: { + if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) + return 0; + return (reg & PHY_AUX_DUPLEX_STAT) != 0; + } + } +} + +static int ql_is_phy_neg_pause(struct ql3_adapter *qdev) +{ + u16 reg; + + if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0) + return 0; + + return (reg & PHY_NEG_PAUSE) != 0; +} + +static int PHY_Setup(struct ql3_adapter *qdev) +{ + u16 reg1; + u16 reg2; + bool agereAddrChangeNeeded = false; + u32 miiAddr = 0; + int err; + + /* Determine the PHY we are using by reading the IDs */ + err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1); + if (err != 0) { + netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); + return err; + } + + err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2); + if (err != 0) { + netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); + return err; + } + + /* Check if we have an Agere PHY */ + if ((reg1 == 0xffff) || (reg2 == 0xffff)) { + + /* Determine which MII address we should be using, + based on the index of the card */ + if (qdev->mac_index == 0) + miiAddr = MII_AGERE_ADDR_1; + else + miiAddr = MII_AGERE_ADDR_2; + + err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr); + if (err != 0) { + netdev_err(qdev->ndev, + "Could not read from reg PHY_ID_0_REG after Agere detected\n"); + return err; + } + + err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr); + if (err != 
0) { + netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); + return err; + } + + /* We need to remember to initialize the Agere PHY */ + agereAddrChangeNeeded = true; + } + + /* Determine the particular PHY we have on board to apply + PHY specific initializations */ + qdev->phyType = getPhyType(qdev, reg1, reg2); + + if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { + /* need this here so address gets changed */ + phyAgereSpecificInit(qdev, miiAddr); + } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { + netdev_err(qdev->ndev, "PHY is unknown\n"); + return -EIO; + } + + return 0; +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); + else + value = (MAC_CONFIG_REG_PE << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); + else + value = (MAC_CONFIG_REG_SR << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); + else + value = (MAC_CONFIG_REG_GM << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); + else + value = (MAC_CONFIG_REG_FD << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = + ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) | + ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16)); + else + value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. 
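The ql_mac_cfg_*() helpers above all encode both a value and a write-enable mask in a single 32-bit store: bits 16-31 select which bits of the low halfword the MAC will actually latch, so writing (bit | (bit << 16)) sets a bit and writing (bit << 16) alone clears it. A userspace model of that convention (the register is simulated, and MAC_CONFIG_REG_PE's value here is a placeholder):

#include <stdio.h>
#include <stdint.h>

#define MAC_CONFIG_REG_PE 0x0001	/* placeholder bit value */

static uint16_t mac_config;		/* simulated register state */

static void reg_write(uint32_t v)
{
	uint16_t mask = v >> 16;	/* which bits may change */
	uint16_t val = v & 0xffff;	/* the new values for those bits */

	mac_config = (mac_config & ~mask) | (val & mask);
}

int main(void)
{
	reg_write(MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); /* enable */
	printf("after enable:  0x%04x\n", mac_config);
	reg_write(MAC_CONFIG_REG_PE << 16);			   /* disable */
	printf("after disable: 0x%04x\n", mac_config);
	return 0;
}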
+ */ +static int ql_is_fiber(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_SM0; + break; + case 1: + bitToCheck = PORT_STATUS_SM1; + break; + } + + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + return (temp & bitToCheck) != 0; +} + +static int ql_is_auto_cfg(struct ql3_adapter *qdev) +{ + u16 reg; + ql_mii_read_reg(qdev, 0x00, &reg); + return (reg & 0x1000) != 0; +} + +/* + * Caller holds hw_lock. + */ +static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_AC0; + break; + case 1: + bitToCheck = PORT_STATUS_AC1; + break; + } + + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + if (temp & bitToCheck) { + netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n"); + return 1; + } + netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n"); + return 0; +} + +/* + * ql_is_neg_pause() returns 1 if pause was negotiated to be on + */ +static int ql_is_neg_pause(struct ql3_adapter *qdev) +{ + if (ql_is_fiber(qdev)) + return ql_is_petbi_neg_pause(qdev); + else + return ql_is_phy_neg_pause(qdev); +} + +static int ql_auto_neg_error(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_AE0; + break; + case 1: + bitToCheck = PORT_STATUS_AE1; + break; + } + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + return (temp & bitToCheck) != 0; +} + +static u32 ql_get_link_speed(struct ql3_adapter *qdev) +{ + if (ql_is_fiber(qdev)) + return SPEED_1000; + else + return ql_phy_get_speed(qdev); +} + +static int ql_is_link_full_dup(struct ql3_adapter *qdev) +{ + if (ql_is_fiber(qdev)) + return 1; + else + return ql_is_full_dup(qdev); +} + +/* + * Caller holds hw_lock. + */ +static int ql_link_down_detect(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = ISP_CONTROL_LINK_DN_0; + break; + case 1: + bitToCheck = ISP_CONTROL_LINK_DN_1; + break; + } + + temp = + ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); + return (temp & bitToCheck) != 0; +} + +/* + * Caller holds hw_lock. + */ +static int ql_link_down_detect_clear(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + switch (qdev->mac_index) { + case 0: + ql_write_common_reg(qdev, + &port_regs->CommonRegs.ispControlStatus, + (ISP_CONTROL_LINK_DN_0) | + (ISP_CONTROL_LINK_DN_0 << 16)); + break; + + case 1: + ql_write_common_reg(qdev, + &port_regs->CommonRegs.ispControlStatus, + (ISP_CONTROL_LINK_DN_1) | + (ISP_CONTROL_LINK_DN_1 << 16)); + break; + + default: + return 1; + } + + return 0; +} + +/* + * Caller holds hw_lock. 
+ */ +static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_F1_ENABLED; + break; + case 1: + bitToCheck = PORT_STATUS_F3_ENABLED; + break; + default: + break; + } + + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + if (temp & bitToCheck) { + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, + "not link master\n"); + return 0; + } + + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n"); + return 1; +} + +static void ql_phy_reset_ex(struct ql3_adapter *qdev) +{ + ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, + PHYAddr[qdev->mac_index]); +} + +static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) +{ + u16 reg; + u16 portConfiguration; + + if (qdev->phyType == PHY_AGERE_ET1011C) + ql_mii_write_reg(qdev, 0x13, 0x0000); + /* turn off external loopback */ + + if (qdev->mac_index == 0) + portConfiguration = + qdev->nvram_data.macCfg_port0.portConfiguration; + else + portConfiguration = + qdev->nvram_data.macCfg_port1.portConfiguration; + + /* Some HBAs in the field are set to 0 and need to + be reinterpreted with a default value */ + if (portConfiguration == 0) + portConfiguration = PORT_CONFIG_DEFAULT; + + /* Set the 1000 advertisements */ + ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg, + PHYAddr[qdev->mac_index]); + reg &= ~PHY_GIG_ALL_PARAMS; + + if (portConfiguration & PORT_CONFIG_1000MB_SPEED) { + if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) + reg |= PHY_GIG_ADV_1000F; + else + reg |= PHY_GIG_ADV_1000H; + } + + ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg, + PHYAddr[qdev->mac_index]); + + /* Set the 10/100 & pause negotiation advertisements */ + ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg, + PHYAddr[qdev->mac_index]); + reg &= ~PHY_NEG_ALL_PARAMS; + + if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) + reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE; + + if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { + if (portConfiguration & PORT_CONFIG_100MB_SPEED) + reg |= PHY_NEG_ADV_100F; + + if (portConfiguration & PORT_CONFIG_10MB_SPEED) + reg |= PHY_NEG_ADV_10F; + } + + if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { + if (portConfiguration & PORT_CONFIG_100MB_SPEED) + reg |= PHY_NEG_ADV_100H; + + if (portConfiguration & PORT_CONFIG_10MB_SPEED) + reg |= PHY_NEG_ADV_10H; + } + + if (portConfiguration & PORT_CONFIG_1000MB_SPEED) + reg |= 1; + + ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, + PHYAddr[qdev->mac_index]); + + ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]); + + ql_mii_write_reg_ex(qdev, CONTROL_REG, + reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG, + PHYAddr[qdev->mac_index]); +} + +static void ql_phy_init_ex(struct ql3_adapter *qdev) +{ + ql_phy_reset_ex(qdev); + PHY_Setup(qdev); + ql_phy_start_neg_ex(qdev); +} + +/* + * Caller holds hw_lock. 
+ */ +static u32 ql_get_link_state(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp, linkState; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_UP0; + break; + case 1: + bitToCheck = PORT_STATUS_UP1; + break; + } + + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + if (temp & bitToCheck) + linkState = LS_UP; + else + linkState = LS_DOWN; + + return linkState; +} + +static int ql_port_start(struct ql3_adapter *qdev) +{ + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 7)) { + netdev_err(qdev->ndev, "Could not get hw lock for GIO\n"); + return -1; + } + + if (ql_is_fiber(qdev)) { + ql_petbi_init(qdev); + } else { + /* Copper port */ + ql_phy_init_ex(qdev); + } + + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + return 0; +} + +static int ql_finish_auto_neg(struct ql3_adapter *qdev) +{ + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 7)) + return -1; + + if (!ql_auto_neg_error(qdev)) { + if (test_bit(QL_LINK_MASTER, &qdev->flags)) { + /* configure the MAC */ + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, + "Configuring link\n"); + ql_mac_cfg_soft_reset(qdev, 1); + ql_mac_cfg_gig(qdev, + (ql_get_link_speed(qdev) == SPEED_1000)); + ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev)); + ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev)); + ql_mac_cfg_soft_reset(qdev, 0); + + /* enable the MAC */ + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, + "Enabling mac\n"); + ql_mac_enable(qdev, 1); + } + + qdev->port_link_state = LS_UP; + netif_start_queue(qdev->ndev); + netif_carrier_on(qdev->ndev); + netif_info(qdev, link, qdev->ndev, + "Link is up at %d Mbps, %s duplex\n", + ql_get_link_speed(qdev), + ql_is_link_full_dup(qdev) ? "full" : "half"); + + } else { /* Remote error detected */ + + if (test_bit(QL_LINK_MASTER, &qdev->flags)) { + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, + "Remote error detected. Calling ql_port_start()\n"); + /* + * ql_port_start() is shared code and needs + * to lock the PHY on its own. + */ + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + if (ql_port_start(qdev)) /* Restart port */ + return -1; + return 0; + } + } + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + return 0; +} + +static void ql_link_state_machine_work(struct work_struct *work) +{ + struct ql3_adapter *qdev = + container_of(work, struct ql3_adapter, link_state_work.work); + + u32 curr_link_state; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + curr_link_state = ql_get_link_state(qdev); + + if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { + netif_info(qdev, link, qdev->ndev, + "Reset in progress, skip processing link state\n"); + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + /* Restart timer on 2 second interval. 
*/ + mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); + + return; + } + + switch (qdev->port_link_state) { + default: + if (test_bit(QL_LINK_MASTER, &qdev->flags)) + ql_port_start(qdev); + qdev->port_link_state = LS_DOWN; + /* Fall Through */ + + case LS_DOWN: + if (curr_link_state == LS_UP) { + netif_info(qdev, link, qdev->ndev, "Link is up\n"); + if (ql_is_auto_neg_complete(qdev)) + ql_finish_auto_neg(qdev); + + if (qdev->port_link_state == LS_UP) + ql_link_down_detect_clear(qdev); + + qdev->port_link_state = LS_UP; + } + break; + + case LS_UP: + /* + * See if the link is currently down or went down and came + * back up + */ + if (curr_link_state == LS_DOWN) { + netif_info(qdev, link, qdev->ndev, "Link is down\n"); + qdev->port_link_state = LS_DOWN; + } + if (ql_link_down_detect(qdev)) + qdev->port_link_state = LS_DOWN; + break; + } + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + /* Restart timer on 2 second interval. */ + mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); +} + +/* + * Caller must take hw_lock and QL_PHY_GIO_SEM. + */ +static void ql_get_phy_owner(struct ql3_adapter *qdev) +{ + if (ql_this_adapter_controls_port(qdev)) + set_bit(QL_LINK_MASTER, &qdev->flags); + else + clear_bit(QL_LINK_MASTER, &qdev->flags); +} + +/* + * Caller must take hw_lock and QL_PHY_GIO_SEM. + */ +static void ql_init_scan_mode(struct ql3_adapter *qdev) +{ + ql_mii_enable_scan_mode(qdev); + + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { + if (ql_this_adapter_controls_port(qdev)) + ql_petbi_init_ex(qdev); + } else { + if (ql_this_adapter_controls_port(qdev)) + ql_phy_init_ex(qdev); + } +} + +/* + * MII_Setup needs to be called before taking the PHY out of reset + * so that the management interface clock speed can be set properly. + * It would be better if we had a way to disable MDC until after the + * PHY is out of reset, but we don't have that capability. 
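ql_link_state_machine_work() is, at heart, a two-state machine sampled every two seconds: any state other than LS_DOWN or LS_UP is treated as uninitialized and folded into LS_DOWN on the first pass. A compressed sketch of those transitions, with link sampling faked by a fixed sequence:

#include <stdio.h>

enum link_state { LS_UNKNOWN, LS_DOWN, LS_UP };

static enum link_state step(enum link_state cur, enum link_state sampled)
{
	switch (cur) {
	default:		/* first pass: start the port, assume down */
		cur = LS_DOWN;
		/* fall through */
	case LS_DOWN:
		if (sampled == LS_UP)
			cur = LS_UP;	/* finish autoneg, bring the MAC up */
		break;
	case LS_UP:
		if (sampled == LS_DOWN)
			cur = LS_DOWN;	/* carrier lost */
		break;
	}
	return cur;
}

int main(void)
{
	enum link_state samples[] = { LS_DOWN, LS_UP, LS_UP, LS_DOWN };
	enum link_state cur = LS_UNKNOWN;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		cur = step(cur, samples[i]);
		printf("tick %u: state=%d\n", i, cur);
	}
	return 0;
}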
+ */ +static int ql_mii_setup(struct ql3_adapter *qdev) +{ + u32 reg; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 7)) + return -1; + + if (qdev->device_id == QL3032_DEVICE_ID) + ql_write_page0_reg(qdev, + &port_regs->macMIIMgmtControlReg, 0x0f00000); + + /* Divide 125MHz clock by 28 to meet PHY timing requirements */ + reg = MAC_MII_CONTROL_CLK_SEL_DIV28; + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); + + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + return 0; +} + +#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ + SUPPORTED_FIBRE | \ + SUPPORTED_Autoneg) +#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ + SUPPORTED_10baseT_Full | \ + SUPPORTED_100baseT_Half | \ + SUPPORTED_100baseT_Full | \ + SUPPORTED_1000baseT_Half | \ + SUPPORTED_1000baseT_Full | \ + SUPPORTED_Autoneg | \ + SUPPORTED_TP) + +static u32 ql_supported_modes(struct ql3_adapter *qdev) +{ + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) + return SUPPORTED_OPTICAL_MODES; + + return SUPPORTED_TP_MODES; +} + +static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) +{ + int status; + unsigned long hw_flags; + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index) * 2) << 7)) { + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return 0; + } + status = ql_is_auto_cfg(qdev); + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return status; +} + +static u32 ql_get_speed(struct ql3_adapter *qdev) +{ + u32 status; + unsigned long hw_flags; + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index) * 2) << 7)) { + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return 0; + } + status = ql_get_link_speed(qdev); + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return status; +} + +static int ql_get_full_dup(struct ql3_adapter *qdev) +{ + int status; + unsigned long hw_flags; + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index) * 2) << 7)) { + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return 0; + } + status = ql_is_link_full_dup(qdev); + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return status; +} + +static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = ql_supported_modes(qdev); + + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { + ecmd->port = PORT_FIBRE; + } else { + ecmd->port = PORT_TP; + ecmd->phy_address = qdev->PHYAddr; + } + ecmd->advertising = ql_supported_modes(qdev); + ecmd->autoneg = ql_get_auto_cfg_status(qdev); + ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev)); + ecmd->duplex = ql_get_full_dup(qdev); + return 0; +} + +static void ql_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *drvinfo) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + strncpy(drvinfo->driver, ql3xxx_driver_name, 32); + strncpy(drvinfo->version, ql3xxx_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, 
pci_name(qdev->pdev), 32); + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +static u32 ql_get_msglevel(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + return qdev->msg_enable; +} + +static void ql_set_msglevel(struct net_device *ndev, u32 value) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + qdev->msg_enable = value; +} + +static void ql_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + u32 reg; + if (qdev->mac_index == 0) + reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); + else + reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); + + pause->autoneg = ql_get_auto_cfg_status(qdev); + pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; + pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; +} + +static const struct ethtool_ops ql3xxx_ethtool_ops = { + .get_settings = ql_get_settings, + .get_drvinfo = ql_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = ql_get_msglevel, + .set_msglevel = ql_set_msglevel, + .get_pauseparam = ql_get_pauseparam, +}; + +static int ql_populate_free_queue(struct ql3_adapter *qdev) +{ + struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; + dma_addr_t map; + int err; + + while (lrg_buf_cb) { + if (!lrg_buf_cb->skb) { + lrg_buf_cb->skb = + netdev_alloc_skb(qdev->ndev, + qdev->lrg_buffer_len); + if (unlikely(!lrg_buf_cb->skb)) { + netdev_printk(KERN_DEBUG, qdev->ndev, + "Failed netdev_alloc_skb()\n"); + break; + } else { + /* + * We save some space to copy the ethhdr from + * first buffer + */ + skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); + map = pci_map_single(qdev->pdev, + lrg_buf_cb->skb->data, + qdev->lrg_buffer_len - + QL_HEADER_SPACE, + PCI_DMA_FROMDEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); + dev_kfree_skb(lrg_buf_cb->skb); + lrg_buf_cb->skb = NULL; + break; + } + + + lrg_buf_cb->buf_phy_addr_low = + cpu_to_le32(LS_64BITS(map)); + lrg_buf_cb->buf_phy_addr_high = + cpu_to_le32(MS_64BITS(map)); + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); + dma_unmap_len_set(lrg_buf_cb, maplen, + qdev->lrg_buffer_len - + QL_HEADER_SPACE); + --qdev->lrg_buf_skb_check; + if (!qdev->lrg_buf_skb_check) + return 1; + } + } + lrg_buf_cb = lrg_buf_cb->next; + } + return 0; +} + +/* + * Caller holds hw_lock. + */ +static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + if (qdev->small_buf_release_cnt >= 16) { + while (qdev->small_buf_release_cnt >= 16) { + qdev->small_buf_q_producer_index++; + + if (qdev->small_buf_q_producer_index == + NUM_SBUFQ_ENTRIES) + qdev->small_buf_q_producer_index = 0; + qdev->small_buf_release_cnt -= 8; + } + wmb(); + writel(qdev->small_buf_q_producer_index, + &port_regs->CommonRegs.rxSmallQProducerIndex); + } +} + +/* + * Caller holds hw_lock. 
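The small-buffer producer update above batches doorbell writes: the index only moves once at least 16 buffers have been handed back, each step of the index covers one queue element's worth of buffer addresses, the release count drops by 8 per step, and the index wraps at the queue length. A standalone model of that arithmetic (queue length and release count are invented):

#include <stdio.h>

#define Q_ENTRIES 8	/* placeholder queue length */

int main(void)
{
	int producer = 0, release_cnt = 37;	/* pretend 37 buffers came back */

	if (release_cnt >= 16) {
		while (release_cnt >= 16) {
			if (++producer == Q_ENTRIES)
				producer = 0;	/* ring wrap */
			release_cnt -= 8;	/* one element = 8 addresses */
		}
		/* a single doorbell write would go here */
	}
	printf("producer=%d, leftover=%d\n", producer, release_cnt);
	return 0;
}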
+ */ +static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) +{ + struct bufq_addr_element *lrg_buf_q_ele; + int i; + struct ql_rcv_buf_cb *lrg_buf_cb; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + if ((qdev->lrg_buf_free_count >= 8) && + (qdev->lrg_buf_release_cnt >= 16)) { + + if (qdev->lrg_buf_skb_check) + if (!ql_populate_free_queue(qdev)) + return; + + lrg_buf_q_ele = qdev->lrg_buf_next_free; + + while ((qdev->lrg_buf_release_cnt >= 16) && + (qdev->lrg_buf_free_count >= 8)) { + + for (i = 0; i < 8; i++) { + lrg_buf_cb = + ql_get_from_lrg_buf_free_list(qdev); + lrg_buf_q_ele->addr_high = + lrg_buf_cb->buf_phy_addr_high; + lrg_buf_q_ele->addr_low = + lrg_buf_cb->buf_phy_addr_low; + lrg_buf_q_ele++; + + qdev->lrg_buf_release_cnt--; + } + + qdev->lrg_buf_q_producer_index++; + + if (qdev->lrg_buf_q_producer_index == + qdev->num_lbufq_entries) + qdev->lrg_buf_q_producer_index = 0; + + if (qdev->lrg_buf_q_producer_index == + (qdev->num_lbufq_entries - 1)) { + lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; + } + } + wmb(); + qdev->lrg_buf_next_free = lrg_buf_q_ele; + writel(qdev->lrg_buf_q_producer_index, + &port_regs->CommonRegs.rxLargeQProducerIndex); + } +} + +static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, + struct ob_mac_iocb_rsp *mac_rsp) +{ + struct ql_tx_buf_cb *tx_cb; + int i; + int retval = 0; + + if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { + netdev_warn(qdev->ndev, + "Frame too short but it was padded and sent\n"); + } + + tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; + + /* Check the transmit response flags for any errors */ + if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { + netdev_err(qdev->ndev, + "Frame too short to be legal, frame not sent\n"); + + qdev->ndev->stats.tx_errors++; + retval = -EIO; + goto frame_not_sent; + } + + if (tx_cb->seg_count == 0) { + netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", + mac_rsp->transaction_id); + + qdev->ndev->stats.tx_errors++; + retval = -EIO; + goto invalid_seg_count; + } + + pci_unmap_single(qdev->pdev, + dma_unmap_addr(&tx_cb->map[0], mapaddr), + dma_unmap_len(&tx_cb->map[0], maplen), + PCI_DMA_TODEVICE); + tx_cb->seg_count--; + if (tx_cb->seg_count) { + for (i = 1; i < tx_cb->seg_count; i++) { + pci_unmap_page(qdev->pdev, + dma_unmap_addr(&tx_cb->map[i], + mapaddr), + dma_unmap_len(&tx_cb->map[i], maplen), + PCI_DMA_TODEVICE); + } + } + qdev->ndev->stats.tx_packets++; + qdev->ndev->stats.tx_bytes += tx_cb->skb->len; + +frame_not_sent: + dev_kfree_skb_irq(tx_cb->skb); + tx_cb->skb = NULL; + +invalid_seg_count: + atomic_inc(&qdev->tx_count); +} + +static void ql_get_sbuf(struct ql3_adapter *qdev) +{ + if (++qdev->small_buf_index == NUM_SMALL_BUFFERS) + qdev->small_buf_index = 0; + qdev->small_buf_release_cnt++; +} + +static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) +{ + struct ql_rcv_buf_cb *lrg_buf_cb = NULL; + lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index]; + qdev->lrg_buf_release_cnt++; + if (++qdev->lrg_buf_index == qdev->num_large_buffers) + qdev->lrg_buf_index = 0; + return lrg_buf_cb; +} + +/* + * The difference between 3022 and 3032 for inbound completions: + * 3022 uses two buffers per completion. The first buffer contains + * (some) header info, the second the remainder of the headers plus + * the data. For this chip we reserve some space at the top of the + * receive buffer so that the header info in buffer one can be + * prepended to the buffer two. Buffer two is then sent up while + * buffer one is returned to the hardware to be reused. 
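The two-buffer completion just described comes down to plain memory operations: the receive buffer is allocated with headroom (QL_HEADER_SPACE), and on completion the header bytes from buffer one are copied in front of the payload the hardware placed in buffer two, the same effect the skb_push()/skb_copy_from_linear_data_offset() pair achieves in ql_process_macip_rx_intr() below. An illustrative sketch with invented sizes and contents:

#include <stdio.h>
#include <string.h>

#define HEADROOM 14	/* stand-in for QL_HEADER_SPACE */

int main(void)
{
	char buf1[] = "ETHERHEADER!!x";	/* 14 header bytes (buffer one) */
	char buf2[HEADROOM + 32];	/* buffer two: headroom + payload */
	char *data = buf2 + HEADROOM;	/* where the DMA'd payload starts */

	strcpy(data, "payload");
	data -= HEADROOM;		/* skb_push(): claim the headroom */
	memcpy(data, buf1, HEADROOM);	/* prepend header from buffer one */

	printf("%.14s | %s\n", data, data + HEADROOM);
	return 0;
}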
+ * 3032 receives all of its data and headers in one buffer for a + * simpler process. 3032 also supports checksum verification as + * can be seen in ql_process_macip_rx_intr(). + */ +static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, + struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) +{ + struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; + struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; + struct sk_buff *skb; + u16 length = le16_to_cpu(ib_mac_rsp_ptr->length); + + /* + * Get the inbound address list (small buffer). + */ + ql_get_sbuf(qdev); + + if (qdev->device_id == QL3022_DEVICE_ID) + lrg_buf_cb1 = ql_get_lbuf(qdev); + + /* start of second buffer */ + lrg_buf_cb2 = ql_get_lbuf(qdev); + skb = lrg_buf_cb2->skb; + + qdev->ndev->stats.rx_packets++; + qdev->ndev->stats.rx_bytes += length; + + skb_put(skb, length); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(lrg_buf_cb2, mapaddr), + dma_unmap_len(lrg_buf_cb2, maplen), + PCI_DMA_FROMDEVICE); + prefetch(skb->data); + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, qdev->ndev); + + netif_receive_skb(skb); + lrg_buf_cb2->skb = NULL; + + if (qdev->device_id == QL3022_DEVICE_ID) + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); +} + +static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, + struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) +{ + struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; + struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; + struct sk_buff *skb1 = NULL, *skb2; + struct net_device *ndev = qdev->ndev; + u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); + u16 size = 0; + + /* + * Get the inbound address list (small buffer). + */ + + ql_get_sbuf(qdev); + + if (qdev->device_id == QL3022_DEVICE_ID) { + /* start of first buffer on 3022 */ + lrg_buf_cb1 = ql_get_lbuf(qdev); + skb1 = lrg_buf_cb1->skb; + size = ETH_HLEN; + if (*((u16 *) skb1->data) != 0xFFFF) + size += VLAN_ETH_HLEN - ETH_HLEN; + } + + /* start of second buffer */ + lrg_buf_cb2 = ql_get_lbuf(qdev); + skb2 = lrg_buf_cb2->skb; + + skb_put(skb2, length); /* Just the second buffer length here. */ + pci_unmap_single(qdev->pdev, + dma_unmap_addr(lrg_buf_cb2, mapaddr), + dma_unmap_len(lrg_buf_cb2, maplen), + PCI_DMA_FROMDEVICE); + prefetch(skb2->data); + + skb_checksum_none_assert(skb2); + if (qdev->device_id == QL3022_DEVICE_ID) { + /* + * Copy the ethhdr from first buffer to second. This + * is necessary for 3022 IP completions. + */ + skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, + skb_push(skb2, size), size); + } else { + u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); + if (checksum & + (IB_IP_IOCB_RSP_3032_ICE | + IB_IP_IOCB_RSP_3032_CE)) { + netdev_err(ndev, + "%s: Bad checksum for this %s packet, checksum = %x\n", + __func__, + ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 
+ "TCP" : "UDP"), checksum); + } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || + (checksum & IB_IP_IOCB_RSP_3032_UDP && + !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { + skb2->ip_summed = CHECKSUM_UNNECESSARY; + } + } + skb2->protocol = eth_type_trans(skb2, qdev->ndev); + + netif_receive_skb(skb2); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += length; + lrg_buf_cb2->skb = NULL; + + if (qdev->device_id == QL3022_DEVICE_ID) + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); +} + +static int ql_tx_rx_clean(struct ql3_adapter *qdev, + int *tx_cleaned, int *rx_cleaned, int work_to_do) +{ + struct net_rsp_iocb *net_rsp; + struct net_device *ndev = qdev->ndev; + int work_done = 0; + + /* While there are entries in the completion queue. */ + while ((le32_to_cpu(*(qdev->prsp_producer_index)) != + qdev->rsp_consumer_index) && (work_done < work_to_do)) { + + net_rsp = qdev->rsp_current; + rmb(); + /* + * Fix 4032 chip's undocumented "feature" where bit-8 is set + * if the inbound completion is for a VLAN. + */ + if (qdev->device_id == QL3032_DEVICE_ID) + net_rsp->opcode &= 0x7f; + switch (net_rsp->opcode) { + + case OPCODE_OB_MAC_IOCB_FN0: + case OPCODE_OB_MAC_IOCB_FN2: + ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) + net_rsp); + (*tx_cleaned)++; + break; + + case OPCODE_IB_MAC_IOCB: + case OPCODE_IB_3032_MAC_IOCB: + ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) + net_rsp); + (*rx_cleaned)++; + break; + + case OPCODE_IB_IP_IOCB: + case OPCODE_IB_3032_IP_IOCB: + ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) + net_rsp); + (*rx_cleaned)++; + break; + default: { + u32 *tmp = (u32 *)net_rsp; + netdev_err(ndev, + "Hit default case, not handled!\n" + " dropping the packet, opcode = %x\n" + "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", + net_rsp->opcode, + (unsigned long int)tmp[0], + (unsigned long int)tmp[1], + (unsigned long int)tmp[2], + (unsigned long int)tmp[3]); + } + } + + qdev->rsp_consumer_index++; + + if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { + qdev->rsp_consumer_index = 0; + qdev->rsp_current = qdev->rsp_q_virt_addr; + } else { + qdev->rsp_current++; + } + + work_done = *tx_cleaned + *rx_cleaned; + } + + return work_done; +} + +static int ql_poll(struct napi_struct *napi, int budget) +{ + struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); + int rx_cleaned = 0, tx_cleaned = 0; + unsigned long hw_flags; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); + + if (tx_cleaned + rx_cleaned != budget) { + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + __napi_complete(napi); + ql_update_small_bufq_prod_index(qdev); + ql_update_lrg_bufq_prod_index(qdev); + writel(qdev->rsp_consumer_index, + &port_regs->CommonRegs.rspQConsumerIndex); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + ql_enable_interrupts(qdev); + } + return tx_cleaned + rx_cleaned; +} + +static irqreturn_t ql3xxx_isr(int irq, void *dev_id) +{ + + struct net_device *ndev = dev_id; + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + int handled = 1; + u32 var; + + value = ql_read_common_reg_l(qdev, + &port_regs->CommonRegs.ispControlStatus); + + if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { + spin_lock(&qdev->adapter_lock); + netif_stop_queue(qdev->ndev); + netif_carrier_off(qdev->ndev); + ql_disable_interrupts(qdev); + qdev->port_link_state 
= LS_DOWN; + set_bit(QL_RESET_ACTIVE, &qdev->flags); + + if (value & ISP_CONTROL_FE) { + /* + * Chip Fatal Error. + */ + var = + ql_read_page0_reg_l(qdev, + &port_regs->PortFatalErrStatus); + netdev_warn(ndev, + "Resetting chip. PortFatalErrStatus register = 0x%x\n", + var); + set_bit(QL_RESET_START, &qdev->flags); + } else { + /* + * Soft Reset Requested. + */ + set_bit(QL_RESET_PER_SCSI, &qdev->flags); + netdev_err(ndev, + "Another function issued a reset to the chip. ISR value = %x\n", + value); + } + queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); + spin_unlock(&qdev->adapter_lock); + } else if (value & ISP_IMR_DISABLE_CMPL_INT) { + ql_disable_interrupts(qdev); + if (likely(napi_schedule_prep(&qdev->napi))) + __napi_schedule(&qdev->napi); + } else + return IRQ_NONE; + + return IRQ_RETVAL(handled); +} + +/* + * Get the total number of segments needed for the given number of fragments. + * This is necessary because outbound address lists (OAL) will be used when + * more than two frags are given. Each address list has 5 addr/len pairs. + * The 5th pair in each OAL is used to point to the next OAL if more frags + * are coming. That is why the frags:segment count ratio is not linear. + */ +static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) +{ + if (qdev->device_id == QL3022_DEVICE_ID) + return 1; + + if (frags <= 2) + return frags + 1; + else if (frags <= 6) + return frags + 2; + else if (frags <= 10) + return frags + 3; + else if (frags <= 14) + return frags + 4; + else if (frags <= 18) + return frags + 5; + return -1; +} + +static void ql_hw_csum_setup(const struct sk_buff *skb, + struct ob_mac_iocb_req *mac_iocb_ptr) +{ + const struct iphdr *ip = ip_hdr(skb); + + mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); + mac_iocb_ptr->ip_hdr_len = ip->ihl; + + if (ip->protocol == IPPROTO_TCP) { + mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | + OB_3032MAC_IOCB_REQ_IC; + } else { + mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | + OB_3032MAC_IOCB_REQ_IC; + } + +} + +/* + * Map the buffers for this transmit. + * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. + */ +static int ql_send_map(struct ql3_adapter *qdev, + struct ob_mac_iocb_req *mac_iocb_ptr, + struct ql_tx_buf_cb *tx_cb, + struct sk_buff *skb) +{ + struct oal *oal; + struct oal_entry *oal_entry; + int len = skb_headlen(skb); + dma_addr_t map; + int err; + int completed_segs, i; + int seg_cnt, seg = 0; + int frag_cnt = (int)skb_shinfo(skb)->nr_frags; + + seg_cnt = tx_cb->seg_count; + /* + * Map the skb buffer first. + */ + map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", + err); + + return NETDEV_TX_BUSY; + } + + oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; + oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); + oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); + oal_entry->len = cpu_to_le32(len); + dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + dma_unmap_len_set(&tx_cb->map[seg], maplen, len); + seg++; + + if (seg_cnt == 1) { + /* Terminate the last segment. */ + oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); + return NETDEV_TX_OK; + } + oal = tx_cb->oal; + for (completed_segs = 0; + completed_segs < frag_cnt; + completed_segs++, seg++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; + oal_entry++; + /* + * Check for continuation requirements. + * It's strange but necessary. 
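The mapping implemented by ql_get_seg_count() for the 3032 can be tabulated directly: the head buffer always costs one segment, three addr/len pairs live in the IOCB itself, and each chained OAL contributes five more, with the last pair of a full group spent on the chain pointer. A standalone reproduction of that arithmetic:

#include <stdio.h>

static int seg_count(int frags)
{
	if (frags <= 2)  return frags + 1;	/* head + frags fit in the IOCB */
	if (frags <= 6)  return frags + 2;	/* + 1 chain entry */
	if (frags <= 10) return frags + 3;	/* + 2 chain entries */
	if (frags <= 14) return frags + 4;
	if (frags <= 18) return frags + 5;
	return -1;				/* too many fragments */
}

int main(void)
{
	int f;

	for (f = 0; f <= 18; f++)
		printf("frags=%2d -> segments=%d\n", f, seg_count(f));
	return 0;
}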
+ * Continuation entry points to outbound address list. + */ + if ((seg == 2 && seg_cnt > 3) || + (seg == 7 && seg_cnt > 8) || + (seg == 12 && seg_cnt > 13) || + (seg == 17 && seg_cnt > 18)) { + map = pci_map_single(qdev->pdev, oal, + sizeof(struct oal), + PCI_DMA_TODEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping outbound address list with error: %d\n", + err); + goto map_error; + } + + oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); + oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); + oal_entry->len = cpu_to_le32(sizeof(struct oal) | + OAL_CONT_ENTRY); + dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + dma_unmap_len_set(&tx_cb->map[seg], maplen, + sizeof(struct oal)); + oal_entry = (struct oal_entry *)oal; + oal++; + seg++; + } + + map = pci_map_page(qdev->pdev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping frags failed with error: %d\n", + err); + goto map_error; + } + + oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); + oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); + oal_entry->len = cpu_to_le32(frag->size); + dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size); + } + /* Terminate the last segment. */ + oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); + return NETDEV_TX_OK; + +map_error: + /* A PCI mapping failed and now we will need to back out. + * We need to traverse through the OALs and associated pages which + * have been mapped, and now we must unmap them to clean up properly. + */ + + seg = 1; + oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; + oal = tx_cb->oal; + for (i = 0; i < completed_segs; i++, seg++) { + oal_entry++; + + /* + * Check for continuation requirements. + * It's strange but necessary. + */ + + if ((seg == 2 && seg_cnt > 3) || + (seg == 7 && seg_cnt > 8) || + (seg == 12 && seg_cnt > 13) || + (seg == 17 && seg_cnt > 18)) { + pci_unmap_single(qdev->pdev, + dma_unmap_addr(&tx_cb->map[seg], mapaddr), + dma_unmap_len(&tx_cb->map[seg], maplen), + PCI_DMA_TODEVICE); + oal++; + seg++; + } + + pci_unmap_page(qdev->pdev, + dma_unmap_addr(&tx_cb->map[seg], mapaddr), + dma_unmap_len(&tx_cb->map[seg], maplen), + PCI_DMA_TODEVICE); + } + + pci_unmap_single(qdev->pdev, + dma_unmap_addr(&tx_cb->map[0], mapaddr), + dma_unmap_len(&tx_cb->map[0], maplen), + PCI_DMA_TODEVICE); + + return NETDEV_TX_BUSY; + +} + +/* + * The difference between 3022 and 3032 sends: + * 3022 only supports a simple single segment transmission. + * 3032 supports checksumming and scatter/gather lists (fragments). + * The 3032 supports sglists by using the 3 addr/len pairs (ALP) + * in the IOCB plus a chain of outbound address lists (OAL) that + * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) + * will be used to point to an OAL when more ALP entries are required. + * The IOCB is always the top of the chain followed by one or more + * OALs (when necessary). 
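Throughout the mapping code a dma_addr_t is split into two 32-bit halves with LS_64BITS()/MS_64BITS() before being byte-swapped into an IOCB or OAL entry. A minimal sketch of that split; the macro bodies here are plausible equivalents written for the demo, not copied from qla3xxx.h:

#include <stdio.h>
#include <stdint.h>

/* Plausible equivalents of the driver's LS_64BITS()/MS_64BITS() helpers. */
#define LS_64BITS(x)	((uint32_t)((uint64_t)(x) & 0xffffffff))
#define MS_64BITS(x)	((uint32_t)(((uint64_t)(x) >> 32) & 0xffffffff))

int main(void)
{
	uint64_t map = 0x0000000123456000ULL;	/* pretend DMA address */

	printf("dma_lo=0x%08x dma_hi=0x%08x\n",
	       LS_64BITS(map), MS_64BITS(map));
	return 0;
}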
+ */ +static netdev_tx_t ql3xxx_send(struct sk_buff *skb, + struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + struct ql_tx_buf_cb *tx_cb; + u32 tot_len = skb->len; + struct ob_mac_iocb_req *mac_iocb_ptr; + + if (unlikely(atomic_read(&qdev->tx_count) < 2)) + return NETDEV_TX_BUSY; + + tx_cb = &qdev->tx_buf[qdev->req_producer_index]; + tx_cb->seg_count = ql_get_seg_count(qdev, + skb_shinfo(skb)->nr_frags); + if (tx_cb->seg_count == -1) { + netdev_err(ndev, "%s: invalid segment count!\n", __func__); + return NETDEV_TX_OK; + } + + mac_iocb_ptr = tx_cb->queue_entry; + memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); + mac_iocb_ptr->opcode = qdev->mac_ob_opcode; + mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; + mac_iocb_ptr->flags |= qdev->mb_bit_mask; + mac_iocb_ptr->transaction_id = qdev->req_producer_index; + mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); + tx_cb->skb = skb; + if (qdev->device_id == QL3032_DEVICE_ID && + skb->ip_summed == CHECKSUM_PARTIAL) + ql_hw_csum_setup(skb, mac_iocb_ptr); + + if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { + netdev_err(ndev, "%s: Could not map the segments!\n", __func__); + return NETDEV_TX_BUSY; + } + + wmb(); + qdev->req_producer_index++; + if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) + qdev->req_producer_index = 0; + wmb(); + ql_write_common_reg_l(qdev, + &port_regs->CommonRegs.reqQProducerIndex, + qdev->req_producer_index); + + netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, + "tx queued, slot %d, len %d\n", + qdev->req_producer_index, skb->len); + + atomic_dec(&qdev->tx_count); + return NETDEV_TX_OK; +} + +static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) +{ + qdev->req_q_size = + (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); + + qdev->req_q_virt_addr = + pci_alloc_consistent(qdev->pdev, + (size_t) qdev->req_q_size, + &qdev->req_q_phy_addr); + + if ((qdev->req_q_virt_addr == NULL) || + LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { + netdev_err(qdev->ndev, "reqQ failed\n"); + return -ENOMEM; + } + + qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); + + qdev->rsp_q_virt_addr = + pci_alloc_consistent(qdev->pdev, + (size_t) qdev->rsp_q_size, + &qdev->rsp_q_phy_addr); + + if ((qdev->rsp_q_virt_addr == NULL) || + LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { + netdev_err(qdev->ndev, "rspQ allocation failed\n"); + pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, + qdev->req_q_virt_addr, + qdev->req_q_phy_addr); + return -ENOMEM; + } + + set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); + + return 0; +} + +static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) +{ + if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { + netdev_info(qdev->ndev, "Already done\n"); + return; + } + + pci_free_consistent(qdev->pdev, + qdev->req_q_size, + qdev->req_q_virt_addr, qdev->req_q_phy_addr); + + qdev->req_q_virt_addr = NULL; + + pci_free_consistent(qdev->pdev, + qdev->rsp_q_size, + qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); + + qdev->rsp_q_virt_addr = NULL; + + clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); +} + +static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) +{ + /* Create Large Buffer Queue */ + qdev->lrg_buf_q_size = + qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); + if (qdev->lrg_buf_q_size < PAGE_SIZE) + qdev->lrg_buf_q_alloc_size = PAGE_SIZE; + else + qdev->lrg_buf_q_alloc_size = 
qdev->lrg_buf_q_size * 2; + + qdev->lrg_buf = + kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), + GFP_KERNEL); + if (qdev->lrg_buf == NULL) { + netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n"); + return -ENOMEM; + } + + qdev->lrg_buf_q_alloc_virt_addr = + pci_alloc_consistent(qdev->pdev, + qdev->lrg_buf_q_alloc_size, + &qdev->lrg_buf_q_alloc_phy_addr); + + if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { + netdev_err(qdev->ndev, "lBufQ failed\n"); + return -ENOMEM; + } + qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; + qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; + + /* Create Small Buffer Queue */ + qdev->small_buf_q_size = + NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); + if (qdev->small_buf_q_size < PAGE_SIZE) + qdev->small_buf_q_alloc_size = PAGE_SIZE; + else + qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; + + qdev->small_buf_q_alloc_virt_addr = + pci_alloc_consistent(qdev->pdev, + qdev->small_buf_q_alloc_size, + &qdev->small_buf_q_alloc_phy_addr); + + if (qdev->small_buf_q_alloc_virt_addr == NULL) { + netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); + pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, + qdev->lrg_buf_q_alloc_virt_addr, + qdev->lrg_buf_q_alloc_phy_addr); + return -ENOMEM; + } + + qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; + qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; + set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); + return 0; +} + +static void ql_free_buffer_queues(struct ql3_adapter *qdev) +{ + if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { + netdev_info(qdev->ndev, "Already done\n"); + return; + } + kfree(qdev->lrg_buf); + pci_free_consistent(qdev->pdev, + qdev->lrg_buf_q_alloc_size, + qdev->lrg_buf_q_alloc_virt_addr, + qdev->lrg_buf_q_alloc_phy_addr); + + qdev->lrg_buf_q_virt_addr = NULL; + + pci_free_consistent(qdev->pdev, + qdev->small_buf_q_alloc_size, + qdev->small_buf_q_alloc_virt_addr, + qdev->small_buf_q_alloc_phy_addr); + + qdev->small_buf_q_virt_addr = NULL; + + clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); +} + +static int ql_alloc_small_buffers(struct ql3_adapter *qdev) +{ + int i; + struct bufq_addr_element *small_buf_q_entry; + + /* Currently we allocate one chunk of memory and use it for small buffers */ + qdev->small_buf_total_size = + (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * + QL_SMALL_BUFFER_SIZE); + + qdev->small_buf_virt_addr = + pci_alloc_consistent(qdev->pdev, + qdev->small_buf_total_size, + &qdev->small_buf_phy_addr); + + if (qdev->small_buf_virt_addr == NULL) { + netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); + return -ENOMEM; + } + + qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); + qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); + + small_buf_q_entry = qdev->small_buf_q_virt_addr; + + /* Initialize the small buffer queue. 
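The small-buffer setup here carves a single coherent allocation into fixed-size slots: entry i points i * QL_SMALL_BUFFER_SIZE bytes into the block, so only the low address word differs between entries while the high word is shared. A sketch of the carving with placeholder constants:

#include <stdio.h>
#include <stdint.h>

#define SLOTS 8		/* placeholder for elements * entries */
#define SLOT_SIZE 64	/* placeholder for QL_SMALL_BUFFER_SIZE */

int main(void)
{
	uint64_t base = 0x12340000;	/* pretend DMA handle */
	int i;

	for (i = 0; i < SLOTS; i++) {
		uint32_t lo = (uint32_t)base + i * SLOT_SIZE;
		uint32_t hi = (uint32_t)(base >> 32);

		printf("entry %d: hi=0x%08x lo=0x%08x\n", i, hi, lo);
	}
	return 0;
}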
*/ + for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { + small_buf_q_entry->addr_high = + cpu_to_le32(qdev->small_buf_phy_addr_high); + small_buf_q_entry->addr_low = + cpu_to_le32(qdev->small_buf_phy_addr_low + + (i * QL_SMALL_BUFFER_SIZE)); + small_buf_q_entry++; + } + qdev->small_buf_index = 0; + set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); + return 0; +} + +static void ql_free_small_buffers(struct ql3_adapter *qdev) +{ + if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { + netdev_info(qdev->ndev, "Already done\n"); + return; + } + if (qdev->small_buf_virt_addr != NULL) { + pci_free_consistent(qdev->pdev, + qdev->small_buf_total_size, + qdev->small_buf_virt_addr, + qdev->small_buf_phy_addr); + + qdev->small_buf_virt_addr = NULL; + } +} + +static void ql_free_large_buffers(struct ql3_adapter *qdev) +{ + int i = 0; + struct ql_rcv_buf_cb *lrg_buf_cb; + + for (i = 0; i < qdev->num_large_buffers; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + if (lrg_buf_cb->skb) { + dev_kfree_skb(lrg_buf_cb->skb); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(lrg_buf_cb, mapaddr), + dma_unmap_len(lrg_buf_cb, maplen), + PCI_DMA_FROMDEVICE); + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + } else { + break; + } + } +} + +static void ql_init_large_buffers(struct ql3_adapter *qdev) +{ + int i; + struct ql_rcv_buf_cb *lrg_buf_cb; + struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; + + for (i = 0; i < qdev->num_large_buffers; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; + buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; + buf_addr_ele++; + } + qdev->lrg_buf_index = 0; + qdev->lrg_buf_skb_check = 0; +} + +static int ql_alloc_large_buffers(struct ql3_adapter *qdev) +{ + int i; + struct ql_rcv_buf_cb *lrg_buf_cb; + struct sk_buff *skb; + dma_addr_t map; + int err; + + for (i = 0; i < qdev->num_large_buffers; i++) { + skb = netdev_alloc_skb(qdev->ndev, + qdev->lrg_buffer_len); + if (unlikely(!skb)) { + /* Better luck next round */ + netdev_err(qdev->ndev, + "large buff alloc failed for %d bytes at index %d\n", + qdev->lrg_buffer_len * 2, i); + ql_free_large_buffers(qdev); + return -ENOMEM; + } else { + + lrg_buf_cb = &qdev->lrg_buf[i]; + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + lrg_buf_cb->index = i; + lrg_buf_cb->skb = skb; + /* + * We save some space to copy the ethhdr from first + * buffer + */ + skb_reserve(skb, QL_HEADER_SPACE); + map = pci_map_single(qdev->pdev, + skb->data, + qdev->lrg_buffer_len - + QL_HEADER_SPACE, + PCI_DMA_FROMDEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); + ql_free_large_buffers(qdev); + return -ENOMEM; + } + + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); + dma_unmap_len_set(lrg_buf_cb, maplen, + qdev->lrg_buffer_len - + QL_HEADER_SPACE); + lrg_buf_cb->buf_phy_addr_low = + cpu_to_le32(LS_64BITS(map)); + lrg_buf_cb->buf_phy_addr_high = + cpu_to_le32(MS_64BITS(map)); + } + } + return 0; +} + +static void ql_free_send_free_list(struct ql3_adapter *qdev) +{ + struct ql_tx_buf_cb *tx_cb; + int i; + + tx_cb = &qdev->tx_buf[0]; + for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { + kfree(tx_cb->oal); + tx_cb->oal = NULL; + tx_cb++; + } +} + +static int ql_create_send_free_list(struct ql3_adapter *qdev) +{ + struct ql_tx_buf_cb *tx_cb; + int i; + struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; + + /* Create free list of transmit buffers */ + for (i = 0; i < 
NUM_REQ_Q_ENTRIES; i++) { + + tx_cb = &qdev->tx_buf[i]; + tx_cb->skb = NULL; + tx_cb->queue_entry = req_q_curr; + req_q_curr++; + tx_cb->oal = kmalloc(512, GFP_KERNEL); + if (tx_cb->oal == NULL) + return -1; + } + return 0; +} + +static int ql_alloc_mem_resources(struct ql3_adapter *qdev) +{ + if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { + qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; + qdev->lrg_buffer_len = NORMAL_MTU_SIZE; + } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { + /* + * Bigger buffers, so less of them. + */ + qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; + qdev->lrg_buffer_len = JUMBO_MTU_SIZE; + } else { + netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", + qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); + return -ENOMEM; + } + qdev->num_large_buffers = + qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; + qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; + qdev->max_frame_size = + (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; + + /* + * First allocate a page of shared memory and use it for shadow + * locations of Network Request Queue Consumer Address Register and + * Network Completion Queue Producer Index Register + */ + qdev->shadow_reg_virt_addr = + pci_alloc_consistent(qdev->pdev, + PAGE_SIZE, &qdev->shadow_reg_phy_addr); + + if (qdev->shadow_reg_virt_addr != NULL) { + qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; + qdev->req_consumer_index_phy_addr_high = + MS_64BITS(qdev->shadow_reg_phy_addr); + qdev->req_consumer_index_phy_addr_low = + LS_64BITS(qdev->shadow_reg_phy_addr); + + qdev->prsp_producer_index = + (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); + qdev->rsp_producer_index_phy_addr_high = + qdev->req_consumer_index_phy_addr_high; + qdev->rsp_producer_index_phy_addr_low = + qdev->req_consumer_index_phy_addr_low + 8; + } else { + netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); + return -ENOMEM; + } + + if (ql_alloc_net_req_rsp_queues(qdev) != 0) { + netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); + goto err_req_rsp; + } + + if (ql_alloc_buffer_queues(qdev) != 0) { + netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); + goto err_buffer_queues; + } + + if (ql_alloc_small_buffers(qdev) != 0) { + netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); + goto err_small_buffers; + } + + if (ql_alloc_large_buffers(qdev) != 0) { + netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); + goto err_small_buffers; + } + + /* Initialize the large buffer queue. 
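The shadow-register setup in ql_alloc_mem_resources() above packs both shadow locations into a single DMA page: the request-queue consumer index at offset 0 and the response-queue producer index 8 bytes later, which is why the producer shadow's low physical address is simply the consumer's plus 8. Modeled with an ordinary buffer standing in for pci_alloc_consistent():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	static uint8_t page[4096];	/* stand-in for the shared DMA page */
	uint32_t *req_consumer = (uint32_t *)page;
	uint32_t *rsp_producer = (uint32_t *)(page + 8);

	*req_consumer = 0;
	*rsp_producer = 0;
	printf("consumer at +%u, producer at +%u\n",
	       (unsigned)((uint8_t *)req_consumer - page),
	       (unsigned)((uint8_t *)rsp_producer - page));
	return 0;
}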
*/ + ql_init_large_buffers(qdev); + if (ql_create_send_free_list(qdev)) + goto err_free_list; + + qdev->rsp_current = qdev->rsp_q_virt_addr; + + return 0; +err_free_list: + ql_free_send_free_list(qdev); +err_small_buffers: + ql_free_buffer_queues(qdev); +err_buffer_queues: + ql_free_net_req_rsp_queues(qdev); +err_req_rsp: + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->shadow_reg_virt_addr, + qdev->shadow_reg_phy_addr); + + return -ENOMEM; +} + +static void ql_free_mem_resources(struct ql3_adapter *qdev) +{ + ql_free_send_free_list(qdev); + ql_free_large_buffers(qdev); + ql_free_small_buffers(qdev); + ql_free_buffer_queues(qdev); + ql_free_net_req_rsp_queues(qdev); + if (qdev->shadow_reg_virt_addr != NULL) { + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->shadow_reg_virt_addr, + qdev->shadow_reg_phy_addr); + qdev->shadow_reg_virt_addr = NULL; + } +} + +static int ql_init_misc_registers(struct ql3_adapter *qdev) +{ + struct ql3xxx_local_ram_registers __iomem *local_ram = + (void __iomem *)qdev->mem_map_registers; + + if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 4)) + return -1; + + ql_write_page2_reg(qdev, + &local_ram->bufletSize, qdev->nvram_data.bufletSize); + + ql_write_page2_reg(qdev, + &local_ram->maxBufletCount, + qdev->nvram_data.bufletCount); + + ql_write_page2_reg(qdev, + &local_ram->freeBufletThresholdLow, + (qdev->nvram_data.tcpWindowThreshold25 << 16) | + (qdev->nvram_data.tcpWindowThreshold0)); + + ql_write_page2_reg(qdev, + &local_ram->freeBufletThresholdHigh, + qdev->nvram_data.tcpWindowThreshold50); + + ql_write_page2_reg(qdev, + &local_ram->ipHashTableBase, + (qdev->nvram_data.ipHashTableBaseHi << 16) | + qdev->nvram_data.ipHashTableBaseLo); + ql_write_page2_reg(qdev, + &local_ram->ipHashTableCount, + qdev->nvram_data.ipHashTableSize); + ql_write_page2_reg(qdev, + &local_ram->tcpHashTableBase, + (qdev->nvram_data.tcpHashTableBaseHi << 16) | + qdev->nvram_data.tcpHashTableBaseLo); + ql_write_page2_reg(qdev, + &local_ram->tcpHashTableCount, + qdev->nvram_data.tcpHashTableSize); + ql_write_page2_reg(qdev, + &local_ram->ncbBase, + (qdev->nvram_data.ncbTableBaseHi << 16) | + qdev->nvram_data.ncbTableBaseLo); + ql_write_page2_reg(qdev, + &local_ram->maxNcbCount, + qdev->nvram_data.ncbTableSize); + ql_write_page2_reg(qdev, + &local_ram->drbBase, + (qdev->nvram_data.drbTableBaseHi << 16) | + qdev->nvram_data.drbTableBaseLo); + ql_write_page2_reg(qdev, + &local_ram->maxDrbCount, + qdev->nvram_data.drbTableSize); + ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); + return 0; +} + +static int ql_adapter_initialize(struct ql3_adapter *qdev) +{ + u32 value; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + struct ql3xxx_host_memory_registers __iomem *hmem_regs = + (void __iomem *)port_regs; + u32 delay = 10; + int status = 0; + unsigned long hw_flags = 0; + + if (ql_mii_setup(qdev)) + return -1; + + /* Bring out PHY out of reset */ + ql_write_common_reg(qdev, spir, + (ISP_SERIAL_PORT_IF_WE | + (ISP_SERIAL_PORT_IF_WE << 16))); + /* Give the PHY time to come out of reset. */ + mdelay(100); + qdev->port_link_state = LS_DOWN; + netif_carrier_off(qdev->ndev); + + /* V2 chip fix for ARS-39168. 
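
ql_init_misc_registers() above brackets its page-2 local-RAM writes with the chip's hardware semaphore, since both PCI functions of the ISP share that configuration space. Reduced to the bare bracket, reusing the driver's own helpers (the function name and the single write are illustrative):

static int write_shared_ram_cfg(struct ql3_adapter *qdev, u32 cfg)
{
	struct ql3xxx_local_ram_registers __iomem *ram =
		(void __iomem *)qdev->mem_map_registers;

	/* Take the DDR-RAM hardware semaphore so the other PCI
	 * function cannot touch local RAM concurrently. */
	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 4))
		return -1;

	ql_write_page2_reg(qdev, &ram->bufletSize, cfg);  /* guarded write */

	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}
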
*/ + ql_write_common_reg(qdev, spir, + (ISP_SERIAL_PORT_IF_SDE | + (ISP_SERIAL_PORT_IF_SDE << 16))); + + /* Request Queue Registers */ + *((u32 *)(qdev->preq_consumer_index)) = 0; + atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); + qdev->req_producer_index = 0; + + ql_write_page1_reg(qdev, + &hmem_regs->reqConsumerIndexAddrHigh, + qdev->req_consumer_index_phy_addr_high); + ql_write_page1_reg(qdev, + &hmem_regs->reqConsumerIndexAddrLow, + qdev->req_consumer_index_phy_addr_low); + + ql_write_page1_reg(qdev, + &hmem_regs->reqBaseAddrHigh, + MS_64BITS(qdev->req_q_phy_addr)); + ql_write_page1_reg(qdev, + &hmem_regs->reqBaseAddrLow, + LS_64BITS(qdev->req_q_phy_addr)); + ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); + + /* Response Queue Registers */ + *((__le16 *) (qdev->prsp_producer_index)) = 0; + qdev->rsp_consumer_index = 0; + qdev->rsp_current = qdev->rsp_q_virt_addr; + + ql_write_page1_reg(qdev, + &hmem_regs->rspProducerIndexAddrHigh, + qdev->rsp_producer_index_phy_addr_high); + + ql_write_page1_reg(qdev, + &hmem_regs->rspProducerIndexAddrLow, + qdev->rsp_producer_index_phy_addr_low); + + ql_write_page1_reg(qdev, + &hmem_regs->rspBaseAddrHigh, + MS_64BITS(qdev->rsp_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rspBaseAddrLow, + LS_64BITS(qdev->rsp_q_phy_addr)); + + ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); + + /* Large Buffer Queue */ + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeQBaseAddrHigh, + MS_64BITS(qdev->lrg_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeQBaseAddrLow, + LS_64BITS(qdev->lrg_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeQLength, + qdev->num_lbufq_entries); + + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeBufferLength, + qdev->lrg_buffer_len); + + /* Small Buffer Queue */ + ql_write_page1_reg(qdev, + &hmem_regs->rxSmallQBaseAddrHigh, + MS_64BITS(qdev->small_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rxSmallQBaseAddrLow, + LS_64BITS(qdev->small_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); + ql_write_page1_reg(qdev, + &hmem_regs->rxSmallBufferLength, + QL_SMALL_BUFFER_SIZE); + + qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; + qdev->small_buf_release_cnt = 8; + qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; + qdev->lrg_buf_release_cnt = 8; + qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; + qdev->small_buf_index = 0; + qdev->lrg_buf_index = 0; + qdev->lrg_buf_free_count = 0; + qdev->lrg_buf_free_head = NULL; + qdev->lrg_buf_free_tail = NULL; + + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + rxSmallQProducerIndex, + qdev->small_buf_q_producer_index); + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + rxLargeQProducerIndex, + qdev->lrg_buf_q_producer_index); + + /* + * Find out if the chip has already been initialized. If it has, then + * we skip some of the initialization. + */ + clear_bit(QL_LINK_MASTER, &qdev->flags); + value = ql_read_page0_reg(qdev, &port_regs->portStatus); + if ((value & PORT_STATUS_IC) == 0) { + + /* Chip has not been configured yet, so let it rip. 
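
Every queue base programmed above is a 64-bit dma_addr_t handed to the chip as a high/low pair of 32-bit writes, via the MS_64BITS()/LS_64BITS() helpers defined in qla3xxx.h. The idiom in isolation, programming the request queue base as ql_adapter_initialize() does (helper name illustrative):

static void program_req_queue_base(struct ql3_adapter *qdev, dma_addr_t base)
{
	struct ql3xxx_host_memory_registers __iomem *hmem =
		(void __iomem *)qdev->mem_map_registers;

	/* The chip sees the full 64-bit address once both halves
	 * of the register pair have been written. */
	ql_write_page1_reg(qdev, &hmem->reqBaseAddrHigh, MS_64BITS(base));
	ql_write_page1_reg(qdev, &hmem->reqBaseAddrLow, LS_64BITS(base));
}
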
*/ + if (ql_init_misc_registers(qdev)) { + status = -1; + goto out; + } + + value = qdev->nvram_data.tcpMaxWindowSize; + ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); + + value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; + + if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) + * 2) << 13)) { + status = -1; + goto out; + } + ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); + ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, + (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << + 16) | (INTERNAL_CHIP_SD | + INTERNAL_CHIP_WE))); + ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); + } + + if (qdev->mac_index) + ql_write_page0_reg(qdev, + &port_regs->mac1MaxFrameLengthReg, + qdev->max_frame_size); + else + ql_write_page0_reg(qdev, + &port_regs->mac0MaxFrameLengthReg, + qdev->max_frame_size); + + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 7)) { + status = -1; + goto out; + } + + PHY_Setup(qdev); + ql_init_scan_mode(qdev); + ql_get_phy_owner(qdev); + + /* Load the MAC Configuration */ + + /* Program lower 32 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((qdev->ndev->dev_addr[2] << 24) + | (qdev->ndev->dev_addr[3] << 16) + | (qdev->ndev->dev_addr[4] << 8) + | qdev->ndev->dev_addr[5])); + + /* Program top 16 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((qdev->ndev->dev_addr[0] << 8) + | qdev->ndev->dev_addr[1])); + + /* Enable Primary MAC */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | + MAC_ADDR_INDIRECT_PTR_REG_PE)); + + /* Clear Primary and Secondary IP addresses */ + ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, + ((IP_ADDR_INDEX_REG_MASK << 16) | + (qdev->mac_index << 2))); + ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); + + ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, + ((IP_ADDR_INDEX_REG_MASK << 16) | + ((qdev->mac_index << 2) + 1))); + ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); + + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + + /* Indicate Configuration Complete */ + ql_write_page0_reg(qdev, + &port_regs->portControl, + ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); + + do { + value = ql_read_page0_reg(qdev, &port_regs->portStatus); + if (value & PORT_STATUS_IC) + break; + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + msleep(500); + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + } while (--delay); + + if (delay == 0) { + netdev_err(qdev->ndev, "Hw Initialization timeout\n"); + status = -1; + goto out; + } + + /* Enable Ethernet Function */ + if (qdev->device_id == QL3032_DEVICE_ID) { + value = + (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | + QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 | + QL3032_PORT_CONTROL_ET); + ql_write_page0_reg(qdev, &port_regs->functionControl, + ((value << 16) | value)); + } else { + value = + (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | + PORT_CONTROL_HH); + ql_write_page0_reg(qdev, &port_regs->portControl, + ((value << 16) | value)); + } + + +out: + return status; +} + +/* + * Caller holds hw_lock. 
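
The configuration-complete wait above shows the usual shape of polling hardware under a spinlock: the lock is dropped around each msleep() so the driver never sleeps in atomic context, then retaken before the next register read. The skeleton, assuming the caller holds qdev->hw_lock as ql_adapter_initialize() does (helper name illustrative):

static int wait_port_init_complete(struct ql3_adapter *qdev,
				   struct ql3xxx_port_registers __iomem *regs,
				   unsigned long *flags)
{
	int tries = 10;

	do {
		if (ql_read_page0_reg(qdev, &regs->portStatus) &
		    PORT_STATUS_IC)
			return 0;
		/* Never sleep while holding a spinlock. */
		spin_unlock_irqrestore(&qdev->hw_lock, *flags);
		msleep(500);
		spin_lock_irqsave(&qdev->hw_lock, *flags);
	} while (--tries);

	return -ETIMEDOUT;
}
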
+ */ +static int ql_adapter_reset(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + int status = 0; + u16 value; + int max_wait_time; + + set_bit(QL_RESET_ACTIVE, &qdev->flags); + clear_bit(QL_RESET_DONE, &qdev->flags); + + /* + * Issue soft reset to chip. + */ + netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); + ql_write_common_reg(qdev, + &port_regs->CommonRegs.ispControlStatus, + ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); + + /* Wait 3 seconds for reset to complete. */ + netdev_printk(KERN_DEBUG, qdev->ndev, + "Wait 10 milliseconds for reset to complete\n"); + + /* Wait until the firmware tells us the Soft Reset is done */ + max_wait_time = 5; + do { + value = + ql_read_common_reg(qdev, + &port_regs->CommonRegs.ispControlStatus); + if ((value & ISP_CONTROL_SR) == 0) + break; + + ssleep(1); + } while ((--max_wait_time)); + + /* + * Also, make sure that the Network Reset Interrupt bit has been + * cleared after the soft reset has taken place. + */ + value = + ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); + if (value & ISP_CONTROL_RI) { + netdev_printk(KERN_DEBUG, qdev->ndev, + "clearing RI after reset\n"); + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + ispControlStatus, + ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); + } + + if (max_wait_time == 0) { + /* Issue Force Soft Reset */ + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + ispControlStatus, + ((ISP_CONTROL_FSR << 16) | + ISP_CONTROL_FSR)); + /* + * Wait until the firmware tells us the Force Soft Reset is + * done + */ + max_wait_time = 5; + do { + value = ql_read_common_reg(qdev, + &port_regs->CommonRegs. + ispControlStatus); + if ((value & ISP_CONTROL_FSR) == 0) + break; + ssleep(1); + } while ((--max_wait_time)); + } + if (max_wait_time == 0) + status = 1; + + clear_bit(QL_RESET_ACTIVE, &qdev->flags); + set_bit(QL_RESET_DONE, &qdev->flags); + return status; +} + +static void ql_set_mac_info(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value, port_status; + u8 func_number; + + /* Get the function number */ + value = + ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); + func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); + port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); + switch (value & ISP_CONTROL_FN_MASK) { + case ISP_CONTROL_FN0_NET: + qdev->mac_index = 0; + qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; + qdev->mb_bit_mask = FN0_MA_BITS_MASK; + qdev->PHYAddr = PORT0_PHY_ADDRESS; + if (port_status & PORT_STATUS_SM0) + set_bit(QL_LINK_OPTICAL, &qdev->flags); + else + clear_bit(QL_LINK_OPTICAL, &qdev->flags); + break; + + case ISP_CONTROL_FN1_NET: + qdev->mac_index = 1; + qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; + qdev->mb_bit_mask = FN1_MA_BITS_MASK; + qdev->PHYAddr = PORT1_PHY_ADDRESS; + if (port_status & PORT_STATUS_SM1) + set_bit(QL_LINK_OPTICAL, &qdev->flags); + else + clear_bit(QL_LINK_OPTICAL, &qdev->flags); + break; + + case ISP_CONTROL_FN0_SCSI: + case ISP_CONTROL_FN1_SCSI: + default: + netdev_printk(KERN_DEBUG, qdev->ndev, + "Invalid function number, ispControlStatus = 0x%x\n", + value); + break; + } + qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; +} + +static void ql_display_dev_info(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + struct pci_dev *pdev = qdev->pdev; + + netdev_info(ndev, + "%s Adapter %d RevisionID %d found %s on PCI 
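
ql_adapter_reset() above polls ISP_CONTROL_SR once a second for up to five seconds (the nearby comment and log message quote other durations, but the loop bound is what counts), then escalates to a force soft reset via ISP_CONTROL_FSR if the bit never clears. The wait it open-codes twice factors naturally into one helper; a sketch:

/* Poll a self-clearing ispControlStatus bit, sleeping 1 s per try.
 * Returns 0 once the bit clears, -ETIMEDOUT otherwise. */
static int wait_isp_bit_clear(struct ql3_adapter *qdev, u16 bit, int secs)
{
	struct ql3xxx_port_registers __iomem *regs = qdev->mem_map_registers;

	while (secs--) {
		if (!(ql_read_common_reg(qdev,
				&regs->CommonRegs.ispControlStatus) & bit))
			return 0;
		ssleep(1);
	}
	return -ETIMEDOUT;
}
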
slot %d\n", + DRV_NAME, qdev->index, qdev->chip_rev_id, + qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", + qdev->pci_slot); + netdev_info(ndev, "%s Interface\n", + test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); + + /* + * Print PCI bus width/type. + */ + netdev_info(ndev, "Bus interface is %s %s\n", + ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), + ((qdev->pci_x) ? "PCI-X" : "PCI")); + + netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", + qdev->mem_map_registers); + netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); + + netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); +} + +static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) +{ + struct net_device *ndev = qdev->ndev; + int retval = 0; + + netif_stop_queue(ndev); + netif_carrier_off(ndev); + + clear_bit(QL_ADAPTER_UP, &qdev->flags); + clear_bit(QL_LINK_MASTER, &qdev->flags); + + ql_disable_interrupts(qdev); + + free_irq(qdev->pdev->irq, ndev); + + if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { + netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); + clear_bit(QL_MSI_ENABLED, &qdev->flags); + pci_disable_msi(qdev->pdev); + } + + del_timer_sync(&qdev->adapter_timer); + + napi_disable(&qdev->napi); + + if (do_reset) { + int soft_reset; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (ql_wait_for_drvr_lock(qdev)) { + soft_reset = ql_adapter_reset(qdev); + if (soft_reset) { + netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", + qdev->index); + } + netdev_err(ndev, + "Releasing driver lock via chip reset\n"); + } else { + netdev_err(ndev, + "Could not acquire driver lock to do reset!\n"); + retval = -1; + } + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + } + ql_free_mem_resources(qdev); + return retval; +} + +static int ql_adapter_up(struct ql3_adapter *qdev) +{ + struct net_device *ndev = qdev->ndev; + int err; + unsigned long irq_flags = IRQF_SHARED; + unsigned long hw_flags; + + if (ql_alloc_mem_resources(qdev)) { + netdev_err(ndev, "Unable to allocate buffers\n"); + return -ENOMEM; + } + + if (qdev->msi) { + if (pci_enable_msi(qdev->pdev)) { + netdev_err(ndev, + "User requested MSI, but MSI failed to initialize. 
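
The MSI setup here tries MSI first and quietly falls back to legacy INTx, adjusting the request_irq() flags to match: MSI vectors are exclusive, so IRQF_SHARED is dropped only when MSI sticks. The decision in isolation (helper name illustrative, same 2011-era API as the driver):

static unsigned long pick_irq_flags(struct ql3_adapter *qdev)
{
	unsigned long irq_flags = IRQF_SHARED;

	if (qdev->msi && !pci_enable_msi(qdev->pdev)) {
		set_bit(QL_MSI_ENABLED, &qdev->flags);
		irq_flags &= ~IRQF_SHARED;	/* MSI is per-device */
	} else {
		qdev->msi = 0;			/* fall back to INTx */
	}
	return irq_flags;
}
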
Continuing without MSI.\n"); + qdev->msi = 0; + } else { + netdev_info(ndev, "MSI Enabled...\n"); + set_bit(QL_MSI_ENABLED, &qdev->flags); + irq_flags &= ~IRQF_SHARED; + } + } + + err = request_irq(qdev->pdev->irq, ql3xxx_isr, + irq_flags, ndev->name, ndev); + if (err) { + netdev_err(ndev, + "Failed to reserve interrupt %d - already in use\n", + qdev->pdev->irq); + goto err_irq; + } + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + err = ql_wait_for_drvr_lock(qdev); + if (err) { + err = ql_adapter_initialize(qdev); + if (err) { + netdev_err(ndev, "Unable to initialize adapter\n"); + goto err_init; + } + netdev_err(ndev, "Releasing driver lock\n"); + ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); + } else { + netdev_err(ndev, "Could not acquire driver lock\n"); + goto err_lock; + } + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + set_bit(QL_ADAPTER_UP, &qdev->flags); + + mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); + + napi_enable(&qdev->napi); + ql_enable_interrupts(qdev); + return 0; + +err_init: + ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); +err_lock: + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + free_irq(qdev->pdev->irq, ndev); +err_irq: + if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { + netdev_info(ndev, "calling pci_disable_msi()\n"); + clear_bit(QL_MSI_ENABLED, &qdev->flags); + pci_disable_msi(qdev->pdev); + } + return err; +} + +static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) +{ + if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { + netdev_err(qdev->ndev, + "Driver up/down cycle failed, closing device\n"); + rtnl_lock(); + dev_close(qdev->ndev); + rtnl_unlock(); + return -1; + } + return 0; +} + +static int ql3xxx_close(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + + /* + * Wait for device to recover from a reset. + * (Rarely happens, but possible.) + */ + while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) + msleep(50); + + ql_adapter_down(qdev, QL_DO_RESET); + return 0; +} + +static int ql3xxx_open(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + return ql_adapter_up(qdev); +} + +static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + struct sockaddr *addr = p; + unsigned long hw_flags; + + if (netif_running(ndev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + /* Program lower 32 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((ndev->dev_addr[2] << 24) | (ndev-> + dev_addr[3] << 16) | + (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); + + /* Program top 16 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + return 0; +} + +static void ql3xxx_tx_timeout(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + + netdev_err(ndev, "Resetting...\n"); + /* + * Stop the queues, we've got a problem. + */ + netif_stop_queue(ndev); + + /* + * Wake up the worker to process this event. 
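
ql3xxx_tx_timeout() above runs in softirq context, so it only parks the queue and defers the actual recovery to the driver's single-threaded workqueue, where ql_cycle_adapter() is free to sleep and, on failure, take rtnl_lock(). Restating the two halves of that hand-off side by side (sketch names only; the driver's own functions do the same):

/* Softirq context: may not sleep, so just stop the queue and
 * schedule the heavy lifting. */
static void sketch_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	netif_stop_queue(ndev);
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}

/* Process context: sleeping and rtnl_lock() are allowed here. */
static void sketch_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}
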
+ */ + queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); +} + +static void ql_reset_work(struct work_struct *work) +{ + struct ql3_adapter *qdev = + container_of(work, struct ql3_adapter, reset_work.work); + struct net_device *ndev = qdev->ndev; + u32 value; + struct ql_tx_buf_cb *tx_cb; + int max_wait_time, i; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + unsigned long hw_flags; + + if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { + clear_bit(QL_LINK_MASTER, &qdev->flags); + + /* + * Loop through the active list and return the skb. + */ + for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { + int j; + tx_cb = &qdev->tx_buf[i]; + if (tx_cb->skb) { + netdev_printk(KERN_DEBUG, ndev, + "Freeing lost SKB\n"); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(&tx_cb->map[0], + mapaddr), + dma_unmap_len(&tx_cb->map[0], maplen), + PCI_DMA_TODEVICE); + for (j = 1; j < tx_cb->seg_count; j++) { + pci_unmap_page(qdev->pdev, + dma_unmap_addr(&tx_cb->map[j], + mapaddr), + dma_unmap_len(&tx_cb->map[j], + maplen), + PCI_DMA_TODEVICE); + } + dev_kfree_skb(tx_cb->skb); + tx_cb->skb = NULL; + } + } + + netdev_err(ndev, "Clearing NRI after reset\n"); + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + ispControlStatus, + ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); + /* + * Wait the for Soft Reset to Complete. + */ + max_wait_time = 10; + do { + value = ql_read_common_reg(qdev, + &port_regs->CommonRegs. + + ispControlStatus); + if ((value & ISP_CONTROL_SR) == 0) { + netdev_printk(KERN_DEBUG, ndev, + "reset completed\n"); + break; + } + + if (value & ISP_CONTROL_RI) { + netdev_printk(KERN_DEBUG, ndev, + "clearing NRI after reset\n"); + ql_write_common_reg(qdev, + &port_regs-> + CommonRegs. + ispControlStatus, + ((ISP_CONTROL_RI << + 16) | ISP_CONTROL_RI)); + } + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + ssleep(1); + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + } while (--max_wait_time); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + if (value & ISP_CONTROL_SR) { + + /* + * Set the reset flags and clear the board again. + * Nothing else to do... 
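
The lost-skb sweep in ql_reset_work() above undoes the transmit path's mapping scheme segment by segment: map[0] is the skb's linear head (mapped with pci_map_single()), every later segment is a page fragment (pci_map_page()), and each must be released with the matching unmap call. The per-packet cleanup in isolation (helper name illustrative):

static void unmap_tx_cb(struct ql3_adapter *qdev, struct ql_tx_buf_cb *tx_cb)
{
	int j;

	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	for (j = 1; j < tx_cb->seg_count; j++)	/* page fragments */
		pci_unmap_page(qdev->pdev,
			       dma_unmap_addr(&tx_cb->map[j], mapaddr),
			       dma_unmap_len(&tx_cb->map[j], maplen),
			       PCI_DMA_TODEVICE);
	dev_kfree_skb(tx_cb->skb);
	tx_cb->skb = NULL;
}
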
+ */ + netdev_err(ndev, + "Timed out waiting for reset to complete\n"); + netdev_err(ndev, "Do a reset\n"); + clear_bit(QL_RESET_PER_SCSI, &qdev->flags); + clear_bit(QL_RESET_START, &qdev->flags); + ql_cycle_adapter(qdev, QL_DO_RESET); + return; + } + + clear_bit(QL_RESET_ACTIVE, &qdev->flags); + clear_bit(QL_RESET_PER_SCSI, &qdev->flags); + clear_bit(QL_RESET_START, &qdev->flags); + ql_cycle_adapter(qdev, QL_NO_RESET); + } +} + +static void ql_tx_timeout_work(struct work_struct *work) +{ + struct ql3_adapter *qdev = + container_of(work, struct ql3_adapter, tx_timeout_work.work); + + ql_cycle_adapter(qdev, QL_DO_RESET); +} + +static void ql_get_board_info(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); + + qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); + if (value & PORT_STATUS_64) + qdev->pci_width = 64; + else + qdev->pci_width = 32; + if (value & PORT_STATUS_X) + qdev->pci_x = 1; + else + qdev->pci_x = 0; + qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); +} + +static void ql3xxx_timer(unsigned long ptr) +{ + struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; + queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); +} + +static const struct net_device_ops ql3xxx_netdev_ops = { + .ndo_open = ql3xxx_open, + .ndo_start_xmit = ql3xxx_send, + .ndo_stop = ql3xxx_close, + .ndo_set_multicast_list = NULL, /* not allowed on NIC side */ + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ql3xxx_set_mac_address, + .ndo_tx_timeout = ql3xxx_tx_timeout, +}; + +static int __devinit ql3xxx_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_entry) +{ + struct net_device *ndev = NULL; + struct ql3_adapter *qdev = NULL; + static int cards_found; + int uninitialized_var(pci_using_dac), err; + + err = pci_enable_device(pdev); + if (err) { + pr_err("%s cannot enable PCI device\n", pci_name(pdev)); + goto err_out; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + pci_using_dac = 0; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + if (err) { + pr_err("%s no usable DMA configuration\n", pci_name(pdev)); + goto err_out_free_regions; + } + + ndev = alloc_etherdev(sizeof(struct ql3_adapter)); + if (!ndev) { + pr_err("%s could not alloc etherdev\n", pci_name(pdev)); + err = -ENOMEM; + goto err_out_free_regions; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + pci_set_drvdata(pdev, ndev); + + qdev = netdev_priv(ndev); + qdev->index = cards_found; + qdev->ndev = ndev; + qdev->pdev = pdev; + qdev->device_id = pci_entry->device; + qdev->port_link_state = LS_DOWN; + if (msi) + qdev->msi = 1; + + qdev->msg_enable = netif_msg_init(debug, default_msg); + + if (pci_using_dac) + ndev->features |= NETIF_F_HIGHDMA; + if (qdev->device_id == QL3032_DEVICE_ID) + ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; + + qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); + if (!qdev->mem_map_registers) { + pr_err("%s: cannot map device registers\n", pci_name(pdev)); + err = -EIO; + goto err_out_free_ndev; + } + + spin_lock_init(&qdev->adapter_lock); + 
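
The probe path above negotiates DMA addressing the usual way: try a 64-bit mask, fall back to 32-bit, and remember which one stuck so NETIF_F_HIGHDMA is only advertised when the device can actually reach high memory. Condensed into one helper (name illustrative; logic as in ql3xxx_probe()):

static int set_dma_masks(struct pci_dev *pdev, int *using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
		return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	}
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		*using_dac = 0;
		return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}
	return -EIO;	/* no usable DMA configuration */
}
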
spin_lock_init(&qdev->hw_lock); + + /* Set driver entry points */ + ndev->netdev_ops = &ql3xxx_netdev_ops; + SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); + ndev->watchdog_timeo = 5 * HZ; + + netif_napi_add(ndev, &qdev->napi, ql_poll, 64); + + ndev->irq = pdev->irq; + + /* make sure the EEPROM is good */ + if (ql_get_nvram_params(qdev)) { + pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", + __func__, qdev->index); + err = -EIO; + goto err_out_iounmap; + } + + ql_set_mac_info(qdev); + + /* Validate and set parameters */ + if (qdev->mac_index) { + ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; + ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); + } else { + ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; + ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); + } + memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); + + ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; + + /* Record PCI bus information. */ + ql_get_board_info(qdev); + + /* + * Set the Maximum Memory Read Byte Count value. We do this to handle + * jumbo frames. + */ + if (qdev->pci_x) + pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); + + err = register_netdev(ndev); + if (err) { + pr_err("%s: cannot register net device\n", pci_name(pdev)); + goto err_out_iounmap; + } + + /* we're going to reset, so assume we have no link for now */ + + netif_carrier_off(ndev); + netif_stop_queue(ndev); + + qdev->workqueue = create_singlethread_workqueue(ndev->name); + INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); + INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); + INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); + + init_timer(&qdev->adapter_timer); + qdev->adapter_timer.function = ql3xxx_timer; + qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ + qdev->adapter_timer.data = (unsigned long)qdev; + + if (!cards_found) { + pr_alert("%s\n", DRV_STRING); + pr_alert("Driver name: %s, Version: %s\n", + DRV_NAME, DRV_VERSION); + } + ql_display_dev_info(ndev); + + cards_found++; + return 0; + +err_out_iounmap: + iounmap(qdev->mem_map_registers); +err_out_free_ndev: + free_netdev(ndev); +err_out_free_regions: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +err_out: + return err; +} + +static void __devexit ql3xxx_remove(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql3_adapter *qdev = netdev_priv(ndev); + + unregister_netdev(ndev); + + ql_disable_interrupts(qdev); + + if (qdev->workqueue) { + cancel_delayed_work(&qdev->reset_work); + cancel_delayed_work(&qdev->tx_timeout_work); + destroy_workqueue(qdev->workqueue); + qdev->workqueue = NULL; + } + + iounmap(qdev->mem_map_registers); + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + free_netdev(ndev); +} + +static struct pci_driver ql3xxx_driver = { + + .name = DRV_NAME, + .id_table = ql3xxx_pci_tbl, + .probe = ql3xxx_probe, + .remove = __devexit_p(ql3xxx_remove), +}; + +static int __init ql3xxx_init_module(void) +{ + return pci_register_driver(&ql3xxx_driver); +} + +static void __exit ql3xxx_exit(void) +{ + pci_unregister_driver(&ql3xxx_driver); +} + +module_init(ql3xxx_init_module); +module_exit(ql3xxx_exit); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.h b/drivers/net/ethernet/qlogic/qla3xxx.h new file mode 100644 index 000000000000..73e234366a82 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qla3xxx.h @@ -0,0 +1,1189 @@ +/* + * QLogic QLA3xxx NIC HBA Driver + * 
Copyright (c) 2003-2006 QLogic Corporation + * + * See LICENSE.qla3xxx for copyright and licensing details. + */ +#ifndef _QLA3XXX_H_ +#define _QLA3XXX_H_ + +/* + * IOCB Definitions... + */ +#pragma pack(1) + +#define OPCODE_OB_MAC_IOCB_FN0 0x01 +#define OPCODE_OB_MAC_IOCB_FN2 0x21 + +#define OPCODE_IB_MAC_IOCB 0xF9 +#define OPCODE_IB_3032_MAC_IOCB 0x09 +#define OPCODE_IB_IP_IOCB 0xFA +#define OPCODE_IB_3032_IP_IOCB 0x0A + +#define OPCODE_FUNC_ID_MASK 0x30 +#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ + +#define FN0_MA_BITS_MASK 0x00 +#define FN1_MA_BITS_MASK 0x80 + +struct ob_mac_iocb_req { + u8 opcode; + u8 flags; +#define OB_MAC_IOCB_REQ_MA 0xe0 +#define OB_MAC_IOCB_REQ_F 0x10 +#define OB_MAC_IOCB_REQ_X 0x08 +#define OB_MAC_IOCB_REQ_D 0x02 +#define OB_MAC_IOCB_REQ_I 0x01 + u8 flags1; +#define OB_3032MAC_IOCB_REQ_IC 0x04 +#define OB_3032MAC_IOCB_REQ_TC 0x02 +#define OB_3032MAC_IOCB_REQ_UC 0x01 + u8 reserved0; + + u32 transaction_id; /* opaque for hardware */ + __le16 data_len; + u8 ip_hdr_off; + u8 ip_hdr_len; + __le32 reserved1; + __le32 reserved2; + __le32 buf_addr0_low; + __le32 buf_addr0_high; + __le32 buf_0_len; + __le32 buf_addr1_low; + __le32 buf_addr1_high; + __le32 buf_1_len; + __le32 buf_addr2_low; + __le32 buf_addr2_high; + __le32 buf_2_len; + __le32 reserved3; + __le32 reserved4; +}; +/* + * The following constants define control bits for buffer + * length fields for all IOCB's. + */ +#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */ +#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */ +#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */ +#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. */ + +struct ob_mac_iocb_rsp { + u8 opcode; + u8 flags; +#define OB_MAC_IOCB_RSP_P 0x08 +#define OB_MAC_IOCB_RSP_L 0x04 +#define OB_MAC_IOCB_RSP_S 0x02 +#define OB_MAC_IOCB_RSP_I 0x01 + + __le16 reserved0; + u32 transaction_id; /* opaque for hardware */ + __le32 reserved1; + __le32 reserved2; +}; + +struct ib_mac_iocb_rsp { + u8 opcode; +#define IB_MAC_IOCB_RSP_V 0x80 + u8 flags; +#define IB_MAC_IOCB_RSP_S 0x80 +#define IB_MAC_IOCB_RSP_H1 0x40 +#define IB_MAC_IOCB_RSP_H0 0x20 +#define IB_MAC_IOCB_RSP_B 0x10 +#define IB_MAC_IOCB_RSP_M 0x08 +#define IB_MAC_IOCB_RSP_MA 0x07 + + __le16 length; + __le32 reserved; + __le32 ial_low; + __le32 ial_high; + +}; + +struct ob_ip_iocb_req { + u8 opcode; + __le16 flags; +#define OB_IP_IOCB_REQ_O 0x100 +#define OB_IP_IOCB_REQ_H 0x008 +#define OB_IP_IOCB_REQ_U 0x004 +#define OB_IP_IOCB_REQ_D 0x002 +#define OB_IP_IOCB_REQ_I 0x001 + + u8 reserved0; + + __le32 transaction_id; + __le16 data_len; + __le16 reserved1; + __le32 hncb_ptr_low; + __le32 hncb_ptr_high; + __le32 buf_addr0_low; + __le32 buf_addr0_high; + __le32 buf_0_len; + __le32 buf_addr1_low; + __le32 buf_addr1_high; + __le32 buf_1_len; + __le32 buf_addr2_low; + __le32 buf_addr2_high; + __le32 buf_2_len; + __le32 reserved2; + __le32 reserved3; +}; + +/* defines for BufferLength fields above */ +#define OB_IP_IOCB_REQ_E 0x80000000 +#define OB_IP_IOCB_REQ_C 0x40000000 +#define OB_IP_IOCB_REQ_L 0x20000000 +#define OB_IP_IOCB_REQ_R 0x10000000 + +struct ob_ip_iocb_rsp { + u8 opcode; + u8 flags; +#define OB_MAC_IOCB_RSP_H 0x10 +#define OB_MAC_IOCB_RSP_E 0x08 +#define OB_MAC_IOCB_RSP_L 0x04 +#define OB_MAC_IOCB_RSP_S 0x02 +#define OB_MAC_IOCB_RSP_I 0x01 + + __le16 reserved0; + __le32 transaction_id; + __le32 reserved1; + __le32 reserved2; +}; + +struct ib_ip_iocb_rsp { + u8 opcode; +#define IB_IP_IOCB_RSP_3032_V 0x80 
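
The IOCB structures above are shared verbatim with the chip, hence #pragma pack(1) (no compiler padding may creep into the wire layout) and the explicit __le16/__le32 field types: the CPU side converts on every multi-byte access, while single bytes need no conversion. For example, using the driver's own ib_mac_iocb_rsp (accessor names are illustrative):

static u16 ib_rsp_length(const struct ib_mac_iocb_rsp *rsp)
{
	/* little-endian on the wire; swap on big-endian hosts */
	return le16_to_cpu(rsp->length);
}

static int ib_rsp_flag(const struct ib_mac_iocb_rsp *rsp, u8 bit)
{
	return rsp->flags & bit;	/* u8: no byte order to fix */
}
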
+#define IB_IP_IOCB_RSP_3032_O 0x40 +#define IB_IP_IOCB_RSP_3032_I 0x20 +#define IB_IP_IOCB_RSP_3032_R 0x10 + u8 flags; +#define IB_IP_IOCB_RSP_S 0x80 +#define IB_IP_IOCB_RSP_H1 0x40 +#define IB_IP_IOCB_RSP_H0 0x20 +#define IB_IP_IOCB_RSP_B 0x10 +#define IB_IP_IOCB_RSP_M 0x08 +#define IB_IP_IOCB_RSP_MA 0x07 + + __le16 length; + __le16 checksum; +#define IB_IP_IOCB_RSP_3032_ICE 0x01 +#define IB_IP_IOCB_RSP_3032_CE 0x02 +#define IB_IP_IOCB_RSP_3032_NUC 0x04 +#define IB_IP_IOCB_RSP_3032_UDP 0x08 +#define IB_IP_IOCB_RSP_3032_TCP 0x10 +#define IB_IP_IOCB_RSP_3032_IPE 0x20 + __le16 reserved; +#define IB_IP_IOCB_RSP_R 0x01 + __le32 ial_low; + __le32 ial_high; +}; + +struct net_rsp_iocb { + u8 opcode; + u8 flags; + __le16 reserved0; + __le32 reserved[3]; +}; +#pragma pack() + +/* + * Register Definitions... + */ +#define PORT0_PHY_ADDRESS 0x1e00 +#define PORT1_PHY_ADDRESS 0x1f00 + +#define ETHERNET_CRC_SIZE 4 + +#define MII_SCAN_REGISTER 0x00000001 + +#define PHY_ID_0_REG 2 +#define PHY_ID_1_REG 3 + +#define PHY_OUI_1_MASK 0xfc00 +#define PHY_MODEL_MASK 0x03f0 + +/* Address for the Agere Phy */ +#define MII_AGERE_ADDR_1 0x00001000 +#define MII_AGERE_ADDR_2 0x00001100 + +/* 32-bit ispControlStatus */ +enum { + ISP_CONTROL_NP_MASK = 0x0003, + ISP_CONTROL_NP_PCSR = 0x0000, + ISP_CONTROL_NP_HMCR = 0x0001, + ISP_CONTROL_NP_LRAMCR = 0x0002, + ISP_CONTROL_NP_PSR = 0x0003, + ISP_CONTROL_RI = 0x0008, + ISP_CONTROL_CI = 0x0010, + ISP_CONTROL_PI = 0x0020, + ISP_CONTROL_IN = 0x0040, + ISP_CONTROL_BE = 0x0080, + ISP_CONTROL_FN_MASK = 0x0700, + ISP_CONTROL_FN0_NET = 0x0400, + ISP_CONTROL_FN0_SCSI = 0x0500, + ISP_CONTROL_FN1_NET = 0x0600, + ISP_CONTROL_FN1_SCSI = 0x0700, + ISP_CONTROL_LINK_DN_0 = 0x0800, + ISP_CONTROL_LINK_DN_1 = 0x1000, + ISP_CONTROL_FSR = 0x2000, + ISP_CONTROL_FE = 0x4000, + ISP_CONTROL_SR = 0x8000, +}; + +/* 32-bit ispInterruptMaskReg */ +enum { + ISP_IMR_ENABLE_INT = 0x0004, + ISP_IMR_DISABLE_RESET_INT = 0x0008, + ISP_IMR_DISABLE_CMPL_INT = 0x0010, + ISP_IMR_DISABLE_PROC_INT = 0x0020, +}; + +/* 32-bit serialPortInterfaceReg */ +enum { + ISP_SERIAL_PORT_IF_CLK = 0x0001, + ISP_SERIAL_PORT_IF_CS = 0x0002, + ISP_SERIAL_PORT_IF_D0 = 0x0004, + ISP_SERIAL_PORT_IF_DI = 0x0008, + ISP_NVRAM_MASK = (0x000F << 16), + ISP_SERIAL_PORT_IF_WE = 0x0010, + ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F, + ISP_SERIAL_PORT_IF_SCI = 0x0400, + ISP_SERIAL_PORT_IF_SC0 = 0x0800, + ISP_SERIAL_PORT_IF_SCE = 0x1000, + ISP_SERIAL_PORT_IF_SDI = 0x2000, + ISP_SERIAL_PORT_IF_SDO = 0x4000, + ISP_SERIAL_PORT_IF_SDE = 0x8000, + ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00, +}; + +/* semaphoreReg */ +enum { + QL_RESOURCE_MASK_BASE_CODE = 0x7, + QL_RESOURCE_BITS_BASE_CODE = 0x4, + QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1), + QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4), + QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7), + QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10), + QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13), + QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)), + QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)), + QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)), + QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)), + QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)), +}; + + /* + * QL3XXX memory-mapped registers + * QL3XXX has 4 "pages" of registers, each page occupying + * 256 bytes. Each page has a "common" area at the start and then + * page-specific registers after that. 
+ */ +struct ql3xxx_common_registers { + u32 MB0; /* Offset 0x00 */ + u32 MB1; /* Offset 0x04 */ + u32 MB2; /* Offset 0x08 */ + u32 MB3; /* Offset 0x0c */ + u32 MB4; /* Offset 0x10 */ + u32 MB5; /* Offset 0x14 */ + u32 MB6; /* Offset 0x18 */ + u32 MB7; /* Offset 0x1c */ + u32 flashBiosAddr; + u32 flashBiosData; + u32 ispControlStatus; + u32 ispInterruptMaskReg; + u32 serialPortInterfaceReg; + u32 semaphoreReg; + u32 reqQProducerIndex; + u32 rspQConsumerIndex; + + u32 rxLargeQProducerIndex; + u32 rxSmallQProducerIndex; + u32 arcMadiCommand; + u32 arcMadiData; +}; + +enum { + EXT_HW_CONFIG_SP_MASK = 0x0006, + EXT_HW_CONFIG_SP_NONE = 0x0000, + EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002, + EXT_HW_CONFIG_SP_ECC = 0x0004, + EXT_HW_CONFIG_SP_ECCx = 0x0006, + EXT_HW_CONFIG_SIZE_MASK = 0x0060, + EXT_HW_CONFIG_SIZE_128M = 0x0000, + EXT_HW_CONFIG_SIZE_256M = 0x0020, + EXT_HW_CONFIG_SIZE_512M = 0x0040, + EXT_HW_CONFIG_SIZE_INVALID = 0x0060, + EXT_HW_CONFIG_PD = 0x0080, + EXT_HW_CONFIG_FW = 0x0200, + EXT_HW_CONFIG_US = 0x0400, + EXT_HW_CONFIG_DCS_MASK = 0x1800, + EXT_HW_CONFIG_DCS_9MA = 0x0000, + EXT_HW_CONFIG_DCS_15MA = 0x0800, + EXT_HW_CONFIG_DCS_18MA = 0x1000, + EXT_HW_CONFIG_DCS_24MA = 0x1800, + EXT_HW_CONFIG_DDS_MASK = 0x6000, + EXT_HW_CONFIG_DDS_9MA = 0x0000, + EXT_HW_CONFIG_DDS_15MA = 0x2000, + EXT_HW_CONFIG_DDS_18MA = 0x4000, + EXT_HW_CONFIG_DDS_24MA = 0x6000, +}; + +/* InternalChipConfig */ +enum { + INTERNAL_CHIP_DM = 0x0001, + INTERNAL_CHIP_SD = 0x0002, + INTERNAL_CHIP_RAP_MASK = 0x000C, + INTERNAL_CHIP_RAP_RR = 0x0000, + INTERNAL_CHIP_RAP_NRM = 0x0004, + INTERNAL_CHIP_RAP_ERM = 0x0008, + INTERNAL_CHIP_RAP_ERMx = 0x000C, + INTERNAL_CHIP_WE = 0x0010, + INTERNAL_CHIP_EF = 0x0020, + INTERNAL_CHIP_FR = 0x0040, + INTERNAL_CHIP_FW = 0x0080, + INTERNAL_CHIP_FI = 0x0100, + INTERNAL_CHIP_FT = 0x0200, +}; + +/* portControl */ +enum { + PORT_CONTROL_DS = 0x0001, + PORT_CONTROL_HH = 0x0002, + PORT_CONTROL_EI = 0x0004, + PORT_CONTROL_ET = 0x0008, + PORT_CONTROL_EF = 0x0010, + PORT_CONTROL_DRM = 0x0020, + PORT_CONTROL_RLB = 0x0040, + PORT_CONTROL_RCB = 0x0080, + PORT_CONTROL_MAC = 0x0100, + PORT_CONTROL_IPV = 0x0200, + PORT_CONTROL_IFP = 0x0400, + PORT_CONTROL_ITP = 0x0800, + PORT_CONTROL_FI = 0x1000, + PORT_CONTROL_DFP = 0x2000, + PORT_CONTROL_OI = 0x4000, + PORT_CONTROL_CC = 0x8000, +}; + +/* portStatus */ +enum { + PORT_STATUS_SM0 = 0x0001, + PORT_STATUS_SM1 = 0x0002, + PORT_STATUS_X = 0x0008, + PORT_STATUS_DL = 0x0080, + PORT_STATUS_IC = 0x0200, + PORT_STATUS_MRC = 0x0400, + PORT_STATUS_NL = 0x0800, + PORT_STATUS_REV_ID_MASK = 0x7000, + PORT_STATUS_REV_ID_1 = 0x1000, + PORT_STATUS_REV_ID_2 = 0x2000, + PORT_STATUS_REV_ID_3 = 0x3000, + PORT_STATUS_64 = 0x8000, + PORT_STATUS_UP0 = 0x10000, + PORT_STATUS_AC0 = 0x20000, + PORT_STATUS_AE0 = 0x40000, + PORT_STATUS_UP1 = 0x100000, + PORT_STATUS_AC1 = 0x200000, + PORT_STATUS_AE1 = 0x400000, + PORT_STATUS_F0_ENABLED = 0x1000000, + PORT_STATUS_F1_ENABLED = 0x2000000, + PORT_STATUS_F2_ENABLED = 0x4000000, + PORT_STATUS_F3_ENABLED = 0x8000000, +}; + +/* macMIIMgmtControlReg */ +enum { + MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003, + MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000, + MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001, + MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002, + MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003, + MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008, + MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010, + MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020, + MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040, + MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080, +}; + +/* macMIIMgmtControlReg */ +enum { + 
MAC_MII_CONTROL_RC = 0x0001, + MAC_MII_CONTROL_SC = 0x0002, + MAC_MII_CONTROL_AS = 0x0004, + MAC_MII_CONTROL_NP = 0x0008, + MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070, + MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000, + MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010, + MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020, + MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030, + MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040, + MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050, + MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060, + MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070, + MAC_MII_CONTROL_RM = 0x8000, +}; + +/* macMIIStatusReg */ +enum { + MAC_MII_STATUS_BSY = 0x0001, + MAC_MII_STATUS_SC = 0x0002, + MAC_MII_STATUS_NV = 0x0004, +}; + +enum { + MAC_CONFIG_REG_PE = 0x0001, + MAC_CONFIG_REG_TF = 0x0002, + MAC_CONFIG_REG_RF = 0x0004, + MAC_CONFIG_REG_FD = 0x0008, + MAC_CONFIG_REG_GM = 0x0010, + MAC_CONFIG_REG_LB = 0x0020, + MAC_CONFIG_REG_SR = 0x8000, +}; + +enum { + MAC_HALF_DUPLEX_REG_ED = 0x10000, + MAC_HALF_DUPLEX_REG_NB = 0x20000, + MAC_HALF_DUPLEX_REG_BNB = 0x40000, + MAC_HALF_DUPLEX_REG_ALT = 0x80000, +}; + +enum { + IP_ADDR_INDEX_REG_MASK = 0x000f, + IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000, + IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001, + IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002, + IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003, + IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004, + IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005, + IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006, + IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007, + IP_ADDR_INDEX_REG_6 = 0x0008, + IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030, + IP_ADDR_INDEX_REG_E = 0x0040, +}; +enum { + QL3032_PORT_CONTROL_DS = 0x0001, + QL3032_PORT_CONTROL_HH = 0x0002, + QL3032_PORT_CONTROL_EIv6 = 0x0004, + QL3032_PORT_CONTROL_EIv4 = 0x0008, + QL3032_PORT_CONTROL_ET = 0x0010, + QL3032_PORT_CONTROL_EF = 0x0020, + QL3032_PORT_CONTROL_DRM = 0x0040, + QL3032_PORT_CONTROL_RLB = 0x0080, + QL3032_PORT_CONTROL_RCB = 0x0100, + QL3032_PORT_CONTROL_KIE = 0x0200, +}; + +enum { + PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f, + PROBE_MUX_ADDR_REG_SYSCLK = 0x0000, + PROBE_MUX_ADDR_REG_PCICLK = 0x0040, + PROBE_MUX_ADDR_REG_NRXCLK = 0x0080, + PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0, + PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00, + PROBE_MUX_ADDR_REG_UP = 0x4000, + PROBE_MUX_ADDR_REG_RE = 0x8000, +}; + +enum { + STATISTICS_INDEX_REG_MASK = 0x01ff, + STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000, + STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001, + STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002, + STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003, + STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004, + STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005, + STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006, + STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007, + STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008, + STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009, + STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a, + STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b, + STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c, + STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d, + STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e, + STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f, + STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010, + STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011, + STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012, + STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013, + STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014, + STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015, + STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016, + STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017, + STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018, + STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019, + STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a, + 
STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b, + STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c, + STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d, + STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e, + STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f, + STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020, + STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021, + STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022, + STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023, + STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024, + STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025, + STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026, + STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027, + STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028, + STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029, + STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030, + STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031, + STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032, + STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033, + STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034, + STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035, + STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036, + STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037, + STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038, + STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f, +}; + +enum { + PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001, + PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002, + PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004, + PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008, + PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010, + PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020, + PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040, + PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080, + PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100, + PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200, + PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400, + PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800, + PORT_FATAL_ERROR_STATUS_BLE = 0x00001000, + PORT_FATAL_ERROR_STATUS_SPE = 0x00002000, + PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000, + PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000, + PORT_FATAL_ERROR_STATUS_ICE = 0x00010000, + PORT_FATAL_ERROR_STATUS_ILE = 0x00020000, + PORT_FATAL_ERROR_STATUS_OPE = 0x00040000, + PORT_FATAL_ERROR_STATUS_TA = 0x00080000, + PORT_FATAL_ERROR_STATUS_MA = 0x00100000, + PORT_FATAL_ERROR_STATUS_SCE = 0x00200000, + PORT_FATAL_ERROR_STATUS_RPE = 0x00400000, + PORT_FATAL_ERROR_STATUS_MPE = 0x00800000, + PORT_FATAL_ERROR_STATUS_OCE = 0x01000000, +}; + +/* + * port control and status page - page 0 + */ + +struct ql3xxx_port_registers { + struct ql3xxx_common_registers CommonRegs; + + u32 ExternalHWConfig; + u32 InternalChipConfig; + u32 portControl; + u32 portStatus; + u32 macAddrIndirectPtrReg; + u32 macAddrDataReg; + u32 macMIIMgmtControlReg; + u32 macMIIMgmtAddrReg; + u32 macMIIMgmtDataReg; + u32 macMIIStatusReg; + u32 mac0ConfigReg; + u32 mac0IpgIfgReg; + u32 mac0HalfDuplexReg; + u32 mac0MaxFrameLengthReg; + u32 mac0PauseThresholdReg; + u32 mac1ConfigReg; + u32 mac1IpgIfgReg; + u32 mac1HalfDuplexReg; + u32 mac1MaxFrameLengthReg; + u32 mac1PauseThresholdReg; + u32 ipAddrIndexReg; + u32 ipAddrDataReg; + u32 ipReassemblyTimeout; + u32 tcpMaxWindow; + u32 currentTcpTimestamp[2]; + u32 internalRamRWAddrReg; + u32 internalRamWDataReg; + u32 reclaimedBufferAddrRegLow; + u32 reclaimedBufferAddrRegHigh; + u32 tcpConfiguration; + u32 functionControl; + u32 fpgaRevID; + u32 localRamAddr; + u32 localRamDataAutoIncr; + u32 localRamDataNonIncr; + u32 gpOutput; + u32 gpInput; + u32 probeMuxAddr; + u32 probeMuxData; + u32 statisticsIndexReg; + u32 statisticsReadDataRegAutoIncr; + u32 statisticsReadDataRegNoIncr; + u32 PortFatalErrStatus; +}; + +/* + * port host 
memory config page - page 1 + */ +struct ql3xxx_host_memory_registers { + struct ql3xxx_common_registers CommonRegs; + + u32 reserved[12]; + + /* Network Request Queue */ + u32 reqConsumerIndex; + u32 reqConsumerIndexAddrLow; + u32 reqConsumerIndexAddrHigh; + u32 reqBaseAddrLow; + u32 reqBaseAddrHigh; + u32 reqLength; + + /* Network Completion Queue */ + u32 rspProducerIndex; + u32 rspProducerIndexAddrLow; + u32 rspProducerIndexAddrHigh; + u32 rspBaseAddrLow; + u32 rspBaseAddrHigh; + u32 rspLength; + + /* RX Large Buffer Queue */ + u32 rxLargeQConsumerIndex; + u32 rxLargeQBaseAddrLow; + u32 rxLargeQBaseAddrHigh; + u32 rxLargeQLength; + u32 rxLargeBufferLength; + + /* RX Small Buffer Queue */ + u32 rxSmallQConsumerIndex; + u32 rxSmallQBaseAddrLow; + u32 rxSmallQBaseAddrHigh; + u32 rxSmallQLength; + u32 rxSmallBufferLength; + +}; + +/* + * port local RAM page - page 2 + */ +struct ql3xxx_local_ram_registers { + struct ql3xxx_common_registers CommonRegs; + u32 bufletSize; + u32 maxBufletCount; + u32 currentBufletCount; + u32 reserved; + u32 freeBufletThresholdLow; + u32 freeBufletThresholdHigh; + u32 ipHashTableBase; + u32 ipHashTableCount; + u32 tcpHashTableBase; + u32 tcpHashTableCount; + u32 ncbBase; + u32 maxNcbCount; + u32 currentNcbCount; + u32 drbBase; + u32 maxDrbCount; + u32 currentDrbCount; +}; + +/* + * definitions for Semaphore bits in Semaphore/Serial NVRAM interface register + */ + +#define LS_64BITS(x) (u32)(0xffffffff & ((u64)x)) +#define MS_64BITS(x) (u32)(0xffffffff & (((u64)x)>>16>>16) ) + +/* + * I/O register + */ + +enum { + CONTROL_REG = 0, + STATUS_REG = 1, + PHY_STAT_LINK_UP = 0x0004, + PHY_CTRL_LOOPBACK = 0x4000, + + PETBI_CONTROL_REG = 0x00, + PETBI_CTRL_ALL_PARAMS = 0x7140, + PETBI_CTRL_SOFT_RESET = 0x8000, + PETBI_CTRL_AUTO_NEG = 0x1000, + PETBI_CTRL_RESTART_NEG = 0x0200, + PETBI_CTRL_FULL_DUPLEX = 0x0100, + PETBI_CTRL_SPEED_1000 = 0x0040, + + PETBI_STATUS_REG = 0x01, + PETBI_STAT_NEG_DONE = 0x0020, + PETBI_STAT_LINK_UP = 0x0004, + + PETBI_NEG_ADVER = 0x04, + PETBI_NEG_PAUSE = 0x0080, + PETBI_NEG_PAUSE_MASK = 0x0180, + PETBI_NEG_DUPLEX = 0x0020, + PETBI_NEG_DUPLEX_MASK = 0x0060, + + PETBI_NEG_PARTNER = 0x05, + PETBI_NEG_ERROR_MASK = 0x3000, + + PETBI_EXPANSION_REG = 0x06, + PETBI_EXP_PAGE_RX = 0x0002, + + PHY_GIG_CONTROL = 9, + PHY_GIG_ENABLE_MAN = 0x1000, /* Enable Master/Slave Manual Config*/ + PHY_GIG_SET_MASTER = 0x0800, /* Set Master (slave if clear)*/ + PHY_GIG_ALL_PARAMS = 0x0300, + PHY_GIG_ADV_1000F = 0x0200, + PHY_GIG_ADV_1000H = 0x0100, + + PHY_NEG_ADVER = 4, + PHY_NEG_ALL_PARAMS = 0x0fe0, + PHY_NEG_ASY_PAUSE = 0x0800, + PHY_NEG_SYM_PAUSE = 0x0400, + PHY_NEG_ADV_SPEED = 0x01e0, + PHY_NEG_ADV_100F = 0x0100, + PHY_NEG_ADV_100H = 0x0080, + PHY_NEG_ADV_10F = 0x0040, + PHY_NEG_ADV_10H = 0x0020, + + PETBI_TBI_CTRL = 0x11, + PETBI_TBI_RESET = 0x8000, + PETBI_TBI_AUTO_SENSE = 0x0100, + PETBI_TBI_SERDES_MODE = 0x0010, + PETBI_TBI_SERDES_WRAP = 0x0002, + + AUX_CONTROL_STATUS = 0x1c, + PHY_AUX_NEG_DONE = 0x8000, + PHY_NEG_PARTNER = 5, + PHY_AUX_DUPLEX_STAT = 0x0020, + PHY_AUX_SPEED_STAT = 0x0018, + PHY_AUX_NO_HW_STRAP = 0x0004, + PHY_AUX_RESET_STICK = 0x0002, + PHY_NEG_PAUSE = 0x0400, + PHY_CTRL_SOFT_RESET = 0x8000, + PHY_CTRL_AUTO_NEG = 0x1000, + PHY_CTRL_RESTART_NEG = 0x0200, +}; +enum { +/* AM29LV Flash definitions */ + FM93C56A_START = 0x1, +/* Commands */ + FM93C56A_READ = 0x2, + FM93C56A_WEN = 0x0, + FM93C56A_WRITE = 0x1, + FM93C56A_WRITE_ALL = 0x0, + FM93C56A_WDS = 0x0, + FM93C56A_ERASE = 0x3, + FM93C56A_ERASE_ALL = 0x0, +/* Command Extensions */ + 
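
The MS_64BITS() helper above shifts in two 16-bit steps rather than a single >> 32, an idiom that stays well-defined even if the operand were only 32 bits wide (a full-width shift of a 32-bit value is undefined in C); with the explicit u64 cast the two forms are equivalent here. A standalone demonstration of the high/low split the chip registers expect:

#include <stdint.h>
#include <stdio.h>

#define LS_64BITS(x) (uint32_t)(0xffffffff & ((uint64_t)(x)))
#define MS_64BITS(x) (uint32_t)(0xffffffff & (((uint64_t)(x)) >> 16 >> 16))

int main(void)
{
	uint64_t addr = 0x000000012345abcdULL;	/* example bus address */

	printf("high=%08x low=%08x\n",
	       (unsigned)MS_64BITS(addr), (unsigned)LS_64BITS(addr));
	/* prints: high=00000001 low=2345abcd */
	return 0;
}
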
FM93C56A_WEN_EXT = 0x3, + FM93C56A_WRITE_ALL_EXT = 0x1, + FM93C56A_WDS_EXT = 0x0, + FM93C56A_ERASE_ALL_EXT = 0x2, +/* Special Bits */ + FM93C56A_READ_DUMMY_BITS = 1, + FM93C56A_READY = 0, + FM93C56A_BUSY = 1, + FM93C56A_CMD_BITS = 2, +/* AM29LV Flash definitions */ + FM93C56A_SIZE_8 = 0x100, + FM93C56A_SIZE_16 = 0x80, + FM93C66A_SIZE_8 = 0x200, + FM93C66A_SIZE_16 = 0x100, + FM93C86A_SIZE_16 = 0x400, +/* Address Bits */ + FM93C56A_NO_ADDR_BITS_16 = 8, + FM93C56A_NO_ADDR_BITS_8 = 9, + FM93C86A_NO_ADDR_BITS_16 = 10, +/* Data Bits */ + FM93C56A_DATA_BITS_16 = 16, + FM93C56A_DATA_BITS_8 = 8, +}; +enum { +/* Auburn Bits */ + AUBURN_EEPROM_DI = 0x8, + AUBURN_EEPROM_DI_0 = 0x0, + AUBURN_EEPROM_DI_1 = 0x8, + AUBURN_EEPROM_DO = 0x4, + AUBURN_EEPROM_DO_0 = 0x0, + AUBURN_EEPROM_DO_1 = 0x4, + AUBURN_EEPROM_CS = 0x2, + AUBURN_EEPROM_CS_0 = 0x0, + AUBURN_EEPROM_CS_1 = 0x2, + AUBURN_EEPROM_CLK_RISE = 0x1, + AUBURN_EEPROM_CLK_FALL = 0x0, +}; +enum {EEPROM_SIZE = FM93C86A_SIZE_16, + EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16, + EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16, +}; + +/* + * MAC Config data structure + */ + struct eeprom_port_cfg { + u16 etherMtu_mac; + u16 pauseThreshold_mac; + u16 resumeThreshold_mac; + u16 portConfiguration; +#define PORT_CONFIG_DEFAULT 0xf700 +#define PORT_CONFIG_AUTO_NEG_ENABLED 0x8000 +#define PORT_CONFIG_SYM_PAUSE_ENABLED 0x4000 +#define PORT_CONFIG_FULL_DUPLEX_ENABLED 0x2000 +#define PORT_CONFIG_HALF_DUPLEX_ENABLED 0x1000 +#define PORT_CONFIG_1000MB_SPEED 0x0400 +#define PORT_CONFIG_100MB_SPEED 0x0200 +#define PORT_CONFIG_10MB_SPEED 0x0100 +#define PORT_CONFIG_LINK_SPEED_MASK 0x0F00 + u16 reserved[12]; + +}; + +/* + * BIOS data structure + */ +struct eeprom_bios_cfg { + u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12; + + u8 bootID0:7, boodID0Valid:1; + u8 bootLun0[8]; + + u8 bootID1:7, boodID1Valid:1; + u8 bootLun1[8]; + + u16 MaxLunsTrgt; + u8 reserved[10]; +}; + +/* + * Function Specific Data structure + */ +struct eeprom_function_cfg { + u8 reserved[30]; + u16 macAddress[3]; + u16 macAddressSecondary[3]; + + u16 subsysVendorId; + u16 subsysDeviceId; +}; + +/* + * EEPROM format + */ +struct eeprom_data { + u8 asicId[4]; + u16 version_and_numPorts; /* together to avoid endianness crap */ + u16 boardId; + +#define EEPROM_BOARDID_STR_SIZE 16 +#define EEPROM_SERIAL_NUM_SIZE 16 + + u8 boardIdStr[16]; + u8 serialNumber[16]; + u16 extHwConfig; + struct eeprom_port_cfg macCfg_port0; + struct eeprom_port_cfg macCfg_port1; + u16 bufletSize; + u16 bufletCount; + u16 tcpWindowThreshold50; + u16 tcpWindowThreshold25; + u16 tcpWindowThreshold0; + u16 ipHashTableBaseHi; + u16 ipHashTableBaseLo; + u16 ipHashTableSize; + u16 tcpHashTableBaseHi; + u16 tcpHashTableBaseLo; + u16 tcpHashTableSize; + u16 ncbTableBaseHi; + u16 ncbTableBaseLo; + u16 ncbTableSize; + u16 drbTableBaseHi; + u16 drbTableBaseLo; + u16 drbTableSize; + u16 reserved_142[4]; + u16 ipReassemblyTimeout; + u16 tcpMaxWindowSize; + u16 ipSecurity; +#define IPSEC_CONFIG_PRESENT 0x0001 + u8 reserved_156[294]; + u16 qDebug[8]; + struct eeprom_function_cfg funcCfg_fn0; + u16 reserved_510; + u8 oemSpace[432]; + struct eeprom_bios_cfg biosCfg_fn1; + struct eeprom_function_cfg funcCfg_fn1; + u16 reserved_1022; + u8 reserved_1024[464]; + struct eeprom_function_cfg funcCfg_fn2; + u16 reserved_1534; + u8 reserved_1536[432]; + struct eeprom_bios_cfg biosCfg_fn3; + struct eeprom_function_cfg funcCfg_fn3; + u16 checksum; +}; + +/* + * General definitions... 
+ */ + +/* + * Below are a number compiler switches for controlling driver behavior. + * Some are not supported under certain conditions and are notated as such. + */ + +#define QL3XXX_VENDOR_ID 0x1077 +#define QL3022_DEVICE_ID 0x3022 +#define QL3032_DEVICE_ID 0x3032 + +/* MTU & Frame Size stuff */ +#define NORMAL_MTU_SIZE ETH_DATA_LEN +#define JUMBO_MTU_SIZE 9000 +#define VLAN_ID_LEN 2 + +/* Request Queue Related Definitions */ +#define NUM_REQ_Q_ENTRIES 256 /* so that 64 * 64 = 4096 (1 page) */ + +/* Response Queue Related Definitions */ +#define NUM_RSP_Q_ENTRIES 256 /* so that 256 * 16 = 4096 (1 page) */ + +/* Transmit and Receive Buffers */ +#define NUM_LBUFQ_ENTRIES 128 +#define JUMBO_NUM_LBUFQ_ENTRIES 32 +#define NUM_SBUFQ_ENTRIES 64 +#define QL_SMALL_BUFFER_SIZE 32 +#define QL_ADDR_ELE_PER_BUFQ_ENTRY \ +(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element)) + /* Each send has at least control block. This is how many we keep. */ +#define NUM_SMALL_BUFFERS NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY + +#define QL_HEADER_SPACE 32 /* make header space at top of skb. */ +/* + * Large & Small Buffers for Receives + */ +struct lrg_buf_q_entry { + + __le32 addr0_lower; +#define IAL_LAST_ENTRY 0x00000001 +#define IAL_CONT_ENTRY 0x00000002 +#define IAL_FLAG_MASK 0x00000003 + __le32 addr0_upper; + __le32 addr1_lower; + __le32 addr1_upper; + __le32 addr2_lower; + __le32 addr2_upper; + __le32 addr3_lower; + __le32 addr3_upper; + __le32 addr4_lower; + __le32 addr4_upper; + __le32 addr5_lower; + __le32 addr5_upper; + __le32 addr6_lower; + __le32 addr6_upper; + __le32 addr7_lower; + __le32 addr7_upper; + +}; + +struct bufq_addr_element { + __le32 addr_low; + __le32 addr_high; +}; + +#define QL_NO_RESET 0 +#define QL_DO_RESET 1 + +enum link_state_t { + LS_UNKNOWN = 0, + LS_DOWN, + LS_DEGRADE, + LS_RECOVER, + LS_UP, +}; + +struct ql_rcv_buf_cb { + struct ql_rcv_buf_cb *next; + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); + __le32 buf_phy_addr_low; + __le32 buf_phy_addr_high; + int index; +}; + +/* + * Original IOCB has 3 sg entries: + * first points to skb-data area + * second points to first frag + * third points to next oal. + * OAL has 5 entries: + * 1 thru 4 point to frags + * fifth points to next oal. + */ +#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1) + +struct oal_entry { + __le32 dma_lo; + __le32 dma_hi; + __le32 len; +#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */ +#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */ +}; + +struct oal { + struct oal_entry oal_entry[5]; +}; + +struct map_list { + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct ql_tx_buf_cb { + struct sk_buff *skb; + struct ob_mac_iocb_req *queue_entry ; + int seg_count; + struct oal *oal; + struct map_list map[MAX_SKB_FRAGS+1]; +}; + +/* definitions for type field */ +#define QL_BUF_TYPE_MACIOCB 0x01 +#define QL_BUF_TYPE_IPIOCB 0x02 +#define QL_BUF_TYPE_TCPIOCB 0x03 + +/* qdev->flags definitions. */ +enum { QL_RESET_DONE = 1, /* Reset finished. */ + QL_RESET_ACTIVE = 2, /* Waiting for reset to finish. */ + QL_RESET_START = 3, /* Please reset the chip. */ + QL_RESET_PER_SCSI = 4, /* SCSI driver requests reset. */ + QL_TX_TIMEOUT = 5, /* Timeout in progress. */ + QL_LINK_MASTER = 6, /* This driver controls the link. */ + QL_ADAPTER_UP = 7, /* Adapter has been brought up. */ + QL_THREAD_UP = 8, /* This flag is available. */ + QL_LINK_UP = 9, /* Link Status. 
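
The MAX_OAL_CNT sizing above follows from the layout comment: the IOCB itself carries the skb head plus the first fragment, and each OAL chains four more fragments plus a continuation pointer, so the (MAX_SKB_FRAGS - 1) remaining fragments need their ceiling over four in OALs; the "+ 1" form rounds up, leaving one spare when the division is exact. A quick standalone check of the arithmetic (MAX_SKB_FRAGS = 17 is just an example value; the real one is configuration-dependent):

#include <stdio.h>

#define MAX_SKB_FRAGS 17			/* example value only */
#define MAX_OAL_CNT ((MAX_SKB_FRAGS - 1) / 4 + 1)

int main(void)
{
	/* 16 fragments remain after the IOCB's own entries; they fit
	 * in 4 OALs of 4 fragments each, and the macro yields 5. */
	printf("frags=%d oals=%d\n", MAX_SKB_FRAGS, MAX_OAL_CNT);
	return 0;
}
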
*/ + QL_ALLOC_REQ_RSP_Q_DONE = 10, + QL_ALLOC_BUFQS_DONE = 11, + QL_ALLOC_SMALL_BUF_DONE = 12, + QL_LINK_OPTICAL = 13, + QL_MSI_ENABLED = 14, +}; + +/* + * ql3_adapter - The main Adapter structure definition. + * This structure has all fields relevant to the hardware. + */ + +struct ql3_adapter { + u32 reserved_00; + unsigned long flags; + + /* PCI Configuration information for this device */ + struct pci_dev *pdev; + struct net_device *ndev; /* Parent NET device */ + + struct napi_struct napi; + + /* Hardware information */ + u8 chip_rev_id; + u8 pci_slot; + u8 pci_width; + u8 pci_x; + u32 msi; + int index; + struct timer_list adapter_timer; /* timer used for various functions */ + + spinlock_t adapter_lock; + spinlock_t hw_lock; + + /* PCI Bus Relative Register Addresses */ + u8 __iomem *mmap_virt_base; /* stores return value from ioremap() */ + struct ql3xxx_port_registers __iomem *mem_map_registers; + u32 current_page; /* tracks current register page */ + + u32 msg_enable; + u8 reserved_01[2]; + u8 reserved_02[2]; + + /* Page for Shadow Registers */ + void *shadow_reg_virt_addr; + dma_addr_t shadow_reg_phy_addr; + + /* Net Request Queue */ + u32 req_q_size; + u32 reserved_03; + struct ob_mac_iocb_req *req_q_virt_addr; + dma_addr_t req_q_phy_addr; + u16 req_producer_index; + u16 reserved_04; + u16 *preq_consumer_index; + u32 req_consumer_index_phy_addr_high; + u32 req_consumer_index_phy_addr_low; + atomic_t tx_count; + struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES]; + + /* Net Response Queue */ + u32 rsp_q_size; + u32 eeprom_cmd_data; + struct net_rsp_iocb *rsp_q_virt_addr; + dma_addr_t rsp_q_phy_addr; + struct net_rsp_iocb *rsp_current; + u16 rsp_consumer_index; + u16 reserved_06; + volatile __le32 *prsp_producer_index; + u32 rsp_producer_index_phy_addr_high; + u32 rsp_producer_index_phy_addr_low; + + /* Large Buffer Queue */ + u32 lrg_buf_q_alloc_size; + u32 lrg_buf_q_size; + void *lrg_buf_q_alloc_virt_addr; + void *lrg_buf_q_virt_addr; + dma_addr_t lrg_buf_q_alloc_phy_addr; + dma_addr_t lrg_buf_q_phy_addr; + u32 lrg_buf_q_producer_index; + u32 lrg_buf_release_cnt; + struct bufq_addr_element *lrg_buf_next_free; + u32 num_large_buffers; + u32 num_lbufq_entries; + + /* Large (Receive) Buffers */ + struct ql_rcv_buf_cb *lrg_buf; + struct ql_rcv_buf_cb *lrg_buf_free_head; + struct ql_rcv_buf_cb *lrg_buf_free_tail; + u32 lrg_buf_free_count; + u32 lrg_buffer_len; + u32 lrg_buf_index; + u32 lrg_buf_skb_check; + + /* Small Buffer Queue */ + u32 small_buf_q_alloc_size; + u32 small_buf_q_size; + u32 small_buf_q_producer_index; + void *small_buf_q_alloc_virt_addr; + void *small_buf_q_virt_addr; + dma_addr_t small_buf_q_alloc_phy_addr; + dma_addr_t small_buf_q_phy_addr; + u32 small_buf_index; + + /* Small (Receive) Buffers */ + void *small_buf_virt_addr; + dma_addr_t small_buf_phy_addr; + u32 small_buf_phy_addr_low; + u32 small_buf_phy_addr_high; + u32 small_buf_release_cnt; + u32 small_buf_total_size; + + struct eeprom_data nvram_data; + u32 port_link_state; + + /* 4022 specific */ + u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ + u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ + u32 mac_ob_opcode; /* Opcode to use on mac transmission */ + u32 mb_bit_mask; /* MA Bits mask to use on transmission */ + u32 numPorts; + struct workqueue_struct *workqueue; + struct delayed_work reset_work; + struct delayed_work tx_timeout_work; + struct delayed_work link_state_work; + u32 max_frame_size; + u32 device_id; + u16 phyType; 
+}; + +#endif /* _QLA3XXX_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile new file mode 100644 index 000000000000..ddba83ef3f44 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices +# + +obj-$(CONFIG_QLCNIC) := qlcnic.o + +qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \ + qlcnic_ethtool.o qlcnic_ctx.o diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h new file mode 100644 index 000000000000..53c6e5dcf26c --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -0,0 +1,1555 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2010 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#ifndef _QLCNIC_H_ +#define _QLCNIC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include "qlcnic_hdr.h" + +#define _QLCNIC_LINUX_MAJOR 5 +#define _QLCNIC_LINUX_MINOR 0 +#define _QLCNIC_LINUX_SUBVERSION 22 +#define QLCNIC_LINUX_VERSIONID "5.0.22" +#define QLCNIC_DRV_IDC_VER 0x01 +#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ + (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) + +#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) +#define _major(v) (((v) >> 24) & 0xff) +#define _minor(v) (((v) >> 16) & 0xff) +#define _build(v) ((v) & 0xffff) + +/* version in image has weird encoding: + * 7:0 - major + * 15:8 - minor + * 31:16 - build (little endian) + */ +#define QLCNIC_DECODE_VERSION(v) \ + QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) + +#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2) +#define QLCNIC_NUM_FLASH_SECTORS (64) +#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024) +#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \ + * QLCNIC_FLASH_SECTOR_SIZE) + +#define RCV_DESC_RINGSIZE(rds_ring) \ + (sizeof(struct rcv_desc) * (rds_ring)->num_desc) +#define RCV_BUFF_RINGSIZE(rds_ring) \ + (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc) +#define STATUS_DESC_RINGSIZE(sds_ring) \ + (sizeof(struct status_desc) * (sds_ring)->num_desc) +#define TX_BUFF_RINGSIZE(tx_ring) \ + (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc) +#define TX_DESC_RINGSIZE(tx_ring) \ + (sizeof(struct cmd_desc_type0) * tx_ring->num_desc) + +#define QLCNIC_P3P_A0 0x50 + +#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0) + +#define FIRST_PAGE_GROUP_START 0 +#define FIRST_PAGE_GROUP_END 0x100000 + +#define P3P_MAX_MTU (9600) +#define P3P_MIN_MTU (68) +#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */ + +#define QLCNIC_P3P_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN) +#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU) +#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048 +#define QLCNIC_LRO_BUFFER_EXTRA 2048 + +/* Opcodes to be used with the commands */ +#define TX_ETHER_PKT 0x01 +#define TX_TCP_PKT 0x02 +#define TX_UDP_PKT 0x03 +#define TX_IP_PKT 0x04 +#define TX_TCP_LSO 0x05 +#define TX_TCP_LSO6 0x06 +#define TX_TCPV6_PKT 0x0b +#define TX_UDPV6_PKT 0x0c + +/* Tx defines */ +#define QLCNIC_MAX_FRAGS_PER_TX 14 +#define MAX_TSO_HEADER_DESC 2 +#define MGMT_CMD_DESC_RESV 4 +#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ + + MGMT_CMD_DESC_RESV) +#define 
QLCNIC_MAX_TX_TIMEOUTS 2 + +/* + * Following are the states of the Phantom. The Phantom will set them and + * the host will read to check if the fields are correct. + */ +#define PHAN_INITIALIZE_FAILED 0xffff +#define PHAN_INITIALIZE_COMPLETE 0xff01 + +/* Host writes the following to notify that it has done the init-handshake */ +#define PHAN_INITIALIZE_ACK 0xf00f +#define PHAN_PEG_RCV_INITIALIZED 0xff01 + +#define NUM_RCV_DESC_RINGS 3 + +#define RCV_RING_NORMAL 0 +#define RCV_RING_JUMBO 1 + +#define MIN_CMD_DESCRIPTORS 64 +#define MIN_RCV_DESCRIPTORS 64 +#define MIN_JUMBO_DESCRIPTORS 32 + +#define MAX_CMD_DESCRIPTORS 1024 +#define MAX_RCV_DESCRIPTORS_1G 4096 +#define MAX_RCV_DESCRIPTORS_10G 8192 +#define MAX_RCV_DESCRIPTORS_VF 2048 +#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512 +#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024 + +#define DEFAULT_RCV_DESCRIPTORS_1G 2048 +#define DEFAULT_RCV_DESCRIPTORS_10G 4096 +#define DEFAULT_RCV_DESCRIPTORS_VF 1024 +#define MAX_RDS_RINGS 2 + +#define get_next_index(index, length) \ + (((index) + 1) & ((length) - 1)) + +/* + * The following data structures describe the descriptors that will be used. + * The tcp_hdr_offset and ip_hdr_offset fields need to be set only when + * we are doing LSO (packets larger than the standard 1500-byte MTU). + */ + +#define FLAGS_VLAN_TAGGED 0x10 +#define FLAGS_VLAN_OOB 0x40 + +#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \ + (cmd_desc)->vlan_TCI = cpu_to_le16(v); +#define qlcnic_set_cmd_desc_port(cmd_desc, var) \ + ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) +#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \ + ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0)) + +#define qlcnic_set_tx_port(_desc, _port) \ + ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)) + +#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \ + ((_desc)->flags_opcode |= \ + cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))) + +#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \ + ((_desc)->nfrags__length = \ + cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8))) + +struct cmd_desc_type0 { + u8 tcp_hdr_offset; /* For LSO only */ + u8 ip_hdr_offset; /* For LSO only */ + __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */ + __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */ + + __le64 addr_buffer2; + + __le16 reference_handle; + __le16 mss; + u8 port_ctxid; /* 7:4 ctxid 3:0 port */ + u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ + __le16 conn_id; /* IPSec offload only */ + + __le64 addr_buffer3; + __le64 addr_buffer1; + + __le16 buffer_length[4]; + + __le64 addr_buffer4; + + u8 eth_addr[ETH_ALEN]; + __le16 vlan_TCI; + +} __attribute__ ((aligned(64))); + +/* Note: sizeof(rcv_desc) should always be a multiple of 2 */ +struct rcv_desc { + __le16 reference_handle; + __le16 reserved; + __le32 buffer_length; /* allocated buffer length (usually 2K) */ + __le64 addr_buffer; +} __packed; + +/* opcode field in status_desc */ +#define QLCNIC_SYN_OFFLOAD 0x03 +#define QLCNIC_RXPKT_DESC 0x04 +#define QLCNIC_OLD_RXPKT_DESC 0x3f +#define QLCNIC_RESPONSE_DESC 0x05 +#define QLCNIC_LRO_DESC 0x12 + +/* for status field in status_desc */ +#define STATUS_CKSUM_LOOP 0 +#define STATUS_CKSUM_OK 2 + +/* owner bits of status_desc */ +#define STATUS_OWNER_HOST (0x1ULL << 56) +#define STATUS_OWNER_PHANTOM (0x2ULL << 56) + +/* Status descriptor: + 0-3 port, 4-7 status, 8-11 type, 12-27 total_length + 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset + 53-55 desc_cnt, 56-57 owner, 58-63 opcode + */ +#define 
qlcnic_get_sts_port(sts_data) \ + ((sts_data) & 0x0F) +#define qlcnic_get_sts_status(sts_data) \ + (((sts_data) >> 4) & 0x0F) +#define qlcnic_get_sts_type(sts_data) \ + (((sts_data) >> 8) & 0x0F) +#define qlcnic_get_sts_totallength(sts_data) \ + (((sts_data) >> 12) & 0xFFFF) +#define qlcnic_get_sts_refhandle(sts_data) \ + (((sts_data) >> 28) & 0xFFFF) +#define qlcnic_get_sts_prot(sts_data) \ + (((sts_data) >> 44) & 0x0F) +#define qlcnic_get_sts_pkt_offset(sts_data) \ + (((sts_data) >> 48) & 0x1F) +#define qlcnic_get_sts_desc_cnt(sts_data) \ + (((sts_data) >> 53) & 0x7) +#define qlcnic_get_sts_opcode(sts_data) \ + (((sts_data) >> 58) & 0x03F) + +#define qlcnic_get_lro_sts_refhandle(sts_data) \ + ((sts_data) & 0x0FFFF) +#define qlcnic_get_lro_sts_length(sts_data) \ + (((sts_data) >> 16) & 0x0FFFF) +#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \ + (((sts_data) >> 32) & 0x0FF) +#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \ + (((sts_data) >> 40) & 0x0FF) +#define qlcnic_get_lro_sts_timestamp(sts_data) \ + (((sts_data) >> 48) & 0x1) +#define qlcnic_get_lro_sts_type(sts_data) \ + (((sts_data) >> 49) & 0x7) +#define qlcnic_get_lro_sts_push_flag(sts_data) \ + (((sts_data) >> 52) & 0x1) +#define qlcnic_get_lro_sts_seq_number(sts_data) \ + ((sts_data) & 0x0FFFFFFFF) + + +struct status_desc { + __le64 status_desc_data[2]; +} __attribute__ ((aligned(16))); + +/* UNIFIED ROMIMAGE */ +#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000 +#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0 +#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6 +#define QLCNIC_UNI_DIR_SECT_FW 0x7 + +/* Offsets */ +#define QLCNIC_UNI_CHIP_REV_OFF 10 +#define QLCNIC_UNI_FLAGS_OFF 11 +#define QLCNIC_UNI_BIOS_VERSION_OFF 12 +#define QLCNIC_UNI_BOOTLD_IDX_OFF 27 +#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29 + +struct uni_table_desc { + u32 findex; + u32 num_entries; + u32 entry_size; + u32 reserved[5]; +}; + +struct uni_data_desc { + u32 findex; + u32 size; + u32 reserved[5]; +}; + +/* Flash Defines and Structures */ +#define QLCNIC_FLT_LOCATION 0x3F1000 +#define QLCNIC_FW_IMAGE_REGION 0x74 +#define QLCNIC_BOOTLD_REGION 0x72 +struct qlcnic_flt_header { + u16 version; + u16 len; + u16 checksum; + u16 reserved; +}; + +struct qlcnic_flt_entry { + u8 region; + u8 reserved0; + u8 attrib; + u8 reserved1; + u32 size; + u32 start_addr; + u32 end_addr; +}; + +/* Magic number to let user know flash is programmed */ +#define QLCNIC_BDINFO_MAGIC 0x12345678 + +#define QLCNIC_BRDTYPE_P3P_REF_QG 0x0021 +#define QLCNIC_BRDTYPE_P3P_HMEZ 0x0022 +#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP 0x0023 +#define QLCNIC_BRDTYPE_P3P_4_GB 0x0024 +#define QLCNIC_BRDTYPE_P3P_IMEZ 0x0025 +#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS 0x0026 +#define QLCNIC_BRDTYPE_P3P_10000_BASE_T 0x0027 +#define QLCNIC_BRDTYPE_P3P_XG_LOM 0x0028 +#define QLCNIC_BRDTYPE_P3P_4_GB_MM 0x0029 +#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT 0x002a +#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT 0x002b +#define QLCNIC_BRDTYPE_P3P_10G_CX4 0x0031 +#define QLCNIC_BRDTYPE_P3P_10G_XFP 0x0032 +#define QLCNIC_BRDTYPE_P3P_10G_TP 0x0080 + +#define QLCNIC_MSIX_TABLE_OFFSET 0x44 + +/* Flash memory map */ +#define QLCNIC_BRDCFG_START 0x4000 /* board config */ +#define QLCNIC_BOOTLD_START 0x10000 /* bootld */ +#define QLCNIC_IMAGE_START 0x43000 /* compressed image */ +#define QLCNIC_USER_START 0x3E8000 /* Firmware info */ + +#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) +#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) +#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c) +#define QLCNIC_BIOS_VERSION_OFFSET 
(QLCNIC_USER_START+0x83c) + +#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8) +#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128) + +#define QLCNIC_FW_MIN_SIZE (0x3fffff) +#define QLCNIC_UNIFIED_ROMIMAGE 0 +#define QLCNIC_FLASH_ROMIMAGE 1 +#define QLCNIC_UNKNOWN_ROMIMAGE 0xff + +#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin" +#define QLCNIC_FLASH_ROMIMAGE_NAME "flash" + +extern char qlcnic_driver_name[]; + +/* Number of status descriptors to handle per interrupt */ +#define MAX_STATUS_HANDLE (64) + +/* + * qlcnic_skb_frag{} contains the mapping info for each SG list. This + * has to be freed when DMA is complete. This is part of qlcnic_tx_buffer{}. + */ +struct qlcnic_skb_frag { + u64 dma; + u64 length; +}; + +/* Following defines are for the state of the buffers */ +#define QLCNIC_BUFFER_FREE 0 +#define QLCNIC_BUFFER_BUSY 1 + +/* + * There will be one qlcnic_buffer per skb packet. These will be + * used to save the dma info for pci_unmap_page() + */ +struct qlcnic_cmd_buffer { + struct sk_buff *skb; + struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1]; + u32 frag_count; +}; + +/* In rx_buffer, we do not need multiple fragments as it is a single buffer */ +struct qlcnic_rx_buffer { + u16 ref_handle; + struct sk_buff *skb; + struct list_head list; + u64 dma; +}; + +/* Board types */ +#define QLCNIC_GBE 0x01 +#define QLCNIC_XGBE 0x02 + +/* + * Interrupt coalescing defaults. The defaults are for 1500 MTU. They are + * adjusted based on the configured MTU. + */ +#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3 +#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256 + +#define QLCNIC_INTR_DEFAULT 0x04 +#define QLCNIC_CONFIG_INTR_COALESCE 3 + +struct qlcnic_nic_intr_coalesce { + u8 type; + u8 sts_ring_mask; + u16 rx_packets; + u16 rx_time_us; + u16 flag; + u32 timer_out; +}; + +struct qlcnic_dump_template_hdr { + __le32 type; + __le32 offset; + __le32 size; + __le32 cap_mask; + __le32 num_entries; + __le32 version; + __le32 timestamp; + __le32 checksum; + __le32 drv_cap_mask; + __le32 sys_info[3]; + __le32 saved_state[16]; + __le32 cap_sizes[8]; + __le32 rsvd[0]; +}; + +struct qlcnic_fw_dump { + u8 clr; /* flag to indicate if dump is cleared */ + u8 enable; /* enable/disable dump */ + u32 size; /* total size of the dump */ + void *data; /* dump data area */ + struct qlcnic_dump_template_hdr *tmpl_hdr; +}; + +/* + * One hardware_context{} per adapter + * contains interrupt info as well as shared hardware info. + */ +struct qlcnic_hardware_context { + void __iomem *pci_base0; + void __iomem *ocm_win_crb; + + unsigned long pci_len0; + + rwlock_t crb_lock; + struct mutex mem_lock; + + u8 revision_id; + u8 pci_func; + u8 linkup; + u8 loopback_state; + u16 port_type; + u16 board_type; + + struct qlcnic_nic_intr_coalesce coal; + struct qlcnic_fw_dump fw_dump; +}; + +struct qlcnic_adapter_stats { + u64 xmitcalled; + u64 xmitfinished; + u64 rxdropped; + u64 txdropped; + u64 csummed; + u64 rx_pkts; + u64 lro_pkts; + u64 rxbytes; + u64 txbytes; + u64 lrobytes; + u64 lso_frames; + u64 xmit_on; + u64 xmit_off; + u64 skb_alloc_failure; + u64 null_rxbuf; + u64 rx_dma_map_error; + u64 tx_dma_map_error; +}; + +/* + * Rcv Descriptor Context. One such per Rcv Descriptor. There may + * be one Rcv Descriptor for normal packets, one for jumbo, and there may be others. 
+ */ +struct qlcnic_host_rds_ring { + void __iomem *crb_rcv_producer; + struct rcv_desc *desc_head; + struct qlcnic_rx_buffer *rx_buf_arr; + u32 num_desc; + u32 producer; + u32 dma_size; + u32 skb_size; + u32 flags; + struct list_head free_list; + spinlock_t lock; + dma_addr_t phys_addr; +} ____cacheline_internodealigned_in_smp; + +struct qlcnic_host_sds_ring { + u32 consumer; + u32 num_desc; + void __iomem *crb_sts_consumer; + + struct status_desc *desc_head; + struct qlcnic_adapter *adapter; + struct napi_struct napi; + struct list_head free_list[NUM_RCV_DESC_RINGS]; + + void __iomem *crb_intr_mask; + int irq; + + dma_addr_t phys_addr; + char name[IFNAMSIZ+4]; +} ____cacheline_internodealigned_in_smp; + +struct qlcnic_host_tx_ring { + u32 producer; + u32 sw_consumer; + u32 num_desc; + void __iomem *crb_cmd_producer; + struct cmd_desc_type0 *desc_head; + struct qlcnic_cmd_buffer *cmd_buf_arr; + __le32 *hw_consumer; + + dma_addr_t phys_addr; + dma_addr_t hw_cons_phys_addr; + struct netdev_queue *txq; +} ____cacheline_internodealigned_in_smp; + +/* + * Receive context. There is one such structure per instance of the + * receive processing. Any state information that is relevant to + * receive processing must be in this structure. The global data may be + * present elsewhere. + */ +struct qlcnic_recv_context { + struct qlcnic_host_rds_ring *rds_rings; + struct qlcnic_host_sds_ring *sds_rings; + u32 state; + u16 context_id; + u16 virt_port; + +}; + +/* HW context creation */ + +#define QLCNIC_OS_CRB_RETRY_COUNT 4000 +#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \ + (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16)) + +#define QLCNIC_CDRP_CMD_BIT 0x80000000 + +/* + * All responses must have the QLCNIC_CDRP_CMD_BIT cleared + * in the crb QLCNIC_CDRP_CRB_OFFSET. + */ +#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp) +#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0) + +#define QLCNIC_CDRP_RSP_OK 0x00000001 +#define QLCNIC_CDRP_RSP_FAIL 0x00000002 +#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003 + +/* + * All commands must have the QLCNIC_CDRP_CMD_BIT set in + * the crb QLCNIC_CDRP_CRB_OFFSET. 
+ */ +#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd)) +#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0) + +#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001 +#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002 +#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003 +#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004 +#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005 +#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006 +#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007 +#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008 +#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009 +#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a +#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012 +#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013 +#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014 +#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015 +#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016 +#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017 +#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018 +#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019 +#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f + +#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020 +#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021 +#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022 +#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024 +#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025 +#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026 +#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027 +#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028 +#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029 +#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a +#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E +#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f +#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030 + +#define QLCNIC_RCODE_SUCCESS 0 +#define QLCNIC_RCODE_NOT_SUPPORTED 9 +#define QLCNIC_RCODE_TIMEOUT 17 +#define QLCNIC_DESTROY_CTX_RESET 0 + +/* + * Capabilities Announced + */ +#define QLCNIC_CAP0_LEGACY_CONTEXT (1) +#define QLCNIC_CAP0_LEGACY_MN (1 << 2) +#define QLCNIC_CAP0_LSO (1 << 6) +#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7) +#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) +#define QLCNIC_CAP0_VALIDOFF (1 << 11) + +/* + * Context state + */ +#define QLCNIC_HOST_CTX_STATE_FREED 0 +#define QLCNIC_HOST_CTX_STATE_ACTIVE 2 + +/* + * Rx context + */ + +struct qlcnic_hostrq_sds_ring { + __le64 host_phys_addr; /* Ring base addr */ + __le32 ring_size; /* Ring entries */ + __le16 msi_index; + __le16 rsvd; /* Padding */ +} __packed; + +struct qlcnic_hostrq_rds_ring { + __le64 host_phys_addr; /* Ring base addr */ + __le64 buff_size; /* Packet buffer size */ + __le32 ring_size; /* Ring entries */ + __le32 ring_kind; /* Class of ring */ +} __packed; + +struct qlcnic_hostrq_rx_ctx { + __le64 host_rsp_dma_addr; /* Response dma'd here */ + __le32 capabilities[4]; /* Flag bit vector */ + __le32 host_int_crb_mode; /* Interrupt crb usage */ + __le32 host_rds_crb_mode; /* RDS crb usage */ + /* These ring offsets are relative to data[0] below */ + __le32 rds_ring_offset; /* Offset to RDS config */ + __le32 sds_ring_offset; /* Offset to SDS config */ + __le16 num_rds_rings; /* Count of RDS rings */ + __le16 num_sds_rings; /* Count of SDS rings */ + __le16 valid_field_offset; + u8 txrx_sds_binding; + u8 msix_handler; + u8 reserved[128]; /* reserve space for future expansion*/ + /* MUST BE 64-bit aligned. 
+ The following is packed: + - N hostrq_rds_rings + - N hostrq_sds_rings */ + char data[0]; +} __packed; + +struct qlcnic_cardrsp_rds_ring { + __le32 host_producer_crb; /* Crb to use */ + __le32 rsvd1; /* Padding */ +} __packed; + +struct qlcnic_cardrsp_sds_ring { + __le32 host_consumer_crb; /* Crb to use */ + __le32 interrupt_crb; /* Crb to use */ +} __packed; + +struct qlcnic_cardrsp_rx_ctx { + /* These ring offsets are relative to data[0] below */ + __le32 rds_ring_offset; /* Offset to RDS config */ + __le32 sds_ring_offset; /* Offset to SDS config */ + __le32 host_ctx_state; /* Starting State */ + __le32 num_fn_per_port; /* How many PCI fn share the port */ + __le16 num_rds_rings; /* Count of RDS rings */ + __le16 num_sds_rings; /* Count of SDS rings */ + __le16 context_id; /* Handle for context */ + u8 phys_port; /* Physical id of port */ + u8 virt_port; /* Virtual/Logical id of port */ + u8 reserved[128]; /* save space for future expansion */ + /* MUST BE 64-bit aligned. + The following is packed: + - N cardrsp_rds_rings + - N cardrsp_sds_rings */ + char data[0]; +} __packed; + +#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \ + (sizeof(HOSTRQ_RX) + \ + (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \ + (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring))) + +#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \ + (sizeof(CARDRSP_RX) + \ + (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \ + (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring))) + +/* + * Tx context + */ + +struct qlcnic_hostrq_cds_ring { + __le64 host_phys_addr; /* Ring base addr */ + __le32 ring_size; /* Ring entries */ + __le32 rsvd; /* Padding */ +} __packed; + +struct qlcnic_hostrq_tx_ctx { + __le64 host_rsp_dma_addr; /* Response dma'd here */ + __le64 cmd_cons_dma_addr; /* */ + __le64 dummy_dma_addr; /* */ + __le32 capabilities[4]; /* Flag bit vector */ + __le32 host_int_crb_mode; /* Interrupt crb usage */ + __le32 rsvd1; /* Padding */ + __le16 rsvd2; /* Padding */ + __le16 interrupt_ctl; + __le16 msi_index; + __le16 rsvd3; /* Padding */ + struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */ + u8 reserved[128]; /* future expansion */ +} __packed; + +struct qlcnic_cardrsp_cds_ring { + __le32 host_producer_crb; /* Crb to use */ + __le32 interrupt_crb; /* Crb to use */ +} __packed; + +struct qlcnic_cardrsp_tx_ctx { + __le32 host_ctx_state; /* Starting state */ + __le16 context_id; /* Handle for context */ + u8 phys_port; /* Physical id of port */ + u8 virt_port; /* Virtual/Logical id of port */ + struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */ + u8 reserved[128]; /* future expansion */ +} __packed; + +#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX)) +#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX)) + +/* CRB */ + +#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0 +#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1 +#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2 +#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3 + +#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0 +#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1 +#define QLCNIC_HOST_INT_CRB_MODE_NORX 2 +#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3 +#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4 + + +/* MAC */ + +#define MC_COUNT_P3P 38 + +#define QLCNIC_MAC_NOOP 0 +#define QLCNIC_MAC_ADD 1 +#define QLCNIC_MAC_DEL 2 +#define QLCNIC_MAC_VLAN_ADD 3 +#define QLCNIC_MAC_VLAN_DEL 4 + +struct qlcnic_mac_list_s { + struct list_head list; + uint8_t mac_addr[ETH_ALEN+2]; +}; + +#define QLCNIC_HOST_REQUEST 0x13 +#define QLCNIC_REQUEST 0x14 + +#define 
QLCNIC_MAC_EVENT 0x1 + +#define QLCNIC_IP_UP 2 +#define QLCNIC_IP_DOWN 3 + +#define QLCNIC_ILB_MODE 0x1 +#define QLCNIC_ELB_MODE 0x2 + +#define QLCNIC_LINKEVENT 0x1 +#define QLCNIC_LB_RESPONSE 0x2 +#define QLCNIC_IS_LB_CONFIGURED(VAL) \ + (VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE)) + +/* + * Driver --> Firmware + */ +#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1 +#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3 +#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4 +#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7 +#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc +#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12 + +#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15 +#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17 +#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18 +#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 0x13 + +/* + * Firmware --> Driver + */ + +#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f +#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141 + +#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ +#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ +#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ + +#define QLCNIC_LRO_REQUEST_CLEANUP 4 + +/* Capabilities received */ +#define QLCNIC_FW_CAPABILITY_TSO BIT_1 +#define QLCNIC_FW_CAPABILITY_BDG BIT_8 +#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9 +#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10 +#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27 + +/* module types */ +#define LINKEVENT_MODULE_NOT_PRESENT 1 +#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2 +#define LINKEVENT_MODULE_OPTICAL_SRLR 3 +#define LINKEVENT_MODULE_OPTICAL_LRM 4 +#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5 +#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6 +#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7 +#define LINKEVENT_MODULE_TWINAX 8 + +#define LINKSPEED_10GBPS 10000 +#define LINKSPEED_1GBPS 1000 +#define LINKSPEED_100MBPS 100 +#define LINKSPEED_10MBPS 10 + +#define LINKSPEED_ENCODED_10MBPS 0 +#define LINKSPEED_ENCODED_100MBPS 1 +#define LINKSPEED_ENCODED_1GBPS 2 + +#define LINKEVENT_AUTONEG_DISABLED 0 +#define LINKEVENT_AUTONEG_ENABLED 1 + +#define LINKEVENT_HALF_DUPLEX 0 +#define LINKEVENT_FULL_DUPLEX 1 + +#define LINKEVENT_LINKSPEED_MBPS 0 +#define LINKEVENT_LINKSPEED_ENCODED 1 + +/* firmware response header: + * 63:58 - message type + * 57:56 - owner + * 55:53 - desc count + * 52:48 - reserved + * 47:40 - completion id + * 39:32 - opcode + * 31:16 - error code + * 15:00 - reserved + */ +#define qlcnic_get_nic_msg_opcode(msg_hdr) \ + ((msg_hdr >> 32) & 0xFF) + +struct qlcnic_fw_msg { + union { + struct { + u64 hdr; + u64 body[7]; + }; + u64 words[8]; + }; +}; + +struct qlcnic_nic_req { + __le64 qhdr; + __le64 req_hdr; + __le64 words[6]; +} __packed; + +struct qlcnic_mac_req { + u8 op; + u8 tag; + u8 mac_addr[6]; +}; + +struct qlcnic_vlan_req { + __le16 vlan_id; + __le16 rsvd[3]; +} __packed; + +struct qlcnic_ipaddr { + __be32 ipv4; + __be32 ipv6[4]; +}; + +#define QLCNIC_MSI_ENABLED 0x02 +#define QLCNIC_MSIX_ENABLED 0x04 +#define QLCNIC_LRO_ENABLED 0x08 +#define QLCNIC_LRO_DISABLED 0x00 +#define QLCNIC_BRIDGE_ENABLED 0x10 +#define QLCNIC_DIAG_ENABLED 0x20 +#define QLCNIC_ESWITCH_ENABLED 0x40 +#define QLCNIC_ADAPTER_INITIALIZED 0x80 +#define QLCNIC_TAGGING_ENABLED 0x100 +#define QLCNIC_MACSPOOF 0x200 +#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400 +#define QLCNIC_PROMISC_DISABLED 0x800 +#define QLCNIC_NEED_FLR 0x1000 +#define QLCNIC_FW_RESET_OWNER 0x2000 +#define QLCNIC_FW_HANG 0x4000 +#define QLCNIC_IS_MSI_FAMILY(adapter) \ + ((adapter)->flags & (QLCNIC_MSI_ENABLED | 
QLCNIC_MSIX_ENABLED)) + +#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 +#define QLCNIC_MSIX_TBL_SPACE 8192 +#define QLCNIC_PCI_REG_MSIX_TBL 0x44 +#define QLCNIC_MSIX_TBL_PGSIZE 4096 + +#define QLCNIC_NETDEV_WEIGHT 128 +#define QLCNIC_ADAPTER_UP_MAGIC 777 + +#define __QLCNIC_FW_ATTACHED 0 +#define __QLCNIC_DEV_UP 1 +#define __QLCNIC_RESETTING 2 +#define __QLCNIC_START_FW 4 +#define __QLCNIC_AER 5 +#define __QLCNIC_DIAG_RES_ALLOC 6 + +#define QLCNIC_INTERRUPT_TEST 1 +#define QLCNIC_LOOPBACK_TEST 2 +#define QLCNIC_LED_TEST 3 + +#define QLCNIC_FILTER_AGE 80 +#define QLCNIC_READD_AGE 20 +#define QLCNIC_LB_MAX_FILTERS 64 + +/* QLCNIC Driver Error Code */ +#define QLCNIC_FW_NOT_RESPOND 51 +#define QLCNIC_TEST_IN_PROGRESS 52 +#define QLCNIC_UNDEFINED_ERROR 53 +#define QLCNIC_LB_CABLE_NOT_CONN 54 + +struct qlcnic_filter { + struct hlist_node fnode; + u8 faddr[ETH_ALEN]; + __le16 vlan_id; + unsigned long ftime; +}; + +struct qlcnic_filter_hash { + struct hlist_head *fhead; + u8 fnum; + u8 fmax; +}; + +struct qlcnic_adapter { + struct qlcnic_hardware_context *ahw; + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_tx_ring *tx_ring; + struct net_device *netdev; + struct pci_dev *pdev; + + unsigned long state; + u32 flags; + + u16 num_txd; + u16 num_rxd; + u16 num_jumbo_rxd; + u16 max_rxd; + u16 max_jumbo_rxd; + + u8 max_rds_rings; + u8 max_sds_rings; + u8 msix_supported; + u8 portnum; + u8 physical_port; + u8 reset_context; + + u8 mc_enabled; + u8 max_mc_count; + u8 fw_wait_cnt; + u8 fw_fail_cnt; + u8 tx_timeo_cnt; + u8 need_fw_reset; + + u8 has_link_events; + u8 fw_type; + u16 tx_context_id; + u16 is_up; + + u16 link_speed; + u16 link_duplex; + u16 link_autoneg; + u16 module_type; + + u16 op_mode; + u16 switch_mode; + u16 max_tx_ques; + u16 max_rx_ques; + u16 max_mtu; + u16 pvid; + + u32 fw_hal_version; + u32 capabilities; + u32 irq; + u32 temp; + + u32 int_vec_bit; + u32 heartbeat; + + u8 max_mac_filters; + u8 dev_state; + u8 diag_test; + char diag_cnt; + u8 reset_ack_timeo; + u8 dev_init_timeo; + u16 msg_enable; + + u8 mac_addr[ETH_ALEN]; + + u64 dev_rst_time; + u8 mac_learn; + unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct qlcnic_npar_info *npars; + struct qlcnic_eswitch *eswitch; + struct qlcnic_nic_template *nic_ops; + + struct qlcnic_adapter_stats stats; + struct list_head mac_list; + + void __iomem *tgt_mask_reg; + void __iomem *tgt_status_reg; + void __iomem *crb_int_state_reg; + void __iomem *isr_int_vec; + + struct msix_entry *msix_entries; + + struct delayed_work fw_work; + + + struct qlcnic_filter_hash fhash; + + spinlock_t tx_clean_lock; + spinlock_t mac_learn_lock; + __le32 file_prd_off; /* File fw product offset */ + u32 fw_version; + const struct firmware *fw; +}; + +struct qlcnic_info { + __le16 pci_func; + __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */ + __le16 phys_port; + __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */ + + __le32 capabilities; + u8 max_mac_filters; + u8 reserved1; + __le16 max_mtu; + + __le16 max_tx_ques; + __le16 max_rx_ques; + __le16 min_tx_bw; + __le16 max_tx_bw; + u8 reserved2[104]; +} __packed; + +struct qlcnic_pci_info { + __le16 id; /* pci function id */ + __le16 active; /* 1 = Enabled */ + __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */ + __le16 default_port; /* default port number */ + + __le16 tx_min_bw; /* Multiple of 100 Mbps */ + __le16 tx_max_bw; + __le16 reserved1[2]; + + u8 mac[ETH_ALEN]; + u8 reserved2[106]; +} __packed; + +struct qlcnic_npar_info { + u16 pvid; + u16 min_bw; + u16 max_bw; + u8 phy_port; + u8 type; + 
u8 active; + u8 enable_pm; + u8 dest_npar; + u8 discard_tagged; + u8 mac_override; + u8 mac_anti_spoof; + u8 promisc_mode; + u8 offload_flags; +}; + +struct qlcnic_eswitch { + u8 port; + u8 active_vports; + u8 active_vlans; + u8 active_ucast_filters; + u8 max_ucast_filters; + u8 max_active_vlans; + + u32 flags; +#define QLCNIC_SWITCH_ENABLE BIT_1 +#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2 +#define QLCNIC_SWITCH_PROMISC_MODE BIT_3 +#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4 +}; + + +/* Return codes for Error handling */ +#define QL_STATUS_INVALID_PARAM -1 + +#define MAX_BW 100 /* % of link speed */ +#define MAX_VLAN_ID 4095 +#define MIN_VLAN_ID 2 +#define DEFAULT_MAC_LEARN 1 + +#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID) +#define IS_VALID_BW(bw) (bw <= MAX_BW) + +struct qlcnic_pci_func_cfg { + u16 func_type; + u16 min_bw; + u16 max_bw; + u16 port_num; + u8 pci_func; + u8 func_state; + u8 def_mac_addr[6]; +}; + +struct qlcnic_npar_func_cfg { + u32 fw_capab; + u16 port_num; + u16 min_bw; + u16 max_bw; + u16 max_tx_queues; + u16 max_rx_queues; + u8 pci_func; + u8 op_mode; +}; + +struct qlcnic_pm_func_cfg { + u8 pci_func; + u8 action; + u8 dest_npar; + u8 reserved[5]; +}; + +struct qlcnic_esw_func_cfg { + u16 vlan_id; + u8 op_mode; + u8 op_type; + u8 pci_func; + u8 host_vlan_tag; + u8 promisc_mode; + u8 discard_tagged; + u8 mac_override; + u8 mac_anti_spoof; + u8 offload_flags; + u8 reserved[5]; +}; + +#define QLCNIC_STATS_VERSION 1 +#define QLCNIC_STATS_PORT 1 +#define QLCNIC_STATS_ESWITCH 2 +#define QLCNIC_QUERY_RX_COUNTER 0 +#define QLCNIC_QUERY_TX_COUNTER 1 +#define QLCNIC_ESW_STATS_NOT_AVAIL 0xffffffffffffffffULL + +#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\ +do { \ + if (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) && \ + ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ + (VAL1) = (VAL2); \ + else if (((VAL1) != QLCNIC_ESW_STATS_NOT_AVAIL) && \ + ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ + (VAL1) += (VAL2); \ +} while (0) + +struct __qlcnic_esw_statistics { + __le16 context_id; + __le16 version; + __le16 size; + __le16 unused; + __le64 unicast_frames; + __le64 multicast_frames; + __le64 broadcast_frames; + __le64 dropped_frames; + __le64 errors; + __le64 local_frames; + __le64 numbytes; + __le64 rsvd[3]; +} __packed; + +struct qlcnic_esw_statistics { + struct __qlcnic_esw_statistics rx; + struct __qlcnic_esw_statistics tx; +}; + +struct qlcnic_common_entry_hdr { + __le32 type; + __le32 offset; + __le32 cap_size; + u8 mask; + u8 rsvd[2]; + u8 flags; +} __packed; + +struct __crb { + __le32 addr; + u8 stride; + u8 rsvd1[3]; + __le32 data_size; + __le32 no_ops; + __le32 rsvd2[4]; +} __packed; + +struct __ctrl { + __le32 addr; + u8 stride; + u8 index_a; + __le16 timeout; + __le32 data_size; + __le32 no_ops; + u8 opcode; + u8 index_v; + u8 shl_val; + u8 shr_val; + __le32 val1; + __le32 val2; + __le32 val3; +} __packed; + +struct __cache { + __le32 addr; + __le16 stride; + __le16 init_tag_val; + __le32 size; + __le32 no_ops; + __le32 ctrl_addr; + __le32 ctrl_val; + __le32 read_addr; + u8 read_addr_stride; + u8 read_addr_num; + u8 rsvd1[2]; +} __packed; + +struct __ocm { + u8 rsvd[8]; + __le32 size; + __le32 no_ops; + u8 rsvd1[8]; + __le32 read_addr; + __le32 read_addr_stride; +} __packed; + +struct __mem { + u8 rsvd[24]; + __le32 addr; + __le32 size; +} __packed; + +struct __mux { + __le32 addr; + u8 rsvd[4]; + __le32 size; + __le32 no_ops; + __le32 val; + __le32 val_stride; + __le32 read_addr; + u8 rsvd2[4]; +} __packed; + +struct __queue { + __le32 sel_addr; + __le16 stride; + 
u8 rsvd[2]; + __le32 size; + __le32 no_ops; + u8 rsvd2[8]; + __le32 read_addr; + u8 read_addr_stride; + u8 read_addr_cnt; + u8 rsvd3[2]; +} __packed; + +struct qlcnic_dump_entry { + struct qlcnic_common_entry_hdr hdr; + union { + struct __crb crb; + struct __cache cache; + struct __ocm ocm; + struct __mem mem; + struct __mux mux; + struct __queue que; + struct __ctrl ctrl; + } region; +} __packed; + +enum op_codes { + QLCNIC_DUMP_NOP = 0, + QLCNIC_DUMP_READ_CRB = 1, + QLCNIC_DUMP_READ_MUX = 2, + QLCNIC_DUMP_QUEUE = 3, + QLCNIC_DUMP_BRD_CONFIG = 4, + QLCNIC_DUMP_READ_OCM = 6, + QLCNIC_DUMP_PEG_REG = 7, + QLCNIC_DUMP_L1_DTAG = 8, + QLCNIC_DUMP_L1_ITAG = 9, + QLCNIC_DUMP_L1_DATA = 11, + QLCNIC_DUMP_L1_INST = 12, + QLCNIC_DUMP_L2_DTAG = 21, + QLCNIC_DUMP_L2_ITAG = 22, + QLCNIC_DUMP_L2_DATA = 23, + QLCNIC_DUMP_L2_INST = 24, + QLCNIC_DUMP_READ_ROM = 71, + QLCNIC_DUMP_READ_MEM = 72, + QLCNIC_DUMP_READ_CTRL = 98, + QLCNIC_DUMP_TLHDR = 99, + QLCNIC_DUMP_RDEND = 255 +}; + +#define QLCNIC_DUMP_WCRB BIT_0 +#define QLCNIC_DUMP_RWCRB BIT_1 +#define QLCNIC_DUMP_ANDCRB BIT_2 +#define QLCNIC_DUMP_ORCRB BIT_3 +#define QLCNIC_DUMP_POLLCRB BIT_4 +#define QLCNIC_DUMP_RD_SAVE BIT_5 +#define QLCNIC_DUMP_WRT_SAVED BIT_6 +#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7 +#define QLCNIC_DUMP_SKIP BIT_7 + +#define QLCNIC_DUMP_MASK_MIN 3 +#define QLCNIC_DUMP_MASK_DEF 0x1f +#define QLCNIC_DUMP_MASK_MAX 0xff +#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed +#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed +#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed +#define QLCNIC_FORCE_FW_RESET 0xdeaddead + +struct qlcnic_dump_operations { + enum op_codes opcode; + u32 (*handler)(struct qlcnic_adapter *, + struct qlcnic_dump_entry *, u32 *); +}; + +int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); +int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config); + +u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off); +int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data); +int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data); +int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data); +void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *); +void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64); + +#define ADDR_IN_RANGE(addr, low, high) \ + (((addr) < (high)) && ((addr) >= (low))) + +#define QLCRD32(adapter, off) \ + (qlcnic_hw_read_wx_2M(adapter, off)) +#define QLCWR32(adapter, off, val) \ + (qlcnic_hw_write_wx_2M(adapter, off, val)) + +int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32); +void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int); + +#define qlcnic_rom_lock(a) \ + qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID) +#define qlcnic_rom_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 2) +#define qlcnic_phy_lock(a) \ + qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID) +#define qlcnic_phy_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 3) +#define qlcnic_api_lock(a) \ + qlcnic_pcie_sem_lock((a), 5, 0) +#define qlcnic_api_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 5) +#define qlcnic_sw_lock(a) \ + qlcnic_pcie_sem_lock((a), 6, 0) +#define qlcnic_sw_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 6) +#define crb_win_lock(a) \ + qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID) +#define crb_win_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 7) + +int qlcnic_get_board_info(struct qlcnic_adapter *adapter); +int qlcnic_wol_supported(struct qlcnic_adapter *adapter); +int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate); +void 
qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter); +void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter); +int qlcnic_dump_fw(struct qlcnic_adapter *); + +/* Functions from qlcnic_init.c */ +int qlcnic_load_firmware(struct qlcnic_adapter *adapter); +int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter); +void qlcnic_request_firmware(struct qlcnic_adapter *adapter); +void qlcnic_release_firmware(struct qlcnic_adapter *adapter); +int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter); +int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter); +int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter); + +int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp); +int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, + u8 *bytes, size_t size); +int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter); +void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter); + +void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32); + +int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter); +void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter); + +int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter); +void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter); + +void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter); +void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter); +void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter); + +int qlcnic_check_fw_status(struct qlcnic_adapter *adapter); +void qlcnic_watchdog_task(struct work_struct *work); +void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring); +int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max); +void qlcnic_set_multi(struct net_device *netdev); +void qlcnic_free_mac_list(struct qlcnic_adapter *adapter); +int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32); +int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter); +int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable); +int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd); +int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable); +void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup); + +int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); +int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); +u32 qlcnic_fix_features(struct net_device *netdev, u32 features); +int qlcnic_set_features(struct net_device *netdev, u32 features); +int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); +int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable); +int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); +void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring); +void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *); +void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); +void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter); +int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode); + +/* Functions from qlcnic_ethtool.c */ +int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]); + +/* Functions from qlcnic_main.c */ +int qlcnic_reset_context(struct qlcnic_adapter *); +u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter, + u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd); +void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); +int 
qlcnic_diag_alloc_res(struct net_device *netdev, int test); +netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val); +int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data); +void qlcnic_dev_request_reset(struct qlcnic_adapter *); +void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); + +/* Management functions */ +int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*); +int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); +int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); +int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*); + +/* eSwitch management functions */ +int qlcnic_config_switch_port(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); +int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); +int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8); +int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8, + struct __qlcnic_esw_statistics *); +int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8, + struct __qlcnic_esw_statistics *); +int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8); +extern int qlcnic_config_tso; + +/* + * QLOGIC Board information + */ + +#define QLCNIC_MAX_BOARD_NAME_LEN 100 +struct qlcnic_brdinfo { + unsigned short vendor; + unsigned short device; + unsigned short sub_vendor; + unsigned short sub_device; + char short_name[QLCNIC_MAX_BOARD_NAME_LEN]; +}; + +static const struct qlcnic_brdinfo qlcnic_boards[] = { + {0x1077, 0x8020, 0x1077, 0x203, + "8200 Series Single Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)"}, + {0x1077, 0x8020, 0x1077, 0x207, + "8200 Series Dual Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)"}, + {0x1077, 0x8020, 0x1077, 0x20b, + "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"}, + {0x1077, 0x8020, 0x1077, 0x20c, + "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"}, + {0x1077, 0x8020, 0x1077, 0x20f, + "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"}, + {0x1077, 0x8020, 0x103c, 0x3733, + "NC523SFP 10Gb 2-port Server Adapter"}, + {0x1077, 0x8020, 0x103c, 0x3346, + "CN1000Q Dual Port Converged Network Adapter"}, + {0x1077, 0x8020, 0x1077, 0x210, + "QME8242-k 10GbE Dual Port Mezzanine Card"}, + {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"}, +}; + +#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards) + +static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) +{ + if (likely(tx_ring->producer < tx_ring->sw_consumer)) + return tx_ring->sw_consumer - tx_ring->producer; + else + return tx_ring->sw_consumer + tx_ring->num_desc - + tx_ring->producer; +} + +extern const struct ethtool_ops qlcnic_ethtool_ops; + +struct qlcnic_nic_template { + int (*config_bridged_mode) (struct qlcnic_adapter *, u32); + int (*config_led) (struct qlcnic_adapter *, u32, u32); + int (*start_firmware) (struct qlcnic_adapter *); +}; + +#define QLCDB(adapter, lvl, _fmt, _args...) 
do { \ + if (NETIF_MSG_##lvl & adapter->msg_enable) \ + printk(KERN_INFO "%s: %s: " _fmt, \ + dev_name(&adapter->pdev->dev), \ + __func__, ##_args); \ + } while (0) + +#endif /* _QLCNIC_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c new file mode 100644 index 000000000000..b0d32ddd2ccb --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -0,0 +1,1117 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2010 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#include "qlcnic.h" + +static u32 +qlcnic_poll_rsp(struct qlcnic_adapter *adapter) +{ + u32 rsp; + int timeout = 0; + + do { + /* give at least 1 ms for firmware to respond */ + msleep(1); + + if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT) + return QLCNIC_CDRP_RSP_TIMEOUT; + + rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET); + } while (!QLCNIC_CDRP_IS_RSP(rsp)); + + return rsp; +} + +u32 +qlcnic_issue_cmd(struct qlcnic_adapter *adapter, + u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd) +{ + u32 rsp; + u32 signature; + u32 rcode = QLCNIC_RCODE_SUCCESS; + struct pci_dev *pdev = adapter->pdev; + + signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version); + + /* Acquire semaphore before accessing CRB */ + if (qlcnic_api_lock(adapter)) + return QLCNIC_RCODE_TIMEOUT; + + QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature); + QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1); + QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2); + QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3); + QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd)); + + rsp = qlcnic_poll_rsp(adapter); + + if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) { + dev_err(&pdev->dev, "card response timeout.\n"); + rcode = QLCNIC_RCODE_TIMEOUT; + } else if (rsp == QLCNIC_CDRP_RSP_FAIL) { + rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); + dev_err(&pdev->dev, "failed card response code:0x%x\n", + rcode); + } + + /* Release semaphore */ + qlcnic_api_unlock(adapter); + + return rcode; +} + +/* One's-complement checksum: sum the template as 32-bit words and + * fold the carries back in; a valid template makes ~sum come out zero. + */ +static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size) +{ + uint64_t sum = 0; + int count = temp_size / sizeof(uint32_t); + while (count-- > 0) + sum += *temp_buffer++; + while (sum >> 32) + sum = (sum & 0xFFFFFFFF) + (sum >> 32); + return ~sum; +} + +int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) +{ + int err, i; + u16 temp_size; + void *tmp_addr; + u32 version, csum, *template, *tmp_buf; + struct qlcnic_hardware_context *ahw; + struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl; + dma_addr_t tmp_addr_t = 0; + + ahw = adapter->ahw; + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + 0, + 0, + 0, + QLCNIC_CDRP_CMD_TEMP_SIZE); + if (err != QLCNIC_RCODE_SUCCESS) { + err = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); + dev_info(&adapter->pdev->dev, + "Can't get template size %d\n", err); + err = -EIO; + return err; + } + version = QLCRD32(adapter, QLCNIC_ARG3_CRB_OFFSET); + temp_size = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET); + if (!temp_size) + return -EIO; + + tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size, + &tmp_addr_t, GFP_KERNEL); + if (!tmp_addr) { + dev_err(&adapter->pdev->dev, + "Can't get memory for FW dump template\n"); + return -ENOMEM; + } + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + LSD(tmp_addr_t), + MSD(tmp_addr_t), + temp_size, + QLCNIC_CDRP_CMD_GET_TEMP_HDR); + + if (err != QLCNIC_RCODE_SUCCESS) { 
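+ /* Firmware accepted TEMP_SIZE but failed GET_TEMP_HDR; + * report the error and fall through to free the DMA buffer. */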
dev_err(&adapter->pdev->dev, + "Failed to get mini dump template header %d\n", err); + err = -EIO; + goto error; + } + tmp_tmpl = tmp_addr; + csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size); + if (csum) { + dev_err(&adapter->pdev->dev, + "Template header checksum validation failed\n"); + err = -EIO; + goto error; + } + ahw->fw_dump.tmpl_hdr = vzalloc(temp_size); + if (!ahw->fw_dump.tmpl_hdr) { + err = -EIO; + goto error; + } + tmp_buf = tmp_addr; + template = (u32 *) ahw->fw_dump.tmpl_hdr; + for (i = 0; i < temp_size/sizeof(u32); i++) + *template++ = __le32_to_cpu(*tmp_buf++); + + tmpl_hdr = ahw->fw_dump.tmpl_hdr; + tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; + ahw->fw_dump.enable = 1; +error: + dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t); + return err; +} + +int +qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) { + if (qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + recv_ctx->context_id, + mtu, + 0, + QLCNIC_CDRP_CMD_SET_MTU)) { + + dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); + return -EIO; + } + } + + return 0; +} + +static int +qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) +{ + void *addr; + struct qlcnic_hostrq_rx_ctx *prq; + struct qlcnic_cardrsp_rx_ctx *prsp; + struct qlcnic_hostrq_rds_ring *prq_rds; + struct qlcnic_hostrq_sds_ring *prq_sds; + struct qlcnic_cardrsp_rds_ring *prsp_rds; + struct qlcnic_cardrsp_sds_ring *prsp_sds; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + + dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; + u64 phys_addr; + + u8 i, nrds_rings, nsds_rings; + size_t rq_size, rsp_size; + u32 cap, reg, val, reg2; + int err; + + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + nrds_rings = adapter->max_rds_rings; + nsds_rings = adapter->max_sds_rings; + + rq_size = + SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings, + nsds_rings); + rsp_size = + SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings, + nsds_rings); + + addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, + &hostrq_phys_addr, GFP_KERNEL); + if (addr == NULL) + return -ENOMEM; + prq = addr; + + addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, + &cardrsp_phys_addr, GFP_KERNEL); + if (addr == NULL) { + err = -ENOMEM; + goto out_free_rq; + } + prsp = addr; + + prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); + + cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN + | QLCNIC_CAP0_VALIDOFF); + cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); + + prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx, + msix_handler); + prq->txrx_sds_binding = nsds_rings - 1; + + prq->capabilities[0] = cpu_to_le32(cap); + prq->host_int_crb_mode = + cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); + prq->host_rds_crb_mode = + cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE); + + prq->num_rds_rings = cpu_to_le16(nrds_rings); + prq->num_sds_rings = cpu_to_le16(nsds_rings); + prq->rds_ring_offset = 0; + + val = le32_to_cpu(prq->rds_ring_offset) + + (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings); + prq->sds_ring_offset = cpu_to_le32(val); + + prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data + + le32_to_cpu(prq->rds_ring_offset)); + + for (i = 0; i < nrds_rings; i++) { + + rds_ring = &recv_ctx->rds_rings[i]; + rds_ring->producer = 0; + + prq_rds[i].host_phys_addr = 
cpu_to_le64(rds_ring->phys_addr); + prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); + prq_rds[i].ring_kind = cpu_to_le32(i); + prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); + } + + prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data + + le32_to_cpu(prq->sds_ring_offset)); + + for (i = 0; i < nsds_rings; i++) { + + sds_ring = &recv_ctx->sds_rings[i]; + sds_ring->consumer = 0; + memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring)); + + prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); + prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); + prq_sds[i].msi_index = cpu_to_le16(i); + } + + phys_addr = hostrq_phys_addr; + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + (u32)(phys_addr >> 32), + (u32)(phys_addr & 0xffffffff), + rq_size, + QLCNIC_CDRP_CMD_CREATE_RX_CTX); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to create rx ctx in firmware%d\n", err); + goto out_free_rsp; + } + + + prsp_rds = ((struct qlcnic_cardrsp_rds_ring *) + &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); + + for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { + rds_ring = &recv_ctx->rds_rings[i]; + + reg = le32_to_cpu(prsp_rds[i].host_producer_crb); + rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg; + } + + prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) + &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); + + for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { + sds_ring = &recv_ctx->sds_rings[i]; + + reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); + reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); + + sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg; + sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2; + } + + recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); + recv_ctx->context_id = le16_to_cpu(prsp->context_id); + recv_ctx->virt_port = prsp->virt_port; + +out_free_rsp: + dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, + cardrsp_phys_addr); +out_free_rq: + dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); + return err; +} + +static void +qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + recv_ctx->context_id, + QLCNIC_DESTROY_CTX_RESET, + 0, + QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) { + + dev_err(&adapter->pdev->dev, + "Failed to destroy rx ctx in firmware\n"); + } + + recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED; +} + +static int +qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hostrq_tx_ctx *prq; + struct qlcnic_hostrq_cds_ring *prq_cds; + struct qlcnic_cardrsp_tx_ctx *prsp; + void *rq_addr, *rsp_addr; + size_t rq_size, rsp_size; + u32 temp; + int err; + u64 phys_addr; + dma_addr_t rq_phys_addr, rsp_phys_addr; + struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; + + /* reset host resources */ + tx_ring->producer = 0; + tx_ring->sw_consumer = 0; + *(tx_ring->hw_consumer) = 0; + + rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); + rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, + &rq_phys_addr, GFP_KERNEL); + if (!rq_addr) + return -ENOMEM; + + rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); + rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, + &rsp_phys_addr, GFP_KERNEL); + if (!rsp_addr) { + err = -ENOMEM; + goto out_free_rq; + } + + memset(rq_addr, 0, rq_size); + prq = rq_addr; + + memset(rsp_addr, 0, rsp_size); + prsp = 
rsp_addr; + + prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); + + temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN | + QLCNIC_CAP0_LSO); + prq->capabilities[0] = cpu_to_le32(temp); + + prq->host_int_crb_mode = + cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); + + prq->interrupt_ctl = 0; + prq->msi_index = 0; + prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr); + + prq_cds = &prq->cds_ring; + + prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); + prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); + + phys_addr = rq_phys_addr; + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + (u32)(phys_addr >> 32), + ((u32)phys_addr & 0xffffffff), + rq_size, + QLCNIC_CDRP_CMD_CREATE_TX_CTX); + + if (err == QLCNIC_RCODE_SUCCESS) { + temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); + tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; + + adapter->tx_context_id = + le16_to_cpu(prsp->context_id); + } else { + dev_err(&adapter->pdev->dev, + "Failed to create tx ctx in firmware%d\n", err); + err = -EIO; + } + + dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, + rsp_phys_addr); + +out_free_rq: + dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); + + return err; +} + +static void +qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter) +{ + if (qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + adapter->tx_context_id, + QLCNIC_DESTROY_CTX_RESET, + 0, + QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) { + + dev_err(&adapter->pdev->dev, + "Failed to destroy tx ctx in firmware\n"); + } +} + +int +qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config) +{ + return qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + config, + 0, + 0, + QLCNIC_CDRP_CMD_CONFIG_PORT); +} + +int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) +{ + void *addr; + int err; + int ring; + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + + struct pci_dev *pdev = adapter->pdev; + + recv_ctx = adapter->recv_ctx; + tx_ring = adapter->tx_ring; + + tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev, + sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL); + if (tx_ring->hw_consumer == NULL) { + dev_err(&pdev->dev, "failed to allocate tx consumer\n"); + return -ENOMEM; + } + + /* cmd desc ring */ + addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), + &tx_ring->phys_addr, GFP_KERNEL); + + if (addr == NULL) { + dev_err(&pdev->dev, "failed to allocate tx desc ring\n"); + err = -ENOMEM; + goto err_out_free; + } + + tx_ring->desc_head = addr; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + addr = dma_alloc_coherent(&adapter->pdev->dev, + RCV_DESC_RINGSIZE(rds_ring), + &rds_ring->phys_addr, GFP_KERNEL); + if (addr == NULL) { + dev_err(&pdev->dev, + "failed to allocate rds ring [%d]\n", ring); + err = -ENOMEM; + goto err_out_free; + } + rds_ring->desc_head = addr; + + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + addr = dma_alloc_coherent(&adapter->pdev->dev, + STATUS_DESC_RINGSIZE(sds_ring), + &sds_ring->phys_addr, GFP_KERNEL); + if (addr == NULL) { + dev_err(&pdev->dev, + "failed to allocate sds ring [%d]\n", ring); + err = -ENOMEM; + goto err_out_free; + } + sds_ring->desc_head = addr; + } + + return 0; + 
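+ /* Partial-allocation unwind: qlcnic_free_hw_resources() below + * checks each ring pointer for NULL, so it is safe to call with + * only some of the rings allocated. */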
+err_out_free: + qlcnic_free_hw_resources(adapter); + return err; +} + + +int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter) +{ + int err; + + if (adapter->flags & QLCNIC_NEED_FLR) { + pci_reset_function(adapter->pdev); + adapter->flags &= ~QLCNIC_NEED_FLR; + } + + err = qlcnic_fw_cmd_create_rx_ctx(adapter); + if (err) + return err; + + err = qlcnic_fw_cmd_create_tx_ctx(adapter); + if (err) { + qlcnic_fw_cmd_destroy_rx_ctx(adapter); + return err; + } + + set_bit(__QLCNIC_FW_ATTACHED, &adapter->state); + return 0; +} + +void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) +{ + if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { + qlcnic_fw_cmd_destroy_rx_ctx(adapter); + qlcnic_fw_cmd_destroy_tx_ctx(adapter); + + /* Allow dma queues to drain after context reset */ + msleep(20); + } +} + +void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + int ring; + + recv_ctx = adapter->recv_ctx; + + tx_ring = adapter->tx_ring; + if (tx_ring->hw_consumer != NULL) { + dma_free_coherent(&adapter->pdev->dev, + sizeof(u32), + tx_ring->hw_consumer, + tx_ring->hw_cons_phys_addr); + tx_ring->hw_consumer = NULL; + } + + if (tx_ring->desc_head != NULL) { + dma_free_coherent(&adapter->pdev->dev, + TX_DESC_RINGSIZE(tx_ring), + tx_ring->desc_head, tx_ring->phys_addr); + tx_ring->desc_head = NULL; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + + if (rds_ring->desc_head != NULL) { + dma_free_coherent(&adapter->pdev->dev, + RCV_DESC_RINGSIZE(rds_ring), + rds_ring->desc_head, + rds_ring->phys_addr); + rds_ring->desc_head = NULL; + } + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + if (sds_ring->desc_head != NULL) { + dma_free_coherent(&adapter->pdev->dev, + STATUS_DESC_RINGSIZE(sds_ring), + sds_ring->desc_head, + sds_ring->phys_addr); + sds_ring->desc_head = NULL; + } + } +} + + +/* Get MAC address of a NIC partition */ +int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) +{ + int err; + u32 arg1; + + arg1 = adapter->ahw->pci_func | BIT_8; + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + arg1, + 0, + 0, + QLCNIC_CDRP_CMD_MAC_ADDRESS); + + if (err == QLCNIC_RCODE_SUCCESS) + qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET, + QLCNIC_ARG2_CRB_OFFSET, 0, mac); + else { + dev_err(&adapter->pdev->dev, + "Failed to get mac address%d\n", err); + err = -EIO; + } + + return err; +} + +/* Get info of a NIC partition */ +int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *npar_info, u8 func_id) +{ + int err; + dma_addr_t nic_dma_t; + struct qlcnic_info *nic_info; + void *nic_info_addr; + size_t nic_size = sizeof(struct qlcnic_info); + + nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, + &nic_dma_t, GFP_KERNEL); + if (!nic_info_addr) + return -ENOMEM; + memset(nic_info_addr, 0, nic_size); + + nic_info = nic_info_addr; + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + MSD(nic_dma_t), + LSD(nic_dma_t), + (func_id << 16 | nic_size), + QLCNIC_CDRP_CMD_GET_NIC_INFO); + + if (err == QLCNIC_RCODE_SUCCESS) { + npar_info->pci_func = le16_to_cpu(nic_info->pci_func); + npar_info->op_mode = le16_to_cpu(nic_info->op_mode); + npar_info->phys_port = le16_to_cpu(nic_info->phys_port); + 
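/* nic_info points at the little-endian DMA response buffer; convert + * each field before reporting it back through npar_info. */ +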
npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode); + npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques); + npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques); + npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw); + npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw); + npar_info->capabilities = le32_to_cpu(nic_info->capabilities); + npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); + + dev_info(&adapter->pdev->dev, + "phy port: %d switch_mode: %d,\n" + "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n" + "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n", + npar_info->phys_port, npar_info->switch_mode, + npar_info->max_tx_ques, npar_info->max_rx_ques, + npar_info->min_tx_bw, npar_info->max_tx_bw, + npar_info->max_mtu, npar_info->capabilities); + } else { + dev_err(&adapter->pdev->dev, + "Failed to get nic info%d\n", err); + err = -EIO; + } + + dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, + nic_dma_t); + return err; +} + +/* Configure a NIC partition */ +int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic) +{ + int err = -EIO; + dma_addr_t nic_dma_t; + void *nic_info_addr; + struct qlcnic_info *nic_info; + size_t nic_size = sizeof(struct qlcnic_info); + + if (adapter->op_mode != QLCNIC_MGMT_FUNC) + return err; + + nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, + &nic_dma_t, GFP_KERNEL); + if (!nic_info_addr) + return -ENOMEM; + + memset(nic_info_addr, 0, nic_size); + nic_info = nic_info_addr; + + nic_info->pci_func = cpu_to_le16(nic->pci_func); + nic_info->op_mode = cpu_to_le16(nic->op_mode); + nic_info->phys_port = cpu_to_le16(nic->phys_port); + nic_info->switch_mode = cpu_to_le16(nic->switch_mode); + nic_info->capabilities = cpu_to_le32(nic->capabilities); + nic_info->max_mac_filters = nic->max_mac_filters; + nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques); + nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques); + nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw); + nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); + + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + MSD(nic_dma_t), + LSD(nic_dma_t), + ((nic->pci_func << 16) | nic_size), + QLCNIC_CDRP_CMD_SET_NIC_INFO); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, + "Failed to set nic info%d\n", err); + err = -EIO; + } + + dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, + nic_dma_t); + return err; +} + +/* Get PCI Info of a partition */ +int qlcnic_get_pci_info(struct qlcnic_adapter *adapter, + struct qlcnic_pci_info *pci_info) +{ + int err = 0, i; + dma_addr_t pci_info_dma_t; + struct qlcnic_pci_info *npar; + void *pci_info_addr; + size_t npar_size = sizeof(struct qlcnic_pci_info); + size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; + + pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, + &pci_info_dma_t, GFP_KERNEL); + if (!pci_info_addr) + return -ENOMEM; + memset(pci_info_addr, 0, pci_size); + + npar = pci_info_addr; + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + MSD(pci_info_dma_t), + LSD(pci_info_dma_t), + pci_size, + QLCNIC_CDRP_CMD_GET_PCI_INFO); + + if (err == QLCNIC_RCODE_SUCCESS) { + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) { + pci_info->id = le16_to_cpu(npar->id); + pci_info->active = le16_to_cpu(npar->active); + pci_info->type = le16_to_cpu(npar->type); + pci_info->default_port = + le16_to_cpu(npar->default_port); + pci_info->tx_min_bw = + 
le16_to_cpu(npar->tx_min_bw); + pci_info->tx_max_bw = + le16_to_cpu(npar->tx_max_bw); + memcpy(pci_info->mac, npar->mac, ETH_ALEN); + } + } else { + dev_err(&adapter->pdev->dev, + "Failed to get PCI Info %d\n", err); + err = -EIO; + } + + dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr, + pci_info_dma_t); + return err; +} + +/* Configure eSwitch for port mirroring */ +int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, + u8 enable_mirroring, u8 pci_func) +{ + int err = -EIO; + u32 arg1; + + if (adapter->op_mode != QLCNIC_MGMT_FUNC || + !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) + return err; + + arg1 = id | (enable_mirroring ? BIT_4 : 0); + arg1 |= pci_func << 8; + + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + arg1, + 0, + 0, + QLCNIC_CDRP_CMD_SET_PORTMIRRORING); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, + "Failed to configure port mirroring for func %d on eswitch %d\n", + pci_func, id); + } else { + dev_info(&adapter->pdev->dev, + "Configured eSwitch %d for port mirroring of func %d\n", + id, pci_func); + } + + return err; +} + +int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, + const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { + + size_t stats_size = sizeof(struct __qlcnic_esw_statistics); + struct __qlcnic_esw_statistics *stats; + dma_addr_t stats_dma_t; + void *stats_addr; + u32 arg1; + int err; + + if (esw_stats == NULL) + return -ENOMEM; + + if (adapter->op_mode != QLCNIC_MGMT_FUNC && + func != adapter->ahw->pci_func) { + dev_err(&adapter->pdev->dev, + "Not privileged to query stats for func=%d\n", func); + return -EIO; + } + + stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, + &stats_dma_t, GFP_KERNEL); + if (!stats_addr) { + dev_err(&adapter->pdev->dev, "Unable to allocate memory\n"); + return -ENOMEM; + } + memset(stats_addr, 0, stats_size); + + arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; + arg1 |= rx_tx << 15 | stats_size << 16; + + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + arg1, + MSD(stats_dma_t), + LSD(stats_dma_t), + QLCNIC_CDRP_CMD_GET_ESWITCH_STATS); + + if (!err) { + stats = stats_addr; + esw_stats->context_id = le16_to_cpu(stats->context_id); + esw_stats->version = le16_to_cpu(stats->version); + esw_stats->size = le16_to_cpu(stats->size); + esw_stats->multicast_frames = + le64_to_cpu(stats->multicast_frames); + esw_stats->broadcast_frames = + le64_to_cpu(stats->broadcast_frames); + esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames); + esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames); + esw_stats->local_frames = le64_to_cpu(stats->local_frames); + esw_stats->errors = le64_to_cpu(stats->errors); + esw_stats->numbytes = le64_to_cpu(stats->numbytes); + } + + dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, + stats_dma_t); + return err; +} + +int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch, + const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { + + struct __qlcnic_esw_statistics port_stats; + u8 i; + int ret = -EIO; + + if (esw_stats == NULL) + return -ENOMEM; + if (adapter->op_mode != QLCNIC_MGMT_FUNC) + return -EIO; + if (adapter->npars == NULL) + return -EIO; + + memset(esw_stats, 0, sizeof(u64)); + esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; + esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; + esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; + 
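/* The remaining counters also default to 'not available'; the loop + * below overwrites them only for functions on this eswitch that + * actually return stats. */ +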
esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL; + esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL; + esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL; + esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL; + esw_stats->context_id = eswitch; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + if (adapter->npars[i].phy_port != eswitch) + continue; + + memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics)); + if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats)) + continue; + + esw_stats->size = port_stats.size; + esw_stats->version = port_stats.version; + QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames, + port_stats.unicast_frames); + QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames, + port_stats.multicast_frames); + QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames, + port_stats.broadcast_frames); + QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames, + port_stats.dropped_frames); + QLCNIC_ADD_ESW_STATS(esw_stats->errors, + port_stats.errors); + QLCNIC_ADD_ESW_STATS(esw_stats->local_frames, + port_stats.local_frames); + QLCNIC_ADD_ESW_STATS(esw_stats->numbytes, + port_stats.numbytes); + ret = 0; + } + return ret; +} + +int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw, + const u8 port, const u8 rx_tx) +{ + + u32 arg1; + + if (adapter->op_mode != QLCNIC_MGMT_FUNC) + return -EIO; + + if (func_esw == QLCNIC_STATS_PORT) { + if (port >= QLCNIC_MAX_PCI_FUNC) + goto err_ret; + } else if (func_esw == QLCNIC_STATS_ESWITCH) { + if (port >= QLCNIC_NIU_MAX_XG_PORTS) + goto err_ret; + } else { + goto err_ret; + } + + if (rx_tx > QLCNIC_QUERY_TX_COUNTER) + goto err_ret; + + arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12; + arg1 |= BIT_14 | rx_tx << 15; + + return qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + arg1, + 0, + 0, + QLCNIC_CDRP_CMD_GET_ESWITCH_STATS); + +err_ret: + dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d" + " rx_tx=%d\n", func_esw, port, rx_tx); + return -EIO; +} + +static int +__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, + u32 *arg1, u32 *arg2) +{ + int err = -EIO; + u8 pci_func; + pci_func = (*arg1 >> 8); + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + *arg1, + 0, + 0, + QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG); + + if (err == QLCNIC_RCODE_SUCCESS) { + *arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); + *arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET); + dev_info(&adapter->pdev->dev, + "eSwitch port config for pci func %d\n", pci_func); + } else { + dev_err(&adapter->pdev->dev, + "Failed to get eswitch port config for pci func %d\n", + pci_func); + } + return err; +} + +/* + * Configure eSwitch port. + * op_mode = 0 for setting default port behavior + * op_mode = 1 for setting vlan id + * op_mode = 2 for deleting vlan id + * op_type = 0 for vlan_id + * op_type = 1 for port vlan_id + */ +int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + int err = -EIO; + u32 arg1, arg2 = 0; + u8 pci_func; + + if (adapter->op_mode != QLCNIC_MGMT_FUNC) + return err; + pci_func = esw_cfg->pci_func; + arg1 = (adapter->npars[pci_func].phy_port & BIT_0); + arg1 |= (pci_func << 8); + + if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) + return err; + arg1 &= ~(0x0ff << 8); + arg1 |= (pci_func << 8); + arg1 &= ~(BIT_2 | BIT_3); + switch (esw_cfg->op_mode) { + case QLCNIC_PORT_DEFAULTS: + arg1 |= (BIT_4 | BIT_6 | BIT_7); + arg2 |= (BIT_0 | BIT_1); + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) + 
arg2 |= (BIT_2 | BIT_3); + if (!(esw_cfg->discard_tagged)) + arg1 &= ~BIT_4; + if (!(esw_cfg->promisc_mode)) + arg1 &= ~BIT_6; + if (!(esw_cfg->mac_override)) + arg1 &= ~BIT_7; + if (!(esw_cfg->mac_anti_spoof)) + arg2 &= ~BIT_0; + if (!(esw_cfg->offload_flags & BIT_0)) + arg2 &= ~(BIT_1 | BIT_2 | BIT_3); + if (!(esw_cfg->offload_flags & BIT_1)) + arg2 &= ~BIT_2; + if (!(esw_cfg->offload_flags & BIT_2)) + arg2 &= ~BIT_3; + break; + case QLCNIC_ADD_VLAN: + arg1 |= (BIT_2 | BIT_5); + arg1 |= (esw_cfg->vlan_id << 16); + break; + case QLCNIC_DEL_VLAN: + arg1 |= (BIT_3 | BIT_5); + arg1 &= ~(0x0ffff << 16); + break; + default: + return err; + } + + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + arg1, + arg2, + 0, + QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, + "Failed to configure eswitch pci func %d\n", pci_func); + } else { + dev_info(&adapter->pdev->dev, + "Configured eSwitch for pci func %d\n", pci_func); + } + + return err; +} + +int +qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + u32 arg1, arg2; + u8 phy_port; + if (adapter->op_mode == QLCNIC_MGMT_FUNC) + phy_port = adapter->npars[esw_cfg->pci_func].phy_port; + else + phy_port = adapter->physical_port; + arg1 = phy_port; + arg1 |= (esw_cfg->pci_func << 8); + if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) + return -EIO; + + esw_cfg->discard_tagged = !!(arg1 & BIT_4); + esw_cfg->host_vlan_tag = !!(arg1 & BIT_5); + esw_cfg->promisc_mode = !!(arg1 & BIT_6); + esw_cfg->mac_override = !!(arg1 & BIT_7); + esw_cfg->vlan_id = LSW(arg1 >> 16); + esw_cfg->mac_anti_spoof = (arg2 & 0x1); + esw_cfg->offload_flags = ((arg2 >> 1) & 0x7); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c new file mode 100644 index 000000000000..7c64f2ffc219 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -0,0 +1,1234 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2010 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> + +#include "qlcnic.h" + +struct qlcnic_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m) +#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m) + +static const struct qlcnic_stats qlcnic_gstrings_stats[] = { + {"xmit_called", + QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)}, + {"xmit_finished", + QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)}, + {"rx_dropped", + QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)}, + {"tx_dropped", + QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)}, + {"csummed", + QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)}, + {"rx_pkts", + QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)}, + {"lro_pkts", + QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)}, + {"rx_bytes", + QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)}, + {"tx_bytes", + QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)}, + {"lrobytes", + QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)}, + {"lso_frames", + QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)}, + {"xmit_on", + QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)}, + {"xmit_off", + QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)}, + {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure), + QLC_OFF(stats.skb_alloc_failure)}, + {"null rxbuf", + QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)}, + {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error), + QLC_OFF(stats.rx_dma_map_error)}, + {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error), + QLC_OFF(stats.tx_dma_map_error)}, + +}; + +static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx unicast frames", + "rx multicast frames", + "rx broadcast frames", + "rx dropped frames", + "rx errors", + "rx local frames", + "rx numbytes", + "tx unicast frames", + "tx multicast frames", + "tx broadcast frames", + "tx dropped frames", + "tx errors", + "tx local frames", + "tx numbytes", +}; + +#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) +#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats) + +static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { + "Register_Test_on_offline", + "Link_Test_on_offline", + "Interrupt_Test_offline", + "Internal_Loopback_offline", + "External_Loopback_offline" +}; + +#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) + +#define QLCNIC_RING_REGS_COUNT 20 +#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32)) +#define QLCNIC_MAX_EEPROM_LEN 1024 + +static const u32 diag_registers[] = { + CRB_CMDPEG_STATE, + CRB_RCVPEG_STATE, + CRB_XG_STATE_P3P, + CRB_FW_CAPABILITIES_1, + ISR_INT_STATE_REG, + QLCNIC_CRB_DRV_ACTIVE, + QLCNIC_CRB_DEV_STATE, + QLCNIC_CRB_DRV_STATE, + QLCNIC_CRB_DRV_SCRATCH, + QLCNIC_CRB_DEV_PARTITION_INFO, + QLCNIC_CRB_DRV_IDC_VER, + QLCNIC_PEG_ALIVE_COUNTER, + QLCNIC_PEG_HALT_STATUS1, + QLCNIC_PEG_HALT_STATUS2, + QLCNIC_CRB_PEG_NET_0+0x3c, + QLCNIC_CRB_PEG_NET_1+0x3c, + QLCNIC_CRB_PEG_NET_2+0x3c, + QLCNIC_CRB_PEG_NET_4+0x3c, + -1 +}; + +#define QLCNIC_MGMT_API_VERSION 2 +#define QLCNIC_DEV_INFO_SIZE 1 +#define QLCNIC_ETHTOOL_REGS_VER 2 +static int qlcnic_get_regs_len(struct net_device *dev) +{ + return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN + + QLCNIC_DEV_INFO_SIZE + 1; +} + +static int qlcnic_get_eeprom_len(struct net_device *dev) +{ + return QLCNIC_FLASH_TOTAL_SIZE; +} + +static void +qlcnic_get_drvinfo(struct net_device *dev, struct 
ethtool_drvinfo *drvinfo) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 fw_major, fw_minor, fw_build; + + fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); + fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); + fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB); + sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + strlcpy(drvinfo->driver, qlcnic_driver_name, 32); + strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32); +} + +static int +qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int check_sfp_module = 0; + u16 pcifn = adapter->ahw->pci_func; + + /* read which mode */ + if (adapter->ahw->port_type == QLCNIC_GBE) { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + ecmd->advertising = (ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + ecmd->duplex = adapter->link_duplex; + ecmd->autoneg = adapter->link_autoneg; + + } else if (adapter->ahw->port_type == QLCNIC_XGBE) { + u32 val; + + val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); + if (val == QLCNIC_PORT_MODE_802_3_AP) { + ecmd->supported = SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_1000baseT_Full; + } else { + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full; + } + + if (netif_running(dev) && adapter->has_link_events) { + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + ecmd->autoneg = adapter->link_autoneg; + ecmd->duplex = adapter->link_duplex; + goto skip; + } + + val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); + ethtool_cmd_speed_set(ecmd, P3P_LINK_SPEED_MHZ * + P3P_LINK_SPEED_VAL(pcifn, val)); + ecmd->duplex = DUPLEX_FULL; + ecmd->autoneg = AUTONEG_DISABLE; + } else + return -EIO; + +skip: + ecmd->phy_address = adapter->physical_port; + ecmd->transceiver = XCVR_EXTERNAL; + + switch (adapter->ahw->board_type) { + case QLCNIC_BRDTYPE_P3P_REF_QG: + case QLCNIC_BRDTYPE_P3P_4_GB: + case QLCNIC_BRDTYPE_P3P_4_GB_MM: + + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + case QLCNIC_BRDTYPE_P3P_10G_CX4: + case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: + case QLCNIC_BRDTYPE_P3P_10000_BASE_T: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + ecmd->autoneg = adapter->link_autoneg; + break; + case QLCNIC_BRDTYPE_P3P_IMEZ: + case QLCNIC_BRDTYPE_P3P_XG_LOM: + case QLCNIC_BRDTYPE_P3P_HMEZ: + ecmd->supported |= SUPPORTED_MII; + ecmd->advertising |= ADVERTISED_MII; + ecmd->port = PORT_MII; + ecmd->autoneg = AUTONEG_DISABLE; + break; + case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: + case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: + case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: + ecmd->advertising |= ADVERTISED_TP; + ecmd->supported |= SUPPORTED_TP; + check_sfp_module = netif_running(dev) && + adapter->has_link_events; + case QLCNIC_BRDTYPE_P3P_10G_XFP: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + ecmd->autoneg = AUTONEG_DISABLE; + break; + case QLCNIC_BRDTYPE_P3P_10G_TP: + if (adapter->ahw->port_type == QLCNIC_XGBE) { + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + ecmd->advertising |= + 
(ADVERTISED_FIBRE | ADVERTISED_TP); + ecmd->port = PORT_FIBRE; + check_sfp_module = netif_running(dev) && + adapter->has_link_events; + } else { + ecmd->autoneg = AUTONEG_ENABLE; + ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + ecmd->advertising |= + (ADVERTISED_TP | ADVERTISED_Autoneg); + ecmd->port = PORT_TP; + } + break; + default: + dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", + adapter->ahw->board_type); + return -EIO; + } + + if (check_sfp_module) { + switch (adapter->module_type) { + case LINKEVENT_MODULE_OPTICAL_UNKNOWN: + case LINKEVENT_MODULE_OPTICAL_SRLR: + case LINKEVENT_MODULE_OPTICAL_LRM: + case LINKEVENT_MODULE_OPTICAL_SFP_1G: + ecmd->port = PORT_FIBRE; + break; + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: + case LINKEVENT_MODULE_TWINAX: + ecmd->port = PORT_TP; + break; + default: + ecmd->port = PORT_OTHER; + } + } + + return 0; +} + +static int +qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + u32 config = 0; + u32 ret = 0; + struct qlcnic_adapter *adapter = netdev_priv(dev); + + if (adapter->ahw->port_type != QLCNIC_GBE) + return -EOPNOTSUPP; + + /* read which mode */ + if (ecmd->duplex) + config |= 0x1; + + if (ecmd->autoneg) + config |= 0x2; + + switch (ethtool_cmd_speed(ecmd)) { + case SPEED_10: + config |= (0 << 8); + break; + case SPEED_100: + config |= (1 << 8); + break; + case SPEED_1000: + config |= (10 << 8); + break; + default: + return -EIO; + } + + ret = qlcnic_fw_cmd_set_port(adapter, config); + + if (ret == QLCNIC_RCODE_NOT_SUPPORTED) + return -EOPNOTSUPP; + else if (ret) + return -EIO; + + adapter->link_speed = ethtool_cmd_speed(ecmd); + adapter->link_duplex = ecmd->duplex; + adapter->link_autoneg = ecmd->autoneg; + + if (!netif_running(dev)) + return 0; + + dev->netdev_ops->ndo_stop(dev); + return dev->netdev_ops->ndo_open(dev); +} + +static void +qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_host_sds_ring *sds_ring; + u32 *regs_buff = p; + int ring, i = 0, j = 0; + + memset(p, 0, qlcnic_get_regs_len(dev)); + regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) | + (adapter->ahw->revision_id << 16) | (adapter->pdev)->device; + + regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff)); + regs_buff[1] = QLCNIC_MGMT_API_VERSION; + + for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++) + regs_buff[i] = QLCRD32(adapter, diag_registers[j]); + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + return; + + regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/ + + regs_buff[i++] = 1; /* No. of tx ring */ + regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); + regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer); + + regs_buff[i++] = 2; /* No. of rx ring */ + regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer); + regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer); + + regs_buff[i++] = adapter->max_sds_rings; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &(recv_ctx->sds_rings[ring]); + regs_buff[i++] = readl(sds_ring->crb_sts_consumer); + } +} + +static u32 qlcnic_test_link(struct net_device *dev) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 val; + + val = QLCRD32(adapter, CRB_XG_STATE_P3P); + val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val); + return (val == XG_LINK_UP_P3P) ? 
0 : 1; +} + +static int +qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int offset; + int ret; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = (adapter->pdev)->vendor | + ((adapter->pdev)->device << 16); + offset = eeprom->offset; + + ret = qlcnic_rom_fast_read_words(adapter, offset, bytes, + eeprom->len); + if (ret < 0) + return ret; + + return 0; +} + +static void +qlcnic_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + + ring->rx_pending = adapter->num_rxd; + ring->rx_jumbo_pending = adapter->num_jumbo_rxd; + ring->tx_pending = adapter->num_txd; + + ring->rx_max_pending = adapter->max_rxd; + ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd; + ring->tx_max_pending = MAX_CMD_DESCRIPTORS; + + ring->rx_mini_max_pending = 0; + ring->rx_mini_pending = 0; +} + +static u32 +qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) +{ + u32 num_desc; + num_desc = max(val, min); + num_desc = min(num_desc, max); + num_desc = roundup_pow_of_two(num_desc); + + if (val != num_desc) { + printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n", + qlcnic_driver_name, r_name, num_desc, val); + } + + return num_desc; +} + +static int +qlcnic_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u16 num_rxd, num_jumbo_rxd, num_txd; + + if (ring->rx_mini_pending) + return -EOPNOTSUPP; + + num_rxd = qlcnic_validate_ringparam(ring->rx_pending, + MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx"); + + num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending, + MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd, + "rx jumbo"); + + num_txd = qlcnic_validate_ringparam(ring->tx_pending, + MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx"); + + if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd && + num_jumbo_rxd == adapter->num_jumbo_rxd) + return 0; + + adapter->num_rxd = num_rxd; + adapter->num_jumbo_rxd = num_jumbo_rxd; + adapter->num_txd = num_txd; + + return qlcnic_reset_context(adapter); +} + +static void qlcnic_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + + channel->max_rx = rounddown_pow_of_two(min_t(int, + adapter->max_rx_ques, num_online_cpus())); + channel->max_tx = adapter->max_tx_ques; + + channel->rx_count = adapter->max_sds_rings; + channel->tx_count = adapter->max_tx_ques; +} + +static int qlcnic_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int err; + + if (channel->other_count || channel->combined_count || + channel->tx_count != channel->max_tx) + return -EINVAL; + + err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count); + if (err) + return err; + + err = qlcnic_set_max_rss(adapter, channel->rx_count); + netdev_info(dev, "allocated 0x%x sds rings\n", + adapter->max_sds_rings); + return err; +} + +static void +qlcnic_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int port = adapter->physical_port; + __u32 val; + + if (adapter->ahw->port_type == QLCNIC_GBE) { + if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) + return; + /* get flow control settings */ + val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port)); + pause->rx_pause = 
qlcnic_gb_get_rx_flowctl(val); + val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); + switch (port) { + case 0: + pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val)); + break; + case 1: + pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val)); + break; + case 2: + pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val)); + break; + case 3: + default: + pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val)); + break; + } + } else if (adapter->ahw->port_type == QLCNIC_XGBE) { + if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) + return; + pause->rx_pause = 1; + val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); + if (port == 0) + pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val)); + else + pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val)); + } else { + dev_err(&netdev->dev, "Unknown board type: %x\n", + adapter->ahw->port_type); + } +} + +static int +qlcnic_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int port = adapter->physical_port; + __u32 val; + + /* read mode */ + if (adapter->ahw->port_type == QLCNIC_GBE) { + if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) + return -EIO; + /* set flow control */ + val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port)); + + if (pause->rx_pause) + qlcnic_gb_rx_flowctl(val); + else + qlcnic_gb_unset_rx_flowctl(val); + + QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), + val); + /* set autoneg */ + val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); + switch (port) { + case 0: + if (pause->tx_pause) + qlcnic_gb_unset_gb0_mask(val); + else + qlcnic_gb_set_gb0_mask(val); + break; + case 1: + if (pause->tx_pause) + qlcnic_gb_unset_gb1_mask(val); + else + qlcnic_gb_set_gb1_mask(val); + break; + case 2: + if (pause->tx_pause) + qlcnic_gb_unset_gb2_mask(val); + else + qlcnic_gb_set_gb2_mask(val); + break; + case 3: + default: + if (pause->tx_pause) + qlcnic_gb_unset_gb3_mask(val); + else + qlcnic_gb_set_gb3_mask(val); + break; + } + QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val); + } else if (adapter->ahw->port_type == QLCNIC_XGBE) { + if (!pause->rx_pause || pause->autoneg) + return -EOPNOTSUPP; + + if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) + return -EIO; + + val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); + if (port == 0) { + if (pause->tx_pause) + qlcnic_xg_unset_xg0_mask(val); + else + qlcnic_xg_set_xg0_mask(val); + } else { + if (pause->tx_pause) + qlcnic_xg_unset_xg1_mask(val); + else + qlcnic_xg_set_xg1_mask(val); + } + QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val); + } else { + dev_err(&netdev->dev, "Unknown board type: %x\n", + adapter->ahw->port_type); + } + return 0; +} + +static int qlcnic_reg_test(struct net_device *dev) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 data_read; + + data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0)); + if ((data_read & 0xffff) != adapter->pdev->vendor) + return 1; + + return 0; +} + +static int qlcnic_get_sset_count(struct net_device *dev, int sset) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + switch (sset) { + case ETH_SS_TEST: + return QLCNIC_TEST_LEN; + case ETH_SS_STATS: + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) + return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN; + return QLCNIC_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static int qlcnic_irq_test(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int max_sds_rings = adapter->max_sds_rings; + int ret; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EIO; + + ret = 
qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); + if (ret) + goto clear_it; + + adapter->diag_cnt = 0; + ret = qlcnic_issue_cmd(adapter, adapter->ahw->pci_func, + adapter->fw_hal_version, adapter->ahw->pci_func, + 0, 0, 0x00000011); + if (ret) + goto done; + + msleep(10); + + ret = !adapter->diag_cnt; + +done: + qlcnic_diag_free_res(netdev, max_sds_rings); + +clear_it: + adapter->max_sds_rings = max_sds_rings; + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return ret; +} + +#define QLCNIC_ILB_PKT_SIZE 64 +#define QLCNIC_NUM_ILB_PKT 16 +#define QLCNIC_ILB_MAX_RCV_LOOP 10 + +static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[]) +{ + unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00}; + + memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE); + + memcpy(data, mac, ETH_ALEN); + memcpy(data + ETH_ALEN, mac, ETH_ALEN); + + memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data)); +} + +int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]) +{ + unsigned char buff[QLCNIC_ILB_PKT_SIZE]; + qlcnic_create_loopback_buff(buff, mac); + return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE); +} + +static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0]; + struct sk_buff *skb; + int i, loop, cnt = 0; + + for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { + skb = dev_alloc_skb(QLCNIC_ILB_PKT_SIZE); + qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); + skb_put(skb, QLCNIC_ILB_PKT_SIZE); + + adapter->diag_cnt = 0; + qlcnic_xmit_frame(skb, adapter->netdev); + + loop = 0; + do { + msleep(1); + qlcnic_process_rcv_ring_diag(sds_ring); + if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) + break; + } while (!adapter->diag_cnt); + + dev_kfree_skb_any(skb); + + if (!adapter->diag_cnt) + dev_warn(&adapter->pdev->dev, "LB Test: packet %d" + " not received\n", i + 1); + else + cnt++; + } + if (cnt != i) { + dev_warn(&adapter->pdev->dev, "LB Test failed\n"); + return -1; + } + return 0; +} + +static int qlcnic_loopback_test(struct net_device *netdev, u8 mode) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int max_sds_rings = adapter->max_sds_rings; + struct qlcnic_host_sds_ring *sds_ring; + int loop = 0; + int ret; + + if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) { + netdev_info(netdev, "Firmware is not loopback test capable\n"); + return -EOPNOTSUPP; + } + + netdev_info(netdev, "%s loopback test in progress\n", + mode == QLCNIC_ILB_MODE ? 
"internal" : "external"); + if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { + netdev_warn(netdev, "Loopback test not supported for non " + "privilege function\n"); + return 0; + } + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); + if (ret) + goto clear_it; + + sds_ring = &adapter->recv_ctx->sds_rings[0]; + + ret = qlcnic_set_lb_mode(adapter, mode); + if (ret) + goto free_res; + + adapter->diag_cnt = 0; + do { + msleep(500); + qlcnic_process_rcv_ring_diag(sds_ring); + if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { + netdev_info(netdev, "firmware didnt respond to loopback" + " configure request\n"); + ret = -QLCNIC_FW_NOT_RESPOND; + goto free_res; + } else if (adapter->diag_cnt) { + ret = adapter->diag_cnt; + goto free_res; + } + } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state)); + + ret = qlcnic_do_lb_test(adapter); + + qlcnic_clear_lb_mode(adapter); + + free_res: + qlcnic_diag_free_res(netdev, max_sds_rings); + + clear_it: + adapter->max_sds_rings = max_sds_rings; + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return ret; +} + +static void +qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, + u64 *data) +{ + memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN); + + data[0] = qlcnic_reg_test(dev); + if (data[0]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + data[1] = (u64) qlcnic_test_link(dev); + if (data[1]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (eth_test->flags & ETH_TEST_FL_OFFLINE) { + data[2] = qlcnic_irq_test(dev); + if (data[2]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE); + if (data[3]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) { + data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE); + if (data[4]) + eth_test->flags |= ETH_TEST_FL_FAILED; + eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + } + } +} + +static void +qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int index, i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *qlcnic_gstrings_test, + QLCNIC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (index = 0; index < QLCNIC_STATS_LEN; index++) { + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_gstrings_stats[index].stat_string, + ETH_GSTRING_LEN); + } + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) { + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_device_gstrings_stats[i], + ETH_GSTRING_LEN); + } + } +} + +#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \ + (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ? 
0 : VAL1) + +static void +qlcnic_fill_device_stats(int *index, u64 *data, + struct __qlcnic_esw_statistics *stats) +{ + int ind = *index; + + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes); + + *index = ind; +} + +static void +qlcnic_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 * data) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + struct qlcnic_esw_statistics port_stats; + int index, ret; + + for (index = 0; index < QLCNIC_STATS_LEN; index++) { + char *p = + (char *)adapter + + qlcnic_gstrings_stats[index].stat_offset; + data[index] = + (qlcnic_gstrings_stats[index].sizeof_stat == + sizeof(u64)) ? *(u64 *)p:(*(u32 *)p); + } + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + + memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics)); + ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, + QLCNIC_QUERY_RX_COUNTER, &port_stats.rx); + if (ret) + return; + + qlcnic_fill_device_stats(&index, data, &port_stats.rx); + + ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, + QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); + if (ret) + return; + + qlcnic_fill_device_stats(&index, data, &port_stats.tx); +} + +static int qlcnic_set_led(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int max_sds_rings = adapter->max_sds_rings; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EIO; + + if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) { + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return -EIO; + } + set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state); + } + + if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) + return 0; + + dev_err(&adapter->pdev->dev, + "Failed to set LED blink state.\n"); + break; + + case ETHTOOL_ID_INACTIVE: + if (adapter->nic_ops->config_led(adapter, 0, 0xf)) + dev_err(&adapter->pdev->dev, + "Failed to reset LED blink state.\n"); + + break; + + default: + return -EINVAL; + } + + if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) { + qlcnic_diag_free_res(dev, max_sds_rings); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + } + + return -EIO; +} + +static void +qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 wol_cfg; + + wol->supported = 0; + wol->wolopts = 0; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); + if (wol_cfg & (1UL << adapter->portnum)) + wol->supported |= WAKE_MAGIC; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); + if (wol_cfg & (1UL << adapter->portnum)) + wol->wolopts |= WAKE_MAGIC; +} + +static int +qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 wol_cfg; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); + if (!(wol_cfg & (1 << adapter->portnum))) + return -EOPNOTSUPP; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); + if (wol->wolopts & WAKE_MAGIC) + 
wol_cfg |= 1UL << adapter->portnum; + else + wol_cfg &= ~(1UL << adapter->portnum); + + QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg); + + return 0; +} + +/* + * Set the coalescing parameters. Currently only normal is supported. + * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the + * firmware coalescing to default. + */ +static int qlcnic_set_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethcoal) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + return -EINVAL; + + /* + * Return Error if unsupported values or + * unsupported parameters are set. + */ + if (ethcoal->rx_coalesce_usecs > 0xffff || + ethcoal->rx_max_coalesced_frames > 0xffff || + ethcoal->tx_coalesce_usecs || + ethcoal->tx_max_coalesced_frames || + ethcoal->rx_coalesce_usecs_irq || + ethcoal->rx_max_coalesced_frames_irq || + ethcoal->tx_coalesce_usecs_irq || + ethcoal->tx_max_coalesced_frames_irq || + ethcoal->stats_block_coalesce_usecs || + ethcoal->use_adaptive_rx_coalesce || + ethcoal->use_adaptive_tx_coalesce || + ethcoal->pkt_rate_low || + ethcoal->rx_coalesce_usecs_low || + ethcoal->rx_max_coalesced_frames_low || + ethcoal->tx_coalesce_usecs_low || + ethcoal->tx_max_coalesced_frames_low || + ethcoal->pkt_rate_high || + ethcoal->rx_coalesce_usecs_high || + ethcoal->rx_max_coalesced_frames_high || + ethcoal->tx_coalesce_usecs_high || + ethcoal->tx_max_coalesced_frames_high) + return -EINVAL; + + if (!ethcoal->rx_coalesce_usecs || + !ethcoal->rx_max_coalesced_frames) { + adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT; + adapter->ahw->coal.rx_time_us = + QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; + adapter->ahw->coal.rx_packets = + QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; + } else { + adapter->ahw->coal.flag = 0; + adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs; + adapter->ahw->coal.rx_packets = + ethcoal->rx_max_coalesced_frames; + } + + qlcnic_config_intr_coalesce(adapter); + + return 0; +} + +static int qlcnic_get_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethcoal) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return -EINVAL; + + ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us; + ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets; + + return 0; +} + +static u32 qlcnic_get_msglevel(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = msglvl; +} + +static int +qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + + if (fw_dump->clr) + dump->len = fw_dump->tmpl_hdr->size + fw_dump->size; + else + dump->len = 0; + dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; + dump->version = adapter->fw_version; + return 0; +} + +static int +qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buffer) +{ + int i, copy_sz; + u32 *hdr_ptr, *data; + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + + if (!fw_dump->clr) { + netdev_info(netdev, "Dump not available\n"); + qlcnic_api_unlock(adapter); + return -EINVAL; + } + /* Copy template header first */ + copy_sz 
= fw_dump->tmpl_hdr->size; + hdr_ptr = (u32 *) fw_dump->tmpl_hdr; + data = buffer; + for (i = 0; i < copy_sz/sizeof(u32); i++) + *data++ = cpu_to_le32(*hdr_ptr++); + + /* Copy captured dump data */ + memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size); + dump->len = copy_sz + fw_dump->size; + dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; + + /* Free dump area once data has been captured */ + vfree(fw_dump->data); + fw_dump->data = NULL; + fw_dump->clr = 0; + + return 0; +} + +static int +qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) +{ + int ret = 0; + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + + switch (val->flag) { + case QLCNIC_FORCE_FW_DUMP_KEY: + if (!fw_dump->enable) { + netdev_info(netdev, "FW dump not enabled\n"); + return ret; + } + if (fw_dump->clr) { + dev_info(&adapter->pdev->dev, + "Previous dump not cleared, not forcing dump\n"); + return ret; + } + netdev_info(netdev, "Forcing a FW dump\n"); + qlcnic_dev_request_reset(adapter); + break; + case QLCNIC_DISABLE_FW_DUMP: + if (fw_dump->enable) { + netdev_info(netdev, "Disabling FW dump\n"); + fw_dump->enable = 0; + } + break; + case QLCNIC_ENABLE_FW_DUMP: + if (!fw_dump->enable && fw_dump->tmpl_hdr) { + netdev_info(netdev, "Enabling FW dump\n"); + fw_dump->enable = 1; + } + break; + case QLCNIC_FORCE_FW_RESET: + netdev_info(netdev, "Forcing a FW reset\n"); + qlcnic_dev_request_reset(adapter); + adapter->flags &= ~QLCNIC_FW_RESET_OWNER; + break; + default: + if (val->flag > QLCNIC_DUMP_MASK_MAX || + val->flag < QLCNIC_DUMP_MASK_MIN) { + netdev_info(netdev, + "Invalid dump level: 0x%x\n", val->flag); + ret = -EINVAL; + goto out; + } + fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff; + netdev_info(netdev, "Driver mask changed to: 0x%x\n", + fw_dump->tmpl_hdr->drv_cap_mask); + } +out: + return ret; +} + +const struct ethtool_ops qlcnic_ethtool_ops = { + .get_settings = qlcnic_get_settings, + .set_settings = qlcnic_set_settings, + .get_drvinfo = qlcnic_get_drvinfo, + .get_regs_len = qlcnic_get_regs_len, + .get_regs = qlcnic_get_regs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = qlcnic_get_eeprom_len, + .get_eeprom = qlcnic_get_eeprom, + .get_ringparam = qlcnic_get_ringparam, + .set_ringparam = qlcnic_set_ringparam, + .get_channels = qlcnic_get_channels, + .set_channels = qlcnic_set_channels, + .get_pauseparam = qlcnic_get_pauseparam, + .set_pauseparam = qlcnic_set_pauseparam, + .get_wol = qlcnic_get_wol, + .set_wol = qlcnic_set_wol, + .self_test = qlcnic_diag_test, + .get_strings = qlcnic_get_strings, + .get_ethtool_stats = qlcnic_get_ethtool_stats, + .get_sset_count = qlcnic_get_sset_count, + .get_coalesce = qlcnic_get_intr_coalesce, + .set_coalesce = qlcnic_set_intr_coalesce, + .set_phys_id = qlcnic_set_led, + .set_msglevel = qlcnic_set_msglevel, + .get_msglevel = qlcnic_get_msglevel, + .get_dump_flag = qlcnic_get_dump_flag, + .get_dump_data = qlcnic_get_dump_data, + .set_dump = qlcnic_set_dump, +}; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h new file mode 100644 index 000000000000..d14506f764e0 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h @@ -0,0 +1,1023 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2010 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#ifndef __QLCNIC_HDR_H_ +#define __QLCNIC_HDR_H_ + +#include <linux/kernel.h> +#include <linux/types.h> + +/* + * The basic unit of access when reading/writing control registers. + */ + +enum { + QLCNIC_HW_H0_CH_HUB_ADR = 0x05, + QLCNIC_HW_H1_CH_HUB_ADR = 0x0E, + QLCNIC_HW_H2_CH_HUB_ADR = 0x03, + QLCNIC_HW_H3_CH_HUB_ADR = 0x01, + QLCNIC_HW_H4_CH_HUB_ADR = 0x06, + QLCNIC_HW_H5_CH_HUB_ADR = 0x07, + QLCNIC_HW_H6_CH_HUB_ADR = 0x08 +}; + +/* Hub 0 */ +enum { + QLCNIC_HW_MN_CRB_AGT_ADR = 0x15, + QLCNIC_HW_MS_CRB_AGT_ADR = 0x25 +}; + +/* Hub 1 */ +enum { + QLCNIC_HW_PS_CRB_AGT_ADR = 0x73, + QLCNIC_HW_SS_CRB_AGT_ADR = 0x20, + QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b, + QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00, + QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01, + QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02, + QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03, + QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04, + QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58, + QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59, + QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a, + QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a, + QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c, + QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f, + QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12, + QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18 +}; + +/* Hub 2 */ +enum { + QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31, + QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19, + QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29, + + QLCNIC_HW_SN_CRB_AGT_ADR = 0x10, + QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20, + QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22, + QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21, + QLCNIC_HW_QM_CRB_AGT_ADR = 0x66, + QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60, + QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61, + QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62, + QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63, + QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09, + QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d, + QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e, + QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11 +}; + +/* Hub 3 */ +enum { + QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A, + QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50, + QLCNIC_HW_EG_CRB_AGT_ADR = 0x51, + QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08 +}; + +/* Hub 4 */ +enum { + QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40, + QLCNIC_HW_PEGN1_CRB_AGT_ADR, + QLCNIC_HW_PEGN2_CRB_AGT_ADR, + QLCNIC_HW_PEGN3_CRB_AGT_ADR, + QLCNIC_HW_PEGNI_CRB_AGT_ADR, + QLCNIC_HW_PEGND_CRB_AGT_ADR, + QLCNIC_HW_PEGNC_CRB_AGT_ADR, + QLCNIC_HW_PEGR0_CRB_AGT_ADR, + QLCNIC_HW_PEGR1_CRB_AGT_ADR, + QLCNIC_HW_PEGR2_CRB_AGT_ADR, + QLCNIC_HW_PEGR3_CRB_AGT_ADR, + QLCNIC_HW_PEGN4_CRB_AGT_ADR +}; + +/* Hub 5 */ +enum { + QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40, + QLCNIC_HW_PEGS1_CRB_AGT_ADR, + QLCNIC_HW_PEGS2_CRB_AGT_ADR, + QLCNIC_HW_PEGS3_CRB_AGT_ADR, + QLCNIC_HW_PEGSI_CRB_AGT_ADR, + QLCNIC_HW_PEGSD_CRB_AGT_ADR, + QLCNIC_HW_PEGSC_CRB_AGT_ADR +}; + +/* Hub 6 */ +enum { + QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46, + QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47, + QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48, + QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49, + QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16, + QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17, + QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05, + QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06, + QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07 +}; + +/* Floaters - non-existent modules */ +#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67 + +/* This field defines PCI/X adr [25:20] of agents on the CRB */ +enum { + QLCNIC_HW_PX_MAP_CRB_PH = 0, + QLCNIC_HW_PX_MAP_CRB_PS, + QLCNIC_HW_PX_MAP_CRB_MN, + QLCNIC_HW_PX_MAP_CRB_MS, + QLCNIC_HW_PX_MAP_CRB_PGR1, + QLCNIC_HW_PX_MAP_CRB_SRE, + QLCNIC_HW_PX_MAP_CRB_NIU, + QLCNIC_HW_PX_MAP_CRB_QMN, + QLCNIC_HW_PX_MAP_CRB_SQN0, + QLCNIC_HW_PX_MAP_CRB_SQN1, + QLCNIC_HW_PX_MAP_CRB_SQN2, + QLCNIC_HW_PX_MAP_CRB_SQN3, + QLCNIC_HW_PX_MAP_CRB_QMS, + QLCNIC_HW_PX_MAP_CRB_SQS0, + QLCNIC_HW_PX_MAP_CRB_SQS1, + QLCNIC_HW_PX_MAP_CRB_SQS2, + 
QLCNIC_HW_PX_MAP_CRB_SQS3, + QLCNIC_HW_PX_MAP_CRB_PGN0, + QLCNIC_HW_PX_MAP_CRB_PGN1, + QLCNIC_HW_PX_MAP_CRB_PGN2, + QLCNIC_HW_PX_MAP_CRB_PGN3, + QLCNIC_HW_PX_MAP_CRB_PGND, + QLCNIC_HW_PX_MAP_CRB_PGNI, + QLCNIC_HW_PX_MAP_CRB_PGS0, + QLCNIC_HW_PX_MAP_CRB_PGS1, + QLCNIC_HW_PX_MAP_CRB_PGS2, + QLCNIC_HW_PX_MAP_CRB_PGS3, + QLCNIC_HW_PX_MAP_CRB_PGSD, + QLCNIC_HW_PX_MAP_CRB_PGSI, + QLCNIC_HW_PX_MAP_CRB_SN, + QLCNIC_HW_PX_MAP_CRB_PGR2, + QLCNIC_HW_PX_MAP_CRB_EG, + QLCNIC_HW_PX_MAP_CRB_PH2, + QLCNIC_HW_PX_MAP_CRB_PS2, + QLCNIC_HW_PX_MAP_CRB_CAM, + QLCNIC_HW_PX_MAP_CRB_CAS0, + QLCNIC_HW_PX_MAP_CRB_CAS1, + QLCNIC_HW_PX_MAP_CRB_CAS2, + QLCNIC_HW_PX_MAP_CRB_C2C0, + QLCNIC_HW_PX_MAP_CRB_C2C1, + QLCNIC_HW_PX_MAP_CRB_TIMR, + QLCNIC_HW_PX_MAP_CRB_PGR3, + QLCNIC_HW_PX_MAP_CRB_RPMX1, + QLCNIC_HW_PX_MAP_CRB_RPMX2, + QLCNIC_HW_PX_MAP_CRB_RPMX3, + QLCNIC_HW_PX_MAP_CRB_RPMX4, + QLCNIC_HW_PX_MAP_CRB_RPMX5, + QLCNIC_HW_PX_MAP_CRB_RPMX6, + QLCNIC_HW_PX_MAP_CRB_RPMX7, + QLCNIC_HW_PX_MAP_CRB_XDMA, + QLCNIC_HW_PX_MAP_CRB_I2Q, + QLCNIC_HW_PX_MAP_CRB_ROMUSB, + QLCNIC_HW_PX_MAP_CRB_CAS3, + QLCNIC_HW_PX_MAP_CRB_RPMX0, + QLCNIC_HW_PX_MAP_CRB_RPMX8, + QLCNIC_HW_PX_MAP_CRB_RPMX9, + QLCNIC_HW_PX_MAP_CRB_OCM0, + QLCNIC_HW_PX_MAP_CRB_OCM1, + QLCNIC_HW_PX_MAP_CRB_SMB, + QLCNIC_HW_PX_MAP_CRB_I2C0, + QLCNIC_HW_PX_MAP_CRB_I2C1, + QLCNIC_HW_PX_MAP_CRB_LPC, + QLCNIC_HW_PX_MAP_CRB_PGNC, + QLCNIC_HW_PX_MAP_CRB_PGR0 +}; + +#define BIT_0 0x1 +#define BIT_1 0x2 +#define BIT_2 0x4 +#define BIT_3 0x8 +#define BIT_4 0x10 +#define BIT_5 0x20 +#define BIT_6 0x40 +#define BIT_7 0x80 +#define BIT_8 0x100 +#define BIT_9 0x200 +#define BIT_10 0x400 +#define BIT_11 0x800 +#define BIT_12 0x1000 +#define BIT_13 0x2000 +#define BIT_14 0x4000 +#define BIT_15 0x8000 +#define BIT_16 0x10000 +#define BIT_17 0x20000 +#define BIT_18 0x40000 +#define BIT_19 0x80000 +#define BIT_20 0x100000 +#define BIT_21 0x200000 +#define BIT_22 0x400000 +#define BIT_23 0x800000 +#define BIT_24 0x1000000 +#define BIT_25 0x2000000 +#define BIT_26 0x4000000 +#define BIT_27 0x8000000 +#define BIT_28 0x10000000 +#define BIT_29 0x20000000 +#define BIT_30 0x40000000 +#define BIT_31 0x80000000 + +/* This field defines CRB adr [31:20] of the agents */ + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \ + ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \ + ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \ + ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | 
QLCNIC_HW_C2C1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \ + ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \ + ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \ + ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR) +#define 
QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR) + +#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c) + +#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034) + +#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000) +#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000) + +#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) +#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) +#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c) +#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) +#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044) +#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) +#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8) + +#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) + +#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) +#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) +#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) +#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) +#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) +#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) + +/* Lock IDs for ROM lock */ +#define ROM_LOCK_DRIVER 0x0d417340 + +/****************************************************************************** +* +* Definitions specific to M25P flash +* +******************************************************************************* +*/ + +/* all are 1MB windows */ + +#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000 +#define QLCNIC_PCI_CRB_WINDOW(A) \ + (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE) + +#define QLCNIC_CRB_NIU 
QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU) +#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE) +#define QLCNIC_CRB_ROMUSB \ + QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB) +#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q) +#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0) +#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB) +#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64) + +#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH) +#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2) +#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0) +#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1) +#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2) +#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3) +#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2) +#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND) +#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI) +#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN) +#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN) + +#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS) +#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD + +#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR)) +#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS)) +#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK)) +#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1)) +#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1)) +#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2)) +#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) +#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) +#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) +#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4)) +#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4)) +#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5)) +#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5)) +#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6)) +#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6)) +#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) +#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) + +#define QLCNIC_PCI_MN_2M (0) +#define QLCNIC_PCI_MS_2M (0x80000) +#define QLCNIC_PCI_OCM0_2M (0x000c0000UL) +#define QLCNIC_PCI_CRBSPACE (0x06000000UL) +#define QLCNIC_PCI_CAMQM (0x04800000UL) +#define QLCNIC_PCI_CAMQM_END (0x04800800UL) +#define QLCNIC_PCI_2MB_SIZE (0x00200000UL) +#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL) + +#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM) + +#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL) +#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL) +#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL) +#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL) +#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL) +#define QLCNIC_ADDR_OCM1_MAX 
(0x00000002004fffffULL) +#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL) +#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL) + +/* + * Register offsets for MN + */ +#define QLCNIC_MIU_CONTROL (0x000) +#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL) + +/* 200ms delay in each loop */ +#define QLCNIC_NIU_PHY_WAITLEN 200000 +/* 10 seconds before we give up */ +#define QLCNIC_NIU_PHY_WAITMAX 50 +#define QLCNIC_NIU_MAX_GBE_PORTS 4 +#define QLCNIC_NIU_MAX_XG_PORTS 2 + +#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000) +#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c) +#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098) + +#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \ + (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000) +#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \ + (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000) + + +#define TEST_AGT_CTRL (0x00) + +#define TA_CTL_START BIT_0 +#define TA_CTL_ENABLE BIT_1 +#define TA_CTL_WRITE BIT_2 +#define TA_CTL_BUSY BIT_3 + +/* + * Register offsets for MN + */ +#define MIU_TEST_AGT_BASE (0x90) + +#define MIU_TEST_AGT_ADDR_LO (0x04) +#define MIU_TEST_AGT_ADDR_HI (0x08) +#define MIU_TEST_AGT_WRDATA_LO (0x10) +#define MIU_TEST_AGT_WRDATA_HI (0x14) +#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20) +#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24) +#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1))) +#define MIU_TEST_AGT_RDDATA_LO (0x18) +#define MIU_TEST_AGT_RDDATA_HI (0x1c) +#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28) +#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c) +#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1))) + +#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 +#define MIU_TEST_AGT_UPPER_ADDR(off) (0) + +/* + * Register offsets for MS + */ +#define SIU_TEST_AGT_BASE (0x60) + +#define SIU_TEST_AGT_ADDR_LO (0x04) +#define SIU_TEST_AGT_ADDR_HI (0x18) +#define SIU_TEST_AGT_WRDATA_LO (0x08) +#define SIU_TEST_AGT_WRDATA_HI (0x0c) +#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i))) +#define SIU_TEST_AGT_RDDATA_LO (0x10) +#define SIU_TEST_AGT_RDDATA_HI (0x14) +#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i))) + +#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 +#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) + +/* XG Link status */ +#define XG_LINK_UP 0x10 +#define XG_LINK_DOWN 0x20 + +#define XG_LINK_UP_P3P 0x01 +#define XG_LINK_DOWN_P3P 0x02 +#define XG_LINK_STATE_P3P_MASK 0xf +#define XG_LINK_STATE_P3P(pcifn, val) \ + (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK) + +#define P3P_LINK_SPEED_MHZ 100 +#define P3P_LINK_SPEED_MASK 0xff +#define P3P_LINK_SPEED_REG(pcifn) \ + (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) +#define P3P_LINK_SPEED_VAL(pcifn, reg) \ + (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK) + +#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000) +#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg)) +#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150)) +#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154)) +#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158)) +#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100)) +#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120)) +#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124)) + +#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200)) +#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700)) +#define QLCNIC_REG(X) (NIC_CRB_BASE+(X)) +#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X)) + +#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18)) +#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c)) +#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20)) +#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24)) +#define 
QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28)) + +#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50)) +#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c)) + +#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98)) +#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8)) +#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec)) + +#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4)) + +#define CRB_V2P_0 (QLCNIC_REG(0x290)) +#define CRB_V2P(port) (CRB_V2P_0+((port)*4)) +#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0)) + +#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128)) +#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0)) + +/* + * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address + * which can be read by the Phantom host to get producer/consumer indexes from + * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following + * registers will be used for the addresses of the ring's shared memory + * on the Phantom. + */ + +#define qlcnic_get_temp_val(x) ((x) >> 16) +#define qlcnic_get_temp_state(x) ((x) & 0xffff) +#define qlcnic_encode_temp(val, state) (((val) << 16) | (state)) + +/* + * Temperature control. + */ +enum { + QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */ + QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */ + QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */ +}; + +/* Lock IDs for PHY lock */ +#define PHY_LOCK_DRIVER 0x44524956 + +/* Used for PS PCI Memory access */ +#define PCIX_PS_OP_ADDR_LO (0x10000) +/* via CRB (PS side only) */ +#define PCIX_PS_OP_ADDR_HI (0x10004) + +#define PCIX_INT_VECTOR (0x10100) +#define PCIX_INT_MASK (0x10104) + +#define PCIX_OCM_WINDOW (0x10800) +#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func)) + +#define PCIX_TARGET_STATUS (0x10118) +#define PCIX_TARGET_STATUS_F1 (0x10160) +#define PCIX_TARGET_STATUS_F2 (0x10164) +#define PCIX_TARGET_STATUS_F3 (0x10168) +#define PCIX_TARGET_STATUS_F4 (0x10360) +#define PCIX_TARGET_STATUS_F5 (0x10364) +#define PCIX_TARGET_STATUS_F6 (0x10368) +#define PCIX_TARGET_STATUS_F7 (0x1036c) + +#define PCIX_TARGET_MASK (0x10128) +#define PCIX_TARGET_MASK_F1 (0x10170) +#define PCIX_TARGET_MASK_F2 (0x10174) +#define PCIX_TARGET_MASK_F3 (0x10178) +#define PCIX_TARGET_MASK_F4 (0x10370) +#define PCIX_TARGET_MASK_F5 (0x10374) +#define PCIX_TARGET_MASK_F6 (0x10378) +#define PCIX_TARGET_MASK_F7 (0x1037c) + +#define PCIX_MSI_F(i) (0x13000+((i)*4)) + +#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg)) +#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg)) +#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg)) + +#define PCIE_SEM0_LOCK (0x1c000) +#define PCIE_SEM0_UNLOCK (0x1c004) +#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N)) +#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N)) + +#define PCIE_SETUP_FUNCTION (0x12040) +#define PCIE_SETUP_FUNCTION2 (0x12048) +#define PCIE_MISCCFG_RC (0x1206c) +#define PCIE_TGT_SPLIT_CHICKEN (0x12080) +#define PCIE_CHICKEN3 (0x120c8) + +#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC)) +#define PCIE_MAX_MASTER_SPLIT (0x14048) + +#define QLCNIC_PORT_MODE_NONE 0 +#define QLCNIC_PORT_MODE_XG 1 +#define QLCNIC_PORT_MODE_GB 2 +#define QLCNIC_PORT_MODE_802_3_AP 3 +#define QLCNIC_PORT_MODE_AUTO_NEG 4 +#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5 +#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6 +#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24)) +#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198)) + +#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184)) +#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188)) + +#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1 +#define 
QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c)) + +#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14)) +#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0)) +#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8)) +#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac)) +#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138)) +#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140)) + +#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) +#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) +#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) +#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174)) +#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c)) +#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) +#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) + +/* Device State */ +#define QLCNIC_DEV_COLD 0x1 +#define QLCNIC_DEV_INITIALIZING 0x2 +#define QLCNIC_DEV_READY 0x3 +#define QLCNIC_DEV_NEED_RESET 0x4 +#define QLCNIC_DEV_NEED_QUISCENT 0x5 +#define QLCNIC_DEV_FAILED 0x6 +#define QLCNIC_DEV_QUISCENT 0x7 + +#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */ +#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ +#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ + +#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4))) +#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) +#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) +#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) +#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4))) +#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4))) + +#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4))) +#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4)) + +#define QLCNIC_TYPE_NIC 1 +#define QLCNIC_TYPE_FCOE 2 +#define QLCNIC_TYPE_ISCSI 3 + +#define QLCNIC_RCODE_DRIVER_INFO 0x20000000 +#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30 +#define QLCNIC_RCODE_FATAL_ERROR BIT_31 +#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff) +#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff) + +#define FW_POLL_DELAY (1 * HZ) +#define FW_FAIL_THRESH 2 + +#define QLCNIC_RESET_TIMEOUT_SECS 10 +#define QLCNIC_INIT_TIMEOUT_SECS 30 +#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000 +#define QLCNIC_RCVPEG_CHECK_DELAY 10 +#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60 +#define QLCNIC_CMDPEG_CHECK_DELAY 500 +#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200 +#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45 + +#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) +#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) + +/* + * PCI Interrupt Vector Values. 
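+ * + * One bit per PCI function: a set bit in the ISR_INT_VECTOR register means that function has a legacy interrupt pending, so a handler should test its own function's bit before claiming the interrupt. A minimal sketch (isr_vec is a placeholder for the ioremapped ISR_INT_VECTOR register, not a name defined in this patch): + * + *	if (!(readl(isr_vec) & PCIX_INT_VECTOR_BIT_F2)) + *		return IRQ_NONE;	(not function 2's interrupt)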
+ */ +#define PCIX_INT_VECTOR_BIT_F0 0x0080 +#define PCIX_INT_VECTOR_BIT_F1 0x0100 +#define PCIX_INT_VECTOR_BIT_F2 0x0200 +#define PCIX_INT_VECTOR_BIT_F3 0x0400 +#define PCIX_INT_VECTOR_BIT_F4 0x0800 +#define PCIX_INT_VECTOR_BIT_F5 0x1000 +#define PCIX_INT_VECTOR_BIT_F6 0x2000 +#define PCIX_INT_VECTOR_BIT_F7 0x4000 + +struct qlcnic_legacy_intr_set { + u32 int_vec_bit; + u32 tgt_status_reg; + u32 tgt_mask_reg; + u32 pci_int_reg; +}; + +#define QLCNIC_FW_API 0x1b216c +#define QLCNIC_DRV_OP_MODE 0x1b2170 +#define QLCNIC_MSIX_BASE 0x132110 +#define QLCNIC_MAX_PCI_FUNC 8 +#define QLCNIC_MAX_VLAN_FILTERS 64 + +/* FW dump defines */ +#define MIU_TEST_CTR 0x41000090 +#define MIU_TEST_ADDR_LO 0x41000094 +#define MIU_TEST_ADDR_HI 0x41000098 +#define FLASH_ROM_WINDOW 0x42110030 +#define FLASH_ROM_DATA 0x42150000 + +static const u32 MIU_TEST_READ_DATA[] = { + 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, }; + +#define QLCNIC_FW_DUMP_REG1 0x00130060 +#define QLCNIC_FW_DUMP_REG2 0x001e0000 +#define QLCNIC_FLASH_SEM2_LK 0x0013C010 +#define QLCNIC_FLASH_SEM2_ULK 0x0013C014 +#define QLCNIC_FLASH_LOCK_ID 0x001B2100 + +#define QLCNIC_RD_DUMP_REG(addr, bar0, data) do { \ + writel((addr & 0xFFFF0000), (void *) (bar0 + \ + QLCNIC_FW_DUMP_REG1)); \ + readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \ + *data = readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + \ + LSW(addr))); \ +} while (0) + +#define QLCNIC_WR_DUMP_REG(addr, bar0, data) do { \ + writel((addr & 0xFFFF0000), (void *) (bar0 + \ + QLCNIC_FW_DUMP_REG1)); \ + readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \ + writel(data, (void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr)));\ + readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr))); \ +} while (0) + +/* PCI function operational mode */ +enum { + QLCNIC_MGMT_FUNC = 0, + QLCNIC_PRIV_FUNC = 1, + QLCNIC_NON_PRIV_FUNC = 2 +}; + +enum { + QLCNIC_PORT_DEFAULTS = 0, + QLCNIC_ADD_VLAN = 1, + QLCNIC_DEL_VLAN = 2 +}; + +#define QLC_DEV_DRV_DEFAULT 0x11111111 + +#define LSB(x) ((uint8_t)(x)) +#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8)) + +#define LSW(x) ((uint16_t)((uint32_t)(x))) +#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16)) + +#define LSD(x) ((uint32_t)((uint64_t)(x))) +#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) + +#define QLCNIC_LEGACY_INTR_CONFIG \ +{ \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ + .tgt_mask_reg = 
ISR_INT_TARGET_MASK_F6, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \ +} + +/* NIU REGS */ + +#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1) + +/* + * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3) + * + * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable + * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream + * Bit 2 : enable_rx => 1:enable frame recv, 0:disable + * Bit 3 : rx_synced => R/O: recv enable synched to recv stream + * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable + * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore + * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal + * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op + * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op + * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op + * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op + * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op + */ +#define qlcnic_gb_rx_flowctl(config_word) \ + ((config_word) |= 1 << 5) +#define qlcnic_gb_get_rx_flowctl(config_word) \ + _qlcnic_crb_get_bit((config_word), 5) +#define qlcnic_gb_unset_rx_flowctl(config_word) \ + ((config_word) &= ~(1 << 5)) + +/* + * NIU GB Pause Ctl Register + */ + +#define qlcnic_gb_set_gb0_mask(config_word) \ + ((config_word) |= 1 << 0) +#define qlcnic_gb_set_gb1_mask(config_word) \ + ((config_word) |= 1 << 2) +#define qlcnic_gb_set_gb2_mask(config_word) \ + ((config_word) |= 1 << 4) +#define qlcnic_gb_set_gb3_mask(config_word) \ + ((config_word) |= 1 << 6) + +#define qlcnic_gb_get_gb0_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 0) +#define qlcnic_gb_get_gb1_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 2) +#define qlcnic_gb_get_gb2_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 4) +#define qlcnic_gb_get_gb3_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 6) + +#define qlcnic_gb_unset_gb0_mask(config_word) \ + ((config_word) &= ~(1 << 0)) +#define qlcnic_gb_unset_gb1_mask(config_word) \ + ((config_word) &= ~(1 << 2)) +#define qlcnic_gb_unset_gb2_mask(config_word) \ + ((config_word) &= ~(1 << 4)) +#define qlcnic_gb_unset_gb3_mask(config_word) \ + ((config_word) &= ~(1 << 6)) + +/* + * NIU XG Pause Ctl Register + * + * Bit 0 : xg0_mask => 1:disable tx pause frames + * Bit 1 : xg0_request => 1:request single pause frame + * Bit 2 : xg0_on_off => 1:request is pause on, 0:off + * Bit 3 : xg1_mask => 1:disable tx pause frames + * Bit 4 : xg1_request => 1:request single pause frame + * Bit 5 : xg1_on_off => 1:request is pause on, 0:off + */ + +#define qlcnic_xg_set_xg0_mask(config_word) \ + ((config_word) |= 1 << 0) +#define qlcnic_xg_set_xg1_mask(config_word) \ + ((config_word) |= 1 << 3) + +#define qlcnic_xg_get_xg0_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 0) +#define qlcnic_xg_get_xg1_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 3) + +#define qlcnic_xg_unset_xg0_mask(config_word) \ + ((config_word) &= ~(1 << 0)) +#define qlcnic_xg_unset_xg1_mask(config_word) \ + ((config_word) &= ~(1 << 3)) + +/* + * NIU XG Pause Ctl Register + * + * Bit 0 : xg0_mask => 1:disable tx pause frames + * Bit 1 : xg0_request => 1:request single pause frame + * Bit 2 : xg0_on_off => 1:request is pause on, 0:off + * Bit 3 : xg1_mask => 
1:disable tx pause frames + * Bit 4 : xg1_request => 1:request single pause frame + * Bit 5 : xg1_on_off => 1:request is pause on, 0:off + */ + +/* + * PHY-Specific MII control/status registers. + */ +#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4 +#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17 + +/* + * PHY-Specific Status Register (reg 17). + * + * Bit 0 : jabber => 1:jabber detected, 0:not + * Bit 1 : polarity => 1:polarity reversed, 0:normal + * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled + * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled + * Bit 4 : energydetect => 1:sleep, 0:active + * Bit 5 : downshift => 1:downshift, 0:no downshift + * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover) + * Bits 7-9 : cablelen => not valid in 10Mb/s mode + * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m + * Bit 10 : link => 1:link up, 0:link down + * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet + * Bit 12 : pagercvd => 1:page received, 0:page not received + * Bit 13 : duplex => 1:full duplex, 0:half duplex + * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd + */ + +#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03) + +#define qlcnic_set_phy_speed(config_word, val) \ + ((config_word) |= ((val & 0x03) << 14)) +#define qlcnic_set_phy_duplex(config_word) \ + ((config_word) |= 1 << 13) +#define qlcnic_clear_phy_duplex(config_word) \ + ((config_word) &= ~(1 << 13)) + +#define qlcnic_get_phy_link(config_word) \ + _qlcnic_crb_get_bit(config_word, 10) +#define qlcnic_get_phy_duplex(config_word) \ + _qlcnic_crb_get_bit(config_word, 13) + +#define QLCNIC_NIU_NON_PROMISC_MODE 0 +#define QLCNIC_NIU_PROMISC_MODE 1 +#define QLCNIC_NIU_ALLMULTI_MODE 2 + +struct crb_128M_2M_sub_block_map { + unsigned valid; + unsigned start_128M; + unsigned end_128M; + unsigned start_2M; +}; + +struct crb_128M_2M_block_map{ + struct crb_128M_2M_sub_block_map sub_block[16]; +}; +#endif /* __QLCNIC_HDR_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c new file mode 100644 index 000000000000..74e9d7b94965 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -0,0 +1,1787 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2010 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include "qlcnic.h" + +#include <linux/slab.h> +#include <net/ip.h> +#include <linux/bitops.h> + +#define MASK(n) ((1ULL<<(n))-1) +#define OCM_WIN_P3P(addr) (addr & 0xffc0000) + +#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) + +#define CRB_BLK(off) ((off >> 20) & 0x3f) +#define CRB_SUBBLK(off) ((off >> 16) & 0xf) +#define CRB_WINDOW_2M (0x130060) +#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) +#define CRB_INDIRECT_2M (0x1e0000UL) + + +#ifndef readq +static inline u64 readq(void __iomem *addr) +{ + return readl(addr) | (((u64) readl(addr + 4)) << 32LL); +} +#endif + +#ifndef writeq +static inline void writeq(u64 val, void __iomem *addr) +{ + writel(((u32) (val)), (addr)); + writel(((u32) (val >> 32)), (addr + 4)); +} +#endif + +static const struct crb_128M_2M_block_map +crb_128M_2M_map[64] __cacheline_aligned_in_smp = { + {{{0, 0, 0, 0} } }, /* 0: PCI */ + {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ + {1, 0x0110000, 0x0120000, 0x130000}, + {1, 0x0120000, 0x0122000, 0x124000}, + {1, 0x0130000, 0x0132000, 0x126000}, + {1, 0x0140000, 0x0142000, 0x128000}, + {1, 0x0150000, 0x0152000, 0x12a000}, + {1, 0x0160000, 0x0170000, 0x110000}, + {1, 0x0170000, 0x0172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x01e0000, 0x01e0800, 0x122000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ + {{{0, 0, 0, 0} } }, /* 3: */ + {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ + {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ + {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ + {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ + {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x08f0000, 0x08f2000, 0x172000} } }, + {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x09f0000, 0x09f2000, 0x176000} } }, + {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0af0000, 0x0af2000, 0x17a000} } }, + {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, + {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ + {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ + {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ + {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ + {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ + {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ + {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ + {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ + {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ + {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ + {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ + {{{0, 0, 0, 0} } }, /* 23: */ + {{{0, 0, 0, 0} } }, /* 24: */ + {{{0, 0, 0, 0} } }, /* 25: */ + {{{0, 0, 0, 0} } }, /* 26: */ + {{{0, 0, 0, 0} } }, /* 27: */ + {{{0, 0, 0, 0} } }, /* 28: */ + {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ + {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ + {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ + {{{0} } }, /* 32: PCI */ + {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ + {1, 0x2110000, 0x2120000, 0x130000}, + {1, 0x2120000, 0x2122000, 0x124000}, + {1, 0x2130000, 0x2132000, 0x126000}, + {1, 0x2140000, 0x2142000, 0x128000}, + {1, 0x2150000, 0x2152000, 0x12a000}, + {1, 0x2160000, 0x2170000, 0x110000}, + {1, 0x2170000, 0x2172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ + {{{0} } }, /* 35: */ + {{{0} } }, /* 36: */ + {{{0} } }, /* 37: */ + {{{0} } }, /* 38: */ + {{{0} } }, /* 39: */ + {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ + {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ + {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ + {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ + {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ + {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ + {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ + {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ + {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ + {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ + {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ + {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ + {{{0} } }, /* 52: */ + {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ + {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: 
RPMX8 */ + {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ + {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ + {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ + {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ + {{{0} } }, /* 59: I2C0 */ + {{{0} } }, /* 60: I2C1 */ + {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ + {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ + {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ +}; + +/* + * top 12 bits of crb internal address (hub, agent) + */ +static const unsigned crb_hub_agt[64] = { + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PS, + QLCNIC_HW_CRB_HUB_AGT_ADR_MN, + QLCNIC_HW_CRB_HUB_AGT_ADR_MS, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_SRE, + QLCNIC_HW_CRB_HUB_AGT_ADR_NIU, + QLCNIC_HW_CRB_HUB_AGT_ADR_QMN, + QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0, + QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1, + QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2, + QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3, + QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, + QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, + QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4, + QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGND, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI, + QLCNIC_HW_CRB_HUB_AGT_ADR_SN, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_EG, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PS, + QLCNIC_HW_CRB_HUB_AGT_ADR_CAM, + 0, + 0, + 0, + 0, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7, + QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, + QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, + QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9, + QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_SMB, + QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0, + QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC, + 0, +}; + +/* PCI Windowing for DDR regions. */ + +#define QLCNIC_PCIE_SEM_TIMEOUT 10000 + +int +qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) +{ + int done = 0, timeout = 0; + + while (!done) { + done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem))); + if (done == 1) + break; + if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { + dev_err(&adapter->pdev->dev, + "Failed to acquire sem=%d lock; holdby=%d\n", + sem, id_reg ? 
QLCRD32(adapter, id_reg) : -1); + return -EIO; + } + msleep(1); + } + + if (id_reg) + QLCWR32(adapter, id_reg, adapter->portnum); + + return 0; +} + +void +qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem) +{ + QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem))); +} + +static int +qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, + struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) +{ + u32 i, producer, consumer; + struct qlcnic_cmd_buffer *pbuf; + struct cmd_desc_type0 *cmd_desc; + struct qlcnic_host_tx_ring *tx_ring; + + i = 0; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return -EIO; + + tx_ring = adapter->tx_ring; + __netif_tx_lock_bh(tx_ring->txq); + + producer = tx_ring->producer; + consumer = tx_ring->sw_consumer; + + if (nr_desc >= qlcnic_tx_avail(tx_ring)) { + netif_tx_stop_queue(tx_ring->txq); + smp_mb(); + if (qlcnic_tx_avail(tx_ring) > nr_desc) { + if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_tx_wake_queue(tx_ring->txq); + } else { + adapter->stats.xmit_off++; + __netif_tx_unlock_bh(tx_ring->txq); + return -EBUSY; + } + } + + do { + cmd_desc = &cmd_desc_arr[i]; + + pbuf = &tx_ring->cmd_buf_arr[producer]; + pbuf->skb = NULL; + pbuf->frag_count = 0; + + memcpy(&tx_ring->desc_head[producer], + &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); + + producer = get_next_index(producer, tx_ring->num_desc); + i++; + + } while (i != nr_desc); + + tx_ring->producer = producer; + + qlcnic_update_cmd_producer(adapter, tx_ring); + + __netif_tx_unlock_bh(tx_ring->txq); + + return 0; +} + +static int +qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, + __le16 vlan_id, unsigned op) +{ + struct qlcnic_nic_req req; + struct qlcnic_mac_req *mac_req; + struct qlcnic_vlan_req *vlan_req; + u64 word; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); + + word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + mac_req = (struct qlcnic_mac_req *)&req.words[0]; + mac_req->op = op; + memcpy(mac_req->mac_addr, addr, 6); + + vlan_req = (struct qlcnic_vlan_req *)&req.words[1]; + vlan_req->vlan_id = vlan_id; + + return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); +} + +static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr) +{ + struct list_head *head; + struct qlcnic_mac_list_s *cur; + + /* look up if already exists */ + list_for_each(head, &adapter->mac_list) { + cur = list_entry(head, struct qlcnic_mac_list_s, list); + if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) + return 0; + } + + cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC); + if (cur == NULL) { + dev_err(&adapter->netdev->dev, + "failed to add mac address filter\n"); + return -ENOMEM; + } + memcpy(cur->mac_addr, addr, ETH_ALEN); + + if (qlcnic_sre_macaddr_change(adapter, + cur->mac_addr, 0, QLCNIC_MAC_ADD)) { + kfree(cur); + return -EIO; + } + + list_add_tail(&cur->list, &adapter->mac_list); + return 0; +} + +void qlcnic_set_multi(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct netdev_hw_addr *ha; + static const u8 bcast_addr[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + u32 mode = VPORT_MISS_MODE_DROP; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return; + + qlcnic_nic_add_mac(adapter, adapter->mac_addr); + qlcnic_nic_add_mac(adapter, bcast_addr); + + if (netdev->flags & IFF_PROMISC) { + if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) + mode = 
VPORT_MISS_MODE_ACCEPT_ALL; + goto send_fw_cmd; + } + + if ((netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > adapter->max_mc_count)) { + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + goto send_fw_cmd; + } + + if (!netdev_mc_empty(netdev)) { + netdev_for_each_mc_addr(ha, netdev) { + qlcnic_nic_add_mac(adapter, ha->addr); + } + } + +send_fw_cmd: + if (mode == VPORT_MISS_MODE_ACCEPT_ALL) { + qlcnic_alloc_lb_filters_mem(adapter); + adapter->mac_learn = 1; + } else { + adapter->mac_learn = 0; + } + + qlcnic_nic_set_promisc(adapter, mode); +} + +int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) +{ + struct qlcnic_nic_req req; + u64 word; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE | + ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(mode); + + return qlcnic_send_cmd_descs(adapter, + (struct cmd_desc_type0 *)&req, 1); +} + +void qlcnic_free_mac_list(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mac_list_s *cur; + struct list_head *head = &adapter->mac_list; + + while (!list_empty(head)) { + cur = list_entry(head->next, struct qlcnic_mac_list_s, list); + qlcnic_sre_macaddr_change(adapter, + cur->mac_addr, 0, QLCNIC_MAC_DEL); + list_del(&cur->list); + kfree(cur); + } +} + +void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) +{ + struct qlcnic_filter *tmp_fil; + struct hlist_node *tmp_hnode, *n; + struct hlist_head *head; + int i; + + for (i = 0; i < adapter->fhash.fmax; i++) { + head = &(adapter->fhash.fhead[i]); + + hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) + { + if (jiffies > + (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) { + qlcnic_sre_macaddr_change(adapter, + tmp_fil->faddr, tmp_fil->vlan_id, + tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : + QLCNIC_MAC_DEL); + spin_lock_bh(&adapter->mac_learn_lock); + adapter->fhash.fnum--; + hlist_del(&tmp_fil->fnode); + spin_unlock_bh(&adapter->mac_learn_lock); + kfree(tmp_fil); + } + } + } +} + +void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter) +{ + struct qlcnic_filter *tmp_fil; + struct hlist_node *tmp_hnode, *n; + struct hlist_head *head; + int i; + + for (i = 0; i < adapter->fhash.fmax; i++) { + head = &(adapter->fhash.fhead[i]); + + hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { + qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr, + tmp_fil->vlan_id, tmp_fil->vlan_id ? + QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL); + spin_lock_bh(&adapter->mac_learn_lock); + adapter->fhash.fnum--; + hlist_del(&tmp_fil->fnode); + spin_unlock_bh(&adapter->mac_learn_lock); + kfree(tmp_fil); + } + } +} + +int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag) +{ + struct qlcnic_nic_req req; + int rv; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK | + ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32)); + + req.words[0] = cpu_to_le64(flag); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n", + flag ? 
"Set" : "Reset"); + return rv; +} + +int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +{ + if (qlcnic_set_fw_loopback(adapter, mode)) + return -EIO; + + if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) { + qlcnic_set_fw_loopback(adapter, mode); + return -EIO; + } + + msleep(1000); + return 0; +} + +void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter) +{ + int mode = VPORT_MISS_MODE_DROP; + struct net_device *netdev = adapter->netdev; + + qlcnic_set_fw_loopback(adapter, 0); + + if (netdev->flags & IFF_PROMISC) + mode = VPORT_MISS_MODE_ACCEPT_ALL; + else if (netdev->flags & IFF_ALLMULTI) + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + + qlcnic_nic_set_promisc(adapter, mode); + msleep(1000); +} + +/* + * Send the interrupt coalescing parameter set by ethtool to the card. + */ +int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter) +{ + struct qlcnic_nic_req req; + int rv; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE | + ((u64) adapter->portnum << 16)); + + req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32); + req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets | + ((u64) adapter->ahw->coal.rx_time_us) << 16); + req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out | + ((u64) adapter->ahw->coal.type) << 32 | + ((u64) adapter->ahw->coal.sts_ring_mask) << 40); + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "Could not send interrupt coalescing parameters\n"); + return rv; +} + +int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable) +{ + struct qlcnic_nic_req req; + u64 word; + int rv; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return 0; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(enable); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "Could not send configure hw lro request\n"); + + return rv; +} + +int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) +{ + struct qlcnic_nic_req req; + u64 word; + int rv; + + if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable) + return 0; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING | + ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(enable); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "Could not send configure bridge mode request\n"); + + adapter->flags ^= QLCNIC_BRIDGE_ENABLED; + + return rv; +} + + +#define RSS_HASHTYPE_IP_TCP 0x3 + +int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable) +{ + struct qlcnic_nic_req req; + u64 word; + int i, rv; + + static const u64 key[] = { + 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, + 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, + 0x255b0ec26d5a56daULL + }; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + /* + * RSS request: + 
* bits 3-0: hash_method + * 5-4: hash_type_ipv4 + * 7-6: hash_type_ipv6 + * 8: enable + * 9: use indirection table + * 47-10: reserved + * 63-48: indirection table mask + */ + word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | + ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | + ((u64)(enable & 0x1) << 8) | + ((0x7ULL) << 48); + req.words[0] = cpu_to_le64(word); + for (i = 0; i < 5; i++) + req.words[i+1] = cpu_to_le64(key[i]); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, "could not configure RSS\n"); + + return rv; +} + +int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd) +{ + struct qlcnic_nic_req req; + struct qlcnic_ipaddr *ipa; + u64 word; + int rv; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(cmd); + ipa = (struct qlcnic_ipaddr *)&req.words[1]; + ipa->ipv4 = ip; + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "could not notify %s IP 0x%x request\n", + (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip); + + return rv; +} + +int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable) +{ + struct qlcnic_nic_req req; + u64 word; + int rv; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + req.words[0] = cpu_to_le64(enable | (enable << 8)); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "could not configure link notification\n"); + + return rv; +} + +int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter) +{ + struct qlcnic_nic_req req; + u64 word; + int rv; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return 0; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_LRO_REQUEST | + ((u64)adapter->portnum << 16) | + ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56); + + req.req_hdr = cpu_to_le64(word); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "could not cleanup lro flows\n"); + + return rv; +} + +/* + * qlcnic_change_mtu - Change the Maximum Transmission Unit + * @returns 0 on success, negative on failure + */ + +int qlcnic_change_mtu(struct net_device *netdev, int mtu) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int rc = 0; + + if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) { + dev_err(&adapter->netdev->dev, "mtu outside %d..%d bytes" + " is not supported\n", P3P_MIN_MTU, P3P_MAX_MTU); + return -EINVAL; + } + + rc = qlcnic_fw_cmd_set_mtu(adapter, mtu); + + if (!rc) + netdev->mtu = mtu; + + return rc; +} + + +u32 qlcnic_fix_features(struct net_device *netdev, u32 features) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + u32 changed = features ^ netdev->features; + features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + } + + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + return features; +} + + +int qlcnic_set_features(struct net_device *netdev, u32 features) +{ + struct qlcnic_adapter *adapter = 
netdev_priv(netdev); + u32 changed = netdev->features ^ features; + int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0; + + if (!(changed & NETIF_F_LRO)) + return 0; + + netdev->features = features ^ NETIF_F_LRO; + + if (qlcnic_config_hw_lro(adapter, hw_lro)) + return -EIO; + + if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter)) + return -EIO; + + return 0; +} + +/* + * Changes the CRB window to the specified window. + */ + /* Returns < 0 if off is not valid, + * 1 if window access is needed. 'off' is set to offset from + * CRB space in 128M pci map + * 0 if no window access is needed. 'off' is set to 2M addr + * In: 'off' is offset from base in 128M pci map + */ +static int +qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter, + ulong off, void __iomem **addr) +{ + const struct crb_128M_2M_sub_block_map *m; + + if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE)) + return -EINVAL; + + off -= QLCNIC_PCI_CRBSPACE; + + /* + * Try direct map + */ + m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; + + if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { + *addr = adapter->ahw->pci_base0 + m->start_2M + + (off - m->start_128M); + return 0; + } + + /* + * Not in direct map, use crb window + */ + *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16)); + return 1; +} + +/* + * In: 'off' is offset from CRB space in 128M pci map + * Out: 'off' is 2M pci map addr + * side effect: lock crb window + */ +static int +qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) +{ + u32 window; + void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M; + + off -= QLCNIC_PCI_CRBSPACE; + + window = CRB_HI(off); + if (window == 0) { + dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off); + return -EIO; + } + + writel(window, addr); + if (readl(addr) != window) { + if (printk_ratelimit()) + dev_warn(&adapter->pdev->dev, + "failed to set CRB window to %d off 0x%lx\n", + window, off); + return -EIO; + } + return 0; +} + +int +qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data) +{ + unsigned long flags; + int rv; + void __iomem *addr = NULL; + + rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); + + if (rv == 0) { + writel(data, addr); + return 0; + } + + if (rv > 0) { + /* indirect access */ + write_lock_irqsave(&adapter->ahw->crb_lock, flags); + crb_win_lock(adapter); + rv = qlcnic_pci_set_crbwindow_2M(adapter, off); + if (!rv) + writel(data, addr); + crb_win_unlock(adapter); + write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); + return rv; + } + + dev_err(&adapter->pdev->dev, + "%s: invalid offset: 0x%016lx\n", __func__, off); + dump_stack(); + return -EIO; +} + +u32 +qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off) +{ + unsigned long flags; + int rv; + u32 data = -1; + void __iomem *addr = NULL; + + rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); + + if (rv == 0) + return readl(addr); + + if (rv > 0) { + /* indirect access */ + write_lock_irqsave(&adapter->ahw->crb_lock, flags); + crb_win_lock(adapter); + if (!qlcnic_pci_set_crbwindow_2M(adapter, off)) + data = readl(addr); + crb_win_unlock(adapter); + write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); + return data; + } + + dev_err(&adapter->pdev->dev, + "%s: invalid offset: 0x%016lx\n", __func__, off); + dump_stack(); + return -1; +} + + +void __iomem * +qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset) +{ + void __iomem *addr = NULL; + + WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr)); 
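+ /* Only offsets covered by the direct 128M-to-2M map translate here; a nonzero return from qlcnic_pci_get_crb_addr_2M (CRB window access needed, or an out-of-range offset) trips the WARN_ON above, and such offsets must instead go through qlcnic_hw_read_wx_2M()/qlcnic_hw_write_wx_2M(), which take the CRB window lock. */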
+ + return addr; +} + + +static int +qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter, + u64 addr, u32 *start) +{ + u32 window; + + window = OCM_WIN_P3P(addr); + + writel(window, adapter->ahw->ocm_win_crb); + /* read back to flush */ + readl(adapter->ahw->ocm_win_crb); + + *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); + return 0; +} + +static int +qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off, + u64 *data, int op) +{ + void __iomem *addr; + int ret; + u32 start; + + mutex_lock(&adapter->ahw->mem_lock); + + ret = qlcnic_pci_set_window_2M(adapter, off, &start); + if (ret != 0) + goto unlock; + + addr = adapter->ahw->pci_base0 + start; + + if (op == 0) /* read */ + *data = readq(addr); + else /* write */ + writeq(*data, addr); + +unlock: + mutex_unlock(&adapter->ahw->mem_lock); + + return ret; +} + +void +qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) +{ + void __iomem *addr = adapter->ahw->pci_base0 + + QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); + + mutex_lock(&adapter->ahw->mem_lock); + *data = readq(addr); + mutex_unlock(&adapter->ahw->mem_lock); +} + +void +qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) +{ + void __iomem *addr = adapter->ahw->pci_base0 + + QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); + + mutex_lock(&adapter->ahw->mem_lock); + writeq(data, addr); + mutex_unlock(&adapter->ahw->mem_lock); +} + +#define MAX_CTL_CHECK 1000 + +int +qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, + u64 off, u64 data) +{ + int i, j, ret; + u32 temp, off8; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P3 onward, test agent base for MIU and SIU is same */ + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, + QLCNIC_ADDR_QDR_NET_MAX)) { + mem_crb = qlcnic_get_ioaddr(adapter, + QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) { + mem_crb = qlcnic_get_ioaddr(adapter, + QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) + return qlcnic_pci_mem_access_direct(adapter, off, &data, 1); + + return -EIO; + +correct: + off8 = off & ~0xf; + + mutex_lock(&adapter->ahw->mem_lock); + + writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); + + i = 0; + writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE), + (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + ret = -EIO; + goto done; + } + + i = (off & 0xf) ? 0 : 2; + writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)), + mem_crb + MIU_TEST_AGT_WRDATA(i)); + writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)), + mem_crb + MIU_TEST_AGT_WRDATA(i+1)); + i = (off & 0xf) ? 
2 : 0; + + writel(data & 0xffffffff, + mem_crb + MIU_TEST_AGT_WRDATA(i)); + writel((data >> 32) & 0xffffffff, + mem_crb + MIU_TEST_AGT_WRDATA(i+1)); + + writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), + (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to write through agent\n"); + ret = -EIO; + } else + ret = 0; + +done: + mutex_unlock(&adapter->ahw->mem_lock); + + return ret; +} + +int +qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, + u64 off, u64 *data) +{ + int j, ret; + u32 temp, off8; + u64 val; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P3 onward, test agent base for MIU and SIU is same */ + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, + QLCNIC_ADDR_QDR_NET_MAX)) { + mem_crb = qlcnic_get_ioaddr(adapter, + QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) { + mem_crb = qlcnic_get_ioaddr(adapter, + QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) { + return qlcnic_pci_mem_access_direct(adapter, + off, data, 0); + } + + return -EIO; + +correct: + off8 = off & ~0xf; + + mutex_lock(&adapter->ahw->mem_lock); + + writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); + writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to read through agent\n"); + ret = -EIO; + } else { + off8 = MIU_TEST_AGT_RDDATA_LO; + if (off & 0xf) + off8 = MIU_TEST_AGT_RDDATA_UPPER_LO; + + temp = readl(mem_crb + off8 + 4); + val = (u64)temp << 32; + val |= readl(mem_crb + off8); + *data = val; + ret = 0; + } + + mutex_unlock(&adapter->ahw->mem_lock); + + return ret; +} + +int qlcnic_get_board_info(struct qlcnic_adapter *adapter) +{ + int offset, board_type, magic; + struct pci_dev *pdev = adapter->pdev; + + offset = QLCNIC_FW_MAGIC_OFFSET; + if (qlcnic_rom_fast_read(adapter, offset, &magic)) + return -EIO; + + if (magic != QLCNIC_BDINFO_MAGIC) { + dev_err(&pdev->dev, "invalid board config, magic=%08x\n", + magic); + return -EIO; + } + + offset = QLCNIC_BRDTYPE_OFFSET; + if (qlcnic_rom_fast_read(adapter, offset, &board_type)) + return -EIO; + + adapter->ahw->board_type = board_type; + + if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) { + u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I); + if ((gpio & 0x8000) == 0) + board_type = QLCNIC_BRDTYPE_P3P_10G_TP; + } + + switch (board_type) { + case QLCNIC_BRDTYPE_P3P_HMEZ: + case QLCNIC_BRDTYPE_P3P_XG_LOM: + case QLCNIC_BRDTYPE_P3P_10G_CX4: + case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: + case QLCNIC_BRDTYPE_P3P_IMEZ: + case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: + case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: + case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: + case QLCNIC_BRDTYPE_P3P_10G_XFP: + case QLCNIC_BRDTYPE_P3P_10000_BASE_T: + adapter->ahw->port_type = QLCNIC_XGBE; + break; + case QLCNIC_BRDTYPE_P3P_REF_QG: + case QLCNIC_BRDTYPE_P3P_4_GB: + case QLCNIC_BRDTYPE_P3P_4_GB_MM: + 
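+		/* quad-port gigabit boards */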
adapter->ahw->port_type = QLCNIC_GBE; + break; + case QLCNIC_BRDTYPE_P3P_10G_TP: + adapter->ahw->port_type = (adapter->portnum < 2) ? + QLCNIC_XGBE : QLCNIC_GBE; + break; + default: + dev_err(&pdev->dev, "unknown board type %x\n", board_type); + adapter->ahw->port_type = QLCNIC_XGBE; + break; + } + + return 0; +} + +int +qlcnic_wol_supported(struct qlcnic_adapter *adapter) +{ + u32 wol_cfg; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); + if (wol_cfg & (1UL << adapter->portnum)) { + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); + if (wol_cfg & (1 << adapter->portnum)) + return 1; + } + + return 0; +} + +int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) +{ + struct qlcnic_nic_req req; + int rv; + u64 word; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64((u64)rate << 32); + req.words[1] = cpu_to_le64(state); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv) + dev_err(&adapter->pdev->dev, "LED configuration failed.\n"); + + return rv; +} + +/* FW dump related functions */ +static u32 +qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, + u32 *buffer) +{ + int i; + u32 addr, data; + struct __crb *crb = &entry->region.crb; + void __iomem *base = adapter->ahw->pci_base0; + + addr = crb->addr; + + for (i = 0; i < crb->no_ops; i++) { + QLCNIC_RD_DUMP_REG(addr, base, &data); + *buffer++ = cpu_to_le32(addr); + *buffer++ = cpu_to_le32(data); + addr += crb->stride; + } + return crb->no_ops * 2 * sizeof(u32); +} + +static u32 +qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, u32 *buffer) +{ + int i, k, timeout = 0; + void __iomem *base = adapter->ahw->pci_base0; + u32 addr, data; + u8 opcode, no_ops; + struct __ctrl *ctr = &entry->region.ctrl; + struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr; + + addr = ctr->addr; + no_ops = ctr->no_ops; + + for (i = 0; i < no_ops; i++) { + k = 0; + opcode = 0; + for (k = 0; k < 8; k++) { + if (!(ctr->opcode & (1 << k))) + continue; + switch (1 << k) { + case QLCNIC_DUMP_WCRB: + QLCNIC_WR_DUMP_REG(addr, base, ctr->val1); + break; + case QLCNIC_DUMP_RWCRB: + QLCNIC_RD_DUMP_REG(addr, base, &data); + QLCNIC_WR_DUMP_REG(addr, base, data); + break; + case QLCNIC_DUMP_ANDCRB: + QLCNIC_RD_DUMP_REG(addr, base, &data); + QLCNIC_WR_DUMP_REG(addr, base, + (data & ctr->val2)); + break; + case QLCNIC_DUMP_ORCRB: + QLCNIC_RD_DUMP_REG(addr, base, &data); + QLCNIC_WR_DUMP_REG(addr, base, + (data | ctr->val3)); + break; + case QLCNIC_DUMP_POLLCRB: + while (timeout <= ctr->timeout) { + QLCNIC_RD_DUMP_REG(addr, base, &data); + if ((data & ctr->val2) == ctr->val1) + break; + msleep(1); + timeout++; + } + if (timeout > ctr->timeout) { + dev_info(&adapter->pdev->dev, + "Timed out, aborting poll CRB\n"); + return -EINVAL; + } + break; + case QLCNIC_DUMP_RD_SAVE: + if (ctr->index_a) + addr = t_hdr->saved_state[ctr->index_a]; + QLCNIC_RD_DUMP_REG(addr, base, &data); + t_hdr->saved_state[ctr->index_v] = data; + break; + case QLCNIC_DUMP_WRT_SAVED: + if (ctr->index_v) + data = t_hdr->saved_state[ctr->index_v]; + else + data = ctr->val1; + if (ctr->index_a) + addr = t_hdr->saved_state[ctr->index_a]; + QLCNIC_WR_DUMP_REG(addr, base, data); + break; + case QLCNIC_DUMP_MOD_SAVE_ST: + data = t_hdr->saved_state[ctr->index_v]; + data <<= ctr->shl_val; + 
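+				/* template-driven transform of the saved
+				 * word: shift both ways, mask with val2,
+				 * OR in val3 and add val1 before storing.
+				 */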
data >>= ctr->shr_val; + if (ctr->val2) + data &= ctr->val2; + data |= ctr->val3; + data += ctr->val1; + t_hdr->saved_state[ctr->index_v] = data; + break; + default: + dev_info(&adapter->pdev->dev, + "Unknown opcode\n"); + break; + } + } + addr += ctr->stride; + } + return 0; +} + +static u32 +qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, + u32 *buffer) +{ + int loop; + u32 val, data = 0; + struct __mux *mux = &entry->region.mux; + void __iomem *base = adapter->ahw->pci_base0; + + val = mux->val; + for (loop = 0; loop < mux->no_ops; loop++) { + QLCNIC_WR_DUMP_REG(mux->addr, base, val); + QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data); + *buffer++ = cpu_to_le32(val); + *buffer++ = cpu_to_le32(data); + val += mux->val_stride; + } + return 2 * mux->no_ops * sizeof(u32); +} + +static u32 +qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, + u32 *buffer) +{ + int i, loop; + u32 cnt, addr, data, que_id = 0; + void __iomem *base = adapter->ahw->pci_base0; + struct __queue *que = &entry->region.que; + + addr = que->read_addr; + cnt = que->read_addr_cnt; + + for (loop = 0; loop < que->no_ops; loop++) { + QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id); + addr = que->read_addr; + for (i = 0; i < cnt; i++) { + QLCNIC_RD_DUMP_REG(addr, base, &data); + *buffer++ = cpu_to_le32(data); + addr += que->read_addr_stride; + } + que_id += que->stride; + } + return que->no_ops * cnt * sizeof(u32); +} + +static u32 +qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, + u32 *buffer) +{ + int i; + u32 data; + void __iomem *addr; + struct __ocm *ocm = &entry->region.ocm; + + addr = adapter->ahw->pci_base0 + ocm->read_addr; + for (i = 0; i < ocm->no_ops; i++) { + data = readl(addr); + *buffer++ = cpu_to_le32(data); + addr += ocm->read_addr_stride; + } + return ocm->no_ops * sizeof(u32); +} + +static u32 +qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, + u32 *buffer) +{ + int i, count = 0; + u32 fl_addr, size, val, lck_val, addr; + struct __mem *rom = &entry->region.mem; + void __iomem *base = adapter->ahw->pci_base0; + + fl_addr = rom->addr; + size = rom->size/4; +lock_try: + lck_val = readl(base + QLCNIC_FLASH_SEM2_LK); + if (!lck_val && count < MAX_CTL_CHECK) { + msleep(10); + count++; + goto lock_try; + } + writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID)); + for (i = 0; i < size; i++) { + addr = fl_addr & 0xFFFF0000; + QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr); + addr = LSW(fl_addr) + FLASH_ROM_DATA; + QLCNIC_RD_DUMP_REG(addr, base, &val); + fl_addr += 4; + *buffer++ = cpu_to_le32(val); + } + readl(base + QLCNIC_FLASH_SEM2_ULK); + return rom->size; +} + +static u32 +qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, u32 *buffer) +{ + int i; + u32 cnt, val, data, addr; + void __iomem *base = adapter->ahw->pci_base0; + struct __cache *l1 = &entry->region.cache; + + val = l1->init_tag_val; + + for (i = 0; i < l1->no_ops; i++) { + QLCNIC_WR_DUMP_REG(l1->addr, base, val); + QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val)); + addr = l1->read_addr; + cnt = l1->read_addr_num; + while (cnt) { + QLCNIC_RD_DUMP_REG(addr, base, &data); + *buffer++ = cpu_to_le32(data); + addr += l1->read_addr_stride; + cnt--; + } + val += l1->stride; + } + return l1->no_ops * l1->read_addr_num * sizeof(u32); +} + +static u32 +qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, u32 *buffer) +{ + int i; + u32 cnt, 
val, data, addr; + u8 poll_mask, poll_to, time_out = 0; + void __iomem *base = adapter->ahw->pci_base0; + struct __cache *l2 = &entry->region.cache; + + val = l2->init_tag_val; + poll_mask = LSB(MSW(l2->ctrl_val)); + poll_to = MSB(MSW(l2->ctrl_val)); + + for (i = 0; i < l2->no_ops; i++) { + QLCNIC_WR_DUMP_REG(l2->addr, base, val); + if (LSW(l2->ctrl_val)) + QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base, + LSW(l2->ctrl_val)); + if (!poll_mask) + goto skip_poll; + do { + QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data); + if (!(data & poll_mask)) + break; + msleep(1); + time_out++; + } while (time_out <= poll_to); + + if (time_out > poll_to) { + dev_err(&adapter->pdev->dev, + "Timeout exceeded in %s, aborting dump\n", + __func__); + return -EINVAL; + } +skip_poll: + addr = l2->read_addr; + cnt = l2->read_addr_num; + while (cnt) { + QLCNIC_RD_DUMP_REG(addr, base, &data); + *buffer++ = cpu_to_le32(data); + addr += l2->read_addr_stride; + cnt--; + } + val += l2->stride; + } + return l2->no_ops * l2->read_addr_num * sizeof(u32); +} + +static u32 +qlcnic_read_memory(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, u32 *buffer) +{ + u32 addr, data, test, ret = 0; + int i, reg_read; + struct __mem *mem = &entry->region.mem; + void __iomem *base = adapter->ahw->pci_base0; + + reg_read = mem->size; + addr = mem->addr; + /* check for data size of multiple of 16 and 16 byte alignment */ + if ((addr & 0xf) || (reg_read%16)) { + dev_info(&adapter->pdev->dev, + "Unaligned memory addr:0x%x size:0x%x\n", + addr, reg_read); + return -EINVAL; + } + + mutex_lock(&adapter->ahw->mem_lock); + + while (reg_read != 0) { + QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr); + QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0); + QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base, + TA_CTL_ENABLE | TA_CTL_START); + + for (i = 0; i < MAX_CTL_CHECK; i++) { + QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test); + if (!(test & TA_CTL_BUSY)) + break; + } + if (i == MAX_CTL_CHECK) { + if (printk_ratelimit()) { + dev_err(&adapter->pdev->dev, + "failed to read through agent\n"); + ret = -EINVAL; + goto out; + } + } + for (i = 0; i < 4; i++) { + QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data); + *buffer++ = cpu_to_le32(data); + } + addr += 16; + reg_read -= 16; + ret += 16; + } +out: + mutex_unlock(&adapter->ahw->mem_lock); + return mem->size; +} + +static u32 +qlcnic_dump_nop(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, u32 *buffer) +{ + entry->hdr.flags |= QLCNIC_DUMP_SKIP; + return 0; +} + +struct qlcnic_dump_operations fw_dump_ops[] = { + { QLCNIC_DUMP_NOP, qlcnic_dump_nop }, + { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb }, + { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux }, + { QLCNIC_DUMP_QUEUE, qlcnic_dump_que }, + { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom }, + { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm }, + { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl }, + { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache }, + { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache }, + { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache }, + { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache }, + { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache }, + { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache }, + { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache }, + { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache }, + { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom }, + { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory }, + { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl }, + { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop }, + { QLCNIC_DUMP_RDEND, qlcnic_dump_nop }, +}; + +/* Walk the template and collect dump for each entry in the 
dump template */
+static int
+qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
+	u32 size)
+{
+	int ret = 1;
+	if (size != entry->hdr.cap_size) {
+		dev_info(dev,
+		"Invalid dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
+		entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
+		dev_info(dev, "Aborting further dump capture\n");
+		ret = 0;
+	}
+	return ret;
+}
+
+int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
+{
+	u32 *buffer;
+	char mesg[64];
+	char *msg[] = {mesg, NULL};
+	int i, k, ops_cnt, ops_index, dump_size = 0;
+	u32 entry_offset, dump, no_entries, buf_offset = 0;
+	struct qlcnic_dump_entry *entry;
+	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
+
+	if (fw_dump->clr) {
+		dev_info(&adapter->pdev->dev,
+			"Previous dump not cleared, not capturing dump\n");
+		return -EIO;
+	}
+	/* Calculate the size for dump data area only */
+	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
+		if (i & tmpl_hdr->drv_cap_mask)
+			dump_size += tmpl_hdr->cap_sizes[k];
+	if (!dump_size)
+		return -EIO;
+
+	fw_dump->data = vzalloc(dump_size);
+	if (!fw_dump->data) {
+		dev_info(&adapter->pdev->dev,
+			"Unable to allocate (%d KB) for fw dump\n",
+			dump_size/1024);
+		return -ENOMEM;
+	}
+	buffer = fw_dump->data;
+	fw_dump->size = dump_size;
+	no_entries = tmpl_hdr->num_entries;
+	ops_cnt = ARRAY_SIZE(fw_dump_ops);
+	entry_offset = tmpl_hdr->offset;
+	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
+	tmpl_hdr->sys_info[1] = adapter->fw_version;
+
+	for (i = 0; i < no_entries; i++) {
+		entry = (void *)tmpl_hdr + entry_offset;
+		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
+			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+			entry_offset += entry->hdr.offset;
+			continue;
+		}
+		/* Find the handler for this entry */
+		ops_index = 0;
+		while (ops_index < ops_cnt) {
+			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
+				break;
+			ops_index++;
+		}
+		if (ops_index == ops_cnt) {
+			dev_info(&adapter->pdev->dev,
+				"Invalid entry type %d, exiting dump\n",
+				entry->hdr.type);
+			goto error;
+		}
+		/* Collect dump for this entry */
+		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
+		if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
+			dump))
+			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+		buf_offset += entry->hdr.cap_size;
+		entry_offset += entry->hdr.offset;
+		buffer = fw_dump->data + buf_offset;
+	}
+	if (dump_size != buf_offset) {
+		dev_info(&adapter->pdev->dev,
+			"Captured(%d) and expected size(%d) do not match\n",
+			buf_offset, dump_size);
+		goto error;
+	} else {
+		fw_dump->clr = 1;
+		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
+			adapter->netdev->name);
+		dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
+			fw_dump->size);
+		/* Send a udev event to notify availability of FW dump */
+		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
+		return 0;
+	}
+error:
+	vfree(fw_dump->data);
+	return -EINVAL;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
new file mode 100644
index 000000000000..3b6741e4754d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -0,0 +1,1898 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include "qlcnic.h"
+
+struct crb_addr_pair {
+	u32 addr;
+	u32 data;
+};
+
+#define QLCNIC_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
+
+#define crb_addr_transform(name) \
+	(crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
+	QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+#define QLCNIC_ADDR_ERROR (0xffffffff)
+
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+		struct qlcnic_host_rds_ring *rds_ring);
+
+static int
+qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter);
+
+static void crb_addr_transform_setup(void)
+{
+	crb_addr_transform(XDMA);
+	crb_addr_transform(TIMR);
+	crb_addr_transform(SRE);
+	crb_addr_transform(SQN3);
+	crb_addr_transform(SQN2);
+	crb_addr_transform(SQN1);
+	crb_addr_transform(SQN0);
+	crb_addr_transform(SQS3);
+	crb_addr_transform(SQS2);
+	crb_addr_transform(SQS1);
+	crb_addr_transform(SQS0);
+	crb_addr_transform(RPMX7);
+	crb_addr_transform(RPMX6);
+	crb_addr_transform(RPMX5);
+	crb_addr_transform(RPMX4);
+	crb_addr_transform(RPMX3);
+	crb_addr_transform(RPMX2);
+	crb_addr_transform(RPMX1);
+	crb_addr_transform(RPMX0);
+	crb_addr_transform(ROMUSB);
+	crb_addr_transform(SN);
+	crb_addr_transform(QMN);
+	crb_addr_transform(QMS);
+	crb_addr_transform(PGNI);
+	crb_addr_transform(PGND);
+	crb_addr_transform(PGN3);
+	crb_addr_transform(PGN2);
+	crb_addr_transform(PGN1);
+	crb_addr_transform(PGN0);
+	crb_addr_transform(PGSI);
+	crb_addr_transform(PGSD);
+	crb_addr_transform(PGS3);
+	crb_addr_transform(PGS2);
+	crb_addr_transform(PGS1);
+	crb_addr_transform(PGS0);
+	crb_addr_transform(PS);
+	crb_addr_transform(PH);
+	crb_addr_transform(NIU);
+	crb_addr_transform(I2Q);
+	crb_addr_transform(EG);
+	crb_addr_transform(MN);
+	crb_addr_transform(MS);
+	crb_addr_transform(CAS2);
+	crb_addr_transform(CAS1);
+	crb_addr_transform(CAS0);
+	crb_addr_transform(CAM);
+	crb_addr_transform(C2C1);
+	crb_addr_transform(C2C0);
+	crb_addr_transform(SMB);
+	crb_addr_transform(OCM0);
+	crb_addr_transform(I2C0);
+}
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_recv_context *recv_ctx;
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct qlcnic_rx_buffer *rx_buf;
+	int i, ring;
+
+	recv_ctx = adapter->recv_ctx;
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
+		for (i = 0; i < rds_ring->num_desc; ++i) {
+			rx_buf = &(rds_ring->rx_buf_arr[i]);
+			if (rx_buf->skb == NULL)
+				continue;
+
+			pci_unmap_single(adapter->pdev,
+					rx_buf->dma,
+					rds_ring->dma_size,
+					PCI_DMA_FROMDEVICE);
+
+			dev_kfree_skb_any(rx_buf->skb);
+		}
+	}
+}
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_recv_context *recv_ctx;
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct qlcnic_rx_buffer *rx_buf;
+	int i, ring;
+
+	recv_ctx = adapter->recv_ctx;
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
+
+		INIT_LIST_HEAD(&rds_ring->free_list);
+
+		rx_buf = rds_ring->rx_buf_arr;
+		for (i = 0; i < rds_ring->num_desc; i++) {
+			list_add_tail(&rx_buf->list,
+					&rds_ring->free_list);
+			rx_buf++;
+		}
+	}
+}
+
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_cmd_buffer *cmd_buf;
+	struct qlcnic_skb_frag *buffrag;
+	int i, j;
+	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+	cmd_buf = tx_ring->cmd_buf_arr;
+	for (i = 0; i < tx_ring->num_desc; i++) {
+		buffrag = cmd_buf->frag_array;
+		if (buffrag->dma) {
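+			/* the head fragment was mapped as a single DMA
+			 * buffer; the page fragments below are released
+			 * with pci_unmap_page() instead.
+			 */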
pci_unmap_single(adapter->pdev, buffrag->dma, + buffrag->length, PCI_DMA_TODEVICE); + buffrag->dma = 0ULL; + } + for (j = 0; j < cmd_buf->frag_count; j++) { + buffrag++; + if (buffrag->dma) { + pci_unmap_page(adapter->pdev, buffrag->dma, + buffrag->length, + PCI_DMA_TODEVICE); + buffrag->dma = 0ULL; + } + } + if (cmd_buf->skb) { + dev_kfree_skb_any(cmd_buf->skb); + cmd_buf->skb = NULL; + } + cmd_buf++; + } +} + +void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_tx_ring *tx_ring; + int ring; + + recv_ctx = adapter->recv_ctx; + + if (recv_ctx->rds_rings == NULL) + goto skip_rds; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + vfree(rds_ring->rx_buf_arr); + rds_ring->rx_buf_arr = NULL; + } + kfree(recv_ctx->rds_rings); + +skip_rds: + if (adapter->tx_ring == NULL) + return; + + tx_ring = adapter->tx_ring; + vfree(tx_ring->cmd_buf_arr); + tx_ring->cmd_buf_arr = NULL; + kfree(adapter->tx_ring); + adapter->tx_ring = NULL; +} + +int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_rx_buffer *rx_buf; + int ring, i, size; + + struct qlcnic_cmd_buffer *cmd_buf_arr; + struct net_device *netdev = adapter->netdev; + + size = sizeof(struct qlcnic_host_tx_ring); + tx_ring = kzalloc(size, GFP_KERNEL); + if (tx_ring == NULL) { + dev_err(&netdev->dev, "failed to allocate tx ring struct\n"); + return -ENOMEM; + } + adapter->tx_ring = tx_ring; + + tx_ring->num_desc = adapter->num_txd; + tx_ring->txq = netdev_get_tx_queue(netdev, 0); + + cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); + if (cmd_buf_arr == NULL) { + dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n"); + goto err_out; + } + tx_ring->cmd_buf_arr = cmd_buf_arr; + + recv_ctx = adapter->recv_ctx; + + size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring); + rds_ring = kzalloc(size, GFP_KERNEL); + if (rds_ring == NULL) { + dev_err(&netdev->dev, "failed to allocate rds ring struct\n"); + goto err_out; + } + recv_ctx->rds_rings = rds_ring; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + switch (ring) { + case RCV_RING_NORMAL: + rds_ring->num_desc = adapter->num_rxd; + rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN; + rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; + break; + + case RCV_RING_JUMBO: + rds_ring->num_desc = adapter->num_jumbo_rxd; + rds_ring->dma_size = + QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN; + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) + rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA; + + rds_ring->skb_size = + rds_ring->dma_size + NET_IP_ALIGN; + break; + } + rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); + if (rds_ring->rx_buf_arr == NULL) { + dev_err(&netdev->dev, "Failed to allocate " + "rx buffer ring %d\n", ring); + goto err_out; + } + INIT_LIST_HEAD(&rds_ring->free_list); + /* + * Now go through all of them, set reference handles + * and put them in the queues. 
+ */ + rx_buf = rds_ring->rx_buf_arr; + for (i = 0; i < rds_ring->num_desc; i++) { + list_add_tail(&rx_buf->list, + &rds_ring->free_list); + rx_buf->ref_handle = i; + rx_buf++; + } + spin_lock_init(&rds_ring->lock); + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + sds_ring->irq = adapter->msix_entries[ring].vector; + sds_ring->adapter = adapter; + sds_ring->num_desc = adapter->num_rxd; + + for (i = 0; i < NUM_RCV_DESC_RINGS; i++) + INIT_LIST_HEAD(&sds_ring->free_list[i]); + } + + return 0; + +err_out: + qlcnic_free_sw_resources(adapter); + return -ENOMEM; +} + +/* + * Utility to translate from internal Phantom CRB address + * to external PCI CRB address. + */ +static u32 qlcnic_decode_crb_addr(u32 addr) +{ + int i; + u32 base_addr, offset, pci_base; + + crb_addr_transform_setup(); + + pci_base = QLCNIC_ADDR_ERROR; + base_addr = addr & 0xfff00000; + offset = addr & 0x000fffff; + + for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) { + if (crb_addr_xform[i] == base_addr) { + pci_base = i << 20; + break; + } + } + if (pci_base == QLCNIC_ADDR_ERROR) + return pci_base; + else + return pci_base + offset; +} + +#define QLCNIC_MAX_ROM_WAIT_USEC 100 + +static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter) +{ + long timeout = 0; + long done = 0; + + cond_resched(); + + while (done == 0) { + done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS); + done &= 2; + if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) { + dev_err(&adapter->pdev->dev, + "Timeout reached waiting for rom done"); + return -EIO; + } + udelay(1); + } + return 0; +} + +static int do_rom_fast_read(struct qlcnic_adapter *adapter, + u32 addr, u32 *valp) +{ + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr); + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3); + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb); + if (qlcnic_wait_rom_done(adapter)) { + dev_err(&adapter->pdev->dev, "Error waiting for rom done\n"); + return -EIO; + } + /* reset abyte_cnt and dummy_byte_cnt */ + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0); + udelay(10); + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); + + *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA); + return 0; +} + +static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, + u8 *bytes, size_t size) +{ + int addridx; + int ret = 0; + + for (addridx = addr; addridx < (addr + size); addridx += 4) { + int v; + ret = do_rom_fast_read(adapter, addridx, &v); + if (ret != 0) + break; + *(__le32 *)bytes = cpu_to_le32(v); + bytes += 4; + } + + return ret; +} + +int +qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, + u8 *bytes, size_t size) +{ + int ret; + + ret = qlcnic_rom_lock(adapter); + if (ret < 0) + return ret; + + ret = do_rom_fast_read_words(adapter, addr, bytes, size); + + qlcnic_rom_unlock(adapter); + return ret; +} + +int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp) +{ + int ret; + + if (qlcnic_rom_lock(adapter) != 0) + return -EIO; + + ret = do_rom_fast_read(adapter, addr, valp); + qlcnic_rom_unlock(adapter); + return ret; +} + +int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) +{ + int addr, val; + int i, n, init_delay; + struct crb_addr_pair *buf; + unsigned offset; + u32 off; + struct pci_dev *pdev = adapter->pdev; + + QLCWR32(adapter, CRB_CMDPEG_STATE, 0); + QLCWR32(adapter, CRB_RCVPEG_STATE, 0); + + qlcnic_rom_lock(adapter); + QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff); + 
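+	/* the global software reset is issued while holding the ROM
+	 * lock; the CRB init table from flash is replayed below.
+	 */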
qlcnic_rom_unlock(adapter); + + /* Init HW CRB block */ + if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || + qlcnic_rom_fast_read(adapter, 4, &n) != 0) { + dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n); + return -EIO; + } + offset = n & 0xffffU; + n = (n >> 16) & 0xffffU; + + if (n >= 1024) { + dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n"); + return -EIO; + } + + buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); + if (buf == NULL) { + dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n"); + return -ENOMEM; + } + + for (i = 0; i < n; i++) { + if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || + qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { + kfree(buf); + return -EIO; + } + + buf[i].addr = addr; + buf[i].data = val; + } + + for (i = 0; i < n; i++) { + + off = qlcnic_decode_crb_addr(buf[i].addr); + if (off == QLCNIC_ADDR_ERROR) { + dev_err(&pdev->dev, "CRB init value out of range %x\n", + buf[i].addr); + continue; + } + off += QLCNIC_PCI_CRBSPACE; + + if (off & 1) + continue; + + /* skipping cold reboot MAGIC */ + if (off == QLCNIC_CAM_RAM(0x1fc)) + continue; + if (off == (QLCNIC_CRB_I2C0 + 0x1c)) + continue; + if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */ + continue; + if (off == (ROMUSB_GLB + 0xa8)) + continue; + if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ + continue; + if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ + continue; + if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ + continue; + if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET) + continue; + /* skip the function enable register */ + if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION)) + continue; + if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2)) + continue; + if ((off & 0x0ff00000) == QLCNIC_CRB_SMB) + continue; + + init_delay = 1; + /* After writing this register, HW needs time for CRB */ + /* to quiet down (else crb_window returns 0xffffffff) */ + if (off == QLCNIC_ROMUSB_GLB_SW_RESET) + init_delay = 1000; + + QLCWR32(adapter, off, buf[i].data); + + msleep(init_delay); + } + kfree(buf); + + /* Initialize protocol process engine */ + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0); + msleep(1); + QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0); + QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); + return 0; +} + +static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter) +{ + u32 val; + int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT; + + do { + val = QLCRD32(adapter, CRB_CMDPEG_STATE); + + switch (val) { + case PHAN_INITIALIZE_COMPLETE: + case PHAN_INITIALIZE_ACK: + return 0; + case PHAN_INITIALIZE_FAILED: + goto out_err; + default: + break; + } + + msleep(QLCNIC_CMDPEG_CHECK_DELAY); + + } while (--retries); + + QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); + +out_err: + dev_err(&adapter->pdev->dev, "Command Peg initialization not " + "complete, state: 0x%x.\n", val); + return -EIO; +} + +static int 
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter) +{ + u32 val; + int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT; + + do { + val = QLCRD32(adapter, CRB_RCVPEG_STATE); + + if (val == PHAN_PEG_RCV_INITIALIZED) + return 0; + + msleep(QLCNIC_RCVPEG_CHECK_DELAY); + + } while (--retries); + + if (!retries) { + dev_err(&adapter->pdev->dev, "Receive Peg initialization not " + "complete, state: 0x%x.\n", val); + return -EIO; + } + + return 0; +} + +int +qlcnic_check_fw_status(struct qlcnic_adapter *adapter) +{ + int err; + + err = qlcnic_cmd_peg_ready(adapter); + if (err) + return err; + + err = qlcnic_receive_peg_ready(adapter); + if (err) + return err; + + QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); + + return err; +} + +int +qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { + + int timeo; + u32 val; + + val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); + val = QLC_DEV_GET_DRV(val, adapter->portnum); + if ((val & 0x3) != QLCNIC_TYPE_NIC) { + dev_err(&adapter->pdev->dev, + "Not an Ethernet NIC func=%u\n", val); + return -EIO; + } + adapter->physical_port = (val >> 2); + if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) + timeo = QLCNIC_INIT_TIMEOUT_SECS; + + adapter->dev_init_timeo = timeo; + + if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo)) + timeo = QLCNIC_RESET_TIMEOUT_SECS; + + adapter->reset_ack_timeo = timeo; + + return 0; +} + +static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region, + struct qlcnic_flt_entry *region_entry) +{ + struct qlcnic_flt_header flt_hdr; + struct qlcnic_flt_entry *flt_entry; + int i = 0, ret; + u32 entry_size; + + memset(region_entry, 0, sizeof(struct qlcnic_flt_entry)); + ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION, + (u8 *)&flt_hdr, + sizeof(struct qlcnic_flt_header)); + if (ret) { + dev_warn(&adapter->pdev->dev, + "error reading flash layout header\n"); + return -EIO; + } + + entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); + flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size); + if (flt_entry == NULL) { + dev_warn(&adapter->pdev->dev, "error allocating memory\n"); + return -EIO; + } + + ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + + sizeof(struct qlcnic_flt_header), + (u8 *)flt_entry, entry_size); + if (ret) { + dev_warn(&adapter->pdev->dev, + "error reading flash layout entries\n"); + goto err_out; + } + + while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) { + if (flt_entry[i].region == region) + break; + i++; + } + if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) { + dev_warn(&adapter->pdev->dev, + "region=%x not found in %d regions\n", region, i); + ret = -EIO; + goto err_out; + } + memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry)); + +err_out: + vfree(flt_entry); + return ret; +} + +int +qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) +{ + struct qlcnic_flt_entry fw_entry; + u32 ver = -1, min_ver; + int ret; + + ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry); + if (!ret) + /* 0-4:-signature, 4-8:-fw version */ + qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4, + (int *)&ver); + else + qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, + (int *)&ver); + + ver = QLCNIC_DECODE_VERSION(ver); + min_ver = QLCNIC_MIN_FW_VERSION; + + if (ver < min_ver) { + dev_err(&adapter->pdev->dev, + "firmware version %d.%d.%d unsupported." 
+ "Min supported version %d.%d.%d\n", + _major(ver), _minor(ver), _build(ver), + _major(min_ver), _minor(min_ver), _build(min_ver)); + return -EINVAL; + } + + return 0; +} + +static int +qlcnic_has_mn(struct qlcnic_adapter *adapter) +{ + u32 capability; + capability = 0; + + capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY); + if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) + return 1; + + return 0; +} + +static +struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section) +{ + u32 i; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + __le32 entries = cpu_to_le32(directory->num_entries); + + for (i = 0; i < entries; i++) { + + __le32 offs = cpu_to_le32(directory->findex) + + (i * cpu_to_le32(directory->entry_size)); + __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); + + if (tab_type == section) + return (struct uni_table_desc *) &unirom[offs]; + } + + return NULL; +} + +#define FILEHEADER_SIZE (14 * 4) + +static int +qlcnic_validate_header(struct qlcnic_adapter *adapter) +{ + const u8 *unirom = adapter->fw->data; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + __le32 fw_file_size = adapter->fw->size; + __le32 entries; + __le32 entry_size; + __le32 tab_size; + + if (fw_file_size < FILEHEADER_SIZE) + return -EINVAL; + + entries = cpu_to_le32(directory->num_entries); + entry_size = cpu_to_le32(directory->entry_size); + tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); + + if (fw_file_size < tab_size) + return -EINVAL; + + return 0; +} + +static int +qlcnic_validate_bootld(struct qlcnic_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + QLCNIC_UNI_BOOTLD_IDX_OFF)); + __le32 offs; + __le32 tab_size; + __le32 data_size; + + tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD); + + if (!tab_desc) + return -EINVAL; + + tab_size = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx)); + descr = (struct uni_data_desc *)&unirom[offs]; + + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + +static int +qlcnic_validate_fw(struct qlcnic_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + QLCNIC_UNI_FIRMWARE_IDX_OFF)); + __le32 offs; + __le32 tab_size; + __le32 data_size; + + tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW); + + if (!tab_desc) + return -EINVAL; + + tab_size = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx)); + descr = (struct uni_data_desc *)&unirom[offs]; + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + +static int +qlcnic_validate_product_offs(struct qlcnic_adapter *adapter) +{ + struct uni_table_desc *ptab_descr; + const u8 *unirom = adapter->fw->data; + int mn_present = qlcnic_has_mn(adapter); + __le32 entries; + __le32 entry_size; + 
__le32 tab_size; + u32 i; + + ptab_descr = qlcnic_get_table_desc(unirom, + QLCNIC_UNI_DIR_SECT_PRODUCT_TBL); + if (!ptab_descr) + return -EINVAL; + + entries = cpu_to_le32(ptab_descr->num_entries); + entry_size = cpu_to_le32(ptab_descr->entry_size); + tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); + + if (adapter->fw->size < tab_size) + return -EINVAL; + +nomn: + for (i = 0; i < entries; i++) { + + __le32 flags, file_chiprev, offs; + u8 chiprev = adapter->ahw->revision_id; + u32 flagbit; + + offs = cpu_to_le32(ptab_descr->findex) + + (i * cpu_to_le32(ptab_descr->entry_size)); + flags = cpu_to_le32(*((int *)&unirom[offs] + + QLCNIC_UNI_FLAGS_OFF)); + file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + + QLCNIC_UNI_CHIP_REV_OFF)); + + flagbit = mn_present ? 1 : 2; + + if ((chiprev == file_chiprev) && + ((1ULL << flagbit) & flags)) { + adapter->file_prd_off = offs; + return 0; + } + } + if (mn_present) { + mn_present = 0; + goto nomn; + } + return -EINVAL; +} + +static int +qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter) +{ + if (qlcnic_validate_header(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: header validation failed\n"); + return -EINVAL; + } + + if (qlcnic_validate_product_offs(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: product validation failed\n"); + return -EINVAL; + } + + if (qlcnic_validate_bootld(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: bootld validation failed\n"); + return -EINVAL; + } + + if (qlcnic_validate_fw(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: firmware validation failed\n"); + return -EINVAL; + } + + return 0; +} + +static +struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter, + u32 section, u32 idx_offset) +{ + const u8 *unirom = adapter->fw->data; + int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + idx_offset)); + struct uni_table_desc *tab_desc; + __le32 offs; + + tab_desc = qlcnic_get_table_desc(unirom, section); + + if (tab_desc == NULL) + return NULL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * idx); + + return (struct uni_data_desc *)&unirom[offs]; +} + +static u8 * +qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter) +{ + u32 offs = QLCNIC_BOOTLD_START; + + if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) + offs = cpu_to_le32((qlcnic_get_data_desc(adapter, + QLCNIC_UNI_DIR_SECT_BOOTLD, + QLCNIC_UNI_BOOTLD_IDX_OFF))->findex); + + return (u8 *)&adapter->fw->data[offs]; +} + +static u8 * +qlcnic_get_fw_offs(struct qlcnic_adapter *adapter) +{ + u32 offs = QLCNIC_IMAGE_START; + + if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) + offs = cpu_to_le32((qlcnic_get_data_desc(adapter, + QLCNIC_UNI_DIR_SECT_FW, + QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex); + + return (u8 *)&adapter->fw->data[offs]; +} + +static __le32 +qlcnic_get_fw_size(struct qlcnic_adapter *adapter) +{ + if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) + return cpu_to_le32((qlcnic_get_data_desc(adapter, + QLCNIC_UNI_DIR_SECT_FW, + QLCNIC_UNI_FIRMWARE_IDX_OFF))->size); + else + return cpu_to_le32( + *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]); +} + +static __le32 +qlcnic_get_fw_version(struct qlcnic_adapter *adapter) +{ + struct uni_data_desc *fw_data_desc; + const struct firmware *fw = adapter->fw; + __le32 major, minor, sub; + const u8 *ver_str; + int i, ret; + + if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) + return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]); + + fw_data_desc = 
qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, + QLCNIC_UNI_FIRMWARE_IDX_OFF); + ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) + + cpu_to_le32(fw_data_desc->size) - 17; + + for (i = 0; i < 12; i++) { + if (!strncmp(&ver_str[i], "REV=", 4)) { + ret = sscanf(&ver_str[i+4], "%u.%u.%u ", + &major, &minor, &sub); + if (ret != 3) + return 0; + else + return major + (minor << 8) + (sub << 16); + } + } + + return 0; +} + +static __le32 +qlcnic_get_bios_version(struct qlcnic_adapter *adapter) +{ + const struct firmware *fw = adapter->fw; + __le32 bios_ver, prd_off = adapter->file_prd_off; + + if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) + return cpu_to_le32( + *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]); + + bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) + + QLCNIC_UNI_BIOS_VERSION_OFF)); + + return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); +} + +static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter) +{ + if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID)) + dev_info(&adapter->pdev->dev, "Resetting rom_lock\n"); + + qlcnic_pcie_sem_unlock(adapter, 2); +} + +static int +qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter) +{ + u32 heartbeat, ret = -EIO; + int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; + + adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); + + do { + msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS); + heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); + if (heartbeat != adapter->heartbeat) { + ret = QLCNIC_RCODE_SUCCESS; + break; + } + } while (--retries); + + return ret; +} + +int +qlcnic_need_fw_reset(struct qlcnic_adapter *adapter) +{ + if ((adapter->flags & QLCNIC_FW_HANG) || + qlcnic_check_fw_hearbeat(adapter)) { + qlcnic_rom_lock_recovery(adapter); + return 1; + } + + if (adapter->need_fw_reset) + return 1; + + if (adapter->fw) + return 1; + + return 0; +} + +static const char *fw_name[] = { + QLCNIC_UNIFIED_ROMIMAGE_NAME, + QLCNIC_FLASH_ROMIMAGE_NAME, +}; + +int +qlcnic_load_firmware(struct qlcnic_adapter *adapter) +{ + u64 *ptr64; + u32 i, flashaddr, size; + const struct firmware *fw = adapter->fw; + struct pci_dev *pdev = adapter->pdev; + + dev_info(&pdev->dev, "loading firmware from %s\n", + fw_name[adapter->fw_type]); + + if (fw) { + __le64 data; + + size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; + + ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter); + flashaddr = QLCNIC_BOOTLD_START; + + for (i = 0; i < size; i++) { + data = cpu_to_le64(ptr64[i]); + + if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + + size = (__force u32)qlcnic_get_fw_size(adapter) / 8; + + ptr64 = (u64 *)qlcnic_get_fw_offs(adapter); + flashaddr = QLCNIC_IMAGE_START; + + for (i = 0; i < size; i++) { + data = cpu_to_le64(ptr64[i]); + + if (qlcnic_pci_mem_write_2M(adapter, + flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + + size = (__force u32)qlcnic_get_fw_size(adapter) % 8; + if (size) { + data = cpu_to_le64(ptr64[i]); + + if (qlcnic_pci_mem_write_2M(adapter, + flashaddr, data)) + return -EIO; + } + + } else { + u64 data; + u32 hi, lo; + int ret; + struct qlcnic_flt_entry bootld_entry; + + ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION, + &bootld_entry); + if (!ret) { + size = bootld_entry.size / 8; + flashaddr = bootld_entry.start_addr; + } else { + size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; + flashaddr = QLCNIC_BOOTLD_START; + dev_info(&pdev->dev, + "using legacy method to get flash fw region"); + } + + for (i = 0; i < size; i++) { 
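+			/* each pass reads one 64-bit word from flash as
+			 * two 32-bit ROM reads and writes it into card
+			 * memory through the test agent.
+			 */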
+ if (qlcnic_rom_fast_read(adapter, + flashaddr, (int *)&lo) != 0) + return -EIO; + if (qlcnic_rom_fast_read(adapter, + flashaddr + 4, (int *)&hi) != 0) + return -EIO; + + data = (((u64)hi << 32) | lo); + + if (qlcnic_pci_mem_write_2M(adapter, + flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + } + msleep(1); + + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020); + QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e); + return 0; +} + +static int +qlcnic_validate_firmware(struct qlcnic_adapter *adapter) +{ + __le32 val; + u32 ver, bios, min_size; + struct pci_dev *pdev = adapter->pdev; + const struct firmware *fw = adapter->fw; + u8 fw_type = adapter->fw_type; + + if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) { + if (qlcnic_validate_unified_romimage(adapter)) + return -EINVAL; + + min_size = QLCNIC_UNI_FW_MIN_SIZE; + } else { + val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]); + if ((__force u32)val != QLCNIC_BDINFO_MAGIC) + return -EINVAL; + + min_size = QLCNIC_FW_MIN_SIZE; + } + + if (fw->size < min_size) + return -EINVAL; + + val = qlcnic_get_fw_version(adapter); + ver = QLCNIC_DECODE_VERSION(val); + + if (ver < QLCNIC_MIN_FW_VERSION) { + dev_err(&pdev->dev, + "%s: firmware version %d.%d.%d unsupported\n", + fw_name[fw_type], _major(ver), _minor(ver), _build(ver)); + return -EINVAL; + } + + val = qlcnic_get_bios_version(adapter); + qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios); + if ((__force u32)val != bios) { + dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", + fw_name[fw_type]); + return -EINVAL; + } + + QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC); + return 0; +} + +static void +qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter) +{ + u8 fw_type; + + switch (adapter->fw_type) { + case QLCNIC_UNKNOWN_ROMIMAGE: + fw_type = QLCNIC_UNIFIED_ROMIMAGE; + break; + + case QLCNIC_UNIFIED_ROMIMAGE: + default: + fw_type = QLCNIC_FLASH_ROMIMAGE; + break; + } + + adapter->fw_type = fw_type; +} + + + +void qlcnic_request_firmware(struct qlcnic_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int rc; + + adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE; + +next: + qlcnic_get_next_fwtype(adapter); + + if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) { + adapter->fw = NULL; + } else { + rc = request_firmware(&adapter->fw, + fw_name[adapter->fw_type], &pdev->dev); + if (rc != 0) + goto next; + + rc = qlcnic_validate_firmware(adapter); + if (rc != 0) { + release_firmware(adapter->fw); + msleep(1); + goto next; + } + } +} + + +void +qlcnic_release_firmware(struct qlcnic_adapter *adapter) +{ + if (adapter->fw) + release_firmware(adapter->fw); + adapter->fw = NULL; +} + +static void +qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, + struct qlcnic_fw_msg *msg) +{ + u32 cable_OUI; + u16 cable_len; + u16 link_speed; + u8 link_status, module, duplex, autoneg; + u8 lb_status = 0; + struct net_device *netdev = adapter->netdev; + + adapter->has_link_events = 1; + + cable_OUI = msg->body[1] & 0xffffffff; + cable_len = (msg->body[1] >> 32) & 0xffff; + link_speed = (msg->body[1] >> 48) & 0xffff; + + link_status = msg->body[2] & 0xff; + duplex = (msg->body[2] >> 16) & 0xff; + autoneg = (msg->body[2] >> 24) & 0xff; + lb_status = (msg->body[2] >> 32) & 0x3; + + module = (msg->body[2] >> 8) & 0xff; + if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) + dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, " + "length %d\n", cable_OUI, cable_len); + else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) + 
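+		/* twinax module detected, but the cable length is
+		 * outside the supported range */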
dev_info(&netdev->dev, "unsupported cable length %d\n", + cable_len); + + if (!link_status && (lb_status == QLCNIC_ILB_MODE || + lb_status == QLCNIC_ELB_MODE)) + adapter->ahw->loopback_state |= QLCNIC_LINKEVENT; + + qlcnic_advert_link_change(adapter, link_status); + + if (duplex == LINKEVENT_FULL_DUPLEX) + adapter->link_duplex = DUPLEX_FULL; + else + adapter->link_duplex = DUPLEX_HALF; + + adapter->module_type = module; + adapter->link_autoneg = autoneg; + adapter->link_speed = link_speed; +} + +static void +qlcnic_handle_fw_message(int desc_cnt, int index, + struct qlcnic_host_sds_ring *sds_ring) +{ + struct qlcnic_fw_msg msg; + struct status_desc *desc; + struct qlcnic_adapter *adapter; + struct device *dev; + int i = 0, opcode, ret; + + while (desc_cnt > 0 && i < 8) { + desc = &sds_ring->desc_head[index]; + msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); + msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); + + index = get_next_index(index, sds_ring->num_desc); + desc_cnt--; + } + + adapter = sds_ring->adapter; + dev = &adapter->pdev->dev; + opcode = qlcnic_get_nic_msg_opcode(msg.body[0]); + + switch (opcode) { + case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: + qlcnic_handle_linkevent(adapter, &msg); + break; + case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK: + ret = (u32)(msg.body[1]); + switch (ret) { + case 0: + adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE; + break; + case 1: + dev_info(dev, "loopback already in progress\n"); + adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS; + break; + case 2: + dev_info(dev, "loopback cable is not connected\n"); + adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN; + break; + default: + dev_info(dev, "loopback configure request failed," + " ret %x\n", ret); + adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR; + break; + } + break; + default: + break; + } +} + +static int +qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring, + struct qlcnic_rx_buffer *buffer) +{ + struct sk_buff *skb; + dma_addr_t dma; + struct pci_dev *pdev = adapter->pdev; + + skb = dev_alloc_skb(rds_ring->skb_size); + if (!skb) { + adapter->stats.skb_alloc_failure++; + return -ENOMEM; + } + + skb_reserve(skb, NET_IP_ALIGN); + + dma = pci_map_single(pdev, skb->data, + rds_ring->dma_size, PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(pdev, dma)) { + adapter->stats.rx_dma_map_error++; + dev_kfree_skb_any(skb); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->dma = dma; + + return 0; +} + +static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum) +{ + struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + + buffer = &rds_ring->rx_buf_arr[index]; + + if (unlikely(buffer->skb == NULL)) { + WARN_ON(1); + return NULL; + } + + pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, + PCI_DMA_FROMDEVICE); + + skb = buffer->skb; + + if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && + (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) { + adapter->stats.csummed++; + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb_checksum_none_assert(skb); + } + + skb->dev = adapter->netdev; + + buffer->skb = NULL; + + return skb; +} + +static inline int +qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb, + u16 *vlan_tag) +{ + struct ethhdr *eth_hdr; + + if (!__vlan_get_tag(skb, vlan_tag)) { + eth_hdr = (struct ethhdr *) skb->data; + memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + } 
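+	/* no port VLAN configured, nothing further to filter */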
+ if (!adapter->pvid) + return 0; + + if (*vlan_tag == adapter->pvid) { + /* Outer vlan tag. Packet should follow non-vlan path */ + *vlan_tag = 0xffff; + return 0; + } + if (adapter->flags & QLCNIC_TAGGING_ENABLED) + return 0; + + return -EINVAL; +} + +static struct qlcnic_rx_buffer * +qlcnic_process_rcv(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring, + int ring, u64 sts_data0) +{ + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + int index, length, cksum, pkt_offset; + u16 vid = 0xffff; + + if (unlikely(ring >= adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_get_sts_refhandle(sts_data0); + if (unlikely(index >= rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + + length = qlcnic_get_sts_totallength(sts_data0); + cksum = qlcnic_get_sts_status(sts_data0); + pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); + if (!skb) + return buffer; + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + if (pkt_offset) + skb_pull(skb, pkt_offset); + + if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { + adapter->stats.rxdropped++; + dev_kfree_skb(skb); + return buffer; + } + + skb->protocol = eth_type_trans(skb, netdev); + + if (vid != 0xffff) + __vlan_hwaccel_put_tag(skb, vid); + + napi_gro_receive(&sds_ring->napi, skb); + + adapter->stats.rx_pkts++; + adapter->stats.rxbytes += length; + + return buffer; +} + +#define QLC_TCP_HDR_SIZE 20 +#define QLC_TCP_TS_OPTION_SIZE 12 +#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE) + +static struct qlcnic_rx_buffer * +qlcnic_process_lro(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring, + int ring, u64 sts_data0, u64 sts_data1) +{ + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + struct iphdr *iph; + struct tcphdr *th; + bool push, timestamp; + int l2_hdr_offset, l4_hdr_offset; + int index; + u16 lro_length, length, data_offset; + u32 seq_number; + u16 vid = 0xffff; + + if (unlikely(ring > adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_get_lro_sts_refhandle(sts_data0); + if (unlikely(index > rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + + timestamp = qlcnic_get_lro_sts_timestamp(sts_data0); + lro_length = qlcnic_get_lro_sts_length(sts_data0); + l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0); + l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0); + push = qlcnic_get_lro_sts_push_flag(sts_data0); + seq_number = qlcnic_get_lro_sts_seq_number(sts_data1); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); + if (!skb) + return buffer; + + if (timestamp) + data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE; + else + data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE; + + skb_put(skb, lro_length + data_offset); + + skb_pull(skb, l2_hdr_offset); + + if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { + adapter->stats.rxdropped++; + dev_kfree_skb(skb); + return buffer; + } + + skb->protocol = eth_type_trans(skb, netdev); + + iph = (struct iphdr 
*)skb->data; + th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); + + length = (iph->ihl << 2) + (th->doff << 2) + lro_length; + iph->tot_len = htons(length); + iph->check = 0; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + th->psh = push; + th->seq = htonl(seq_number); + + length = skb->len; + + if (vid != 0xffff) + __vlan_hwaccel_put_tag(skb, vid); + netif_receive_skb(skb); + + adapter->stats.lro_pkts++; + adapter->stats.lrobytes += length; + + return buffer; +} + +int +qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) +{ + struct qlcnic_adapter *adapter = sds_ring->adapter; + struct list_head *cur; + struct status_desc *desc; + struct qlcnic_rx_buffer *rxbuf; + u64 sts_data0, sts_data1; + + int count = 0; + int opcode, ring, desc_cnt; + u32 consumer = sds_ring->consumer; + + while (count < max) { + desc = &sds_ring->desc_head[consumer]; + sts_data0 = le64_to_cpu(desc->status_desc_data[0]); + + if (!(sts_data0 & STATUS_OWNER_HOST)) + break; + + desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); + opcode = qlcnic_get_sts_opcode(sts_data0); + + switch (opcode) { + case QLCNIC_RXPKT_DESC: + case QLCNIC_OLD_RXPKT_DESC: + case QLCNIC_SYN_OFFLOAD: + ring = qlcnic_get_sts_type(sts_data0); + rxbuf = qlcnic_process_rcv(adapter, sds_ring, + ring, sts_data0); + break; + case QLCNIC_LRO_DESC: + ring = qlcnic_get_lro_sts_type(sts_data0); + sts_data1 = le64_to_cpu(desc->status_desc_data[1]); + rxbuf = qlcnic_process_lro(adapter, sds_ring, + ring, sts_data0, sts_data1); + break; + case QLCNIC_RESPONSE_DESC: + qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); + default: + goto skip; + } + + WARN_ON(desc_cnt > 1); + + if (likely(rxbuf)) + list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); + else + adapter->stats.null_rxbuf++; + +skip: + for (; desc_cnt > 0; desc_cnt--) { + desc = &sds_ring->desc_head[consumer]; + desc->status_desc_data[0] = + cpu_to_le64(STATUS_OWNER_PHANTOM); + consumer = get_next_index(consumer, sds_ring->num_desc); + } + count++; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + struct qlcnic_host_rds_ring *rds_ring = + &adapter->recv_ctx->rds_rings[ring]; + + if (!list_empty(&sds_ring->free_list[ring])) { + list_for_each(cur, &sds_ring->free_list[ring]) { + rxbuf = list_entry(cur, + struct qlcnic_rx_buffer, list); + qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf); + } + spin_lock(&rds_ring->lock); + list_splice_tail_init(&sds_ring->free_list[ring], + &rds_ring->free_list); + spin_unlock(&rds_ring->lock); + } + + qlcnic_post_rx_buffers_nodb(adapter, rds_ring); + } + + if (count) { + sds_ring->consumer = consumer; + writel(consumer, sds_ring->crb_sts_consumer); + } + + return count; +} + +void +qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring) +{ + struct rcv_desc *pdesc; + struct qlcnic_rx_buffer *buffer; + int count = 0; + u32 producer; + struct list_head *head; + + producer = rds_ring->producer; + + head = &rds_ring->free_list; + while (!list_empty(head)) { + + buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); + + if (!buffer->skb) { + if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) + break; + } + + count++; + list_del(&buffer->list); + + /* make a rcv descriptor */ + pdesc = &rds_ring->desc_head[producer]; + pdesc->addr_buffer = cpu_to_le64(buffer->dma); + pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); + pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); + + producer = get_next_index(producer, rds_ring->num_desc); + } + + if (count) { + 
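/* Publish the new producer index; the doorbell value written to hardware is (producer - 1) masked to the ring size. */ +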
rds_ring->producer = producer; + writel((producer - 1) & (rds_ring->num_desc - 1), + rds_ring->crb_rcv_producer); + } +} + +static void +qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring) +{ + struct rcv_desc *pdesc; + struct qlcnic_rx_buffer *buffer; + int count = 0; + uint32_t producer; + struct list_head *head; + + if (!spin_trylock(&rds_ring->lock)) + return; + + producer = rds_ring->producer; + + head = &rds_ring->free_list; + while (!list_empty(head)) { + + buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); + + if (!buffer->skb) { + if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) + break; + } + + count++; + list_del(&buffer->list); + + /* make a rcv descriptor */ + pdesc = &rds_ring->desc_head[producer]; + pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); + pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); + pdesc->addr_buffer = cpu_to_le64(buffer->dma); + + producer = get_next_index(producer, rds_ring->num_desc); + } + + if (count) { + rds_ring->producer = producer; + writel((producer - 1) & (rds_ring->num_desc - 1), + rds_ring->crb_rcv_producer); + } + spin_unlock(&rds_ring->lock); +} + +static void dump_skb(struct sk_buff *skb) +{ + int i; + unsigned char *data = skb->data; + + printk(KERN_INFO "\n"); + for (i = 0; i < skb->len; i++) { + printk(KERN_CONT "%02x ", data[i]); + if ((i & 0x0f) == 8) + printk(KERN_CONT "\n"); + } +} + +void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring, + int ring, u64 sts_data0) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + int index, length, cksum, pkt_offset; + + if (unlikely(ring >= adapter->max_rds_rings)) + return; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_get_sts_refhandle(sts_data0); + length = qlcnic_get_sts_totallength(sts_data0); + if (unlikely(index >= rds_ring->num_desc)) + return; + + cksum = qlcnic_get_sts_status(sts_data0); + pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); + if (!skb) + return; + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + if (pkt_offset) + skb_pull(skb, pkt_offset); + + if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr)) + adapter->diag_cnt++; + else + dump_skb(skb); + + dev_kfree_skb_any(skb); + adapter->stats.rx_pkts++; + adapter->stats.rxbytes += length; +} + +void +qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) +{ + struct qlcnic_adapter *adapter = sds_ring->adapter; + struct status_desc *desc; + u64 sts_data0; + int ring, opcode, desc_cnt; + + u32 consumer = sds_ring->consumer; + + desc = &sds_ring->desc_head[consumer]; + sts_data0 = le64_to_cpu(desc->status_desc_data[0]); + + if (!(sts_data0 & STATUS_OWNER_HOST)) + return; + + desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); + opcode = qlcnic_get_sts_opcode(sts_data0); + switch (opcode) { + case QLCNIC_RESPONSE_DESC: + qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); + break; + default: + ring = qlcnic_get_sts_type(sts_data0); + qlcnic_process_rcv_diag(adapter, sds_ring, ring, sts_data0); + break; + } + + for (; desc_cnt > 0; desc_cnt--) { + desc = &sds_ring->desc_head[consumer]; + desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); + consumer = get_next_index(consumer, sds_ring->num_desc); + } + + sds_ring->consumer = consumer; + writel(consumer, 
sds_ring->crb_sts_consumer); +} + +void +qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2, + u8 alt_mac, u8 *mac) +{ + u32 mac_low, mac_high; + int i; + + mac_low = QLCRD32(adapter, off1); + mac_high = QLCRD32(adapter, off2); + + if (alt_mac) { + mac_low |= (mac_low >> 16) | (mac_high << 16); + mac_high >>= 16; + } + + for (i = 0; i < 2; i++) + mac[i] = (u8)(mac_high >> ((1 - i) * 8)); + for (i = 2; i < 6; i++) + mac[i] = (u8)(mac_low >> ((5 - i) * 8)); +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c new file mode 100644 index 000000000000..ec8ef72d38d3 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -0,0 +1,4390 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2010 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/interrupt.h> + +#include "qlcnic.h" + +#include <linux/swab.h> +#include <linux/dma-mapping.h> +#include <net/ip.h> +#include <linux/ipv6.h> +#include <linux/inetdevice.h> +#include <linux/sysfs.h> +#include <linux/aer.h> +#include <linux/log2.h> + +MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(QLCNIC_LINUX_VERSIONID); +MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME); + +char qlcnic_driver_name[] = "qlcnic"; +static const char qlcnic_driver_string[] = "QLogic 1/10 GbE " + "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID; + +static struct workqueue_struct *qlcnic_wq; +static int qlcnic_mac_learn; +module_param(qlcnic_mac_learn, int, 0444); +MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); + +static int use_msi = 1; +module_param(use_msi, int, 0444); +MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)"); + +static int use_msi_x = 1; +module_param(use_msi_x, int, 0444); +MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)"); + +static int auto_fw_reset = 1; +module_param(auto_fw_reset, int, 0644); +MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); + +static int load_fw_file; +module_param(load_fw_file, int, 0444); +MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); + +static int qlcnic_config_npars; +module_param(qlcnic_config_npars, int, 0444); +MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)"); + +static int __devinit qlcnic_probe(struct pci_dev *pdev, + const struct pci_device_id *ent); +static void __devexit qlcnic_remove(struct pci_dev *pdev); +static int qlcnic_open(struct net_device *netdev); +static int qlcnic_close(struct net_device *netdev); +static void qlcnic_tx_timeout(struct net_device *netdev); +static void qlcnic_attach_work(struct work_struct *work); +static void qlcnic_fwinit_work(struct work_struct *work); +static void qlcnic_fw_poll_work(struct work_struct *work); +static void qlcnic_schedule_work(struct qlcnic_adapter *adapter, + work_func_t func, int delay); +static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter); +static int qlcnic_poll(struct napi_struct *napi, int budget); +static int qlcnic_rx_poll(struct napi_struct *napi, int budget); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void qlcnic_poll_controller(struct net_device *netdev); +#endif + +static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter); +static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter); +static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter); +static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); + +static void qlcnic_idc_debug_info(struct 
qlcnic_adapter *adapter, u8 encoding); +static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8); +static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); + +static irqreturn_t qlcnic_tmp_intr(int irq, void *data); +static irqreturn_t qlcnic_intr(int irq, void *data); +static irqreturn_t qlcnic_msi_intr(int irq, void *data); +static irqreturn_t qlcnic_msix_intr(int irq, void *data); + +static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev); +static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long); +static int qlcnic_start_firmware(struct qlcnic_adapter *); + +static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter); +static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); +static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); +static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); +static int qlcnicvf_start_firmware(struct qlcnic_adapter *); +static void qlcnic_set_netdev_features(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); +static void qlcnic_vlan_rx_add(struct net_device *, u16); +static void qlcnic_vlan_rx_del(struct net_device *, u16); + +/* PCI Device ID Table */ +#define ENTRY(device) \ + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} + +#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 + +static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = { + ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), + {0,} +}; + +MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl); + + +inline void +qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + writel(tx_ring->producer, tx_ring->crb_cmd_producer); +} + +static const u32 msi_tgt_status[8] = { + ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, + ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, + ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, + ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 +}; + +static const +struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG; + +static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring) +{ + writel(0, sds_ring->crb_intr_mask); +} + +static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring) +{ + struct qlcnic_adapter *adapter = sds_ring->adapter; + + writel(0x1, sds_ring->crb_intr_mask); + + if (!QLCNIC_IS_MSI_FAMILY(adapter)) + writel(0xfbff, adapter->tgt_mask_reg); +} + +static int +qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count) +{ + int size = sizeof(struct qlcnic_host_sds_ring) * count; + + recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); + + return recv_ctx->sds_rings == NULL; +} + +static void +qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx) +{ + if (recv_ctx->sds_rings != NULL) + kfree(recv_ctx->sds_rings); + + recv_ctx->sds_rings = NULL; +} + +static int +qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) + return -ENOMEM; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + if (ring == adapter->max_sds_rings - 1) + netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, + QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings); + else + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2); + } + + return 0; +} + +static void 
+qlcnic_napi_del(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netif_napi_del(&sds_ring->napi); + } + + qlcnic_free_sds_rings(adapter->recv_ctx); +} + +static void +qlcnic_napi_enable(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + napi_enable(&sds_ring->napi); + qlcnic_enable_int(sds_ring); + } +} + +static void +qlcnic_napi_disable(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + qlcnic_disable_int(sds_ring); + napi_synchronize(&sds_ring->napi); + napi_disable(&sds_ring->napi); + } +} + +static void qlcnic_clear_stats(struct qlcnic_adapter *adapter) +{ + memset(&adapter->stats, 0, sizeof(adapter->stats)); +} + +static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable) +{ + u32 control; + int pos; + + pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (pos) { + pci_read_config_dword(pdev, pos, &control); + if (enable) + control |= PCI_MSIX_FLAGS_ENABLE; + else + control = 0; + pci_write_config_dword(pdev, pos, control); + } +} + +static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count) +{ + int i; + + for (i = 0; i < count; i++) + adapter->msix_entries[i].entry = i; +} + +static int +qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) +{ + u8 mac_addr[ETH_ALEN]; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + if (qlcnic_get_mac_address(adapter, mac_addr) != 0) + return -EIO; + + memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); + memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); + + /* set station address */ + + if (!is_valid_ether_addr(netdev->perm_addr)) + dev_warn(&pdev->dev, "Bad MAC address %pM.\n", + netdev->dev_addr); + + return 0; +} + +static int qlcnic_set_mac(struct net_device *netdev, void *p) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED)) + return -EOPNOTSUPP; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + netif_device_detach(netdev); + qlcnic_napi_disable(adapter); + } + + memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + qlcnic_set_multi(adapter->netdev); + + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + netif_device_attach(netdev); + qlcnic_napi_enable(adapter); + } + return 0; +} + +static const struct net_device_ops qlcnic_netdev_ops = { + .ndo_open = qlcnic_open, + .ndo_stop = qlcnic_close, + .ndo_start_xmit = qlcnic_xmit_frame, + .ndo_get_stats = qlcnic_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = qlcnic_set_multi, + .ndo_set_mac_address = qlcnic_set_mac, + .ndo_change_mtu = qlcnic_change_mtu, + .ndo_fix_features = 
qlcnic_fix_features, + .ndo_set_features = qlcnic_set_features, + .ndo_tx_timeout = qlcnic_tx_timeout, + .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add, + .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = qlcnic_poll_controller, +#endif +}; + +static struct qlcnic_nic_template qlcnic_ops = { + .config_bridged_mode = qlcnic_config_bridged_mode, + .config_led = qlcnic_config_led, + .start_firmware = qlcnic_start_firmware +}; + +static struct qlcnic_nic_template qlcnic_vf_ops = { + .config_bridged_mode = qlcnicvf_config_bridged_mode, + .config_led = qlcnicvf_config_led, + .start_firmware = qlcnicvf_start_firmware +}; + +static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) +{ + struct pci_dev *pdev = adapter->pdev; + int err = -1; + + adapter->max_sds_rings = 1; + adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED); + qlcnic_set_msix_bit(pdev, 0); + + if (adapter->msix_supported) { + enable_msix: + qlcnic_init_msix_entries(adapter, num_msix); + err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); + if (err == 0) { + adapter->flags |= QLCNIC_MSIX_ENABLED; + qlcnic_set_msix_bit(pdev, 1); + + adapter->max_sds_rings = num_msix; + + dev_info(&pdev->dev, "using msi-x interrupts\n"); + return err; + } + if (err > 0) { + num_msix = rounddown_pow_of_two(err); + if (num_msix) + goto enable_msix; + } + } + return err; +} + + +static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) +{ + const struct qlcnic_legacy_intr_set *legacy_intrp; + struct pci_dev *pdev = adapter->pdev; + + if (use_msi && !pci_enable_msi(pdev)) { + adapter->flags |= QLCNIC_MSI_ENABLED; + adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, + msi_tgt_status[adapter->ahw->pci_func]); + dev_info(&pdev->dev, "using msi interrupts\n"); + adapter->msix_entries[0].vector = pdev->irq; + return; + } + + legacy_intrp = &legacy_intr[adapter->ahw->pci_func]; + + adapter->int_vec_bit = legacy_intrp->int_vec_bit; + adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, + legacy_intrp->tgt_status_reg); + adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter, + legacy_intrp->tgt_mask_reg); + adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR); + + adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter, + ISR_INT_STATE_REG); + dev_info(&pdev->dev, "using legacy interrupts\n"); + adapter->msix_entries[0].vector = pdev->irq; +} + +static void +qlcnic_setup_intr(struct qlcnic_adapter *adapter) +{ + int num_msix; + + if (adapter->msix_supported) { + num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(), + QLCNIC_DEF_NUM_STS_DESC_RINGS)); + } else + num_msix = 1; + + if (!qlcnic_enable_msix(adapter, num_msix)) + return; + + qlcnic_enable_msi_legacy(adapter); +} + +static void +qlcnic_teardown_intr(struct qlcnic_adapter *adapter) +{ + if (adapter->flags & QLCNIC_MSIX_ENABLED) + pci_disable_msix(adapter->pdev); + if (adapter->flags & QLCNIC_MSI_ENABLED) + pci_disable_msi(adapter->pdev); +} + +static void +qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter) +{ + if (adapter->ahw->pci_base0 != NULL) + iounmap(adapter->ahw->pci_base0); +} + +static int +qlcnic_init_pci_info(struct qlcnic_adapter *adapter) +{ + struct qlcnic_pci_info *pci_info; + int i, ret = 0; + u8 pfn; + + pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL); + if (!pci_info) + return -ENOMEM; + + adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) * + QLCNIC_MAX_PCI_FUNC, GFP_KERNEL); + if (!adapter->npars) { + ret = -ENOMEM; + goto err_pci_info; 
+ } + + adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) * + QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL); + if (!adapter->eswitch) { + ret = -ENOMEM; + goto err_npars; + } + + ret = qlcnic_get_pci_info(adapter, pci_info); + if (ret) + goto err_eswitch; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + pfn = pci_info[i].id; + if (pfn >= QLCNIC_MAX_PCI_FUNC) { + ret = QL_STATUS_INVALID_PARAM; + goto err_eswitch; + } + adapter->npars[pfn].active = (u8)pci_info[i].active; + adapter->npars[pfn].type = (u8)pci_info[i].type; + adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port; + adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw; + adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw; + } + + for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) + adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE; + + kfree(pci_info); + return 0; + +err_eswitch: + kfree(adapter->eswitch); + adapter->eswitch = NULL; +err_npars: + kfree(adapter->npars); + adapter->npars = NULL; +err_pci_info: + kfree(pci_info); + + return ret; +} + +static int +qlcnic_set_function_modes(struct qlcnic_adapter *adapter) +{ + u8 id; + u32 ref_count; + int i, ret = 1; + u32 data = QLCNIC_MGMT_FUNC; + void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; + + /* If other drivers are not in use set their privilege level */ + ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + ret = qlcnic_api_lock(adapter); + if (ret) + goto err_lock; + + if (qlcnic_config_npars) { + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + id = i; + if (adapter->npars[i].type != QLCNIC_TYPE_NIC || + id == adapter->ahw->pci_func) + continue; + data |= (qlcnic_config_npars & + QLC_DEV_SET_DRV(0xf, id)); + } + } else { + data = readl(priv_op); + data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) | + (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, + adapter->ahw->pci_func)); + } + writel(data, priv_op); + qlcnic_api_unlock(adapter); +err_lock: + return ret; +} + +static void +qlcnic_check_vf(struct qlcnic_adapter *adapter) +{ + void __iomem *msix_base_addr; + void __iomem *priv_op; + u32 func; + u32 msix_base; + u32 op_mode, priv_level; + + /* Determine FW API version */ + adapter->fw_hal_version = readl(adapter->ahw->pci_base0 + + QLCNIC_FW_API); + + /* Find PCI function number */ + pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); + msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE; + msix_base = readl(msix_base_addr); + func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE; + adapter->ahw->pci_func = func; + + /* Determine function privilege level */ + priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; + op_mode = readl(priv_op); + if (op_mode == QLC_DEV_DRV_DEFAULT) + priv_level = QLCNIC_MGMT_FUNC; + else + priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); + + if (priv_level == QLCNIC_NON_PRIV_FUNC) { + adapter->op_mode = QLCNIC_NON_PRIV_FUNC; + dev_info(&adapter->pdev->dev, + "HAL Version: %d Non Privileged function\n", + adapter->fw_hal_version); + adapter->nic_ops = &qlcnic_vf_ops; + } else + adapter->nic_ops = &qlcnic_ops; +} + +static int +qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) +{ + void __iomem *mem_ptr0 = NULL; + resource_size_t mem_base; + unsigned long mem_len, pci_len0 = 0; + + struct pci_dev *pdev = adapter->pdev; + + /* remap phys address */ + mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ + mem_len = pci_resource_len(pdev, 0); + + if (mem_len == QLCNIC_PCI_2MB_SIZE) { + + mem_ptr0 = pci_ioremap_bar(pdev, 0); + if (mem_ptr0 == NULL) { + dev_err(&pdev->dev, "failed to 
map PCI bar 0\n"); + return -EIO; + } + pci_len0 = mem_len; + } else { + return -EIO; + } + + dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); + + adapter->ahw->pci_base0 = mem_ptr0; + adapter->ahw->pci_len0 = pci_len0; + + qlcnic_check_vf(adapter); + + adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter, + QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG( + adapter->ahw->pci_func))); + + return 0; +} + +static void get_brd_name(struct qlcnic_adapter *adapter, char *name) +{ + struct pci_dev *pdev = adapter->pdev; + int i, found = 0; + + for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { + if (qlcnic_boards[i].vendor == pdev->vendor && + qlcnic_boards[i].device == pdev->device && + qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor && + qlcnic_boards[i].sub_device == pdev->subsystem_device) { + sprintf(name, "%pM: %s" , + adapter->mac_addr, + qlcnic_boards[i].short_name); + found = 1; + break; + } + + } + + if (!found) + sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr); +} + +static void +qlcnic_check_options(struct qlcnic_adapter *adapter) +{ + u32 fw_major, fw_minor, fw_build, prev_fw_version; + struct pci_dev *pdev = adapter->pdev; + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + + prev_fw_version = adapter->fw_version; + + fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); + fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); + fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB); + + adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build); + + if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC) { + if (fw_dump->tmpl_hdr == NULL || + adapter->fw_version > prev_fw_version) { + if (fw_dump->tmpl_hdr) + vfree(fw_dump->tmpl_hdr); + if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) + dev_info(&pdev->dev, + "Supports FW dump capability\n"); + } + } + + dev_info(&pdev->dev, "firmware v%d.%d.%d\n", + fw_major, fw_minor, fw_build); + if (adapter->ahw->port_type == QLCNIC_XGBE) { + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF; + } else { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; + } + + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + + } else if (adapter->ahw->port_type == QLCNIC_GBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; + } + + adapter->msix_supported = !!use_msi_x; + + adapter->num_txd = MAX_CMD_DESCRIPTORS; + + adapter->max_rds_rings = MAX_RDS_RINGS; +} + +static int +qlcnic_initialize_nic(struct qlcnic_adapter *adapter) +{ + int err; + struct qlcnic_info nic_info; + + err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func); + if (err) + return err; + + adapter->physical_port = (u8)nic_info.phys_port; + adapter->switch_mode = nic_info.switch_mode; + adapter->max_tx_ques = nic_info.max_tx_ques; + adapter->max_rx_ques = nic_info.max_rx_ques; + adapter->capabilities = nic_info.capabilities; + adapter->max_mac_filters = nic_info.max_mac_filters; + adapter->max_mtu = nic_info.max_mtu; + + if (adapter->capabilities & BIT_6) + adapter->flags |= QLCNIC_ESWITCH_ENABLED; + else + adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; + + return err; +} + +static void +qlcnic_set_vlan_config(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + if 
(esw_cfg->discard_tagged) + adapter->flags &= ~QLCNIC_TAGGING_ENABLED; + else + adapter->flags |= QLCNIC_TAGGING_ENABLED; + + if (esw_cfg->vlan_id) + adapter->pvid = esw_cfg->vlan_id; + else + adapter->pvid = 0; +} + +static void +qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + set_bit(vid, adapter->vlans); +} + +static void +qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + qlcnic_restore_indev_addr(netdev, NETDEV_DOWN); + clear_bit(vid, adapter->vlans); +} + +static void +qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED | + QLCNIC_PROMISC_DISABLED); + + if (esw_cfg->mac_anti_spoof) + adapter->flags |= QLCNIC_MACSPOOF; + + if (!esw_cfg->mac_override) + adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED; + + if (!esw_cfg->promisc_mode) + adapter->flags |= QLCNIC_PROMISC_DISABLED; + + qlcnic_set_netdev_features(adapter, esw_cfg); +} + +static int +qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) +{ + struct qlcnic_esw_func_cfg esw_cfg; + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return 0; + + esw_cfg.pci_func = adapter->ahw->pci_func; + if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg)) + return -EIO; + qlcnic_set_vlan_config(adapter, &esw_cfg); + qlcnic_set_eswitch_port_features(adapter, &esw_cfg); + + return 0; +} + +static void +qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + struct net_device *netdev = adapter->netdev; + unsigned long features, vlan_features; + + features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_IPV6_CSUM | NETIF_F_GRO); + vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER); + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) { + features |= (NETIF_F_TSO | NETIF_F_TSO6); + vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); + } + + if (netdev->features & NETIF_F_LRO) + features |= NETIF_F_LRO; + + if (esw_cfg->offload_flags & BIT_0) { + netdev->features |= features; + if (!(esw_cfg->offload_flags & BIT_1)) + netdev->features &= ~NETIF_F_TSO; + if (!(esw_cfg->offload_flags & BIT_2)) + netdev->features &= ~NETIF_F_TSO6; + } else { + netdev->features &= ~features; + } + + netdev->vlan_features = (features & vlan_features); +} + +static int +qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter) +{ + void __iomem *priv_op; + u32 op_mode, priv_level; + int err = 0; + + err = qlcnic_initialize_nic(adapter); + if (err) + return err; + + if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED) + return 0; + + priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; + op_mode = readl(priv_op); + priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); + + if (op_mode == QLC_DEV_DRV_DEFAULT) + priv_level = QLCNIC_MGMT_FUNC; + else + priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); + + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { + if (priv_level == QLCNIC_MGMT_FUNC) { + adapter->op_mode = QLCNIC_MGMT_FUNC; + err = qlcnic_init_pci_info(adapter); + if (err) + return err; + /* Set privilege level for other functions */ + qlcnic_set_function_modes(adapter); + dev_info(&adapter->pdev->dev, + "HAL Version: %d, Management function\n", + adapter->fw_hal_version); + } else if (priv_level == QLCNIC_PRIV_FUNC) { + adapter->op_mode = QLCNIC_PRIV_FUNC; + 
dev_info(&adapter->pdev->dev, + "HAL Version: %d, Privileged function\n", + adapter->fw_hal_version); + } + } + + adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + + return err; +} + +static int +qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter) +{ + struct qlcnic_esw_func_cfg esw_cfg; + struct qlcnic_npar_info *npar; + u8 i; + + if (adapter->need_fw_reset) + return 0; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + continue; + memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg)); + esw_cfg.pci_func = i; + esw_cfg.offload_flags = BIT_0; + esw_cfg.mac_override = BIT_0; + esw_cfg.promisc_mode = BIT_0; + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) + esw_cfg.offload_flags |= (BIT_1 | BIT_2); + if (qlcnic_config_switch_port(adapter, &esw_cfg)) + return -EIO; + npar = &adapter->npars[i]; + npar->pvid = esw_cfg.vlan_id; + npar->mac_override = esw_cfg.mac_override; + npar->mac_anti_spoof = esw_cfg.mac_anti_spoof; + npar->discard_tagged = esw_cfg.discard_tagged; + npar->promisc_mode = esw_cfg.promisc_mode; + npar->offload_flags = esw_cfg.offload_flags; + } + + return 0; +} + +static int +qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter, + struct qlcnic_npar_info *npar, int pci_func) +{ + struct qlcnic_esw_func_cfg esw_cfg; + esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS; + esw_cfg.pci_func = pci_func; + esw_cfg.vlan_id = npar->pvid; + esw_cfg.mac_override = npar->mac_override; + esw_cfg.discard_tagged = npar->discard_tagged; + esw_cfg.mac_anti_spoof = npar->mac_anti_spoof; + esw_cfg.offload_flags = npar->offload_flags; + esw_cfg.promisc_mode = npar->promisc_mode; + if (qlcnic_config_switch_port(adapter, &esw_cfg)) + return -EIO; + + esw_cfg.op_mode = QLCNIC_ADD_VLAN; + if (qlcnic_config_switch_port(adapter, &esw_cfg)) + return -EIO; + + return 0; +} + +static int +qlcnic_reset_npar_config(struct qlcnic_adapter *adapter) +{ + int i, err; + struct qlcnic_npar_info *npar; + struct qlcnic_info nic_info; + + if (!adapter->need_fw_reset) + return 0; + + /* Set the NPAR config data after FW reset */ + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + npar = &adapter->npars[i]; + if (npar->type != QLCNIC_TYPE_NIC) + continue; + err = qlcnic_get_nic_info(adapter, &nic_info, i); + if (err) + return err; + nic_info.min_tx_bw = npar->min_bw; + nic_info.max_tx_bw = npar->max_bw; + err = qlcnic_set_nic_info(adapter, &nic_info); + if (err) + return err; + + if (npar->enable_pm) { + err = qlcnic_config_port_mirroring(adapter, + npar->dest_npar, 1, i); + if (err) + return err; + } + err = qlcnic_reset_eswitch_config(adapter, npar, i); + if (err) + return err; + } + return 0; +} + +static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter) +{ + u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO; + u32 npar_state; + + if (adapter->op_mode == QLCNIC_MGMT_FUNC) + return 0; + + npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) { + msleep(1000); + npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + } + if (!npar_opt_timeo) { + dev_err(&adapter->pdev->dev, + "Timed out waiting for NPAR state to become operational\n"); + return -EIO; + } + return 0; +} + +static int +qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter) +{ + int err; + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || + adapter->op_mode != QLCNIC_MGMT_FUNC) + return 0; + + err = qlcnic_set_default_offload_settings(adapter); + if (err) + return err; + + err = qlcnic_reset_npar_config(adapter); 
+ if (err) + return err; + + qlcnic_dev_set_npar_ready(adapter); + + return err; +} + +static int +qlcnic_start_firmware(struct qlcnic_adapter *adapter) +{ + int err; + + err = qlcnic_can_start_firmware(adapter); + if (err < 0) + return err; + else if (!err) + goto check_fw_status; + + if (load_fw_file) + qlcnic_request_firmware(adapter); + else { + err = qlcnic_check_flash_fw_ver(adapter); + if (err) + goto err_out; + + adapter->fw_type = QLCNIC_FLASH_ROMIMAGE; + } + + err = qlcnic_need_fw_reset(adapter); + if (err == 0) + goto check_fw_status; + + err = qlcnic_pinit_from_rom(adapter); + if (err) + goto err_out; + + err = qlcnic_load_firmware(adapter); + if (err) + goto err_out; + + qlcnic_release_firmware(adapter); + QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION); + +check_fw_status: + err = qlcnic_check_fw_status(adapter); + if (err) + goto err_out; + + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); + qlcnic_idc_debug_info(adapter, 1); + + err = qlcnic_check_eswitch_mode(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Memory allocation failed for eswitch\n"); + goto err_out; + } + err = qlcnic_set_mgmt_operations(adapter); + if (err) + goto err_out; + + qlcnic_check_options(adapter); + adapter->need_fw_reset = 0; + + qlcnic_release_firmware(adapter); + return 0; + +err_out: + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); + dev_err(&adapter->pdev->dev, "Device state set to failed\n"); + + qlcnic_release_firmware(adapter); + return err; +} + +static int +qlcnic_request_irq(struct qlcnic_adapter *adapter) +{ + irq_handler_t handler; + struct qlcnic_host_sds_ring *sds_ring; + int err, ring; + + unsigned long flags = 0; + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { + handler = qlcnic_tmp_intr; + if (!QLCNIC_IS_MSI_FAMILY(adapter)) + flags |= IRQF_SHARED; + + } else { + if (adapter->flags & QLCNIC_MSIX_ENABLED) + handler = qlcnic_msix_intr; + else if (adapter->flags & QLCNIC_MSI_ENABLED) + handler = qlcnic_msi_intr; + else { + flags |= IRQF_SHARED; + handler = qlcnic_intr; + } + } + adapter->irq = netdev->irq; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + sprintf(sds_ring->name, "%s[%d]", netdev->name, ring); + err = request_irq(sds_ring->irq, handler, + flags, sds_ring->name, sds_ring); + if (err) + return err; + } + + return 0; +} + +static void +qlcnic_free_irq(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + free_irq(sds_ring->irq, sds_ring); + } +} + +static int +__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + int ring; + struct qlcnic_host_rds_ring *rds_ring; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return -EIO; + + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + return 0; + if (qlcnic_set_eswitch_port_config(adapter)) + return -EIO; + + if (qlcnic_fw_create_ctx(adapter)) + return -EIO; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx->rds_rings[ring]; + qlcnic_post_rx_buffers(adapter, rds_ring); + } + + qlcnic_set_multi(netdev); + qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu); + + adapter->ahw->linkup = 0; + + if (adapter->max_sds_rings > 1) + qlcnic_config_rss(adapter, 1); + + 
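/* Bring-up continues below: interrupt coalescing, optional hardware LRO, NAPI activation and an asynchronous link-event request. */ +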
qlcnic_config_intr_coalesce(adapter); + + if (netdev->features & NETIF_F_LRO) + qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED); + + qlcnic_napi_enable(adapter); + + qlcnic_linkevent_request(adapter, 1); + + adapter->reset_context = 0; + set_bit(__QLCNIC_DEV_UP, &adapter->state); + return 0; +} + +/* Used during resume and the firmware recovery path. */ + +static int +qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + int err = 0; + + rtnl_lock(); + if (netif_running(netdev)) + err = __qlcnic_up(adapter, netdev); + rtnl_unlock(); + + return err; +} + +static void +__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state)) + return; + + smp_mb(); + spin_lock(&adapter->tx_clean_lock); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + qlcnic_free_mac_list(adapter); + + if (adapter->fhash.fnum) + qlcnic_delete_lb_filters(adapter); + + qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE); + + qlcnic_napi_disable(adapter); + + qlcnic_fw_destroy_ctx(adapter); + + qlcnic_reset_rx_buffers_list(adapter); + qlcnic_release_tx_buffers(adapter); + spin_unlock(&adapter->tx_clean_lock); +} + +/* Used during suspend and the firmware recovery path. */ + +static void +qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + rtnl_lock(); + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + rtnl_unlock(); +} + +static int +qlcnic_attach(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err; + + if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) + return 0; + + err = qlcnic_napi_add(adapter, netdev); + if (err) + return err; + + err = qlcnic_alloc_sw_resources(adapter); + if (err) { + dev_err(&pdev->dev, "Error in setting sw resources\n"); + goto err_out_napi_del; + } + + err = qlcnic_alloc_hw_resources(adapter); + if (err) { + dev_err(&pdev->dev, "Error in setting hw resources\n"); + goto err_out_free_sw; + } + + err = qlcnic_request_irq(adapter); + if (err) { + dev_err(&pdev->dev, "failed to setup interrupt\n"); + goto err_out_free_hw; + } + + qlcnic_create_sysfs_entries(adapter); + + adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; + return 0; + +err_out_free_hw: + qlcnic_free_hw_resources(adapter); +err_out_free_sw: + qlcnic_free_sw_resources(adapter); +err_out_napi_del: + qlcnic_napi_del(adapter); + return err; +} + +static void +qlcnic_detach(struct qlcnic_adapter *adapter) +{ + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + qlcnic_remove_sysfs_entries(adapter); + + qlcnic_free_hw_resources(adapter); + qlcnic_release_rx_buffers(adapter); + qlcnic_free_irq(adapter); + qlcnic_napi_del(adapter); + qlcnic_free_sw_resources(adapter); + + adapter->is_up = 0; +} + +void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_sds_ring *sds_ring; + int ring; + + clear_bit(__QLCNIC_DEV_UP, &adapter->state); + if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &adapter->recv_ctx->sds_rings[ring]; + qlcnic_disable_int(sds_ring); + } + } + + qlcnic_fw_destroy_ctx(adapter); + + qlcnic_detach(adapter); + + adapter->diag_test = 0; + adapter->max_sds_rings = max_sds_rings; + + if (qlcnic_attach(adapter)) + goto out; + + if (netif_running(netdev)) + __qlcnic_up(adapter, 
netdev); +out: + netif_device_attach(netdev); +} + +static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter) +{ + int err = 0; + adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context), + GFP_KERNEL); + if (!adapter->ahw) { + dev_err(&adapter->pdev->dev, + "Failed to allocate hardware context for adapter\n"); + err = -ENOMEM; + goto err_out; + } + adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context), + GFP_KERNEL); + if (!adapter->recv_ctx) { + dev_err(&adapter->pdev->dev, + "Failed to allocate recv ctx resources for adapter\n"); + kfree(adapter->ahw); + adapter->ahw = NULL; + err = -ENOMEM; + goto err_out; + } + /* Initialize interrupt coalesce parameters */ + adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT; + adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; + adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; +err_out: + return err; +} + +static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter) +{ + kfree(adapter->recv_ctx); + adapter->recv_ctx = NULL; + + if (adapter->ahw->fw_dump.tmpl_hdr) { + vfree(adapter->ahw->fw_dump.tmpl_hdr); + adapter->ahw->fw_dump.tmpl_hdr = NULL; + } + kfree(adapter->ahw); + adapter->ahw = NULL; +} + +int qlcnic_diag_alloc_res(struct net_device *netdev, int test) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_rds_ring *rds_ring; + int ring; + int ret; + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + qlcnic_detach(adapter); + + adapter->max_sds_rings = 1; + adapter->diag_test = test; + + ret = qlcnic_attach(adapter); + if (ret) { + netif_device_attach(netdev); + return ret; + } + + ret = qlcnic_fw_create_ctx(adapter); + if (ret) { + qlcnic_detach(adapter); + netif_device_attach(netdev); + return ret; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx->rds_rings[ring]; + qlcnic_post_rx_buffers(adapter, rds_ring); + } + + if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &adapter->recv_ctx->sds_rings[ring]; + qlcnic_enable_int(sds_ring); + } + } + + if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) { + adapter->ahw->loopback_state = 0; + qlcnic_linkevent_request(adapter, 1); + } + + set_bit(__QLCNIC_DEV_UP, &adapter->state); + + return 0; +} + +/* Reset context in hardware only */ +static int +qlcnic_reset_hw_context(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + netif_device_detach(netdev); + + qlcnic_down(adapter, netdev); + + qlcnic_up(adapter, netdev); + + netif_device_attach(netdev); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return 0; +} + +int +qlcnic_reset_context(struct qlcnic_adapter *adapter) +{ + int err = 0; + struct net_device *netdev = adapter->netdev; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) { + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + qlcnic_detach(adapter); + + if (netif_running(netdev)) { + err = qlcnic_attach(adapter); + if (!err) + __qlcnic_up(adapter, netdev); + } + + netif_device_attach(netdev); + } + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return err; +} + +static int +qlcnic_setup_netdev(struct qlcnic_adapter *adapter, + 
struct net_device *netdev, u8 pci_using_dac) +{ + int err; + struct pci_dev *pdev = adapter->pdev; + + adapter->mc_enabled = 0; + adapter->max_mc_count = 38; + + netdev->netdev_ops = &qlcnic_netdev_ops; + netdev->watchdog_timeo = 5*HZ; + + qlcnic_change_mtu(netdev, netdev->mtu); + + SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); + + netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) + netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + if (pci_using_dac) + netdev->hw_features |= NETIF_F_HIGHDMA; + + netdev->vlan_features = netdev->hw_features; + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX) + netdev->hw_features |= NETIF_F_HW_VLAN_TX; + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) + netdev->hw_features |= NETIF_F_LRO; + + netdev->features |= netdev->hw_features | + NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + + netdev->irq = adapter->msix_entries[0].vector; + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "failed to register net device\n"); + return err; + } + + return 0; +} + +static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac) +{ + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + *pci_using_dac = 1; + else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + *pci_using_dac = 0; + else { + dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n"); + return -EIO; + } + + return 0; +} + +static int +qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count) +{ + adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry), + GFP_KERNEL); + + if (adapter->msix_entries) + return 0; + + dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n"); + return -ENOMEM; +} + +static int __devinit +qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev = NULL; + struct qlcnic_adapter *adapter = NULL; + int err; + uint8_t revision_id; + uint8_t pci_using_dac; + char brd_name[QLCNIC_MAX_BOARD_NAME_LEN]; + + err = pci_enable_device(pdev); + if (err) + return err; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + err = -ENODEV; + goto err_out_disable_pdev; + } + + err = qlcnic_set_dma_mask(pdev, &pci_using_dac); + if (err) + goto err_out_disable_pdev; + + err = pci_request_regions(pdev, qlcnic_driver_name); + if (err) + goto err_out_disable_pdev; + + pci_set_master(pdev); + pci_enable_pcie_error_reporting(pdev); + + netdev = alloc_etherdev(sizeof(struct qlcnic_adapter)); + if (!netdev) { + dev_err(&pdev->dev, "failed to allocate net_device\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + + if (qlcnic_alloc_adapter_resources(adapter)) + goto err_out_free_netdev; + + adapter->dev_rst_time = jiffies; + revision_id = pdev->revision; + adapter->ahw->revision_id = revision_id; + adapter->mac_learn = qlcnic_mac_learn; + + rwlock_init(&adapter->ahw->crb_lock); + mutex_init(&adapter->ahw->mem_lock); + + spin_lock_init(&adapter->tx_clean_lock); + INIT_LIST_HEAD(&adapter->mac_list); + + err = qlcnic_setup_pci_map(adapter); + if (err) + goto err_out_free_hw; + + /* This will be reset for mezz cards */ + adapter->portnum = adapter->ahw->pci_func; + + err = qlcnic_get_board_info(adapter); + if (err) { + dev_err(&pdev->dev, "Error getting board 
config info.\n"); + goto err_out_iounmap; + } + + err = qlcnic_setup_idc_param(adapter); + if (err) + goto err_out_iounmap; + + adapter->flags |= QLCNIC_NEED_FLR; + + err = adapter->nic_ops->start_firmware(adapter); + if (err) { + dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); + goto err_out_decr_ref; + } + + if (qlcnic_read_mac_addr(adapter)) + dev_warn(&pdev->dev, "failed to read mac addr\n"); + + if (adapter->portnum == 0) { + get_brd_name(adapter, brd_name); + + pr_info("%s: %s Board Chip rev 0x%x\n", + module_name(THIS_MODULE), + brd_name, adapter->ahw->revision_id); + } + + qlcnic_clear_stats(adapter); + + err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques); + if (err) + goto err_out_decr_ref; + + qlcnic_setup_intr(adapter); + + err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac); + if (err) + goto err_out_disable_msi; + + pci_set_drvdata(pdev, adapter); + + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); + + switch (adapter->ahw->port_type) { + case QLCNIC_GBE: + dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", + adapter->netdev->name); + break; + case QLCNIC_XGBE: + dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", + adapter->netdev->name); + break; + } + + if (adapter->mac_learn) + qlcnic_alloc_lb_filters_mem(adapter); + + qlcnic_create_diag_entries(adapter); + + return 0; + +err_out_disable_msi: + qlcnic_teardown_intr(adapter); + kfree(adapter->msix_entries); + +err_out_decr_ref: + qlcnic_clr_all_drv_state(adapter, 0); + +err_out_iounmap: + qlcnic_cleanup_pci_map(adapter); + +err_out_free_hw: + qlcnic_free_adapter_resources(adapter); + +err_out_free_netdev: + free_netdev(netdev); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable_pdev: + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + return err; +} + +static void __devexit qlcnic_remove(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter; + struct net_device *netdev; + + adapter = pci_get_drvdata(pdev); + if (adapter == NULL) + return; + + netdev = adapter->netdev; + + qlcnic_cancel_fw_work(adapter); + + unregister_netdev(netdev); + + qlcnic_detach(adapter); + + if (adapter->npars != NULL) + kfree(adapter->npars); + if (adapter->eswitch != NULL) + kfree(adapter->eswitch); + + qlcnic_clr_all_drv_state(adapter, 0); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + qlcnic_free_lb_filters_mem(adapter); + + qlcnic_teardown_intr(adapter); + kfree(adapter->msix_entries); + + qlcnic_remove_diag_entries(adapter); + + qlcnic_cleanup_pci_map(adapter); + + qlcnic_release_firmware(adapter); + + pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + qlcnic_free_adapter_resources(adapter); + free_netdev(netdev); +} +static int __qlcnic_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int retval; + + netif_device_detach(netdev); + + qlcnic_cancel_fw_work(adapter); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_clr_all_drv_state(adapter, 0); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + if (qlcnic_wol_supported(adapter)) { + pci_enable_wake(pdev, PCI_D3cold, 1); + pci_enable_wake(pdev, PCI_D3hot, 1); + } + + return 0; +} + +static void qlcnic_shutdown(struct pci_dev *pdev) +{ + if (__qlcnic_shutdown(pdev)) + return; + + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM 
+static int +qlcnic_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + + retval = __qlcnic_shutdown(pdev); + if (retval) + return retval; + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int +qlcnic_resume(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_power_state(pdev, PCI_D0); + pci_set_master(pdev); + pci_restore_state(pdev); + + err = adapter->nic_ops->start_firmware(adapter); + if (err) { + dev_err(&pdev->dev, "failed to start firmware\n"); + return err; + } + + if (netif_running(netdev)) { + err = qlcnic_up(adapter, netdev); + if (err) + goto done; + + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } +done: + netif_device_attach(netdev); + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); + return 0; +} +#endif + +static int qlcnic_open(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err; + + netif_carrier_off(netdev); + + err = qlcnic_attach(adapter); + if (err) + return err; + + err = __qlcnic_up(adapter, netdev); + if (err) + goto err_out; + + netif_start_queue(netdev); + + return 0; + +err_out: + qlcnic_detach(adapter); + return err; +} + +/* + * qlcnic_close - Disables a network interface entry point + */ +static int qlcnic_close(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + __qlcnic_down(adapter, netdev); + return 0; +} + +void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter) +{ + void *head; + int i; + + if (adapter->fhash.fmax && adapter->fhash.fhead) + return; + + spin_lock_init(&adapter->mac_learn_lock); + + head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head), + GFP_KERNEL); + if (!head) + return; + + adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS; + adapter->fhash.fhead = head; + + for (i = 0; i < adapter->fhash.fmax; i++) + INIT_HLIST_HEAD(&adapter->fhash.fhead[i]); +} + +static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter) +{ + if (adapter->fhash.fmax && adapter->fhash.fhead) + kfree(adapter->fhash.fhead); + + adapter->fhash.fhead = NULL; + adapter->fhash.fmax = 0; +} + +static void qlcnic_change_filter(struct qlcnic_adapter *adapter, + u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring) +{ + struct cmd_desc_type0 *hwdesc; + struct qlcnic_nic_req *req; + struct qlcnic_mac_req *mac_req; + struct qlcnic_vlan_req *vlan_req; + u32 producer; + u64 word; + + producer = tx_ring->producer; + hwdesc = &tx_ring->desc_head[tx_ring->producer]; + + req = (struct qlcnic_nic_req *)hwdesc; + memset(req, 0, sizeof(struct qlcnic_nic_req)); + req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); + + word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16); + req->req_hdr = cpu_to_le64(word); + + mac_req = (struct qlcnic_mac_req *)&(req->words[0]); + mac_req->op = vlan_id ? 
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; + memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN); + + vlan_req = (struct qlcnic_vlan_req *)&req->words[1]; + vlan_req->vlan_id = vlan_id; + + tx_ring->producer = get_next_index(producer, tx_ring->num_desc); + smp_mb(); +} + +#define QLCNIC_MAC_HASH(MAC)\ + ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25)) + +static void +qlcnic_send_filter(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring, + struct cmd_desc_type0 *first_desc, + struct sk_buff *skb) +{ + struct ethhdr *phdr = (struct ethhdr *)(skb->data); + struct qlcnic_filter *fil, *tmp_fil; + struct hlist_node *tmp_hnode, *n; + struct hlist_head *head; + u64 src_addr = 0; + __le16 vlan_id = 0; + u8 hindex; + + if (!compare_ether_addr(phdr->h_source, adapter->mac_addr)) + return; + + if (adapter->fhash.fnum >= adapter->fhash.fmax) + return; + + /* Only NPAR capable devices support vlan based learning*/ + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) + vlan_id = first_desc->vlan_TCI; + memcpy(&src_addr, phdr->h_source, ETH_ALEN); + hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1); + head = &(adapter->fhash.fhead[hindex]); + + hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { + if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && + tmp_fil->vlan_id == vlan_id) { + + if (jiffies > + (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) + qlcnic_change_filter(adapter, src_addr, vlan_id, + tx_ring); + tmp_fil->ftime = jiffies; + return; + } + } + + fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); + if (!fil) + return; + + qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring); + + fil->ftime = jiffies; + fil->vlan_id = vlan_id; + memcpy(fil->faddr, &src_addr, ETH_ALEN); + spin_lock(&adapter->mac_learn_lock); + hlist_add_head(&(fil->fnode), head); + adapter->fhash.fnum++; + spin_unlock(&adapter->mac_learn_lock); +} + +static int +qlcnic_tx_pkt(struct qlcnic_adapter *adapter, + struct cmd_desc_type0 *first_desc, + struct sk_buff *skb) +{ + u8 opcode = 0, hdr_len = 0; + u16 flags = 0, vlan_tci = 0; + int copied, offset, copy_len; + struct cmd_desc_type0 *hwdesc; + struct vlan_ethhdr *vh; + struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; + u16 protocol = ntohs(skb->protocol); + u32 producer = tx_ring->producer; + + if (protocol == ETH_P_8021Q) { + vh = (struct vlan_ethhdr *)skb->data; + flags = FLAGS_VLAN_TAGGED; + vlan_tci = vh->h_vlan_TCI; + } else if (vlan_tx_tag_present(skb)) { + flags = FLAGS_VLAN_OOB; + vlan_tci = vlan_tx_tag_get(skb); + } + if (unlikely(adapter->pvid)) { + if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) + return -EIO; + if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED)) + goto set_flags; + + flags = FLAGS_VLAN_OOB; + vlan_tci = adapter->pvid; + } +set_flags: + qlcnic_set_tx_vlan_tci(first_desc, vlan_tci); + qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); + + if (*(skb->data) & BIT_0) { + flags |= BIT_0; + memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); + } + opcode = TX_ETHER_PKT; + if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && + skb_shinfo(skb)->gso_size > 0) { + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + first_desc->total_hdr_length = hdr_len; + + opcode = (protocol == ETH_P_IPV6) ? 
TX_TCP_LSO6 : TX_TCP_LSO; + + /* For LSO, we need to copy the MAC/IP/TCP headers into + * the descriptor ring */ + copied = 0; + offset = 2; + + if (flags & FLAGS_VLAN_OOB) { + first_desc->total_hdr_length += VLAN_HLEN; + first_desc->tcp_hdr_offset = VLAN_HLEN; + first_desc->ip_hdr_offset = VLAN_HLEN; + /* Only in case of TSO on vlan device */ + flags |= FLAGS_VLAN_TAGGED; + + /* Create a TSO vlan header template for firmware */ + + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + + copy_len = min((int)sizeof(struct cmd_desc_type0) - + offset, hdr_len + VLAN_HLEN); + + vh = (struct vlan_ethhdr *)((char *) hwdesc + 2); + skb_copy_from_linear_data(skb, vh, 12); + vh->h_vlan_proto = htons(ETH_P_8021Q); + vh->h_vlan_TCI = htons(vlan_tci); + + skb_copy_from_linear_data_offset(skb, 12, + (char *)vh + 16, copy_len - 16); + + copied = copy_len - VLAN_HLEN; + offset = 0; + + producer = get_next_index(producer, tx_ring->num_desc); + } + + while (copied < hdr_len) { + + copy_len = min((int)sizeof(struct cmd_desc_type0) - + offset, (hdr_len - copied)); + + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + + skb_copy_from_linear_data_offset(skb, copied, + (char *) hwdesc + offset, copy_len); + + copied += copy_len; + offset = 0; + + producer = get_next_index(producer, tx_ring->num_desc); + } + + tx_ring->producer = producer; + smp_mb(); + adapter->stats.lso_frames++; + + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 l4proto; + + if (protocol == ETH_P_IP) { + l4proto = ip_hdr(skb)->protocol; + + if (l4proto == IPPROTO_TCP) + opcode = TX_TCP_PKT; + else if (l4proto == IPPROTO_UDP) + opcode = TX_UDP_PKT; + } else if (protocol == ETH_P_IPV6) { + l4proto = ipv6_hdr(skb)->nexthdr; + + if (l4proto == IPPROTO_TCP) + opcode = TX_TCPV6_PKT; + else if (l4proto == IPPROTO_UDP) + opcode = TX_UDPV6_PKT; + } + } + first_desc->tcp_hdr_offset += skb_transport_offset(skb); + first_desc->ip_hdr_offset += skb_network_offset(skb); + qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); + + return 0; +} + +static int +qlcnic_map_tx_skb(struct pci_dev *pdev, + struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf) +{ + struct qlcnic_skb_frag *nf; + struct skb_frag_struct *frag; + int i, nr_frags; + dma_addr_t map; + + nr_frags = skb_shinfo(skb)->nr_frags; + nf = &pbuf->frag_array[0]; + + map = pci_map_single(pdev, skb->data, + skb_headlen(skb), PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, map)) + goto out_err; + + nf->dma = map; + nf->length = skb_headlen(skb); + + for (i = 0; i < nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + nf = &pbuf->frag_array[i+1]; + + map = pci_map_page(pdev, frag->page, frag->page_offset, + frag->size, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, map)) + goto unwind; + + nf->dma = map; + nf->length = frag->size; + } + + return 0; + +unwind: + while (--i >= 0) { + nf = &pbuf->frag_array[i+1]; + pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + } + + nf = &pbuf->frag_array[0]; + pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + +out_err: + return -ENOMEM; +} + +static void +qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb, + struct qlcnic_cmd_buffer *pbuf) +{ + struct qlcnic_skb_frag *nf = &pbuf->frag_array[0]; + int nr_frags = skb_shinfo(skb)->nr_frags; + int i; + + for (i = 0; i < nr_frags; i++) { + nf = &pbuf->frag_array[i+1]; + pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + } + + nf = &pbuf->frag_array[0]; + 
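/* the head was mapped with pci_map_single(), so unmap it the same way */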
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + pbuf->skb = NULL; +} + +static inline void +qlcnic_clear_cmddesc(u64 *desc) +{ + desc[0] = 0ULL; + desc[2] = 0ULL; + desc[7] = 0ULL; +} + +netdev_tx_t +qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; + struct qlcnic_cmd_buffer *pbuf; + struct qlcnic_skb_frag *buffrag; + struct cmd_desc_type0 *hwdesc, *first_desc; + struct pci_dev *pdev; + struct ethhdr *phdr; + int delta = 0; + int i, k; + + u32 producer; + int frag_count; + u32 num_txd = tx_ring->num_desc; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + if (adapter->flags & QLCNIC_MACSPOOF) { + phdr = (struct ethhdr *)skb->data; + if (compare_ether_addr(phdr->h_source, + adapter->mac_addr)) + goto drop_packet; + } + + frag_count = skb_shinfo(skb)->nr_frags + 1; + /* 14 frags are supported for a normal packet and + * 32 frags for a TSO packet. + */ + if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) { + + for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++) + delta += skb_shinfo(skb)->frags[i].size; + + if (!__pskb_pull_tail(skb, delta)) + goto drop_packet; + + frag_count = 1 + skb_shinfo(skb)->nr_frags; + } + + if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { + netif_stop_queue(netdev); + if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_start_queue(netdev); + else { + adapter->stats.xmit_off++; + return NETDEV_TX_BUSY; + } + } + + producer = tx_ring->producer; + pbuf = &tx_ring->cmd_buf_arr[producer]; + + pdev = adapter->pdev; + + first_desc = hwdesc = &tx_ring->desc_head[producer]; + qlcnic_clear_cmddesc((u64 *)hwdesc); + + if (qlcnic_map_tx_skb(pdev, skb, pbuf)) { + adapter->stats.tx_dma_map_error++; + goto drop_packet; + } + + pbuf->skb = skb; + pbuf->frag_count = frag_count; + + qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len); + qlcnic_set_tx_port(first_desc, adapter->portnum); + + for (i = 0; i < frag_count; i++) { + + k = i % 4; + + if ((k == 0) && (i > 0)) { + /* move to the next descriptor */ + producer = get_next_index(producer, num_txd); + hwdesc = &tx_ring->desc_head[producer]; + qlcnic_clear_cmddesc((u64 *)hwdesc); + tx_ring->cmd_buf_arr[producer].skb = NULL; + } + + buffrag = &pbuf->frag_array[i]; + + hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); + switch (k) { + case 0: + hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); + break; + case 1: + hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); + break; + case 2: + hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma); + break; + case 3: + hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); + break; + } + } + + tx_ring->producer = get_next_index(producer, num_txd); + smp_mb(); + + if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb))) + goto unwind_buff; + + if (adapter->mac_learn) + qlcnic_send_filter(adapter, tx_ring, first_desc, skb); + + adapter->stats.txbytes += skb->len; + adapter->stats.xmitcalled++; + + qlcnic_update_cmd_producer(adapter, tx_ring); + + return NETDEV_TX_OK; + +unwind_buff: + qlcnic_unmap_buffers(pdev, skb, pbuf); +drop_packet: + adapter->stats.txdropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static int qlcnic_check_temp(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 temp, temp_state, temp_val; + int rv = 0; + + temp = QLCRD32(adapter, CRB_TEMP_STATE); + + temp_state =
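/* CRB_TEMP_STATE packs the state and the raw degree reading together */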
qlcnic_get_temp_state(temp); + temp_val = qlcnic_get_temp_val(temp); + + if (temp_state == QLCNIC_TEMP_PANIC) { + dev_err(&netdev->dev, + "Device temperature %d degrees C exceeds" + " maximum allowed. Hardware has been shut down.\n", + temp_val); + rv = 1; + } else if (temp_state == QLCNIC_TEMP_WARN) { + if (adapter->temp == QLCNIC_TEMP_NORMAL) { + dev_err(&netdev->dev, + "Device temperature %d degrees C " + "exceeds operating range." + " Immediate action needed.\n", + temp_val); + } + } else { + if (adapter->temp == QLCNIC_TEMP_WARN) { + dev_info(&netdev->dev, + "Device temperature is now %d degrees C" + " in normal range.\n", temp_val); + } + } + adapter->temp = temp_state; + return rv; +} + +void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->ahw->linkup && !linkup) { + netdev_info(netdev, "NIC Link is down\n"); + adapter->ahw->linkup = 0; + if (netif_running(netdev)) { + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } else if (!adapter->ahw->linkup && linkup) { + netdev_info(netdev, "NIC Link is up\n"); + adapter->ahw->linkup = 1; + if (netif_running(netdev)) { + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + } +} + +static void qlcnic_tx_timeout(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + return; + + dev_err(&netdev->dev, "transmit timeout, resetting.\n"); + + if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) + adapter->need_fw_reset = 1; + else + adapter->reset_context = 1; +} + +static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct net_device_stats *stats = &netdev->stats; + + stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; + stats->tx_packets = adapter->stats.xmitfinished; + stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; + stats->tx_bytes = adapter->stats.txbytes; + stats->rx_dropped = adapter->stats.rxdropped; + stats->tx_dropped = adapter->stats.txdropped; + + return stats; +} + +static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter) +{ + u32 status; + + status = readl(adapter->isr_int_vec); + + if (!(status & adapter->int_vec_bit)) + return IRQ_NONE; + + /* check interrupt state machine, to be sure */ + status = readl(adapter->crb_int_state_reg); + if (!ISR_LEGACY_INT_TRIGGERED(status)) + return IRQ_NONE; + + writel(0xffffffff, adapter->tgt_status_reg); + /* read twice to ensure write is flushed */ + readl(adapter->isr_int_vec); + readl(adapter->isr_int_vec); + + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_tmp_intr(int irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + struct qlcnic_adapter *adapter = sds_ring->adapter; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + goto done; + else if (adapter->flags & QLCNIC_MSI_ENABLED) { + writel(0xffffffff, adapter->tgt_status_reg); + goto done; + } + + if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE) + return IRQ_NONE; + +done: + adapter->diag_cnt++; + qlcnic_enable_int(sds_ring); + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_intr(int irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + struct qlcnic_adapter *adapter = sds_ring->adapter; + + if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE) + return IRQ_NONE; + + napi_schedule(&sds_ring->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_msi_intr(int 
irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + struct qlcnic_adapter *adapter = sds_ring->adapter; + + /* clear interrupt */ + writel(0xffffffff, adapter->tgt_status_reg); + + napi_schedule(&sds_ring->napi); + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_msix_intr(int irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + + napi_schedule(&sds_ring->napi); + return IRQ_HANDLED; +} + +static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) +{ + u32 sw_consumer, hw_consumer; + int count = 0, i; + struct qlcnic_cmd_buffer *buffer; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + struct qlcnic_skb_frag *frag; + int done; + struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; + + if (!spin_trylock(&adapter->tx_clean_lock)) + return 1; + + sw_consumer = tx_ring->sw_consumer; + hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); + + while (sw_consumer != hw_consumer) { + buffer = &tx_ring->cmd_buf_arr[sw_consumer]; + if (buffer->skb) { + frag = &buffer->frag_array[0]; + pci_unmap_single(pdev, frag->dma, frag->length, + PCI_DMA_TODEVICE); + frag->dma = 0ULL; + for (i = 1; i < buffer->frag_count; i++) { + frag++; + pci_unmap_page(pdev, frag->dma, frag->length, + PCI_DMA_TODEVICE); + frag->dma = 0ULL; + } + + adapter->stats.xmitfinished++; + dev_kfree_skb_any(buffer->skb); + buffer->skb = NULL; + } + + sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); + if (++count >= MAX_STATUS_HANDLE) + break; + } + + if (count && netif_running(netdev)) { + tx_ring->sw_consumer = sw_consumer; + + smp_mb(); + + if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { + if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { + netif_wake_queue(netdev); + adapter->stats.xmit_on++; + } + } + adapter->tx_timeo_cnt = 0; + } + /* + * If everything is freed up to consumer then check if the ring is full + * If the ring is full then check if more needs to be freed and + * schedule the call back again. + * + * This happens when there are 2 CPUs. One could be freeing and the + * other filling it. If the ring is full when we get out of here and + * the card has already interrupted the host then the host can miss the + * interrupt. + * + * There is still a possible race condition and the host could miss an + * interrupt. The card has to take care of this. 
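+ * The re-read of hw_consumer just below narrows that window, but it + * cannot close it completely.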
+ */ + hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); + done = (sw_consumer == hw_consumer); + spin_unlock(&adapter->tx_clean_lock); + + return done; +} + +static int qlcnic_poll(struct napi_struct *napi, int budget) +{ + struct qlcnic_host_sds_ring *sds_ring = + container_of(napi, struct qlcnic_host_sds_ring, napi); + + struct qlcnic_adapter *adapter = sds_ring->adapter; + + int tx_complete; + int work_done; + + tx_complete = qlcnic_process_cmd_ring(adapter); + + work_done = qlcnic_process_rcv_ring(sds_ring, budget); + + if ((work_done < budget) && tx_complete) { + napi_complete(&sds_ring->napi); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_enable_int(sds_ring); + } + + return work_done; +} + +static int qlcnic_rx_poll(struct napi_struct *napi, int budget) +{ + struct qlcnic_host_sds_ring *sds_ring = + container_of(napi, struct qlcnic_host_sds_ring, napi); + + struct qlcnic_adapter *adapter = sds_ring->adapter; + int work_done; + + work_done = qlcnic_process_rcv_ring(sds_ring, budget); + + if (work_done < budget) { + napi_complete(&sds_ring->napi); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_enable_int(sds_ring); + } + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void qlcnic_poll_controller(struct net_device *netdev) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + disable_irq(adapter->irq); + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + qlcnic_intr(adapter->irq, sds_ring); + } + enable_irq(adapter->irq); +} +#endif + +static void +qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding) +{ + u32 val; + + val = adapter->portnum & 0xf; + val |= encoding << 7; + val |= (jiffies - adapter->dev_rst_time) << 8; + + QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val); + adapter->dev_rst_time = jiffies; +} + +static int +qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state) +{ + u32 val; + + WARN_ON(state != QLCNIC_DEV_NEED_RESET && + state != QLCNIC_DEV_NEED_QUISCENT); + + if (qlcnic_api_lock(adapter)) + return -EIO; + + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + + if (state == QLCNIC_DEV_NEED_RESET) + QLC_DEV_SET_RST_RDY(val, adapter->portnum); + else if (state == QLCNIC_DEV_NEED_QUISCENT) + QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum); + + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + + qlcnic_api_unlock(adapter); + + return 0; +} + +static int +qlcnic_clr_drv_state(struct qlcnic_adapter *adapter) +{ + u32 val; + + if (qlcnic_api_lock(adapter)) + return -EBUSY; + + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + + qlcnic_api_unlock(adapter); + + return 0; +} + +static void +qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed) +{ + u32 val; + + if (qlcnic_api_lock(adapter)) + goto err; + + val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + QLC_DEV_CLR_REF_CNT(val, adapter->portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); + + if (failed) { + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); + dev_info(&adapter->pdev->dev, + "Device state set to Failed. 
Please reboot.\n"); + } else if (!(val & 0x11111111)) + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD); + + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + + qlcnic_api_unlock(adapter); +err: + adapter->fw_fail_cnt = 0; + adapter->flags &= ~QLCNIC_FW_HANG; + clear_bit(__QLCNIC_START_FW, &adapter->state); + clear_bit(__QLCNIC_RESETTING, &adapter->state); +} + +/* Grab the API lock before checking the device state */ +static int +qlcnic_check_drv_state(struct qlcnic_adapter *adapter) +{ + int act, state, active_mask; + + state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + + if (adapter->flags & QLCNIC_FW_RESET_OWNER) { + active_mask = (~(1 << (adapter->ahw->pci_func * 4))); + act = act & active_mask; + } + + if (((state & 0x11111111) == (act & 0x11111111)) || + ((act & 0x11111111) == ((state >> 1) & 0x11111111))) + return 0; + else + return 1; +} + +static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter) +{ + u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER); + + if (val != QLCNIC_DRV_IDC_VER) { + dev_warn(&adapter->pdev->dev, "IDC version mismatch: driver's" + " IDC ver = %x; required = %x\n", QLCNIC_DRV_IDC_VER, val); + } + + return 0; +} + +static int +qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) +{ + u32 val, prev_state; + u8 dev_init_timeo = adapter->dev_init_timeo; + u8 portnum = adapter->portnum; + u8 ret; + + if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) + return 1; + + if (qlcnic_api_lock(adapter)) + return -1; + + val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + if (!(val & (1 << (portnum * 4)))) { + QLC_DEV_SET_REF_CNT(val, portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); + } + + prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + QLCDB(adapter, HW, "Device state = %u\n", prev_state); + + switch (prev_state) { + case QLCNIC_DEV_COLD: + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); + QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER); + qlcnic_idc_debug_info(adapter, 0); + qlcnic_api_unlock(adapter); + return 1; + + case QLCNIC_DEV_READY: + ret = qlcnic_check_idc_ver(adapter); + qlcnic_api_unlock(adapter); + return ret; + + case QLCNIC_DEV_NEED_RESET: + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_SET_RST_RDY(val, portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + break; + + case QLCNIC_DEV_NEED_QUISCENT: + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_SET_QSCNT_RDY(val, portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + break; + + case QLCNIC_DEV_FAILED: + dev_err(&adapter->pdev->dev, "Device in failed state.\n"); + qlcnic_api_unlock(adapter); + return -1; + + case QLCNIC_DEV_INITIALIZING: + case QLCNIC_DEV_QUISCENT: + break; + } + + qlcnic_api_unlock(adapter); + + do { + msleep(1000); + prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + + if (prev_state == QLCNIC_DEV_QUISCENT) + continue; + } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo); + + if (!dev_init_timeo) { + dev_err(&adapter->pdev->dev, + "Timed out waiting for the device to initialize\n"); + return -1; + } + + if (qlcnic_api_lock(adapter)) + return -1; + + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_CLR_RST_QSCNT(val, portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + + ret = qlcnic_check_idc_ver(adapter); + qlcnic_api_unlock(adapter); + + return ret; +} + +static void +qlcnic_fwinit_work(struct work_struct *work) +{ + struct qlcnic_adapter
*adapter = container_of(work, + struct qlcnic_adapter, fw_work.work); + u32 dev_state = 0xf; + u32 val; + + if (qlcnic_api_lock(adapter)) + goto err_ret; + + dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + if (dev_state == QLCNIC_DEV_QUISCENT || + dev_state == QLCNIC_DEV_NEED_QUISCENT) { + qlcnic_api_unlock(adapter); + qlcnic_schedule_work(adapter, qlcnic_fwinit_work, + FW_POLL_DELAY * 2); + return; + } + + if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { + qlcnic_api_unlock(adapter); + goto wait_npar; + } + + if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { + dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n", + adapter->reset_ack_timeo); + goto skip_ack_check; + } + + if (!qlcnic_check_drv_state(adapter)) { +skip_ack_check: + dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + + if (dev_state == QLCNIC_DEV_NEED_RESET) { + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_INITIALIZING); + set_bit(__QLCNIC_START_FW, &adapter->state); + QLCDB(adapter, DRV, "Restarting fw\n"); + qlcnic_idc_debug_info(adapter, 0); + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_SET_RST_RDY(val, adapter->portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + } + + qlcnic_api_unlock(adapter); + + rtnl_lock(); + if (adapter->ahw->fw_dump.enable && + (adapter->flags & QLCNIC_FW_RESET_OWNER)) { + QLCDB(adapter, DRV, "Take FW dump\n"); + qlcnic_dump_fw(adapter); + adapter->flags |= QLCNIC_FW_HANG; + } + rtnl_unlock(); + + adapter->flags &= ~QLCNIC_FW_RESET_OWNER; + if (!adapter->nic_ops->start_firmware(adapter)) { + qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); + adapter->fw_wait_cnt = 0; + return; + } + goto err_ret; + } + + qlcnic_api_unlock(adapter); + +wait_npar: + dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); + + switch (dev_state) { + case QLCNIC_DEV_READY: + if (!adapter->nic_ops->start_firmware(adapter)) { + qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); + adapter->fw_wait_cnt = 0; + return; + } + case QLCNIC_DEV_FAILED: + break; + default: + qlcnic_schedule_work(adapter, + qlcnic_fwinit_work, FW_POLL_DELAY); + return; + } + +err_ret: + dev_err(&adapter->pdev->dev, "Fwinit work failed, state=%u " + "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt); + netif_device_attach(adapter->netdev); + qlcnic_clr_all_drv_state(adapter, 0); +} + +static void +qlcnic_detach_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter = container_of(work, + struct qlcnic_adapter, fw_work.work); + struct net_device *netdev = adapter->netdev; + u32 status; + + netif_device_detach(netdev); + + /* Don't grab the rtnl lock during quiescent mode */ + if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) { + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + } else + qlcnic_down(adapter, netdev); + + status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1); + + if (status & QLCNIC_RCODE_FATAL_ERROR) + goto err_ret; + + if (adapter->temp == QLCNIC_TEMP_PANIC) + goto err_ret; + /* Don't ack if this instance is the reset owner */ + if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) { + if (qlcnic_set_drv_state(adapter, adapter->dev_state)) + goto err_ret; + } + + adapter->fw_wait_cnt = 0; + + qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY); + + return; + +err_ret: + dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n", + status, adapter->temp); + netif_device_attach(netdev); + qlcnic_clr_all_drv_state(adapter, 1); +} + +/* Transition NPAR state to non-operational */ +static
void +qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter) +{ + u32 state; + + state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + if (state == QLCNIC_DEV_NPAR_NON_OPER) + return; + + if (qlcnic_api_lock(adapter)) + return; + QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER); + qlcnic_api_unlock(adapter); +} + +/* Transition to RESET state from READY state only */ +void +qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) +{ + u32 state; + + adapter->need_fw_reset = 1; + if (qlcnic_api_lock(adapter)) + return; + + state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + + if (state == QLCNIC_DEV_READY) { + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET); + adapter->flags |= QLCNIC_FW_RESET_OWNER; + QLCDB(adapter, DRV, "NEED_RESET state set\n"); + qlcnic_idc_debug_info(adapter, 0); + } + + QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER); + qlcnic_api_unlock(adapter); +} + +/* Transition to NPAR READY state from NPAR NOT READY state */ +static void +qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter) +{ + if (qlcnic_api_lock(adapter)) + return; + + QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER); + QLCDB(adapter, DRV, "NPAR operational state set\n"); + + qlcnic_api_unlock(adapter); +} + +static void +qlcnic_schedule_work(struct qlcnic_adapter *adapter, + work_func_t func, int delay) +{ + if (test_bit(__QLCNIC_AER, &adapter->state)) + return; + + INIT_DELAYED_WORK(&adapter->fw_work, func); + queue_delayed_work(qlcnic_wq, &adapter->fw_work, + round_jiffies_relative(delay)); +} + +static void +qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter) +{ + while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + msleep(10); + + cancel_delayed_work_sync(&adapter->fw_work); +} + +static void +qlcnic_attach_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter = container_of(work, + struct qlcnic_adapter, fw_work.work); + struct net_device *netdev = adapter->netdev; + u32 npar_state; + + if (adapter->op_mode != QLCNIC_MGMT_FUNC) { + npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO) + qlcnic_clr_all_drv_state(adapter, 0); + else if (npar_state != QLCNIC_DEV_NPAR_OPER) + qlcnic_schedule_work(adapter, qlcnic_attach_work, + FW_POLL_DELAY); + else + goto attach; + QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n"); + return; + } +attach: + if (netif_running(netdev)) { + if (qlcnic_up(adapter, netdev)) + goto done; + + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + +done: + netif_device_attach(netdev); + adapter->fw_fail_cnt = 0; + adapter->flags &= ~QLCNIC_FW_HANG; + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + if (!qlcnic_clr_drv_state(adapter)) + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, + FW_POLL_DELAY); +} + +static int +qlcnic_check_health(struct qlcnic_adapter *adapter) +{ + u32 state = 0, heartbeat; + struct net_device *netdev = adapter->netdev; + + if (qlcnic_check_temp(adapter)) + goto detach; + + if (adapter->need_fw_reset) + qlcnic_dev_request_reset(adapter); + + state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + if (state == QLCNIC_DEV_NEED_RESET) { + qlcnic_set_npar_non_operational(adapter); + adapter->need_fw_reset = 1; + } else if (state == QLCNIC_DEV_NEED_QUISCENT) + goto detach; + + heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); + if (heartbeat != adapter->heartbeat) { + adapter->heartbeat = heartbeat; + adapter->fw_fail_cnt = 0; + if (adapter->need_fw_reset) + goto detach; + + if
(adapter->reset_context && auto_fw_reset) { + qlcnic_reset_hw_context(adapter); + adapter->netdev->trans_start = jiffies; + } + + return 0; + } + + if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) + return 0; + + adapter->flags |= QLCNIC_FW_HANG; + + qlcnic_dev_request_reset(adapter); + + if (auto_fw_reset) + clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); + + dev_info(&netdev->dev, "firmware hang detected\n"); + dev_info(&adapter->pdev->dev, "Dumping hw/fw registers\n" + "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" + "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" + "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" + "PEG_NET_4_PC: 0x%x\n", + QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1), + QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c)); +detach: + adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state : + QLCNIC_DEV_NEED_RESET; + + if (auto_fw_reset && + !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) { + + qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); + QLCDB(adapter, DRV, "fw recovery scheduled.\n"); + } + + return 1; +} + +static void +qlcnic_fw_poll_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter = container_of(work, + struct qlcnic_adapter, fw_work.work); + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + goto reschedule; + + + if (qlcnic_check_health(adapter)) + return; + + if (adapter->fhash.fnum) + qlcnic_prune_lb_filters(adapter); + +reschedule: + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); +} + +static int qlcnic_is_first_func(struct pci_dev *pdev) +{ + struct pci_dev *oth_pdev; + int val = pdev->devfn; + + while (val-- > 0) { + oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr + (pdev->bus), pdev->bus->number, + PCI_DEVFN(PCI_SLOT(pdev->devfn), val)); + if (!oth_pdev) + continue; + + if (oth_pdev->current_state != PCI_D3cold) { + pci_dev_put(oth_pdev); + return 0; + } + pci_dev_put(oth_pdev); + } + return 1; +} + +static int qlcnic_attach_func(struct pci_dev *pdev) +{ + int err, first_func; + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + pdev->error_state = pci_channel_io_normal; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_power_state(pdev, PCI_D0); + pci_set_master(pdev); + pci_restore_state(pdev); + + first_func = qlcnic_is_first_func(pdev); + + if (qlcnic_api_lock(adapter)) + return -EINVAL; + + if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) { + adapter->need_fw_reset = 1; + set_bit(__QLCNIC_START_FW, &adapter->state); + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); + QLCDB(adapter, DRV, "Restarting fw\n"); + } + qlcnic_api_unlock(adapter); + + err = adapter->nic_ops->start_firmware(adapter); + if (err) + return err; + + qlcnic_clr_drv_state(adapter); + qlcnic_setup_intr(adapter); + + if (netif_running(netdev)) { + err = qlcnic_attach(adapter); + if (err) { + qlcnic_clr_all_drv_state(adapter, 1); + clear_bit(__QLCNIC_AER, &adapter->state); + netif_device_attach(netdev); + return err; + } + + err = qlcnic_up(adapter, netdev); + if (err) + goto done; + + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + done: + netif_device_attach(netdev); + return err; +} + +static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t 
state) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (state == pci_channel_io_normal) + return PCI_ERS_RESULT_RECOVERED; + + set_bit(__QLCNIC_AER, &adapter->state); + netif_device_detach(netdev); + + cancel_delayed_work_sync(&adapter->fw_work); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_detach(adapter); + qlcnic_teardown_intr(adapter); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + pci_save_state(pdev); + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev) +{ + return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT : + PCI_ERS_RESULT_RECOVERED; +} + +static void qlcnic_io_resume(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + + pci_cleanup_aer_uncorrect_error_status(pdev); + + if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY && + test_and_clear_bit(__QLCNIC_AER, &adapter->state)) + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, + FW_POLL_DELAY); +} + +static int +qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) +{ + int err; + + err = qlcnic_can_start_firmware(adapter); + if (err) + return err; + + err = qlcnic_check_npar_opertional(adapter); + if (err) + return err; + + err = qlcnic_initialize_nic(adapter); + if (err) + return err; + + qlcnic_check_options(adapter); + + err = qlcnic_set_eswitch_port_config(adapter); + if (err) + return err; + + adapter->need_fw_reset = 0; + + return err; +} + +static int +qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) +{ + return -EOPNOTSUPP; +} + +static int +qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) +{ + return -EOPNOTSUPP; +} + +static ssize_t +qlcnic_store_bridged_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + unsigned long new; + int ret = -EINVAL; + + if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)) + goto err_out; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + goto err_out; + + if (strict_strtoul(buf, 2, &new)) + goto err_out; + + if (!adapter->nic_ops->config_bridged_mode(adapter, !!new)) + ret = len; + +err_out: + return ret; +} + +static ssize_t +qlcnic_show_bridged_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int bridged_mode = 0; + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) + bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED); + + return sprintf(buf, "%d\n", bridged_mode); +} + +static struct device_attribute dev_attr_bridged_mode = { + .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, + .show = qlcnic_show_bridged_mode, + .store = qlcnic_store_bridged_mode, +}; + +static ssize_t +qlcnic_store_diag_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + unsigned long new; + + if (strict_strtoul(buf, 2, &new)) + return -EINVAL; + + if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED)) + adapter->flags ^= QLCNIC_DIAG_ENABLED; + + return len; +} + +static ssize_t +qlcnic_show_diag_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + + return 
sprintf(buf, "%d\n", + !!(adapter->flags & QLCNIC_DIAG_ENABLED)); +} + +static struct device_attribute dev_attr_diag_mode = { + .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, + .show = qlcnic_show_diag_mode, + .store = qlcnic_store_diag_mode, +}; + +int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val) +{ + if (!use_msi_x && !use_msi) { + netdev_info(netdev, "no MSI-X or MSI support, so no RSS\n"); + return -EINVAL; + } + + if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) { + netdev_info(netdev, "rss_ring valid range is [2 - %x], in powers of 2\n", max_hw); + return -EINVAL; + } + return 0; + +} + +int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + netif_device_detach(netdev); + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + qlcnic_detach(adapter); + qlcnic_teardown_intr(adapter); + + if (qlcnic_enable_msix(adapter, data)) { + netdev_info(netdev, "failed setting max_rss; rss disabled\n"); + qlcnic_enable_msi_legacy(adapter); + } + + if (netif_running(netdev)) { + err = qlcnic_attach(adapter); + if (err) + goto done; + err = __qlcnic_up(adapter, netdev); + if (err) + goto done; + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + done: + netif_device_attach(netdev); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return err; +} + +static int +qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter, + loff_t offset, size_t size) +{ + size_t crb_size = 4; + + if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) + return -EIO; + + if (offset < QLCNIC_PCI_CRBSPACE) { + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, + QLCNIC_PCI_CAMQM_END)) + crb_size = 8; + else + return -EINVAL; + } + + if ((size != crb_size) || (offset & (crb_size-1))) + return -EINVAL; + + return 0; +} + +static ssize_t +qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + u32 data; + u64 qmdata; + int ret; + + ret = qlcnic_sysfs_validate_crb(adapter, offset, size); + if (ret != 0) + return ret; + + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { + qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata); + memcpy(buf, &qmdata, size); + } else { + data = QLCRD32(adapter, offset); + memcpy(buf, &data, size); + } + return size; +} + +static ssize_t +qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + u32 data; + u64 qmdata; + int ret; + + ret = qlcnic_sysfs_validate_crb(adapter, offset, size); + if (ret != 0) + return ret; + + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { + memcpy(&qmdata, buf, size); + qlcnic_pci_camqm_write_2M(adapter, offset, qmdata); + } else { + memcpy(&data, buf, size); + QLCWR32(adapter, offset, data); + } + return size; +} + +static int +qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter, + loff_t offset, size_t size) +{ + if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) + return -EIO; + + if ((size != 8) || (offset & 0x7)) + return -EIO; + + return 0; +} + +static ssize_t +qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj, +
struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + u64 data; + int ret; + + ret = qlcnic_sysfs_validate_mem(adapter, offset, size); + if (ret != 0) + return ret; + + if (qlcnic_pci_mem_read_2M(adapter, offset, &data)) + return -EIO; + + memcpy(buf, &data, size); + + return size; +} + +static ssize_t +qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + u64 data; + int ret; + + ret = qlcnic_sysfs_validate_mem(adapter, offset, size); + if (ret != 0) + return ret; + + memcpy(&data, buf, size); + + if (qlcnic_pci_mem_write_2M(adapter, offset, data)) + return -EIO; + + return size; +} + +static struct bin_attribute bin_attr_crb = { + .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_crb, + .write = qlcnic_sysfs_write_crb, +}; + +static struct bin_attribute bin_attr_mem = { + .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_mem, + .write = qlcnic_sysfs_write_mem, +}; + +static int +validate_pm_config(struct qlcnic_adapter *adapter, + struct qlcnic_pm_func_cfg *pm_cfg, int count) +{ + + u8 src_pci_func, s_esw_id, d_esw_id; + u8 dest_pci_func; + int i; + + for (i = 0; i < count; i++) { + src_pci_func = pm_cfg[i].pci_func; + dest_pci_func = pm_cfg[i].dest_npar; + if (src_pci_func >= QLCNIC_MAX_PCI_FUNC + || dest_pci_func >= QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + + if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC) + return QL_STATUS_INVALID_PARAM; + + if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC) + return QL_STATUS_INVALID_PARAM; + + s_esw_id = adapter->npars[src_pci_func].phy_port; + d_esw_id = adapter->npars[dest_pci_func].phy_port; + + if (s_esw_id != d_esw_id) + return QL_STATUS_INVALID_PARAM; + + } + return 0; + +} + +static ssize_t +qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_pm_func_cfg *pm_cfg; + u32 id, action, pci_func; + int count, rem, i, ret; + + count = size / sizeof(struct qlcnic_pm_func_cfg); + rem = size % sizeof(struct qlcnic_pm_func_cfg); + if (rem) + return QL_STATUS_INVALID_PARAM; + + pm_cfg = (struct qlcnic_pm_func_cfg *) buf; + + ret = validate_pm_config(adapter, pm_cfg, count); + if (ret) + return ret; + for (i = 0; i < count; i++) { + pci_func = pm_cfg[i].pci_func; + action = !!pm_cfg[i].action; + id = adapter->npars[pci_func].phy_port; + ret = qlcnic_config_port_mirroring(adapter, id, + action, pci_func); + if (ret) + return ret; + } + + for (i = 0; i < count; i++) { + pci_func = pm_cfg[i].pci_func; + id = adapter->npars[pci_func].phy_port; + adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action; + adapter->npars[pci_func].dest_npar = id; + } + return size; +} + +static ssize_t +qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_pm_func_cfg 
pm_cfg[QLCNIC_MAX_PCI_FUNC]; + int i; + + if (size != sizeof(pm_cfg)) + return QL_STATUS_INVALID_PARAM; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + continue; + pm_cfg[i].action = adapter->npars[i].enable_pm; + pm_cfg[i].dest_npar = 0; + pm_cfg[i].pci_func = i; + } + memcpy(buf, &pm_cfg, size); + + return size; +} + +static int +validate_esw_config(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg, int count) +{ + u32 op_mode; + u8 pci_func; + int i; + + op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE); + + for (i = 0; i < count; i++) { + pci_func = esw_cfg[i].pci_func; + if (pci_func >= QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + + if (adapter->op_mode == QLCNIC_MGMT_FUNC) + if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) + return QL_STATUS_INVALID_PARAM; + + switch (esw_cfg[i].op_mode) { + case QLCNIC_PORT_DEFAULTS: + if (QLC_DEV_GET_DRV(op_mode, pci_func) != + QLCNIC_NON_PRIV_FUNC) { + if (esw_cfg[i].mac_anti_spoof != 0) + return QL_STATUS_INVALID_PARAM; + if (esw_cfg[i].mac_override != 1) + return QL_STATUS_INVALID_PARAM; + if (esw_cfg[i].promisc_mode != 1) + return QL_STATUS_INVALID_PARAM; + } + break; + case QLCNIC_ADD_VLAN: + if (!IS_VALID_VLAN(esw_cfg[i].vlan_id)) + return QL_STATUS_INVALID_PARAM; + if (!esw_cfg[i].op_type) + return QL_STATUS_INVALID_PARAM; + break; + case QLCNIC_DEL_VLAN: + if (!esw_cfg[i].op_type) + return QL_STATUS_INVALID_PARAM; + break; + default: + return QL_STATUS_INVALID_PARAM; + } + } + return 0; +} + +static ssize_t +qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_func_cfg *esw_cfg; + struct qlcnic_npar_info *npar; + int count, rem, i, ret; + u8 pci_func, op_mode = 0; + + count = size / sizeof(struct qlcnic_esw_func_cfg); + rem = size % sizeof(struct qlcnic_esw_func_cfg); + if (rem) + return QL_STATUS_INVALID_PARAM; + + esw_cfg = (struct qlcnic_esw_func_cfg *) buf; + ret = validate_esw_config(adapter, esw_cfg, count); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + if (adapter->op_mode == QLCNIC_MGMT_FUNC) + if (qlcnic_config_switch_port(adapter, &esw_cfg[i])) + return QL_STATUS_INVALID_PARAM; + + if (adapter->ahw->pci_func != esw_cfg[i].pci_func) + continue; + + op_mode = esw_cfg[i].op_mode; + qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]); + esw_cfg[i].op_mode = op_mode; + esw_cfg[i].pci_func = adapter->ahw->pci_func; + + switch (esw_cfg[i].op_mode) { + case QLCNIC_PORT_DEFAULTS: + qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]); + break; + case QLCNIC_ADD_VLAN: + qlcnic_set_vlan_config(adapter, &esw_cfg[i]); + break; + case QLCNIC_DEL_VLAN: + esw_cfg[i].vlan_id = 0; + qlcnic_set_vlan_config(adapter, &esw_cfg[i]); + break; + } + } + + if (adapter->op_mode != QLCNIC_MGMT_FUNC) + goto out; + + for (i = 0; i < count; i++) { + pci_func = esw_cfg[i].pci_func; + npar = &adapter->npars[pci_func]; + switch (esw_cfg[i].op_mode) { + case QLCNIC_PORT_DEFAULTS: + npar->promisc_mode = esw_cfg[i].promisc_mode; + npar->mac_override = esw_cfg[i].mac_override; + npar->offload_flags = esw_cfg[i].offload_flags; + npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof; + npar->discard_tagged = esw_cfg[i].discard_tagged; + break; + case QLCNIC_ADD_VLAN: + npar->pvid = esw_cfg[i].vlan_id; + break; + case 
QLCNIC_DEL_VLAN: + npar->pvid = 0; + break; + } + } +out: + return size; +} + +static ssize_t +qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC]; + u8 i; + + if (size != sizeof(esw_cfg)) + return QL_STATUS_INVALID_PARAM; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + continue; + esw_cfg[i].pci_func = i; + if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i])) + return QL_STATUS_INVALID_PARAM; + } + memcpy(buf, &esw_cfg, size); + + return size; +} + +static int +validate_npar_config(struct qlcnic_adapter *adapter, + struct qlcnic_npar_func_cfg *np_cfg, int count) +{ + u8 pci_func, i; + + for (i = 0; i < count; i++) { + pci_func = np_cfg[i].pci_func; + if (pci_func >= QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + + if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) + return QL_STATUS_INVALID_PARAM; + + if (!IS_VALID_BW(np_cfg[i].min_bw) || + !IS_VALID_BW(np_cfg[i].max_bw)) + return QL_STATUS_INVALID_PARAM; + } + return 0; +} + +static ssize_t +qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_info nic_info; + struct qlcnic_npar_func_cfg *np_cfg; + int i, count, rem, ret; + u8 pci_func; + + count = size / sizeof(struct qlcnic_npar_func_cfg); + rem = size % sizeof(struct qlcnic_npar_func_cfg); + if (rem) + return QL_STATUS_INVALID_PARAM; + + np_cfg = (struct qlcnic_npar_func_cfg *) buf; + ret = validate_npar_config(adapter, np_cfg, count); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + pci_func = np_cfg[i].pci_func; + ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func); + if (ret) + return ret; + nic_info.pci_func = pci_func; + nic_info.min_tx_bw = np_cfg[i].min_bw; + nic_info.max_tx_bw = np_cfg[i].max_bw; + ret = qlcnic_set_nic_info(adapter, &nic_info); + if (ret) + return ret; + adapter->npars[pci_func].min_bw = nic_info.min_tx_bw; + adapter->npars[pci_func].max_bw = nic_info.max_tx_bw; + } + + return size; + +} +static ssize_t +qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_info nic_info; + struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC]; + int i, ret; + + if (size != sizeof(np_cfg)) + return QL_STATUS_INVALID_PARAM; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + continue; + ret = qlcnic_get_nic_info(adapter, &nic_info, i); + if (ret) + return ret; + + np_cfg[i].pci_func = i; + np_cfg[i].op_mode = (u8)nic_info.op_mode; + np_cfg[i].port_num = nic_info.phys_port; + np_cfg[i].fw_capab = nic_info.capabilities; + np_cfg[i].min_bw = nic_info.min_tx_bw; + np_cfg[i].max_bw = nic_info.max_tx_bw; + np_cfg[i].max_tx_queues = nic_info.max_tx_ques; + np_cfg[i].max_rx_queues = nic_info.max_rx_ques; + } + memcpy(buf, &np_cfg, size); + return size; +} + +static ssize_t +qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj, + struct
bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_statistics port_stats; + int ret; + + if (size != sizeof(struct qlcnic_esw_statistics)) + return QL_STATUS_INVALID_PARAM; + + if (offset >= QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + + memset(&port_stats, 0, size); + ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, + &port_stats.rx); + if (ret) + return ret; + + ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER, + &port_stats.tx); + if (ret) + return ret; + + memcpy(buf, &port_stats, size); + return size; +} + +static ssize_t +qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_statistics esw_stats; + int ret; + + if (size != sizeof(struct qlcnic_esw_statistics)) + return QL_STATUS_INVALID_PARAM; + + if (offset >= QLCNIC_NIU_MAX_XG_PORTS) + return QL_STATUS_INVALID_PARAM; + + memset(&esw_stats, 0, size); + ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, + &esw_stats.rx); + if (ret) + return ret; + + ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER, + &esw_stats.tx); + if (ret) + return ret; + + memcpy(buf, &esw_stats, size); + return size; +} + +static ssize_t +qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int ret; + + if (offset >= QLCNIC_NIU_MAX_XG_PORTS) + return QL_STATUS_INVALID_PARAM; + + ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, + QLCNIC_QUERY_RX_COUNTER); + if (ret) + return ret; + + ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, + QLCNIC_QUERY_TX_COUNTER); + if (ret) + return ret; + + return size; +} + +static ssize_t +qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int ret; + + if (offset >= QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + + ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, + QLCNIC_QUERY_RX_COUNTER); + if (ret) + return ret; + + ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, + QLCNIC_QUERY_TX_COUNTER); + if (ret) + return ret; + + return size; +} + +static ssize_t +qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC]; + struct qlcnic_pci_info *pci_info; + int i, ret; + + if (size != sizeof(pci_cfg)) + return QL_STATUS_INVALID_PARAM; + + pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL); + if (!pci_info) + return -ENOMEM; + + ret = qlcnic_get_pci_info(adapter, pci_info); + if (ret) { + kfree(pci_info); + return ret; + } + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) { + 
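/* repackage the firmware's per-function info into the flat sysfs record */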
pci_cfg[i].pci_func = pci_info[i].id; + pci_cfg[i].func_type = pci_info[i].type; + pci_cfg[i].port_num = pci_info[i].default_port; + pci_cfg[i].min_bw = pci_info[i].tx_min_bw; + pci_cfg[i].max_bw = pci_info[i].tx_max_bw; + memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN); + } + memcpy(buf, &pci_cfg, size); + kfree(pci_info); + return size; +} +static struct bin_attribute bin_attr_npar_config = { + .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_npar_config, + .write = qlcnic_sysfs_write_npar_config, +}; + +static struct bin_attribute bin_attr_pci_config = { + .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_pci_config, + .write = NULL, +}; + +static struct bin_attribute bin_attr_port_stats = { + .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_get_port_stats, + .write = qlcnic_sysfs_clear_port_stats, +}; + +static struct bin_attribute bin_attr_esw_stats = { + .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_get_esw_stats, + .write = qlcnic_sysfs_clear_esw_stats, +}; + +static struct bin_attribute bin_attr_esw_config = { + .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_esw_config, + .write = qlcnic_sysfs_write_esw_config, +}; + +static struct bin_attribute bin_attr_pm_config = { + .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_pm_config, + .write = qlcnic_sysfs_write_pm_config, +}; + +static void +qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) + if (device_create_file(dev, &dev_attr_bridged_mode)) + dev_warn(dev, + "failed to create bridged_mode sysfs entry\n"); +} + +static void +qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) + device_remove_file(dev, &dev_attr_bridged_mode); +} + +static void +qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + if (device_create_bin_file(dev, &bin_attr_port_stats)) + dev_info(dev, "failed to create port stats sysfs entry\n"); + + if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) + return; + if (device_create_file(dev, &dev_attr_diag_mode)) + dev_info(dev, "failed to create diag_mode sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_crb)) + dev_info(dev, "failed to create crb sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_mem)) + dev_info(dev, "failed to create mem sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_pci_config)) + dev_info(dev, "failed to create pci config sysfs entry\n"); + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + if (device_create_bin_file(dev, &bin_attr_esw_config)) + dev_info(dev, "failed to create esw config sysfs entry\n"); + if (adapter->op_mode != QLCNIC_MGMT_FUNC) + return; + if (device_create_bin_file(dev, &bin_attr_npar_config)) + dev_info(dev, "failed to create npar config sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_pm_config)) + dev_info(dev, "failed to create pm config sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_esw_stats)) + dev_info(dev, "failed to create eswitch stats sysfs entry\n"); +} + +static void +qlcnic_remove_diag_entries(struct
qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + device_remove_bin_file(dev, &bin_attr_port_stats); + + if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) + return; + device_remove_file(dev, &dev_attr_diag_mode); + device_remove_bin_file(dev, &bin_attr_crb); + device_remove_bin_file(dev, &bin_attr_mem); + device_remove_bin_file(dev, &bin_attr_pci_config); + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + device_remove_bin_file(dev, &bin_attr_esw_config); + if (adapter->op_mode != QLCNIC_MGMT_FUNC) + return; + device_remove_bin_file(dev, &bin_attr_npar_config); + device_remove_bin_file(dev, &bin_attr_pm_config); + device_remove_bin_file(dev, &bin_attr_esw_stats); +} + +#ifdef CONFIG_INET + +#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops) + +static void +qlcnic_config_indev_addr(struct qlcnic_adapter *adapter, + struct net_device *dev, unsigned long event) +{ + struct in_device *indev; + + indev = in_dev_get(dev); + if (!indev) + return; + + for_ifa(indev) { + switch (event) { + case NETDEV_UP: + qlcnic_config_ipaddr(adapter, + ifa->ifa_address, QLCNIC_IP_UP); + break; + case NETDEV_DOWN: + qlcnic_config_ipaddr(adapter, + ifa->ifa_address, QLCNIC_IP_DOWN); + break; + default: + break; + } + } endfor_ifa(indev); + + in_dev_put(indev); +} + +static void +qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct net_device *dev; + u16 vid; + + qlcnic_config_indev_addr(adapter, netdev, event); + + for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) { + dev = __vlan_find_dev_deep(netdev, vid); + if (!dev) + continue; + qlcnic_config_indev_addr(adapter, dev, event); + } +} + +static int qlcnic_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct qlcnic_adapter *adapter; + struct net_device *dev = (struct net_device *)ptr; + +recheck: + if (dev == NULL) + goto done; + + if (dev->priv_flags & IFF_802_1Q_VLAN) { + dev = vlan_dev_real_dev(dev); + goto recheck; + } + + if (!is_qlcnic_netdev(dev)) + goto done; + + adapter = netdev_priv(dev); + + if (!adapter) + goto done; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + goto done; + + qlcnic_config_indev_addr(adapter, dev, event); +done: + return NOTIFY_DONE; +} + +static int +qlcnic_inetaddr_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct qlcnic_adapter *adapter; + struct net_device *dev; + + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + + dev = ifa->ifa_dev ? 
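/* the notifier can fire without an in_device attached */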
ifa->ifa_dev->dev : NULL; + +recheck: + if (dev == NULL) + goto done; + + if (dev->priv_flags & IFF_802_1Q_VLAN) { + dev = vlan_dev_real_dev(dev); + goto recheck; + } + + if (!is_qlcnic_netdev(dev)) + goto done; + + adapter = netdev_priv(dev); + + if (!adapter) + goto done; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + goto done; + + switch (event) { + case NETDEV_UP: + qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP); + break; + case NETDEV_DOWN: + qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN); + break; + default: + break; + } + +done: + return NOTIFY_DONE; +} + +static struct notifier_block qlcnic_netdev_cb = { + .notifier_call = qlcnic_netdev_event, +}; + +static struct notifier_block qlcnic_inetaddr_cb = { + .notifier_call = qlcnic_inetaddr_event, +}; +#else +static void +qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event) +{ } +#endif +static struct pci_error_handlers qlcnic_err_handler = { + .error_detected = qlcnic_io_error_detected, + .slot_reset = qlcnic_io_slot_reset, + .resume = qlcnic_io_resume, +}; + +static struct pci_driver qlcnic_driver = { + .name = qlcnic_driver_name, + .id_table = qlcnic_pci_tbl, + .probe = qlcnic_probe, + .remove = __devexit_p(qlcnic_remove), +#ifdef CONFIG_PM + .suspend = qlcnic_suspend, + .resume = qlcnic_resume, +#endif + .shutdown = qlcnic_shutdown, + .err_handler = &qlcnic_err_handler + +}; + +static int __init qlcnic_init_module(void) +{ + int ret; + + printk(KERN_INFO "%s\n", qlcnic_driver_string); + + qlcnic_wq = create_singlethread_workqueue("qlcnic"); + if (qlcnic_wq == NULL) { + printk(KERN_ERR "qlcnic: cannot create workqueue\n"); + return -ENOMEM; + } + +#ifdef CONFIG_INET + register_netdevice_notifier(&qlcnic_netdev_cb); + register_inetaddr_notifier(&qlcnic_inetaddr_cb); +#endif + + ret = pci_register_driver(&qlcnic_driver); + if (ret) { +#ifdef CONFIG_INET + unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); + unregister_netdevice_notifier(&qlcnic_netdev_cb); +#endif + destroy_workqueue(qlcnic_wq); + } + + return ret; +} + +module_init(qlcnic_init_module); + +static void __exit qlcnic_exit_module(void) +{ + + pci_unregister_driver(&qlcnic_driver); + +#ifdef CONFIG_INET + unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); + unregister_netdevice_notifier(&qlcnic_netdev_cb); +#endif + destroy_workqueue(qlcnic_wq); +} + +module_exit(qlcnic_exit_module); diff --git a/drivers/net/ethernet/qlogic/qlge/Makefile b/drivers/net/ethernet/qlogic/qlge/Makefile new file mode 100644 index 000000000000..8a197658d76f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Qlogic 10GbE PCI Express ethernet driver +# + +obj-$(CONFIG_QLGE) += qlge.o + +qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h new file mode 100644 index 000000000000..8731f79c9efc --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -0,0 +1,2334 @@ +/* + * QLogic QLA41xx NIC HBA Driver + * Copyright (c) 2003-2006 QLogic Corporation + * + * See LICENSE.qlge for copyright and licensing details. + */ +#ifndef _QLGE_H_ +#define _QLGE_H_ + +#include +#include +#include +#include +#include + +/* + * General definitions... 
+ */ +#define DRV_NAME "qlge" +#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " +#define DRV_VERSION "v1.00.00.29.00.00-01" + +#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ + +#define QLGE_VENDOR_ID 0x1077 +#define QLGE_DEVICE_ID_8012 0x8012 +#define QLGE_DEVICE_ID_8000 0x8000 +#define MAX_CPUS 8 +#define MAX_TX_RINGS MAX_CPUS +#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1) + +#define NUM_TX_RING_ENTRIES 256 +#define NUM_RX_RING_ENTRIES 256 + +#define NUM_SMALL_BUFFERS 512 +#define NUM_LARGE_BUFFERS 512 +#define DB_PAGE_SIZE 4096 + +/* Calculate the number of (4k) pages required to + * contain a buffer queue of the given length. + */ +#define MAX_DB_PAGES_PER_BQ(x) \ + (((x * sizeof(u64)) / DB_PAGE_SIZE) + \ + (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0)) + +#define RX_RING_SHADOW_SPACE (sizeof(u64) + \ + MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ + MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) +#define LARGE_BUFFER_MAX_SIZE 8192 +#define LARGE_BUFFER_MIN_SIZE 2048 + +#define MAX_CQ 128 +#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */ +#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */ +#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2) +#define UDELAY_COUNT 3 +#define UDELAY_DELAY 100 + + +#define TX_DESC_PER_IOCB 8 +/* The maximum number of frags we handle is based + * on PAGE_SIZE... + */ +#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */ +#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) +#else /* all other page sizes */ +#define TX_DESC_PER_OAL 0 +#endif + +/* Word shifting for converting 64-bit + * address to a series of 16-bit words. + * This is used for some MPI firmware + * mailbox commands. + */ +#define LSW(x) ((u16)(x)) +#define MSW(x) ((u16)((u32)(x) >> 16)) +#define LSD(x) ((u32)((u64)(x))) +#define MSD(x) ((u32)((((u64)(x)) >> 32))) + +/* MPI test register definitions. This register + * is used for determining alternate NIC function's + * PCI->func number. + */ +enum { + MPI_TEST_FUNC_PORT_CFG = 0x1002, + MPI_TEST_FUNC_PRB_CTL = 0x100e, + MPI_TEST_FUNC_PRB_EN = 0x18a20000, + MPI_TEST_FUNC_RST_STS = 0x100a, + MPI_TEST_FUNC_RST_FRC = 0x00000003, + MPI_TEST_NIC_FUNC_MASK = 0x00000007, + MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0), + MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e, + MPI_TEST_NIC1_FUNC_SHIFT = 1, + MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4), + MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0, + MPI_TEST_NIC2_FUNC_SHIFT = 5, + MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8), + MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00, + MPI_TEST_FC1_FUNCTION_SHIFT = 9, + MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12), + MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000, + MPI_TEST_FC2_FUNCTION_SHIFT = 13, + + MPI_NIC_READ = 0x00000000, + MPI_NIC_REG_BLOCK = 0x00020000, + MPI_NIC_FUNCTION_SHIFT = 6, +}; + +/* + * Processor Address Register (PROC_ADDR) bit definitions. + */ +enum { + + /* Misc. stuff */ + MAILBOX_COUNT = 16, + MAILBOX_TIMEOUT = 5, + + PROC_ADDR_RDY = (1 << 31), + PROC_ADDR_R = (1 << 30), + PROC_ADDR_ERR = (1 << 29), + PROC_ADDR_DA = (1 << 28), + PROC_ADDR_FUNC0_MBI = 0x00001180, + PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT), + PROC_ADDR_FUNC0_CTL = 0x000011a1, + PROC_ADDR_FUNC2_MBI = 0x00001280, + PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT), + PROC_ADDR_FUNC2_CTL = 0x000012a1, + PROC_ADDR_MPI_RISC = 0x00000000, + PROC_ADDR_MDE = 0x00010000, + PROC_ADDR_REGBLOCK = 0x00020000, + PROC_ADDR_RISC_REG = 0x00030000, +}; + +/* + * System Register (SYS) bit definitions. 
+ */ +enum { + SYS_EFE = (1 << 0), + SYS_FAE = (1 << 1), + SYS_MDC = (1 << 2), + SYS_DST = (1 << 3), + SYS_DWC = (1 << 4), + SYS_EVW = (1 << 5), + SYS_OMP_DLY_MASK = 0x3f000000, + /* + * There are no values defined as of edit #15. + */ + SYS_ODI = (1 << 14), +}; + +/* + * Reset/Failover Register (RST_FO) bit definitions. + */ +enum { + RST_FO_TFO = (1 << 0), + RST_FO_RR_MASK = 0x00060000, + RST_FO_RR_CQ_CAM = 0x00000000, + RST_FO_RR_DROP = 0x00000002, + RST_FO_RR_DQ = 0x00000004, + RST_FO_RR_RCV_FUNC_CQ = 0x00000006, + RST_FO_FRB = (1 << 12), + RST_FO_MOP = (1 << 13), + RST_FO_REG = (1 << 14), + RST_FO_FR = (1 << 15), +}; + +/* + * Function Specific Control Register (FSC) bit definitions. + */ +enum { + FSC_DBRST_MASK = 0x00070000, + FSC_DBRST_256 = 0x00000000, + FSC_DBRST_512 = 0x00000001, + FSC_DBRST_768 = 0x00000002, + FSC_DBRST_1024 = 0x00000003, + FSC_DBL_MASK = 0x00180000, + FSC_DBL_DBRST = 0x00000000, + FSC_DBL_MAX_PLD = 0x00000008, + FSC_DBL_MAX_BRST = 0x00000010, + FSC_DBL_128_BYTES = 0x00000018, + FSC_EC = (1 << 5), + FSC_EPC_MASK = 0x00c00000, + FSC_EPC_INBOUND = (1 << 6), + FSC_EPC_OUTBOUND = (1 << 7), + FSC_VM_PAGESIZE_MASK = 0x07000000, + FSC_VM_PAGE_2K = 0x00000100, + FSC_VM_PAGE_4K = 0x00000200, + FSC_VM_PAGE_8K = 0x00000300, + FSC_VM_PAGE_64K = 0x00000600, + FSC_SH = (1 << 11), + FSC_DSB = (1 << 12), + FSC_STE = (1 << 13), + FSC_FE = (1 << 15), +}; + +/* + * Host Command Status Register (CSR) bit definitions. + */ +enum { + CSR_ERR_STS_MASK = 0x0000003f, + /* + * There are no valued defined as of edit #15. + */ + CSR_RR = (1 << 8), + CSR_HRI = (1 << 9), + CSR_RP = (1 << 10), + CSR_CMD_PARM_SHIFT = 22, + CSR_CMD_NOP = 0x00000000, + CSR_CMD_SET_RST = 0x10000000, + CSR_CMD_CLR_RST = 0x20000000, + CSR_CMD_SET_PAUSE = 0x30000000, + CSR_CMD_CLR_PAUSE = 0x40000000, + CSR_CMD_SET_H2R_INT = 0x50000000, + CSR_CMD_CLR_H2R_INT = 0x60000000, + CSR_CMD_PAR_EN = 0x70000000, + CSR_CMD_SET_BAD_PAR = 0x80000000, + CSR_CMD_CLR_BAD_PAR = 0x90000000, + CSR_CMD_CLR_R2PCI_INT = 0xa0000000, +}; + +/* + * Configuration Register (CFG) bit definitions. + */ +enum { + CFG_LRQ = (1 << 0), + CFG_DRQ = (1 << 1), + CFG_LR = (1 << 2), + CFG_DR = (1 << 3), + CFG_LE = (1 << 5), + CFG_LCQ = (1 << 6), + CFG_DCQ = (1 << 7), + CFG_Q_SHIFT = 8, + CFG_Q_MASK = 0x7f000000, +}; + +/* + * Status Register (STS) bit definitions. + */ +enum { + STS_FE = (1 << 0), + STS_PI = (1 << 1), + STS_PL0 = (1 << 2), + STS_PL1 = (1 << 3), + STS_PI0 = (1 << 4), + STS_PI1 = (1 << 5), + STS_FUNC_ID_MASK = 0x000000c0, + STS_FUNC_ID_SHIFT = 6, + STS_F0E = (1 << 8), + STS_F1E = (1 << 9), + STS_F2E = (1 << 10), + STS_F3E = (1 << 11), + STS_NFE = (1 << 12), +}; + +/* + * Interrupt Enable Register (INTR_EN) bit definitions. + */ +enum { + INTR_EN_INTR_MASK = 0x007f0000, + INTR_EN_TYPE_MASK = 0x03000000, + INTR_EN_TYPE_ENABLE = 0x00000100, + INTR_EN_TYPE_DISABLE = 0x00000200, + INTR_EN_TYPE_READ = 0x00000300, + INTR_EN_IHD = (1 << 13), + INTR_EN_IHD_MASK = (INTR_EN_IHD << 16), + INTR_EN_EI = (1 << 14), + INTR_EN_EN = (1 << 15), +}; + +/* + * Interrupt Mask Register (INTR_MASK) bit definitions. + */ +enum { + INTR_MASK_PI = (1 << 0), + INTR_MASK_HL0 = (1 << 1), + INTR_MASK_LH0 = (1 << 2), + INTR_MASK_HL1 = (1 << 3), + INTR_MASK_LH1 = (1 << 4), + INTR_MASK_SE = (1 << 5), + INTR_MASK_LSC = (1 << 6), + INTR_MASK_MC = (1 << 7), + INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC, +}; + +/* + * Register (REV_ID) bit definitions. 
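+ * The register packs several 4-bit revision fields.  As an
+ * illustration (not code from this patch), the NIC revision could be
+ * read with:
+ *
+ *	nic_rev = (ql_read32(qdev, REV_ID) >> REV_ID_NICREV_SHIFT) &
+ *			REV_ID_MASK;
+ *
+ * using the ql_read32() accessor defined near the end of this file.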
+ */ +enum { + REV_ID_MASK = 0x0000000f, + REV_ID_NICROLL_SHIFT = 0, + REV_ID_NICREV_SHIFT = 4, + REV_ID_XGROLL_SHIFT = 8, + REV_ID_XGREV_SHIFT = 12, + REV_ID_CHIPREV_SHIFT = 28, +}; + +/* + * Force ECC Error Register (FRC_ECC_ERR) bit definitions. + */ +enum { + FRC_ECC_ERR_VW = (1 << 12), + FRC_ECC_ERR_VB = (1 << 13), + FRC_ECC_ERR_NI = (1 << 14), + FRC_ECC_ERR_NO = (1 << 15), + FRC_ECC_PFE_SHIFT = 16, + FRC_ECC_ERR_DO = (1 << 18), + FRC_ECC_P14 = (1 << 19), +}; + +/* + * Error Status Register (ERR_STS) bit definitions. + */ +enum { + ERR_STS_NOF = (1 << 0), + ERR_STS_NIF = (1 << 1), + ERR_STS_DRP = (1 << 2), + ERR_STS_XGP = (1 << 3), + ERR_STS_FOU = (1 << 4), + ERR_STS_FOC = (1 << 5), + ERR_STS_FOF = (1 << 6), + ERR_STS_FIU = (1 << 7), + ERR_STS_FIC = (1 << 8), + ERR_STS_FIF = (1 << 9), + ERR_STS_MOF = (1 << 10), + ERR_STS_TA = (1 << 11), + ERR_STS_MA = (1 << 12), + ERR_STS_MPE = (1 << 13), + ERR_STS_SCE = (1 << 14), + ERR_STS_STE = (1 << 15), + ERR_STS_FOW = (1 << 16), + ERR_STS_UE = (1 << 17), + ERR_STS_MCH = (1 << 26), + ERR_STS_LOC_SHIFT = 27, +}; + +/* + * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions. + */ +enum { + RAM_DBG_ADDR_FW = (1 << 30), + RAM_DBG_ADDR_FR = (1 << 31), +}; + +/* + * Semaphore Register (SEM) bit definitions. + */ +enum { + /* + * Example: + * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT) + */ + SEM_CLEAR = 0, + SEM_SET = 1, + SEM_FORCE = 3, + SEM_XGMAC0_SHIFT = 0, + SEM_XGMAC1_SHIFT = 2, + SEM_ICB_SHIFT = 4, + SEM_MAC_ADDR_SHIFT = 6, + SEM_FLASH_SHIFT = 8, + SEM_PROBE_SHIFT = 10, + SEM_RT_IDX_SHIFT = 12, + SEM_PROC_REG_SHIFT = 14, + SEM_XGMAC0_MASK = 0x00030000, + SEM_XGMAC1_MASK = 0x000c0000, + SEM_ICB_MASK = 0x00300000, + SEM_MAC_ADDR_MASK = 0x00c00000, + SEM_FLASH_MASK = 0x03000000, + SEM_PROBE_MASK = 0x0c000000, + SEM_RT_IDX_MASK = 0x30000000, + SEM_PROC_REG_MASK = 0xc0000000, +}; + +/* + * 10G MAC Address Register (XGMAC_ADDR) bit definitions. 
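+ * XGMAC registers are not mapped directly; they are reached through
+ * the XGMAC_ADDR/XGMAC_DATA pair (see the control register map below)
+ * under the port's XGMAC semaphore.  A rough sketch of a read,
+ * assuming a ready-polling helper (names illustrative):
+ *
+ *	wait for XGMAC_ADDR_RDY in XGMAC_ADDR;
+ *	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
+ *	wait for XGMAC_ADDR_RDY again;
+ *	val = ql_read32(qdev, XGMAC_DATA);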
+ */ +enum { + XGMAC_ADDR_RDY = (1 << 31), + XGMAC_ADDR_R = (1 << 30), + XGMAC_ADDR_XME = (1 << 29), + + /* XGMAC control registers */ + PAUSE_SRC_LO = 0x00000100, + PAUSE_SRC_HI = 0x00000104, + GLOBAL_CFG = 0x00000108, + GLOBAL_CFG_RESET = (1 << 0), + GLOBAL_CFG_JUMBO = (1 << 6), + GLOBAL_CFG_TX_STAT_EN = (1 << 10), + GLOBAL_CFG_RX_STAT_EN = (1 << 11), + TX_CFG = 0x0000010c, + TX_CFG_RESET = (1 << 0), + TX_CFG_EN = (1 << 1), + TX_CFG_PREAM = (1 << 2), + RX_CFG = 0x00000110, + RX_CFG_RESET = (1 << 0), + RX_CFG_EN = (1 << 1), + RX_CFG_PREAM = (1 << 2), + FLOW_CTL = 0x0000011c, + PAUSE_OPCODE = 0x00000120, + PAUSE_TIMER = 0x00000124, + PAUSE_FRM_DEST_LO = 0x00000128, + PAUSE_FRM_DEST_HI = 0x0000012c, + MAC_TX_PARAMS = 0x00000134, + MAC_TX_PARAMS_JUMBO = (1 << 31), + MAC_TX_PARAMS_SIZE_SHIFT = 16, + MAC_RX_PARAMS = 0x00000138, + MAC_SYS_INT = 0x00000144, + MAC_SYS_INT_MASK = 0x00000148, + MAC_MGMT_INT = 0x0000014c, + MAC_MGMT_IN_MASK = 0x00000150, + EXT_ARB_MODE = 0x000001fc, + + /* XGMAC TX statistics registers */ + TX_PKTS = 0x00000200, + TX_BYTES = 0x00000208, + TX_MCAST_PKTS = 0x00000210, + TX_BCAST_PKTS = 0x00000218, + TX_UCAST_PKTS = 0x00000220, + TX_CTL_PKTS = 0x00000228, + TX_PAUSE_PKTS = 0x00000230, + TX_64_PKT = 0x00000238, + TX_65_TO_127_PKT = 0x00000240, + TX_128_TO_255_PKT = 0x00000248, + TX_256_511_PKT = 0x00000250, + TX_512_TO_1023_PKT = 0x00000258, + TX_1024_TO_1518_PKT = 0x00000260, + TX_1519_TO_MAX_PKT = 0x00000268, + TX_UNDERSIZE_PKT = 0x00000270, + TX_OVERSIZE_PKT = 0x00000278, + + /* XGMAC statistics control registers */ + RX_HALF_FULL_DET = 0x000002a0, + TX_HALF_FULL_DET = 0x000002a4, + RX_OVERFLOW_DET = 0x000002a8, + TX_OVERFLOW_DET = 0x000002ac, + RX_HALF_FULL_MASK = 0x000002b0, + TX_HALF_FULL_MASK = 0x000002b4, + RX_OVERFLOW_MASK = 0x000002b8, + TX_OVERFLOW_MASK = 0x000002bc, + STAT_CNT_CTL = 0x000002c0, + STAT_CNT_CTL_CLEAR_TX = (1 << 0), + STAT_CNT_CTL_CLEAR_RX = (1 << 1), + AUX_RX_HALF_FULL_DET = 0x000002d0, + AUX_TX_HALF_FULL_DET = 0x000002d4, + AUX_RX_OVERFLOW_DET = 0x000002d8, + AUX_TX_OVERFLOW_DET = 0x000002dc, + AUX_RX_HALF_FULL_MASK = 0x000002f0, + AUX_TX_HALF_FULL_MASK = 0x000002f4, + AUX_RX_OVERFLOW_MASK = 0x000002f8, + AUX_TX_OVERFLOW_MASK = 0x000002fc, + + /* XGMAC RX statistics registers */ + RX_BYTES = 0x00000300, + RX_BYTES_OK = 0x00000308, + RX_PKTS = 0x00000310, + RX_PKTS_OK = 0x00000318, + RX_BCAST_PKTS = 0x00000320, + RX_MCAST_PKTS = 0x00000328, + RX_UCAST_PKTS = 0x00000330, + RX_UNDERSIZE_PKTS = 0x00000338, + RX_OVERSIZE_PKTS = 0x00000340, + RX_JABBER_PKTS = 0x00000348, + RX_UNDERSIZE_FCERR_PKTS = 0x00000350, + RX_DROP_EVENTS = 0x00000358, + RX_FCERR_PKTS = 0x00000360, + RX_ALIGN_ERR = 0x00000368, + RX_SYMBOL_ERR = 0x00000370, + RX_MAC_ERR = 0x00000378, + RX_CTL_PKTS = 0x00000380, + RX_PAUSE_PKTS = 0x00000388, + RX_64_PKTS = 0x00000390, + RX_65_TO_127_PKTS = 0x00000398, + RX_128_255_PKTS = 0x000003a0, + RX_256_511_PKTS = 0x000003a8, + RX_512_TO_1023_PKTS = 0x000003b0, + RX_1024_TO_1518_PKTS = 0x000003b8, + RX_1519_TO_MAX_PKTS = 0x000003c0, + RX_LEN_ERR_PKTS = 0x000003c8, + + /* XGMAC MDIO control registers */ + MDIO_TX_DATA = 0x00000400, + MDIO_RX_DATA = 0x00000410, + MDIO_CMD = 0x00000420, + MDIO_PHY_ADDR = 0x00000430, + MDIO_PORT = 0x00000440, + MDIO_STATUS = 0x00000450, + + XGMAC_REGISTER_END = 0x00000740, +}; + +/* + * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions. 
+ */ +enum { + ETS_QUEUE_SHIFT = 29, + ETS_REF = (1 << 26), + ETS_RS = (1 << 27), + ETS_P = (1 << 28), + ETS_FC_COS_SHIFT = 23, +}; + +/* + * Flash Address Register (FLASH_ADDR) bit definitions. + */ +enum { + FLASH_ADDR_RDY = (1 << 31), + FLASH_ADDR_R = (1 << 30), + FLASH_ADDR_ERR = (1 << 29), +}; + +/* + * Stop CQ Processing Register (CQ_STOP) bit definitions. + */ +enum { + CQ_STOP_QUEUE_MASK = (0x007f0000), + CQ_STOP_TYPE_MASK = (0x03000000), + CQ_STOP_TYPE_START = 0x00000100, + CQ_STOP_TYPE_STOP = 0x00000200, + CQ_STOP_TYPE_READ = 0x00000300, + CQ_STOP_EN = (1 << 15), +}; + +/* + * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions. + */ +enum { + MAC_ADDR_IDX_SHIFT = 4, + MAC_ADDR_TYPE_SHIFT = 16, + MAC_ADDR_TYPE_COUNT = 10, + MAC_ADDR_TYPE_MASK = 0x000f0000, + MAC_ADDR_TYPE_CAM_MAC = 0x00000000, + MAC_ADDR_TYPE_MULTI_MAC = 0x00010000, + MAC_ADDR_TYPE_VLAN = 0x00020000, + MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000, + MAC_ADDR_TYPE_FC_MAC = 0x00040000, + MAC_ADDR_TYPE_MGMT_MAC = 0x00050000, + MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000, + MAC_ADDR_TYPE_MGMT_V4 = 0x00070000, + MAC_ADDR_TYPE_MGMT_V6 = 0x00080000, + MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000, + MAC_ADDR_ADR = (1 << 25), + MAC_ADDR_RS = (1 << 26), + MAC_ADDR_E = (1 << 27), + MAC_ADDR_MR = (1 << 30), + MAC_ADDR_MW = (1 << 31), + MAX_MULTICAST_ENTRIES = 32, + + /* Entry count and words per entry + * for each address type in the filter. + */ + MAC_ADDR_MAX_CAM_ENTRIES = 512, + MAC_ADDR_MAX_CAM_WCOUNT = 3, + MAC_ADDR_MAX_MULTICAST_ENTRIES = 32, + MAC_ADDR_MAX_MULTICAST_WCOUNT = 2, + MAC_ADDR_MAX_VLAN_ENTRIES = 4096, + MAC_ADDR_MAX_VLAN_WCOUNT = 1, + MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096, + MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1, + MAC_ADDR_MAX_FC_MAC_ENTRIES = 4, + MAC_ADDR_MAX_FC_MAC_WCOUNT = 2, + MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8, + MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2, + MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16, + MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1, + MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4, + MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1, + MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4, + MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4, + MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4, + MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1, +}; + +/* + * MAC Protocol Address Index Register (SPLT_HDR) bit definitions. + */ +enum { + SPLT_HDR_EP = (1 << 31), +}; + +/* + * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions. + */ +enum { + FC_RCV_CFG_ECT = (1 << 15), + FC_RCV_CFG_DFH = (1 << 20), + FC_RCV_CFG_DVF = (1 << 21), + FC_RCV_CFG_RCE = (1 << 27), + FC_RCV_CFG_RFE = (1 << 28), + FC_RCV_CFG_TEE = (1 << 29), + FC_RCV_CFG_TCE = (1 << 30), + FC_RCV_CFG_TFE = (1 << 31), +}; + +/* + * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions. + */ +enum { + NIC_RCV_CFG_PPE = (1 << 0), + NIC_RCV_CFG_VLAN_MASK = 0x00060000, + NIC_RCV_CFG_VLAN_ALL = 0x00000000, + NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002, + NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004, + NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006, + NIC_RCV_CFG_RV = (1 << 3), + NIC_RCV_CFG_DFQ_MASK = (0x7f000000), + NIC_RCV_CFG_DFQ_SHIFT = 8, + NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */ +}; + +/* + * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions. 
+ */ +enum { + MGMT_RCV_CFG_ARP = (1 << 0), + MGMT_RCV_CFG_DHC = (1 << 1), + MGMT_RCV_CFG_DHS = (1 << 2), + MGMT_RCV_CFG_NP = (1 << 3), + MGMT_RCV_CFG_I6N = (1 << 4), + MGMT_RCV_CFG_I6R = (1 << 5), + MGMT_RCV_CFG_DH6 = (1 << 6), + MGMT_RCV_CFG_UD1 = (1 << 7), + MGMT_RCV_CFG_UD0 = (1 << 8), + MGMT_RCV_CFG_BCT = (1 << 9), + MGMT_RCV_CFG_MCT = (1 << 10), + MGMT_RCV_CFG_DM = (1 << 11), + MGMT_RCV_CFG_RM = (1 << 12), + MGMT_RCV_CFG_STL = (1 << 13), + MGMT_RCV_CFG_VLAN_MASK = 0xc0000000, + MGMT_RCV_CFG_VLAN_ALL = 0x00000000, + MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000, + MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000, + MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000, +}; + +/* + * Routing Index Register (RT_IDX) bit definitions. + */ +enum { + RT_IDX_IDX_SHIFT = 8, + RT_IDX_TYPE_MASK = 0x000f0000, + RT_IDX_TYPE_SHIFT = 16, + RT_IDX_TYPE_RT = 0x00000000, + RT_IDX_TYPE_RT_INV = 0x00010000, + RT_IDX_TYPE_NICQ = 0x00020000, + RT_IDX_TYPE_NICQ_INV = 0x00030000, + RT_IDX_DST_MASK = 0x00700000, + RT_IDX_DST_RSS = 0x00000000, + RT_IDX_DST_CAM_Q = 0x00100000, + RT_IDX_DST_COS_Q = 0x00200000, + RT_IDX_DST_DFLT_Q = 0x00300000, + RT_IDX_DST_DEST_Q = 0x00400000, + RT_IDX_RS = (1 << 26), + RT_IDX_E = (1 << 27), + RT_IDX_MR = (1 << 30), + RT_IDX_MW = (1 << 31), + + /* Nic Queue format - type 2 bits */ + RT_IDX_BCAST = (1 << 0), + RT_IDX_MCAST = (1 << 1), + RT_IDX_MCAST_MATCH = (1 << 2), + RT_IDX_MCAST_REG_MATCH = (1 << 3), + RT_IDX_MCAST_HASH_MATCH = (1 << 4), + RT_IDX_FC_MACH = (1 << 5), + RT_IDX_ETH_FCOE = (1 << 6), + RT_IDX_CAM_HIT = (1 << 7), + RT_IDX_CAM_BIT0 = (1 << 8), + RT_IDX_CAM_BIT1 = (1 << 9), + RT_IDX_VLAN_TAG = (1 << 10), + RT_IDX_VLAN_MATCH = (1 << 11), + RT_IDX_VLAN_FILTER = (1 << 12), + RT_IDX_ETH_SKIP1 = (1 << 13), + RT_IDX_ETH_SKIP2 = (1 << 14), + RT_IDX_BCAST_MCAST_MATCH = (1 << 15), + RT_IDX_802_3 = (1 << 16), + RT_IDX_LLDP = (1 << 17), + RT_IDX_UNUSED018 = (1 << 18), + RT_IDX_UNUSED019 = (1 << 19), + RT_IDX_UNUSED20 = (1 << 20), + RT_IDX_UNUSED21 = (1 << 21), + RT_IDX_ERR = (1 << 22), + RT_IDX_VALID = (1 << 23), + RT_IDX_TU_CSUM_ERR = (1 << 24), + RT_IDX_IP_CSUM_ERR = (1 << 25), + RT_IDX_MAC_ERR = (1 << 26), + RT_IDX_RSS_TCP6 = (1 << 27), + RT_IDX_RSS_TCP4 = (1 << 28), + RT_IDX_RSS_IPV6 = (1 << 29), + RT_IDX_RSS_IPV4 = (1 << 30), + RT_IDX_RSS_MATCH = (1 << 31), + + /* Hierarchy for the NIC Queue Mask */ + RT_IDX_ALL_ERR_SLOT = 0, + RT_IDX_MAC_ERR_SLOT = 0, + RT_IDX_IP_CSUM_ERR_SLOT = 1, + RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2, + RT_IDX_BCAST_SLOT = 3, + RT_IDX_MCAST_MATCH_SLOT = 4, + RT_IDX_ALLMULTI_SLOT = 5, + RT_IDX_UNUSED6_SLOT = 6, + RT_IDX_UNUSED7_SLOT = 7, + RT_IDX_RSS_MATCH_SLOT = 8, + RT_IDX_RSS_IPV4_SLOT = 8, + RT_IDX_RSS_IPV6_SLOT = 9, + RT_IDX_RSS_TCP4_SLOT = 10, + RT_IDX_RSS_TCP6_SLOT = 11, + RT_IDX_CAM_HIT_SLOT = 12, + RT_IDX_UNUSED013 = 13, + RT_IDX_UNUSED014 = 14, + RT_IDX_PROMISCUOUS_SLOT = 15, + RT_IDX_MAX_RT_SLOTS = 8, + RT_IDX_MAX_NIC_SLOTS = 16, +}; + +/* + * Serdes Address Register (XG_SERDES_ADDR) bit definitions. + */ +enum { + XG_SERDES_ADDR_RDY = (1 << 31), + XG_SERDES_ADDR_R = (1 << 30), + + XG_SERDES_ADDR_STS = 0x00001E06, + XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005, + XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a, + XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001, + + /* Serdes coredump definitions. 
*/ + XG_SERDES_XAUI_AN_START = 0x00000000, + XG_SERDES_XAUI_AN_END = 0x00000034, + XG_SERDES_XAUI_HSS_PCS_START = 0x00000800, + XG_SERDES_XAUI_HSS_PCS_END = 0x0000880, + XG_SERDES_XFI_AN_START = 0x00001000, + XG_SERDES_XFI_AN_END = 0x00001034, + XG_SERDES_XFI_TRAIN_START = 0x10001050, + XG_SERDES_XFI_TRAIN_END = 0x1000107C, + XG_SERDES_XFI_HSS_PCS_START = 0x00001800, + XG_SERDES_XFI_HSS_PCS_END = 0x00001838, + XG_SERDES_XFI_HSS_TX_START = 0x00001c00, + XG_SERDES_XFI_HSS_TX_END = 0x00001c1f, + XG_SERDES_XFI_HSS_RX_START = 0x00001c40, + XG_SERDES_XFI_HSS_RX_END = 0x00001c5f, + XG_SERDES_XFI_HSS_PLL_START = 0x00001e00, + XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f, +}; + +/* + * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions. + */ +enum { + PRB_MX_ADDR_ARE = (1 << 16), + PRB_MX_ADDR_UP = (1 << 15), + PRB_MX_ADDR_SWP = (1 << 14), + + /* Module select values. */ + PRB_MX_ADDR_MAX_MODS = 21, + PRB_MX_ADDR_MOD_SEL_SHIFT = 9, + PRB_MX_ADDR_MOD_SEL_TBD = 0, + PRB_MX_ADDR_MOD_SEL_IDE1 = 1, + PRB_MX_ADDR_MOD_SEL_IDE2 = 2, + PRB_MX_ADDR_MOD_SEL_FRB = 3, + PRB_MX_ADDR_MOD_SEL_ODE1 = 4, + PRB_MX_ADDR_MOD_SEL_ODE2 = 5, + PRB_MX_ADDR_MOD_SEL_DA1 = 6, + PRB_MX_ADDR_MOD_SEL_DA2 = 7, + PRB_MX_ADDR_MOD_SEL_IMP1 = 8, + PRB_MX_ADDR_MOD_SEL_IMP2 = 9, + PRB_MX_ADDR_MOD_SEL_OMP1 = 10, + PRB_MX_ADDR_MOD_SEL_OMP2 = 11, + PRB_MX_ADDR_MOD_SEL_ORS1 = 12, + PRB_MX_ADDR_MOD_SEL_ORS2 = 13, + PRB_MX_ADDR_MOD_SEL_REG = 14, + PRB_MX_ADDR_MOD_SEL_MAC1 = 16, + PRB_MX_ADDR_MOD_SEL_MAC2 = 17, + PRB_MX_ADDR_MOD_SEL_VQM1 = 18, + PRB_MX_ADDR_MOD_SEL_VQM2 = 19, + PRB_MX_ADDR_MOD_SEL_MOP = 20, + /* Bit fields indicating which modules + * are valid for each clock domain. + */ + PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7, + PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1, + PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309, + PRB_MX_ADDR_VALID_FC_MOD = 0x00003001, + PRB_MX_ADDR_VALID_TOTAL = 34, + + /* Clock domain values. 
*/ + PRB_MX_ADDR_CLOCK_SHIFT = 6, + PRB_MX_ADDR_SYS_CLOCK = 0, + PRB_MX_ADDR_PCI_CLOCK = 2, + PRB_MX_ADDR_FC_CLOCK = 5, + PRB_MX_ADDR_XGM_CLOCK = 6, + + PRB_MX_ADDR_MAX_MUX = 64, +}; + +/* + * Control Register Set Map + */ +enum { + PROC_ADDR = 0, /* Use semaphore */ + PROC_DATA = 0x04, /* Use semaphore */ + SYS = 0x08, + RST_FO = 0x0c, + FSC = 0x10, + CSR = 0x14, + LED = 0x18, + ICB_RID = 0x1c, /* Use semaphore */ + ICB_L = 0x20, /* Use semaphore */ + ICB_H = 0x24, /* Use semaphore */ + CFG = 0x28, + BIOS_ADDR = 0x2c, + STS = 0x30, + INTR_EN = 0x34, + INTR_MASK = 0x38, + ISR1 = 0x3c, + ISR2 = 0x40, + ISR3 = 0x44, + ISR4 = 0x48, + REV_ID = 0x4c, + FRC_ECC_ERR = 0x50, + ERR_STS = 0x54, + RAM_DBG_ADDR = 0x58, + RAM_DBG_DATA = 0x5c, + ECC_ERR_CNT = 0x60, + SEM = 0x64, + GPIO_1 = 0x68, /* Use semaphore */ + GPIO_2 = 0x6c, /* Use semaphore */ + GPIO_3 = 0x70, /* Use semaphore */ + RSVD2 = 0x74, + XGMAC_ADDR = 0x78, /* Use semaphore */ + XGMAC_DATA = 0x7c, /* Use semaphore */ + NIC_ETS = 0x80, + CNA_ETS = 0x84, + FLASH_ADDR = 0x88, /* Use semaphore */ + FLASH_DATA = 0x8c, /* Use semaphore */ + CQ_STOP = 0x90, + PAGE_TBL_RID = 0x94, + WQ_PAGE_TBL_LO = 0x98, + WQ_PAGE_TBL_HI = 0x9c, + CQ_PAGE_TBL_LO = 0xa0, + CQ_PAGE_TBL_HI = 0xa4, + MAC_ADDR_IDX = 0xa8, /* Use semaphore */ + MAC_ADDR_DATA = 0xac, /* Use semaphore */ + COS_DFLT_CQ1 = 0xb0, + COS_DFLT_CQ2 = 0xb4, + ETYPE_SKIP1 = 0xb8, + ETYPE_SKIP2 = 0xbc, + SPLT_HDR = 0xc0, + FC_PAUSE_THRES = 0xc4, + NIC_PAUSE_THRES = 0xc8, + FC_ETHERTYPE = 0xcc, + FC_RCV_CFG = 0xd0, + NIC_RCV_CFG = 0xd4, + FC_COS_TAGS = 0xd8, + NIC_COS_TAGS = 0xdc, + MGMT_RCV_CFG = 0xe0, + RT_IDX = 0xe4, + RT_DATA = 0xe8, + RSVD7 = 0xec, + XG_SERDES_ADDR = 0xf0, + XG_SERDES_DATA = 0xf4, + PRB_MX_ADDR = 0xf8, /* Use semaphore */ + PRB_MX_DATA = 0xfc, /* Use semaphore */ +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#define SMALL_BUFFER_SIZE 256 +#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE +#define SPLT_SETTING FSC_DBRST_1024 +#define SPLT_LEN 0 +#define QLGE_SB_PAD 0 +#else +#define SMALL_BUFFER_SIZE 512 +#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2) +#define SPLT_SETTING FSC_SH +#define SPLT_LEN (SPLT_HDR_EP | \ + min(SMALL_BUF_MAP_SIZE, 1023)) +#define QLGE_SB_PAD 32 +#endif + +/* + * CAM output format. + */ +enum { + CAM_OUT_ROUTE_FC = 0, + CAM_OUT_ROUTE_NIC = 1, + CAM_OUT_FUNC_SHIFT = 2, + CAM_OUT_RV = (1 << 4), + CAM_OUT_SH = (1 << 15), + CAM_OUT_CQ_ID_SHIFT = 5, +}; + +/* + * Mailbox definitions + */ +enum { + /* Asynchronous Event Notifications */ + AEN_SYS_ERR = 0x00008002, + AEN_LINK_UP = 0x00008011, + AEN_LINK_DOWN = 0x00008012, + AEN_IDC_CMPLT = 0x00008100, + AEN_IDC_REQ = 0x00008101, + AEN_IDC_EXT = 0x00008102, + AEN_DCBX_CHG = 0x00008110, + AEN_AEN_LOST = 0x00008120, + AEN_AEN_SFP_IN = 0x00008130, + AEN_AEN_SFP_OUT = 0x00008131, + AEN_FW_INIT_DONE = 0x00008400, + AEN_FW_INIT_FAIL = 0x00008401, + + /* Mailbox Command Opcodes. 
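	 * A command is issued by filling in a struct mbox_params (declared
	 * further down): mbox_in[] holds the opcode and its arguments, while
	 * in_count and out_count give the number of mailboxes written and
	 * read back.  As a sketch (not code from this patch), MB_CMD_ABOUT_FW
	 * uses in_count = 1 and out_count = 3; on success mbox_out[0] is
	 * MB_CMD_STS_GOOD and mbox_out[1] carries the firmware revision.
	 * The mailbox transport itself lives in qlge_mpi.c.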
*/ + MB_CMD_NOP = 0x00000000, + MB_CMD_EX_FW = 0x00000002, + MB_CMD_MB_TEST = 0x00000006, + MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */ + MB_CMD_ABOUT_FW = 0x00000008, + MB_CMD_COPY_RISC_RAM = 0x0000000a, + MB_CMD_LOAD_RISC_RAM = 0x0000000b, + MB_CMD_DUMP_RISC_RAM = 0x0000000c, + MB_CMD_WRITE_RAM = 0x0000000d, + MB_CMD_INIT_RISC_RAM = 0x0000000e, + MB_CMD_READ_RAM = 0x0000000f, + MB_CMD_STOP_FW = 0x00000014, + MB_CMD_MAKE_SYS_ERR = 0x0000002a, + MB_CMD_WRITE_SFP = 0x00000030, + MB_CMD_READ_SFP = 0x00000031, + MB_CMD_INIT_FW = 0x00000060, + MB_CMD_GET_IFCB = 0x00000061, + MB_CMD_GET_FW_STATE = 0x00000069, + MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */ + MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */ + MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */ + MB_WOL_DISABLE = 0, + MB_WOL_MAGIC_PKT = (1 << 1), + MB_WOL_FLTR = (1 << 2), + MB_WOL_UCAST = (1 << 3), + MB_WOL_MCAST = (1 << 4), + MB_WOL_BCAST = (1 << 5), + MB_WOL_LINK_UP = (1 << 6), + MB_WOL_LINK_DOWN = (1 << 7), + MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */ + MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */ + MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */ + MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */ + MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */ + MB_CMD_SET_WOL_IMMED = 0x00000115, + MB_CMD_PORT_RESET = 0x00000120, + MB_CMD_SET_PORT_CFG = 0x00000122, + MB_CMD_GET_PORT_CFG = 0x00000123, + MB_CMD_GET_LINK_STS = 0x00000124, + MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */ + QL_LED_BLINK = 0x03e803e8, + MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */ + MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ + MB_SET_MPI_TFK_STOP = (1 << 0), + MB_SET_MPI_TFK_RESUME = (1 << 1), + MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ + MB_GET_MPI_TFK_STOPPED = (1 << 0), + MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), + /* Sub-commands for IDC request. + * This describes the reason for the + * IDC request. + */ + MB_CMD_IOP_NONE = 0x0000, + MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001, + MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002, + MB_CMD_IOP_PREP_LINK_DOWN = 0x0010, + MB_CMD_IOP_DVR_START = 0x0100, + MB_CMD_IOP_FLASH_ACC = 0x0101, + MB_CMD_IOP_RESTART_MPI = 0x0102, + MB_CMD_IOP_CORE_DUMP_MPI = 0x0103, + + /* Mailbox Command Status. */ + MB_CMD_STS_GOOD = 0x00004000, /* Success. */ + MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */ + MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */ + MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */ + MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */ + MB_CMD_STS_ERR = 0x00004005, /* System Error. */ + MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */ +}; + +struct mbox_params { + u32 mbox_in[MAILBOX_COUNT]; + u32 mbox_out[MAILBOX_COUNT]; + int in_count; + int out_count; +}; + +struct flash_params_8012 { + u8 dev_id_str[4]; + __le16 size; + __le16 csum; + __le16 ver; + __le16 sub_dev_id; + u8 mac_addr[6]; + __le16 res; +}; + +/* 8000 device's flash is a different structure + * at a different offset in flash. + */ +#define FUNC0_FLASH_OFFSET 0x140200 +#define FUNC1_FLASH_OFFSET 0x140600 + +/* Flash related data structures. 
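+ * Flash words are read through the FLASH_ADDR/FLASH_DATA register pair
+ * defined earlier.  As a sketch (field usage assumed, not code from
+ * this patch), the per-function copy of the 8000 layout is picked by
+ * port number:
+ *
+ *	offset = qdev->port ? FUNC1_FLASH_OFFSET : FUNC0_FLASH_OFFSET;
+ *
+ * The actual flash readers live in qlge_main.c.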
*/ +struct flash_params_8000 { + u8 dev_id_str[4]; /* "8000" */ + __le16 ver; + __le16 size; + __le16 csum; + __le16 reserved0; + __le16 total_size; + __le16 entry_count; + u8 data_type0; + u8 data_size0; + u8 mac_addr[6]; + u8 data_type1; + u8 data_size1; + u8 mac_addr1[6]; + u8 data_type2; + u8 data_size2; + __le16 vlan_id; + u8 data_type3; + u8 data_size3; + __le16 last; + u8 reserved1[464]; + __le16 subsys_ven_id; + __le16 subsys_dev_id; + u8 reserved2[4]; +}; + +union flash_params { + struct flash_params_8012 flash_params_8012; + struct flash_params_8000 flash_params_8000; +}; + +/* + * doorbell space for the rx ring context + */ +struct rx_doorbell_context { + u32 cnsmr_idx; /* 0x00 */ + u32 valid; /* 0x04 */ + u32 reserved[4]; /* 0x08-0x14 */ + u32 lbq_prod_idx; /* 0x18 */ + u32 sbq_prod_idx; /* 0x1c */ +}; + +/* + * doorbell space for the tx ring context + */ +struct tx_doorbell_context { + u32 prod_idx; /* 0x00 */ + u32 valid; /* 0x04 */ + u32 reserved[4]; /* 0x08-0x14 */ + u32 lbq_prod_idx; /* 0x18 */ + u32 sbq_prod_idx; /* 0x1c */ +}; + +/* DATA STRUCTURES SHARED WITH HARDWARE. */ +struct tx_buf_desc { + __le64 addr; + __le32 len; +#define TX_DESC_LEN_MASK 0x000fffff +#define TX_DESC_C 0x40000000 +#define TX_DESC_E 0x80000000 +} __packed; + +/* + * IOCB Definitions... + */ + +#define OPCODE_OB_MAC_IOCB 0x01 +#define OPCODE_OB_MAC_TSO_IOCB 0x02 +#define OPCODE_IB_MAC_IOCB 0x20 +#define OPCODE_IB_MPI_IOCB 0x21 +#define OPCODE_IB_AE_IOCB 0x3f + +struct ob_mac_iocb_req { + u8 opcode; + u8 flags1; +#define OB_MAC_IOCB_REQ_OI 0x01 +#define OB_MAC_IOCB_REQ_I 0x02 +#define OB_MAC_IOCB_REQ_D 0x08 +#define OB_MAC_IOCB_REQ_F 0x10 + u8 flags2; + u8 flags3; +#define OB_MAC_IOCB_DFP 0x02 +#define OB_MAC_IOCB_V 0x04 + __le32 reserved1[2]; + __le16 frame_len; +#define OB_MAC_IOCB_LEN_MASK 0x3ffff + __le16 reserved2; + u32 tid; + u32 txq_idx; + __le32 reserved3; + __le16 vlan_tci; + __le16 reserved4; + struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; +} __packed; + +struct ob_mac_iocb_rsp { + u8 opcode; /* */ + u8 flags1; /* */ +#define OB_MAC_IOCB_RSP_OI 0x01 /* */ +#define OB_MAC_IOCB_RSP_I 0x02 /* */ +#define OB_MAC_IOCB_RSP_E 0x08 /* */ +#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */ +#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */ +#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */ + u8 flags2; /* */ + u8 flags3; /* */ +#define OB_MAC_IOCB_RSP_B 0x80 /* */ + u32 tid; + u32 txq_idx; + __le32 reserved[13]; +} __packed; + +struct ob_mac_tso_iocb_req { + u8 opcode; + u8 flags1; +#define OB_MAC_TSO_IOCB_OI 0x01 +#define OB_MAC_TSO_IOCB_I 0x02 +#define OB_MAC_TSO_IOCB_D 0x08 +#define OB_MAC_TSO_IOCB_IP4 0x40 +#define OB_MAC_TSO_IOCB_IP6 0x80 + u8 flags2; +#define OB_MAC_TSO_IOCB_LSO 0x20 +#define OB_MAC_TSO_IOCB_UC 0x40 +#define OB_MAC_TSO_IOCB_TC 0x80 + u8 flags3; +#define OB_MAC_TSO_IOCB_IC 0x01 +#define OB_MAC_TSO_IOCB_DFP 0x02 +#define OB_MAC_TSO_IOCB_V 0x04 + __le32 reserved1[2]; + __le32 frame_len; + u32 tid; + u32 txq_idx; + __le16 total_hdrs_len; + __le16 net_trans_offset; +#define OB_MAC_TRANSPORT_HDR_SHIFT 6 + __le16 vlan_tci; + __le16 mss; + struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; +} __packed; + +struct ob_mac_tso_iocb_rsp { + u8 opcode; + u8 flags1; +#define OB_MAC_TSO_IOCB_RSP_OI 0x01 +#define OB_MAC_TSO_IOCB_RSP_I 0x02 +#define OB_MAC_TSO_IOCB_RSP_E 0x08 +#define OB_MAC_TSO_IOCB_RSP_S 0x10 +#define OB_MAC_TSO_IOCB_RSP_L 0x20 +#define OB_MAC_TSO_IOCB_RSP_P 0x40 + u8 flags2; /* */ + u8 flags3; /* */ +#define OB_MAC_TSO_IOCB_RSP_B 0x8000 + u32 tid; + u32 txq_idx; + __le32 reserved2[13]; +} 
__packed; + +struct ib_mac_iocb_rsp { + u8 opcode; /* 0x20 */ + u8 flags1; +#define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */ +#define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */ +#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ +#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ +#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ +#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */ +#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */ +#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */ +#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */ +#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */ +#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */ +#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */ + u8 flags2; +#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */ +#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */ +#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */ +#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04 +#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08 +#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10 +#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14 +#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18 +#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c +#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */ +#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */ +#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */ + u8 flags3; +#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */ +#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */ +#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */ +#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */ +#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */ +#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */ +#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */ +#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */ +#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */ +#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */ +#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */ + __le32 data_len; /* */ + __le64 data_addr; /* */ + __le32 rss; /* */ + __le16 vlan_id; /* 12 bits */ +#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */ +#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */ +#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff + + __le16 reserved1; + __le32 reserved2[6]; + u8 reserved3[3]; + u8 flags4; +#define IB_MAC_IOCB_RSP_HV 0x20 +#define IB_MAC_IOCB_RSP_HS 0x40 +#define IB_MAC_IOCB_RSP_HL 0x80 + __le32 hdr_len; /* */ + __le64 hdr_addr; /* */ +} __packed; + +struct ib_ae_iocb_rsp { + u8 opcode; + u8 flags1; +#define IB_AE_IOCB_RSP_OI 0x01 +#define IB_AE_IOCB_RSP_I 0x02 + u8 event; +#define LINK_UP_EVENT 0x00 +#define LINK_DOWN_EVENT 0x01 +#define CAM_LOOKUP_ERR_EVENT 0x06 +#define SOFT_ECC_ERROR_EVENT 0x07 +#define MGMT_ERR_EVENT 0x08 +#define TEN_GIG_MAC_EVENT 0x09 +#define GPI0_H2L_EVENT 0x10 +#define GPI0_L2H_EVENT 0x20 +#define GPI1_H2L_EVENT 0x11 +#define GPI1_L2H_EVENT 0x21 +#define PCI_ERR_ANON_BUF_RD 0x40 + u8 q_id; + __le32 reserved[15]; +} __packed; + +/* + * These three structures are for generic + * handling of ib and ob iocbs. + */ +struct ql_net_rsp_iocb { + u8 opcode; + u8 flags0; + __le16 length; + __le32 tid; + __le32 reserved[14]; +} __packed; + +struct net_req_iocb { + u8 opcode; + u8 flags0; + __le16 flags1; + __le32 tid; + __le32 reserved1[30]; +} __packed; + +/* + * tx ring initialization control block for chip. 
+ * It is defined as: + * "Work Queue Initialization Control Block" + */ +struct wqicb { + __le16 len; +#define Q_LEN_V (1 << 4) +#define Q_LEN_CPP_CONT 0x0000 +#define Q_LEN_CPP_16 0x0001 +#define Q_LEN_CPP_32 0x0002 +#define Q_LEN_CPP_64 0x0003 +#define Q_LEN_CPP_512 0x0006 + __le16 flags; +#define Q_PRI_SHIFT 1 +#define Q_FLAGS_LC 0x1000 +#define Q_FLAGS_LB 0x2000 +#define Q_FLAGS_LI 0x4000 +#define Q_FLAGS_LO 0x8000 + __le16 cq_id_rss; +#define Q_CQ_ID_RSS_RV 0x8000 + __le16 rid; + __le64 addr; + __le64 cnsmr_idx_addr; +} __packed; + +/* + * rx ring initialization control block for chip. + * It is defined as: + * "Completion Queue Initialization Control Block" + */ +struct cqicb { + u8 msix_vect; + u8 reserved1; + u8 reserved2; + u8 flags; +#define FLAGS_LV 0x08 +#define FLAGS_LS 0x10 +#define FLAGS_LL 0x20 +#define FLAGS_LI 0x40 +#define FLAGS_LC 0x80 + __le16 len; +#define LEN_V (1 << 4) +#define LEN_CPP_CONT 0x0000 +#define LEN_CPP_32 0x0001 +#define LEN_CPP_64 0x0002 +#define LEN_CPP_128 0x0003 + __le16 rid; + __le64 addr; + __le64 prod_idx_addr; + __le16 pkt_delay; + __le16 irq_delay; + __le64 lbq_addr; + __le16 lbq_buf_size; + __le16 lbq_len; /* entry count */ + __le64 sbq_addr; + __le16 sbq_buf_size; + __le16 sbq_len; /* entry count */ +} __packed; + +struct ricb { + u8 base_cq; +#define RSS_L4K 0x80 + u8 flags; +#define RSS_L6K 0x01 +#define RSS_LI 0x02 +#define RSS_LB 0x04 +#define RSS_LM 0x08 +#define RSS_RI4 0x10 +#define RSS_RT4 0x20 +#define RSS_RI6 0x40 +#define RSS_RT6 0x80 + __le16 mask; + u8 hash_cq_id[1024]; + __le32 ipv6_hash_key[10]; + __le32 ipv4_hash_key[4]; +} __packed; + +/* SOFTWARE/DRIVER DATA STRUCTURES. */ + +struct oal { + struct tx_buf_desc oal[TX_DESC_PER_OAL]; +}; + +struct map_list { + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct tx_ring_desc { + struct sk_buff *skb; + struct ob_mac_iocb_req *queue_entry; + u32 index; + struct oal oal; + struct map_list map[MAX_SKB_FRAGS + 1]; + int map_cnt; + struct tx_ring_desc *next; +}; + +struct page_chunk { + struct page *page; /* master page */ + char *va; /* virt addr for this chunk */ + u64 map; /* mapping for master */ + unsigned int offset; /* offset for this chunk */ + unsigned int last_flag; /* flag set for last chunk in page */ +}; + +struct bq_desc { + union { + struct page_chunk pg_chunk; + struct sk_buff *skb; + } p; + __le64 *addr; + u32 index; + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) + +struct tx_ring { + /* + * queue info. + */ + struct wqicb wqicb; /* structure used to inform chip of new queue */ + void *wq_base; /* pci_alloc:virtual addr for tx */ + dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */ + __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */ + dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */ + u32 wq_size; /* size in bytes of queue area */ + u32 wq_len; /* number of entries in queue */ + void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */ + void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */ + u16 prod_idx; /* current value for prod idx */ + u16 cq_id; /* completion (rx) queue for tx completions */ + u8 wq_id; /* queue id for this entry */ + u8 reserved1[3]; + struct tx_ring_desc *q; /* descriptor list for the queue */ + spinlock_t lock; + atomic_t tx_count; /* counts down for every outstanding IO */ + atomic_t queue_stopped; /* Turns queue off when full. 
*/ + struct delayed_work tx_work; + struct ql_adapter *qdev; + u64 tx_packets; + u64 tx_bytes; + u64 tx_errors; +}; + +/* + * Type of inbound queue. + */ +enum { + DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */ + TX_Q = 3, /* Handles outbound completions. */ + RX_Q = 4, /* Handles inbound completions. */ +}; + +struct rx_ring { + struct cqicb cqicb; /* The chip's completion queue init control block. */ + + /* Completion queue elements. */ + void *cq_base; + dma_addr_t cq_base_dma; + u32 cq_size; + u32 cq_len; + u16 cq_id; + __le32 *prod_idx_sh_reg; /* Shadowed producer register. */ + dma_addr_t prod_idx_sh_reg_dma; + void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */ + u32 cnsmr_idx; /* current sw idx */ + struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */ + void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */ + + /* Large buffer queue elements. */ + u32 lbq_len; /* entry count */ + u32 lbq_size; /* size in bytes of queue */ + u32 lbq_buf_size; + void *lbq_base; + dma_addr_t lbq_base_dma; + void *lbq_base_indirect; + dma_addr_t lbq_base_indirect_dma; + struct page_chunk pg_chunk; /* current page for chunks */ + struct bq_desc *lbq; /* array of control blocks */ + void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */ + u32 lbq_prod_idx; /* current sw prod idx */ + u32 lbq_curr_idx; /* next entry we expect */ + u32 lbq_clean_idx; /* beginning of new descs */ + u32 lbq_free_cnt; /* free buffer desc cnt */ + + /* Small buffer queue elements. */ + u32 sbq_len; /* entry count */ + u32 sbq_size; /* size in bytes of queue */ + u32 sbq_buf_size; + void *sbq_base; + dma_addr_t sbq_base_dma; + void *sbq_base_indirect; + dma_addr_t sbq_base_indirect_dma; + struct bq_desc *sbq; /* array of control blocks */ + void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */ + u32 sbq_prod_idx; /* current sw prod idx */ + u32 sbq_curr_idx; /* next entry we expect */ + u32 sbq_clean_idx; /* beginning of new descs */ + u32 sbq_free_cnt; /* free buffer desc cnt */ + + /* Misc. handler elements. */ + u32 type; /* Type of queue, tx, rx. */ + u32 irq; /* Which vector this ring is assigned. */ + u32 cpu; /* Which CPU this should run on. */ + char name[IFNAMSIZ + 5]; + struct napi_struct napi; + u8 reserved; + struct ql_adapter *qdev; + u64 rx_packets; + u64 rx_multicast; + u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; +}; + +/* + * RSS Initialization Control Block + */ +struct hash_id { + u8 value[4]; +}; + +struct nic_stats { + /* + * These stats come from offset 200h to 278h + * in the XGMAC register. + */ + u64 tx_pkts; + u64 tx_bytes; + u64 tx_mcast_pkts; + u64 tx_bcast_pkts; + u64 tx_ucast_pkts; + u64 tx_ctl_pkts; + u64 tx_pause_pkts; + u64 tx_64_pkt; + u64 tx_65_to_127_pkt; + u64 tx_128_to_255_pkt; + u64 tx_256_511_pkt; + u64 tx_512_to_1023_pkt; + u64 tx_1024_to_1518_pkt; + u64 tx_1519_to_max_pkt; + u64 tx_undersize_pkt; + u64 tx_oversize_pkt; + + /* + * These stats come from offset 300h to 3C8h + * in the XGMAC register. 
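	 * Each counter is 64 bits wide and 8 bytes apart, matching the
	 * RX_BYTES..RX_LEN_ERR_PKTS offsets defined earlier; as with the
	 * other XGMAC registers, they are gathered through the
	 * XGMAC_ADDR/XGMAC_DATA pair, two 32-bit reads per counter.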
+ */ + u64 rx_bytes; + u64 rx_bytes_ok; + u64 rx_pkts; + u64 rx_pkts_ok; + u64 rx_bcast_pkts; + u64 rx_mcast_pkts; + u64 rx_ucast_pkts; + u64 rx_undersize_pkts; + u64 rx_oversize_pkts; + u64 rx_jabber_pkts; + u64 rx_undersize_fcerr_pkts; + u64 rx_drop_events; + u64 rx_fcerr_pkts; + u64 rx_align_err; + u64 rx_symbol_err; + u64 rx_mac_err; + u64 rx_ctl_pkts; + u64 rx_pause_pkts; + u64 rx_64_pkts; + u64 rx_65_to_127_pkts; + u64 rx_128_255_pkts; + u64 rx_256_511_pkts; + u64 rx_512_to_1023_pkts; + u64 rx_1024_to_1518_pkts; + u64 rx_1519_to_max_pkts; + u64 rx_len_err_pkts; + /* + * These stats come from offset 500h to 5C8h + * in the XGMAC register. + */ + u64 tx_cbfc_pause_frames0; + u64 tx_cbfc_pause_frames1; + u64 tx_cbfc_pause_frames2; + u64 tx_cbfc_pause_frames3; + u64 tx_cbfc_pause_frames4; + u64 tx_cbfc_pause_frames5; + u64 tx_cbfc_pause_frames6; + u64 tx_cbfc_pause_frames7; + u64 rx_cbfc_pause_frames0; + u64 rx_cbfc_pause_frames1; + u64 rx_cbfc_pause_frames2; + u64 rx_cbfc_pause_frames3; + u64 rx_cbfc_pause_frames4; + u64 rx_cbfc_pause_frames5; + u64 rx_cbfc_pause_frames6; + u64 rx_cbfc_pause_frames7; + u64 rx_nic_fifo_drop; +}; + +/* Firmware coredump internal register address/length pairs. */ +enum { + MPI_CORE_REGS_ADDR = 0x00030000, + MPI_CORE_REGS_CNT = 127, + MPI_CORE_SH_REGS_CNT = 16, + TEST_REGS_ADDR = 0x00001000, + TEST_REGS_CNT = 23, + RMII_REGS_ADDR = 0x00001040, + RMII_REGS_CNT = 64, + FCMAC1_REGS_ADDR = 0x00001080, + FCMAC2_REGS_ADDR = 0x000010c0, + FCMAC_REGS_CNT = 64, + FC1_MBX_REGS_ADDR = 0x00001100, + FC2_MBX_REGS_ADDR = 0x00001240, + FC_MBX_REGS_CNT = 64, + IDE_REGS_ADDR = 0x00001140, + IDE_REGS_CNT = 64, + NIC1_MBX_REGS_ADDR = 0x00001180, + NIC2_MBX_REGS_ADDR = 0x00001280, + NIC_MBX_REGS_CNT = 64, + SMBUS_REGS_ADDR = 0x00001200, + SMBUS_REGS_CNT = 64, + I2C_REGS_ADDR = 0x00001fc0, + I2C_REGS_CNT = 64, + MEMC_REGS_ADDR = 0x00003000, + MEMC_REGS_CNT = 256, + PBUS_REGS_ADDR = 0x00007c00, + PBUS_REGS_CNT = 256, + MDE_REGS_ADDR = 0x00010000, + MDE_REGS_CNT = 6, + CODE_RAM_ADDR = 0x00020000, + CODE_RAM_CNT = 0x2000, + MEMC_RAM_ADDR = 0x00100000, + MEMC_RAM_CNT = 0x2000, +}; + +#define MPI_COREDUMP_COOKIE 0x5555aaaa +struct mpi_coredump_global_header { + u32 cookie; + u8 idString[16]; + u32 timeLo; + u32 timeHi; + u32 imageSize; + u32 headerSize; + u8 info[220]; +}; + +struct mpi_coredump_segment_header { + u32 cookie; + u32 segNum; + u32 segSize; + u32 extra; + u8 description[16]; +}; + +/* Firmware coredump header segment numbers. 
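+ * Each segment of the dump image is preceded by a
+ * struct mpi_coredump_segment_header; a reader of the image would
+ * typically check hdr->cookie against MPI_COREDUMP_COOKIE and then
+ * dispatch on hdr->segNum using the values below.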
 */
+enum {
+	CORE_SEG_NUM = 1,
+	TEST_LOGIC_SEG_NUM = 2,
+	RMII_SEG_NUM = 3,
+	FCMAC1_SEG_NUM = 4,
+	FCMAC2_SEG_NUM = 5,
+	FC1_MBOX_SEG_NUM = 6,
+	IDE_SEG_NUM = 7,
+	NIC1_MBOX_SEG_NUM = 8,
+	SMBUS_SEG_NUM = 9,
+	FC2_MBOX_SEG_NUM = 10,
+	NIC2_MBOX_SEG_NUM = 11,
+	I2C_SEG_NUM = 12,
+	MEMC_SEG_NUM = 13,
+	PBUS_SEG_NUM = 14,
+	MDE_SEG_NUM = 15,
+	NIC1_CONTROL_SEG_NUM = 16,
+	NIC2_CONTROL_SEG_NUM = 17,
+	NIC1_XGMAC_SEG_NUM = 18,
+	NIC2_XGMAC_SEG_NUM = 19,
+	WCS_RAM_SEG_NUM = 20,
+	MEMC_RAM_SEG_NUM = 21,
+	XAUI_AN_SEG_NUM = 22,
+	XAUI_HSS_PCS_SEG_NUM = 23,
+	XFI_AN_SEG_NUM = 24,
+	XFI_TRAIN_SEG_NUM = 25,
+	XFI_HSS_PCS_SEG_NUM = 26,
+	XFI_HSS_TX_SEG_NUM = 27,
+	XFI_HSS_RX_SEG_NUM = 28,
+	XFI_HSS_PLL_SEG_NUM = 29,
+	MISC_NIC_INFO_SEG_NUM = 30,
+	INTR_STATES_SEG_NUM = 31,
+	CAM_ENTRIES_SEG_NUM = 32,
+	ROUTING_WORDS_SEG_NUM = 33,
+	ETS_SEG_NUM = 34,
+	PROBE_DUMP_SEG_NUM = 35,
+	ROUTING_INDEX_SEG_NUM = 36,
+	MAC_PROTOCOL_SEG_NUM = 37,
+	XAUI2_AN_SEG_NUM = 38,
+	XAUI2_HSS_PCS_SEG_NUM = 39,
+	XFI2_AN_SEG_NUM = 40,
+	XFI2_TRAIN_SEG_NUM = 41,
+	XFI2_HSS_PCS_SEG_NUM = 42,
+	XFI2_HSS_TX_SEG_NUM = 43,
+	XFI2_HSS_RX_SEG_NUM = 44,
+	XFI2_HSS_PLL_SEG_NUM = 45,
+	SEM_REGS_SEG_NUM = 50
+
+};
+
+/* There are 64 generic NIC registers. */
+#define NIC_REGS_DUMP_WORD_COUNT	64
+/* XGMAC word count. */
+#define XGMAC_DUMP_WORD_COUNT	(XGMAC_REGISTER_END / 4)
+/* Word counts for the SERDES blocks. */
+#define XG_SERDES_XAUI_AN_COUNT	14
+#define XG_SERDES_XAUI_HSS_PCS_COUNT	33
+#define XG_SERDES_XFI_AN_COUNT	14
+#define XG_SERDES_XFI_TRAIN_COUNT	12
+#define XG_SERDES_XFI_HSS_PCS_COUNT	15
+#define XG_SERDES_XFI_HSS_TX_COUNT	32
+#define XG_SERDES_XFI_HSS_RX_COUNT	32
+#define XG_SERDES_XFI_HSS_PLL_COUNT	32
+
+/* There are 2 CNA ETS and 8 NIC ETS registers. */
+#define ETS_REGS_DUMP_WORD_COUNT	10
+
+/* Each probe mux entry stores the probe type plus 64 entries
+ * that are each 64 bits in length. There are a total of
+ * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
+ */
+#define PRB_MX_ADDR_PRB_WORD_COUNT	(1 + (PRB_MX_ADDR_MAX_MUX * 2))
+#define PRB_MX_DUMP_TOT_COUNT	(PRB_MX_ADDR_PRB_WORD_COUNT * \
+					PRB_MX_ADDR_VALID_TOTAL)
+/* Each routing entry consists of 4 32-bit words.
+ * They are route type, index, index word, and result.
+ * There are 2 route blocks with 8 entries each and
+ * 2 NIC blocks with 16 entries each.
+ * The total is 48 entries with 4 words each.
+ */
+#define RT_IDX_DUMP_ENTRIES	48
+#define RT_IDX_DUMP_WORDS_PER_ENTRY	4
+#define RT_IDX_DUMP_TOT_WORDS	(RT_IDX_DUMP_ENTRIES * \
+					RT_IDX_DUMP_WORDS_PER_ENTRY)
+/* There are 10 address blocks in filter, each with
+ * different entry counts and different word-count-per-entry.
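+ * For reference, the sum below works out to (512 * 3) + (32 * 2) +
+ * 4096 + 4096 + (4 * 2) + (8 * 2) + 16 + 4 + (4 * 4) + 4 = 9856
+ * entries, so the coredump reserves 9856 * 2 = 19712 words here.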
+ */
+#define MAC_ADDR_DUMP_ENTRIES \
+	((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
+	(MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
+	(MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
+	(MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
+	(MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
+	(MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
+	(MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
+	(MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
+	(MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
+	(MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
+#define MAC_ADDR_DUMP_WORDS_PER_ENTRY	2
+#define MAC_ADDR_DUMP_TOT_WORDS	(MAC_ADDR_DUMP_ENTRIES * \
+					MAC_ADDR_DUMP_WORDS_PER_ENTRY)
+/* Maximum of 4 functions whose semaphore registers are
+ * in the coredump.
+ */
+#define MAX_SEMAPHORE_FUNCTIONS	4
+/* Defines for accessing the MPI shadow registers. */
+#define RISC_124		0x0003007c
+#define RISC_127		0x0003007f
+#define SHADOW_OFFSET	0xb0000000
+#define SHADOW_REG_SHIFT	20
+
+struct ql_nic_misc {
+	u32 rx_ring_count;
+	u32 tx_ring_count;
+	u32 intr_count;
+	u32 function;
+};
+
+struct ql_reg_dump {
+
+	/* segment 0 */
+	struct mpi_coredump_global_header mpi_global_header;
+
+	/* segment 16 */
+	struct mpi_coredump_segment_header nic_regs_seg_hdr;
+	u32 nic_regs[64];
+
+	/* segment 30 */
+	struct mpi_coredump_segment_header misc_nic_seg_hdr;
+	struct ql_nic_misc misc_nic_info;
+
+	/* segment 31 */
+	/* one interrupt state for each CQ */
+	struct mpi_coredump_segment_header intr_states_seg_hdr;
+	u32 intr_states[MAX_CPUS];
+
+	/* segment 32 */
+	/* 3 cam words each for 16 unicast,
+	 * 2 cam words for each of 32 multicast.
+ */ + struct mpi_coredump_segment_header cam_entries_seg_hdr; + u32 cam_entries[(16 * 3) + (32 * 3)]; + + /* segment 33 */ + struct mpi_coredump_segment_header nic_routing_words_seg_hdr; + u32 nic_routing_words[16]; + + /* segment 34 */ + struct mpi_coredump_segment_header ets_seg_hdr; + u32 ets[8+2]; +}; + +struct ql_mpi_coredump { + /* segment 0 */ + struct mpi_coredump_global_header mpi_global_header; + + /* segment 1 */ + struct mpi_coredump_segment_header core_regs_seg_hdr; + u32 mpi_core_regs[MPI_CORE_REGS_CNT]; + u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT]; + + /* segment 2 */ + struct mpi_coredump_segment_header test_logic_regs_seg_hdr; + u32 test_logic_regs[TEST_REGS_CNT]; + + /* segment 3 */ + struct mpi_coredump_segment_header rmii_regs_seg_hdr; + u32 rmii_regs[RMII_REGS_CNT]; + + /* segment 4 */ + struct mpi_coredump_segment_header fcmac1_regs_seg_hdr; + u32 fcmac1_regs[FCMAC_REGS_CNT]; + + /* segment 5 */ + struct mpi_coredump_segment_header fcmac2_regs_seg_hdr; + u32 fcmac2_regs[FCMAC_REGS_CNT]; + + /* segment 6 */ + struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr; + u32 fc1_mbx_regs[FC_MBX_REGS_CNT]; + + /* segment 7 */ + struct mpi_coredump_segment_header ide_regs_seg_hdr; + u32 ide_regs[IDE_REGS_CNT]; + + /* segment 8 */ + struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr; + u32 nic1_mbx_regs[NIC_MBX_REGS_CNT]; + + /* segment 9 */ + struct mpi_coredump_segment_header smbus_regs_seg_hdr; + u32 smbus_regs[SMBUS_REGS_CNT]; + + /* segment 10 */ + struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr; + u32 fc2_mbx_regs[FC_MBX_REGS_CNT]; + + /* segment 11 */ + struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr; + u32 nic2_mbx_regs[NIC_MBX_REGS_CNT]; + + /* segment 12 */ + struct mpi_coredump_segment_header i2c_regs_seg_hdr; + u32 i2c_regs[I2C_REGS_CNT]; + /* segment 13 */ + struct mpi_coredump_segment_header memc_regs_seg_hdr; + u32 memc_regs[MEMC_REGS_CNT]; + + /* segment 14 */ + struct mpi_coredump_segment_header pbus_regs_seg_hdr; + u32 pbus_regs[PBUS_REGS_CNT]; + + /* segment 15 */ + struct mpi_coredump_segment_header mde_regs_seg_hdr; + u32 mde_regs[MDE_REGS_CNT]; + + /* segment 16 */ + struct mpi_coredump_segment_header nic_regs_seg_hdr; + u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT]; + + /* segment 17 */ + struct mpi_coredump_segment_header nic2_regs_seg_hdr; + u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT]; + + /* segment 18 */ + struct mpi_coredump_segment_header xgmac1_seg_hdr; + u32 xgmac1[XGMAC_DUMP_WORD_COUNT]; + + /* segment 19 */ + struct mpi_coredump_segment_header xgmac2_seg_hdr; + u32 xgmac2[XGMAC_DUMP_WORD_COUNT]; + + /* segment 20 */ + struct mpi_coredump_segment_header code_ram_seg_hdr; + u32 code_ram[CODE_RAM_CNT]; + + /* segment 21 */ + struct mpi_coredump_segment_header memc_ram_seg_hdr; + u32 memc_ram[MEMC_RAM_CNT]; + + /* segment 22 */ + struct mpi_coredump_segment_header xaui_an_hdr; + u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT]; + + /* segment 23 */ + struct mpi_coredump_segment_header xaui_hss_pcs_hdr; + u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT]; + + /* segment 24 */ + struct mpi_coredump_segment_header xfi_an_hdr; + u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT]; + + /* segment 25 */ + struct mpi_coredump_segment_header xfi_train_hdr; + u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT]; + + /* segment 26 */ + struct mpi_coredump_segment_header xfi_hss_pcs_hdr; + u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT]; + + /* segment 27 */ + struct mpi_coredump_segment_header xfi_hss_tx_hdr; + u32 
serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT]; + + /* segment 28 */ + struct mpi_coredump_segment_header xfi_hss_rx_hdr; + u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT]; + + /* segment 29 */ + struct mpi_coredump_segment_header xfi_hss_pll_hdr; + u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT]; + + /* segment 30 */ + struct mpi_coredump_segment_header misc_nic_seg_hdr; + struct ql_nic_misc misc_nic_info; + + /* segment 31 */ + /* one interrupt state for each CQ */ + struct mpi_coredump_segment_header intr_states_seg_hdr; + u32 intr_states[MAX_RX_RINGS]; + + /* segment 32 */ + /* 3 cam words each for 16 unicast, + * 2 cam words for each of 32 multicast. + */ + struct mpi_coredump_segment_header cam_entries_seg_hdr; + u32 cam_entries[(16 * 3) + (32 * 3)]; + + /* segment 33 */ + struct mpi_coredump_segment_header nic_routing_words_seg_hdr; + u32 nic_routing_words[16]; + /* segment 34 */ + struct mpi_coredump_segment_header ets_seg_hdr; + u32 ets[ETS_REGS_DUMP_WORD_COUNT]; + + /* segment 35 */ + struct mpi_coredump_segment_header probe_dump_seg_hdr; + u32 probe_dump[PRB_MX_DUMP_TOT_COUNT]; + + /* segment 36 */ + struct mpi_coredump_segment_header routing_reg_seg_hdr; + u32 routing_regs[RT_IDX_DUMP_TOT_WORDS]; + + /* segment 37 */ + struct mpi_coredump_segment_header mac_prot_reg_seg_hdr; + u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS]; + + /* segment 38 */ + struct mpi_coredump_segment_header xaui2_an_hdr; + u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT]; + + /* segment 39 */ + struct mpi_coredump_segment_header xaui2_hss_pcs_hdr; + u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT]; + + /* segment 40 */ + struct mpi_coredump_segment_header xfi2_an_hdr; + u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT]; + + /* segment 41 */ + struct mpi_coredump_segment_header xfi2_train_hdr; + u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT]; + + /* segment 42 */ + struct mpi_coredump_segment_header xfi2_hss_pcs_hdr; + u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT]; + + /* segment 43 */ + struct mpi_coredump_segment_header xfi2_hss_tx_hdr; + u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT]; + + /* segment 44 */ + struct mpi_coredump_segment_header xfi2_hss_rx_hdr; + u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT]; + + /* segment 45 */ + struct mpi_coredump_segment_header xfi2_hss_pll_hdr; + u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT]; + + /* segment 50 */ + /* semaphore register for all 5 functions */ + struct mpi_coredump_segment_header sem_regs_seg_hdr; + u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS]; +}; + +/* + * intr_context structure is used during initialization + * to hook the interrupts. It is also used in a single + * irq environment as a context to the ISR. + */ +struct intr_context { + struct ql_adapter *qdev; + u32 intr; + u32 irq_mask; /* Mask of which rings the vector services. */ + u32 hooked; + u32 intr_en_mask; /* value/mask used to enable this intr */ + u32 intr_dis_mask; /* value/mask used to disable this intr */ + u32 intr_read_mask; /* value/mask used to read this intr */ + char name[IFNAMSIZ * 2]; + atomic_t irq_cnt; /* irq_cnt is used in single vector + * environment. It's incremented for each + * irq handler that is scheduled. When each + * handler finishes it decrements irq_cnt and + * enables interrupts if it's zero. */ + irq_handler_t handler; +}; + +/* adapter flags definitions. */ +enum { + QL_ADAPTER_UP = 0, /* Adapter has been brought up. 
*/
+ QL_LEGACY_ENABLED = 1,
+ QL_MSI_ENABLED = 2,
+ QL_MSIX_ENABLED = 3,
+ QL_DMA64 = 4,
+ QL_PROMISCUOUS = 5,
+ QL_ALLMULTI = 6,
+ QL_PORT_CFG = 7,
+ QL_CAM_RT_SET = 8,
+ QL_SELFTEST = 9,
+ QL_LB_LINK_UP = 10,
+ QL_FRC_COREDUMP = 11,
+ QL_EEH_FATAL = 12,
+ QL_ASIC_RECOVERY = 14, /* We are in asic recovery. */
+};
+
+/* link_status bit definitions */
+enum {
+ STS_LOOPBACK_MASK = 0x00000700,
+ STS_LOOPBACK_PCS = 0x00000100,
+ STS_LOOPBACK_HSS = 0x00000200,
+ STS_LOOPBACK_EXT = 0x00000300,
+ STS_PAUSE_MASK = 0x000000c0,
+ STS_PAUSE_STD = 0x00000040,
+ STS_PAUSE_PRI = 0x00000080,
+ STS_SPEED_MASK = 0x00000038,
+ STS_SPEED_100Mb = 0x00000000,
+ STS_SPEED_1Gb = 0x00000008,
+ STS_SPEED_10Gb = 0x00000010,
+ STS_LINK_TYPE_MASK = 0x00000007,
+ STS_LINK_TYPE_XFI = 0x00000001,
+ STS_LINK_TYPE_XAUI = 0x00000002,
+ STS_LINK_TYPE_XFI_BP = 0x00000003,
+ STS_LINK_TYPE_XAUI_BP = 0x00000004,
+ STS_LINK_TYPE_10GBASET = 0x00000005,
+};
+
+/* link_config bit definitions */
+enum {
+ CFG_JUMBO_FRAME_SIZE = 0x00010000,
+ CFG_PAUSE_MASK = 0x00000060,
+ CFG_PAUSE_STD = 0x00000020,
+ CFG_PAUSE_PRI = 0x00000040,
+ CFG_DCBX = 0x00000010,
+ CFG_LOOPBACK_MASK = 0x00000007,
+ CFG_LOOPBACK_PCS = 0x00000002,
+ CFG_LOOPBACK_HSS = 0x00000004,
+ CFG_LOOPBACK_EXT = 0x00000006,
+ CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
+};
+
+struct nic_operations {
+
+ int (*get_flash) (struct ql_adapter *);
+ int (*port_initialize) (struct ql_adapter *);
+};
+
+/*
+ * The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+struct ql_adapter {
+ struct ricb ricb;
+ unsigned long flags;
+ u32 wol;
+
+ struct nic_stats nic_stats;
+
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ /* PCI Configuration information for this device */
+ struct pci_dev *pdev;
+ struct net_device *ndev; /* Parent NET device */
+
+ /* Hardware information */
+ u32 chip_rev_id;
+ u32 fw_rev_id;
+ u32 func; /* PCI function for this adapter */
+ u32 alt_func; /* PCI function for alternate adapter */
+ u32 port; /* Port number for this adapter */
+
+ spinlock_t adapter_lock;
+ spinlock_t hw_lock;
+ spinlock_t stats_lock;
+
+ /* PCI Bus Relative Register Addresses */
+ void __iomem *reg_base;
+ void __iomem *doorbell_area;
+ u32 doorbell_area_size;
+
+ u32 msg_enable;
+
+ /* Page for Shadow Registers */
+ void *rx_ring_shadow_reg_area;
+ dma_addr_t rx_ring_shadow_reg_dma;
+ void *tx_ring_shadow_reg_area;
+ dma_addr_t tx_ring_shadow_reg_dma;
+
+ u32 mailbox_in;
+ u32 mailbox_out;
+ struct mbox_params idc_mbc;
+ struct mutex mpi_mutex;
+
+ int tx_ring_size;
+ int rx_ring_size;
+ u32 intr_count;
+ struct msix_entry *msi_x_entry;
+ struct intr_context intr_context[MAX_RX_RINGS];
+
+ int tx_ring_count; /* One per online CPU. */
+ u32 rss_ring_count; /* One per irq vector.
*/ + /* + * rx_ring_count = + * (CPU count * outbound completion rx_ring) + + * (irq_vector_cnt * inbound (RSS) completion rx_ring) + */ + int rx_ring_count; + int ring_mem_size; + void *ring_mem; + + struct rx_ring rx_ring[MAX_RX_RINGS]; + struct tx_ring tx_ring[MAX_TX_RINGS]; + unsigned int lbq_buf_order; + + int rx_csum; + u32 default_rx_queue; + + u16 rx_coalesce_usecs; /* cqicb->int_delay */ + u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */ + u16 tx_coalesce_usecs; /* cqicb->int_delay */ + u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */ + + u32 xg_sem_mask; + u32 port_link_up; + u32 port_init; + u32 link_status; + struct ql_mpi_coredump *mpi_coredump; + u32 core_is_dumped; + u32 link_config; + u32 led_config; + u32 max_frame_size; + + union flash_params flash; + + struct workqueue_struct *workqueue; + struct delayed_work asic_reset_work; + struct delayed_work mpi_reset_work; + struct delayed_work mpi_work; + struct delayed_work mpi_port_cfg_work; + struct delayed_work mpi_idc_work; + struct delayed_work mpi_core_to_log; + struct completion ide_completion; + const struct nic_operations *nic_ops; + u16 device_id; + struct timer_list timer; + atomic_t lb_count; + /* Keep local copy of current mac address. */ + char current_mac_addr[6]; +}; + +/* + * Typical Register accessor for memory mapped device. + */ +static inline u32 ql_read32(const struct ql_adapter *qdev, int reg) +{ + return readl(qdev->reg_base + reg); +} + +/* + * Typical Register accessor for memory mapped device. + */ +static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val) +{ + writel(val, qdev->reg_base + reg); +} + +/* + * Doorbell Registers: + * Doorbell registers are virtual registers in the PCI memory space. + * The space is allocated by the chip during PCI initialization. The + * device driver finds the doorbell address in BAR 3 in PCI config space. + * The registers are used to control outbound and inbound queues. For + * example, the producer index for an outbound queue. Each queue uses + * 1 4k chunk of memory. The lower half of the space is for outbound + * queues. The upper half is for inbound queues. + */ +static inline void ql_write_db_reg(u32 val, void __iomem *addr) +{ + writel(val, addr); + mmiowb(); +} + +/* + * Shadow Registers: + * Outbound queues have a consumer index that is maintained by the chip. + * Inbound queues have a producer index that is maintained by the chip. + * For lower overhead, these registers are "shadowed" to host memory + * which allows the device driver to track the queue progress without + * PCI reads. When an entry is placed on an inbound queue, the chip will + * update the relevant index register and then copy the value to the + * shadow register in host memory. 
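+ *
+ * As an illustrative sketch (ql_service_cq_entry() is a hypothetical
+ * stand-in for the real completion handlers), a service loop can then
+ * poll the queue entirely out of host memory, with no PCI read in the
+ * hot path:
+ *
+ *	while (rx_ring->cnsmr_idx !=
+ *	       ql_read_sh_reg(rx_ring->prod_idx_sh_reg))
+ *		ql_service_cq_entry(rx_ring);	(advances cnsmr_idx)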
+ */ +static inline u32 ql_read_sh_reg(__le32 *addr) +{ + u32 reg; + reg = le32_to_cpu(*addr); + rmb(); + return reg; +} + +extern char qlge_driver_name[]; +extern const char qlge_driver_version[]; +extern const struct ethtool_ops qlge_ethtool_ops; + +extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask); +extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask); +extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data); +extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, + u32 *value); +extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value); +extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, + u16 q_id); +void ql_queue_fw_error(struct ql_adapter *qdev); +void ql_mpi_work(struct work_struct *work); +void ql_mpi_reset_work(struct work_struct *work); +void ql_mpi_core_to_log(struct work_struct *work); +int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); +void ql_queue_asic_error(struct ql_adapter *qdev); +u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); +void ql_set_ethtool_ops(struct net_device *ndev); +int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); +void ql_mpi_idc_work(struct work_struct *work); +void ql_mpi_port_cfg_work(struct work_struct *work); +int ql_mb_get_fw_state(struct ql_adapter *qdev); +int ql_cam_route_initialize(struct ql_adapter *qdev); +int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); +int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data); +int ql_unpause_mpi_risc(struct ql_adapter *qdev); +int ql_pause_mpi_risc(struct ql_adapter *qdev); +int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); +int ql_soft_reset_mpi_risc(struct ql_adapter *qdev); +int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, + u32 ram_addr, int word_count); +int ql_core_dump(struct ql_adapter *qdev, + struct ql_mpi_coredump *mpi_coredump); +int ql_mb_about_fw(struct ql_adapter *qdev); +int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); +int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol); +int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config); +int ql_mb_get_led_cfg(struct ql_adapter *qdev); +void ql_link_on(struct ql_adapter *qdev); +void ql_link_off(struct ql_adapter *qdev); +int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); +int ql_mb_get_port_cfg(struct ql_adapter *qdev); +int ql_mb_set_port_cfg(struct ql_adapter *qdev); +int ql_wait_fifo_empty(struct ql_adapter *qdev); +void ql_get_dump(struct ql_adapter *qdev, void *buff); +void ql_gen_reg_dump(struct ql_adapter *qdev, + struct ql_reg_dump *mpi_coredump); +netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); +void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); +int ql_own_firmware(struct ql_adapter *qdev); +int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); + +/* #define QL_ALL_DUMP */ +/* #define QL_REG_DUMP */ +/* #define QL_DEV_DUMP */ +/* #define QL_CB_DUMP */ +/* #define QL_IB_DUMP */ +/* #define QL_OB_DUMP */ + +#ifdef QL_REG_DUMP +extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev); +extern void ql_dump_routing_entries(struct ql_adapter *qdev); +extern void ql_dump_regs(struct ql_adapter *qdev); +#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev) +#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev) +#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev) +#else +#define QL_DUMP_REGS(qdev) +#define 
QL_DUMP_ROUTE(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
+#endif
+
+#ifdef QL_STAT_DUMP
+extern void ql_dump_stat(struct ql_adapter *qdev);
+#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
+#else
+#define QL_DUMP_STAT(qdev)
+#endif
+
+#ifdef QL_DEV_DUMP
+extern void ql_dump_qdev(struct ql_adapter *qdev);
+#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
+#else
+#define QL_DUMP_QDEV(qdev)
+#endif
+
+#ifdef QL_CB_DUMP
+extern void ql_dump_wqicb(struct wqicb *wqicb);
+extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
+extern void ql_dump_ricb(struct ricb *ricb);
+extern void ql_dump_cqicb(struct cqicb *cqicb);
+extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
+extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
+#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
+#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
+#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
+#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
+ ql_dump_hw_cb(qdev, size, bit, q_id)
+#else
+#define QL_DUMP_RICB(ricb)
+#define QL_DUMP_WQICB(wqicb)
+#define QL_DUMP_TX_RING(tx_ring)
+#define QL_DUMP_CQICB(cqicb)
+#define QL_DUMP_RX_RING(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
+#endif
+
+#ifdef QL_OB_DUMP
+extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
+#else
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
+#endif
+
+#ifdef QL_IB_DUMP
+extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
+#else
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
+#endif
+
+#ifdef QL_ALL_DUMP
+extern void ql_dump_all(struct ql_adapter *qdev);
+#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
+#else
+#define QL_DUMP_ALL(qdev)
+#endif
+
+#endif /* _QLGE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
new file mode 100644
index 000000000000..fca804f36d61
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -0,0 +1,2044 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+
+#include "qlge.h"
+
+/* Read a NIC register from the alternate function. */
+static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
+ u32 reg)
+{
+ u32 register_to_read;
+ u32 reg_val;
+ unsigned int status = 0;
+
+ register_to_read = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
+ if (status != 0)
+ return 0xffffffff;
+
+ return reg_val;
+}
+
+/* Write a NIC register on the alternate function.
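+ * The address is formed exactly as in the read path above:
+ * MPI_NIC_REG_BLOCK | (alt_func << MPI_NIC_FUNCTION_SHIFT) | reg
+ * reaches the peer function's copy of the register through the MPI
+ * window. (Note that the MPI_NIC_READ encoding and the name
+ * register_to_read are reused here even though this is the write
+ * path.)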
*/ +static int ql_write_other_func_reg(struct ql_adapter *qdev, + u32 reg, u32 reg_val) +{ + u32 register_to_read; + int status = 0; + + register_to_read = MPI_NIC_REG_BLOCK + | MPI_NIC_READ + | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) + | reg; + status = ql_write_mpi_reg(qdev, register_to_read, reg_val); + + return status; +} + +static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg, + u32 bit, u32 err_bit) +{ + u32 temp; + int count = 10; + + while (count) { + temp = ql_read_other_func_reg(qdev, reg); + + /* check for errors */ + if (temp & err_bit) + return -1; + else if (temp & bit) + return 0; + mdelay(10); + count--; + } + return -1; +} + +static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg, + u32 *data) +{ + int status; + + /* wait for reg to come ready */ + status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, + XG_SERDES_ADDR_RDY, 0); + if (status) + goto exit; + + /* set up for reg read */ + ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R); + + /* wait for reg to come ready */ + status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, + XG_SERDES_ADDR_RDY, 0); + if (status) + goto exit; + + /* get the data */ + *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4)); +exit: + return status; +} + +/* Read out the SERDES registers */ +static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data) +{ + int status; + + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); + if (status) + goto exit; + + /* set up for reg read */ + ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R); + + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); + if (status) + goto exit; + + /* get the data */ + *data = ql_read32(qdev, XG_SERDES_DATA); +exit: + return status; +} + +static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr, + u32 *direct_ptr, u32 *indirect_ptr, + unsigned int direct_valid, unsigned int indirect_valid) +{ + unsigned int status; + + status = 1; + if (direct_valid) + status = ql_read_serdes_reg(qdev, addr, direct_ptr); + /* Dead fill any failures or invalids. */ + if (status) + *direct_ptr = 0xDEADBEEF; + + status = 1; + if (indirect_valid) + status = ql_read_other_func_serdes_reg( + qdev, addr, indirect_ptr); + /* Dead fill any failures or invalids. 
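+ * Dead filling with 0xDEADBEEF keeps every slot of the dump at its
+ * fixed offset, so a lane that is powered down or failed to read is
+ * still easy to spot when the dump is decoded.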
*/ + if (status) + *indirect_ptr = 0xDEADBEEF; +} + +static int ql_get_serdes_regs(struct ql_adapter *qdev, + struct ql_mpi_coredump *mpi_coredump) +{ + int status; + unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid; + unsigned int xaui_indirect_valid, i; + u32 *direct_ptr, temp; + u32 *indirect_ptr; + + xfi_direct_valid = xfi_indirect_valid = 0; + xaui_direct_valid = xaui_indirect_valid = 1; + + /* The XAUI needs to be read out per port */ + if (qdev->func & 1) { + /* We are NIC 2 */ + status = ql_read_other_func_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_indirect_valid = 0; + + status = ql_read_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_direct_valid = 0; + } else { + /* We are NIC 1 */ + status = ql_read_other_func_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_indirect_valid = 0; + + status = ql_read_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_direct_valid = 0; + } + + /* + * XFI register is shared so only need to read one + * functions and then check the bits. + */ + status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp); + if (status) + temp = 0; + + if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) == + XG_SERDES_ADDR_XFI1_PWR_UP) { + /* now see if i'm NIC 1 or NIC 2 */ + if (qdev->func & 1) + /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ + xfi_indirect_valid = 1; + else + xfi_direct_valid = 1; + } + if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) == + XG_SERDES_ADDR_XFI2_PWR_UP) { + /* now see if i'm NIC 1 or NIC 2 */ + if (qdev->func & 1) + /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ + xfi_direct_valid = 1; + else + xfi_indirect_valid = 1; + } + + /* Get XAUI_AN register block. */ + if (qdev->func & 1) { + /* Function 2 is direct */ + direct_ptr = mpi_coredump->serdes2_xaui_an; + indirect_ptr = mpi_coredump->serdes_xaui_an; + } else { + /* Function 1 is direct */ + direct_ptr = mpi_coredump->serdes_xaui_an; + indirect_ptr = mpi_coredump->serdes2_xaui_an; + } + + for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xaui_direct_valid, xaui_indirect_valid); + + /* Get XAUI_HSS_PCS register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xaui_hss_pcs; + indirect_ptr = + mpi_coredump->serdes_xaui_hss_pcs; + } else { + direct_ptr = + mpi_coredump->serdes_xaui_hss_pcs; + indirect_ptr = + mpi_coredump->serdes2_xaui_hss_pcs; + } + + for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xaui_direct_valid, xaui_indirect_valid); + + /* Get XAUI_XFI_AN register block. 
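+ * As with the XAUI blocks above, the function we are running on
+ * supplies the "direct" copy and its peer is read through the
+ * indirect path, so the destination pointers swap on the function's
+ * parity (qdev->func & 1).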
*/ + if (qdev->func & 1) { + direct_ptr = mpi_coredump->serdes2_xfi_an; + indirect_ptr = mpi_coredump->serdes_xfi_an; + } else { + direct_ptr = mpi_coredump->serdes_xfi_an; + indirect_ptr = mpi_coredump->serdes2_xfi_an; + } + + for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + /* Get XAUI_XFI_TRAIN register block. */ + if (qdev->func & 1) { + direct_ptr = mpi_coredump->serdes2_xfi_train; + indirect_ptr = + mpi_coredump->serdes_xfi_train; + } else { + direct_ptr = mpi_coredump->serdes_xfi_train; + indirect_ptr = + mpi_coredump->serdes2_xfi_train; + } + + for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + /* Get XAUI_XFI_HSS_PCS register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xfi_hss_pcs; + indirect_ptr = + mpi_coredump->serdes_xfi_hss_pcs; + } else { + direct_ptr = + mpi_coredump->serdes_xfi_hss_pcs; + indirect_ptr = + mpi_coredump->serdes2_xfi_hss_pcs; + } + + for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + /* Get XAUI_XFI_HSS_TX register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xfi_hss_tx; + indirect_ptr = + mpi_coredump->serdes_xfi_hss_tx; + } else { + direct_ptr = mpi_coredump->serdes_xfi_hss_tx; + indirect_ptr = + mpi_coredump->serdes2_xfi_hss_tx; + } + for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + /* Get XAUI_XFI_HSS_RX register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xfi_hss_rx; + indirect_ptr = + mpi_coredump->serdes_xfi_hss_rx; + } else { + direct_ptr = mpi_coredump->serdes_xfi_hss_rx; + indirect_ptr = + mpi_coredump->serdes2_xfi_hss_rx; + } + + for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + + /* Get XAUI_XFI_HSS_PLL register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xfi_hss_pll; + indirect_ptr = + mpi_coredump->serdes_xfi_hss_pll; + } else { + direct_ptr = + mpi_coredump->serdes_xfi_hss_pll; + indirect_ptr = + mpi_coredump->serdes2_xfi_hss_pll; + } + for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + return 0; +} + +static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg, + u32 *data) +{ + int status = 0; + + /* wait for reg to come ready */ + status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, + XGMAC_ADDR_RDY, XGMAC_ADDR_XME); + if (status) + goto exit; + + /* set up for reg read */ + ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R); + + /* wait for reg to come ready */ + status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, + XGMAC_ADDR_RDY, XGMAC_ADDR_XME); + if (status) + goto exit; + + /* get the data */ + *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4); +exit: + return status; +} + +/* Read the 400 xgmac control/statistics registers + * skipping unused locations. 
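+ * The other_function argument selects whether the local port's
+ * XGMAC block is read directly or the peer function's block is
+ * fetched through the indirect MPI window; ql_core_dump() below
+ * calls this once for each port.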
+ */
+static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
+ unsigned int other_function)
+{
+ int status = 0;
+ int i;
+
+ for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
+ /* We're reading 400 xgmac registers, but we filter out
+ * several locations that are non-responsive to reads.
+ */
+ if ((i == 0x00000114) ||
+ (i == 0x00000118) ||
+ (i == 0x0000013c) ||
+ (i == 0x00000140) ||
+ (i > 0x00000150 && i < 0x000001fc) ||
+ (i > 0x00000278 && i < 0x000002a0) ||
+ (i > 0x000002c0 && i < 0x000002cf) ||
+ (i > 0x000002dc && i < 0x000002f0) ||
+ (i > 0x000003c8 && i < 0x00000400) ||
+ (i > 0x00000400 && i < 0x00000410) ||
+ (i > 0x00000410 && i < 0x00000420) ||
+ (i > 0x00000420 && i < 0x00000430) ||
+ (i > 0x00000430 && i < 0x00000440) ||
+ (i > 0x00000440 && i < 0x00000450) ||
+ (i > 0x00000450 && i < 0x00000500) ||
+ (i > 0x0000054c && i < 0x00000568) ||
+ (i > 0x000005c8 && i < 0x00000600)) {
+ if (other_function)
+ status =
+ ql_read_other_func_xgmac_reg(qdev, i, buf);
+ else
+ status = ql_read_xgmac_reg(qdev, i, buf);
+
+ if (status)
+ *buf = 0xdeadbeef;
+ break;
+ }
+ }
+ return status;
+}
+
+static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
+{
+ int status = 0;
+ int i;
+
+ for (i = 0; i < 8; i++, buf++) {
+ ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
+ *buf = ql_read32(qdev, NIC_ETS);
+ }
+
+ for (i = 0; i < 2; i++, buf++) {
+ ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
+ *buf = ql_read32(qdev, CNA_ETS);
+ }
+
+ return status;
+}
+
+static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
+{
+ int i;
+
+ for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
+ ql_write32(qdev, INTR_EN,
+ qdev->intr_context[i].intr_read_mask);
+ *buf = ql_read32(qdev, INTR_EN);
+ }
+}
+
+static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
+{
+ int i, status;
+ u32 value[3];
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+
+ for (i = 0; i < 16; i++) {
+ status = ql_get_mac_addr_reg(qdev,
+ MAC_ADDR_TYPE_CAM_MAC, i, value);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register\n");
+ goto err;
+ }
+ *buf++ = value[0]; /* lower MAC address */
+ *buf++ = value[1]; /* upper MAC address */
+ *buf++ = value[2]; /* output */
+ }
+ for (i = 0; i < 32; i++) {
+ status = ql_get_mac_addr_reg(qdev,
+ MAC_ADDR_TYPE_MULTI_MAC, i, value);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register\n");
+ goto err;
+ }
+ *buf++ = value[0]; /* lower Mcast address */
+ *buf++ = value[1]; /* upper Mcast address */
+ }
+err:
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ return status;
+}
+
+static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
+{
+ int status;
+ u32 value, i;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ for (i = 0; i < 16; i++) {
+ status = ql_get_routing_reg(qdev, i, &value);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of routing index register\n");
+ goto err;
+ } else {
+ *buf++ = value;
+ }
+ }
+err:
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Read the MPI Processor shadow registers */
+static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 i;
+ int status;
+
+ for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
+ status = ql_write_mpi_reg(qdev, RISC_124,
+ (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
+ if (status)
+ goto end;
+ status = ql_read_mpi_reg(qdev, RISC_127, buf);
+ if (status)
+ goto end;
+ }
+end:
+
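+ /* RISC_124 above acts as the shadow-address select and
+ * RISC_127 as the data window it is read back through. */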
return status; +} + +/* Read the MPI Processor core registers */ +static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf, + u32 offset, u32 count) +{ + int i, status = 0; + for (i = 0; i < count; i++, buf++) { + status = ql_read_mpi_reg(qdev, offset + i, buf); + if (status) + return status; + } + return status; +} + +/* Read the ASIC probe dump */ +static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock, + u32 valid, u32 *buf) +{ + u32 module, mux_sel, probe, lo_val, hi_val; + + for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) { + if (!((valid >> module) & 1)) + continue; + for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) { + probe = clock + | PRB_MX_ADDR_ARE + | mux_sel + | (module << PRB_MX_ADDR_MOD_SEL_SHIFT); + ql_write32(qdev, PRB_MX_ADDR, probe); + lo_val = ql_read32(qdev, PRB_MX_DATA); + if (mux_sel == 0) { + *buf = probe; + buf++; + } + probe |= PRB_MX_ADDR_UP; + ql_write32(qdev, PRB_MX_ADDR, probe); + hi_val = ql_read32(qdev, PRB_MX_DATA); + *buf = lo_val; + buf++; + *buf = hi_val; + buf++; + } + } + return buf; +} + +static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf) +{ + /* First we have to enable the probe mux */ + ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN); + buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK, + PRB_MX_ADDR_VALID_SYS_MOD, buf); + buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK, + PRB_MX_ADDR_VALID_PCI_MOD, buf); + buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK, + PRB_MX_ADDR_VALID_XGM_MOD, buf); + buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK, + PRB_MX_ADDR_VALID_FC_MOD, buf); + return 0; + +} + +/* Read out the routing index registers */ +static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf) +{ + int status; + u32 type, index, index_max; + u32 result_index; + u32 result_data; + u32 val; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + + for (type = 0; type < 4; type++) { + if (type < 2) + index_max = 8; + else + index_max = 16; + for (index = 0; index < index_max; index++) { + val = RT_IDX_RS + | (type << RT_IDX_TYPE_SHIFT) + | (index << RT_IDX_IDX_SHIFT); + ql_write32(qdev, RT_IDX, val); + result_index = 0; + while ((result_index & RT_IDX_MR) == 0) + result_index = ql_read32(qdev, RT_IDX); + result_data = ql_read32(qdev, RT_DATA); + *buf = type; + buf++; + *buf = index; + buf++; + *buf = result_index; + buf++; + *buf = result_data; + buf++; + } + } + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +/* Read out the MAC protocol registers */ +static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) +{ + u32 result_index, result_data; + u32 type; + u32 index; + u32 offset; + u32 val; + u32 initial_val = MAC_ADDR_RS; + u32 max_index; + u32 max_offset; + + for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) { + switch (type) { + + case 0: /* CAM */ + initial_val |= MAC_ADDR_ADR; + max_index = MAC_ADDR_MAX_CAM_ENTRIES; + max_offset = MAC_ADDR_MAX_CAM_WCOUNT; + break; + case 1: /* Multicast MAC Address */ + max_index = MAC_ADDR_MAX_CAM_WCOUNT; + max_offset = MAC_ADDR_MAX_CAM_WCOUNT; + break; + case 2: /* VLAN filter mask */ + case 3: /* MC filter mask */ + max_index = MAC_ADDR_MAX_CAM_WCOUNT; + max_offset = MAC_ADDR_MAX_CAM_WCOUNT; + break; + case 4: /* FC MAC addresses */ + max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES; + max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT; + break; + case 5: /* Mgmt MAC addresses */ + max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES; + max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT; + 
break;
+ case 6: /* Mgmt VLAN addresses */
+ max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
+ break;
+ case 7: /* Mgmt IPv4 address */
+ max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
+ break;
+ case 8: /* Mgmt IPv6 address */
+ max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
+ break;
+ case 9: /* Mgmt TCP/UDP Dest port */
+ max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
+ break;
+ default:
+ pr_err("Bad type!!! 0x%08x\n", type);
+ max_index = 0;
+ max_offset = 0;
+ break;
+ }
+ for (index = 0; index < max_index; index++) {
+ for (offset = 0; offset < max_offset; offset++) {
+ val = initial_val
+ | (type << MAC_ADDR_TYPE_SHIFT)
+ | (index << MAC_ADDR_IDX_SHIFT)
+ | (offset);
+ ql_write32(qdev, MAC_ADDR_IDX, val);
+ result_index = 0;
+ while ((result_index & MAC_ADDR_MR) == 0) {
+ result_index = ql_read32(qdev,
+ MAC_ADDR_IDX);
+ }
+ result_data = ql_read32(qdev, MAC_ADDR_DATA);
+ *buf = result_index;
+ buf++;
+ *buf = result_data;
+ buf++;
+ }
+ }
+ }
+}
+
+static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 func_num, reg, reg_val;
+ int status;
+
+ for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS; func_num++) {
+ reg = MPI_NIC_REG_BLOCK
+ | (func_num << MPI_NIC_FUNCTION_SHIFT)
+ | (SEM / 4);
+ status = ql_read_mpi_reg(qdev, reg, &reg_val);
+ *buf = reg_val;
+ /* if the read failed then dead fill the element. */
+ if (status)
+ *buf = 0xdeadbeef;
+ buf++;
+ }
+}
+
+/* Create a coredump segment header */
+static void ql_build_coredump_seg_header(
+ struct mpi_coredump_segment_header *seg_hdr,
+ u32 seg_number, u32 seg_size, u8 *desc)
+{
+ memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
+ seg_hdr->cookie = MPI_COREDUMP_COOKIE;
+ seg_hdr->segNum = seg_number;
+ seg_hdr->segSize = seg_size;
+ memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+}
+
+/*
+ * This function should be called when a coredump / probedump
+ * is to be extracted from the HBA. It is assumed there is a
+ * qdev structure that contains the base address of the register
+ * space for this function as well as a coredump structure that
+ * will contain the dump.
+ */
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ int i;
+
+ if (!mpi_coredump) {
+ netif_err(qdev, drv, qdev->ndev, "No memory available\n");
+ return -ENOMEM;
+ }
+
+ /* Try to get the spinlock, but don't worry if
+ * it isn't available. If the firmware died it
+ * might be holding the sem.
+ */
+ ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+
+ status = ql_pause_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC pause.
Status = 0x%.08x\n", status); + goto err; + } + + /* Insert the global header */ + memset(&(mpi_coredump->mpi_global_header), 0, + sizeof(struct mpi_coredump_global_header)); + mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; + mpi_coredump->mpi_global_header.headerSize = + sizeof(struct mpi_coredump_global_header); + mpi_coredump->mpi_global_header.imageSize = + sizeof(struct ql_mpi_coredump); + memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", + sizeof(mpi_coredump->mpi_global_header.idString)); + + /* Get generic NIC reg dump */ + ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, + NIC1_CONTROL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic_regs), "NIC1 Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr, + NIC2_CONTROL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic2_regs), "NIC2 Registers"); + + /* Get XGMac registers. (Segment 18, Rev C. step 21) */ + ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr, + NIC1_XGMAC_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr, + NIC2_XGMAC_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers"); + + if (qdev->func & 1) { + /* Odd means our function is NIC 2 */ + for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) + mpi_coredump->nic2_regs[i] = + ql_read32(qdev, i * sizeof(u32)); + + for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) + mpi_coredump->nic_regs[i] = + ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); + + ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0); + ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1); + } else { + /* Even means our function is NIC 1 */ + for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) + mpi_coredump->nic_regs[i] = + ql_read32(qdev, i * sizeof(u32)); + for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) + mpi_coredump->nic2_regs[i] = + ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); + + ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0); + ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1); + } + + /* Rev C. Step 20a */ + ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr, + XAUI_AN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xaui_an), + "XAUI AN Registers"); + + /* Rev C. 
Step 20b */ + ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr, + XAUI_HSS_PCS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xaui_hss_pcs), + "XAUI HSS PCS Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_an), + "XFI AN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr, + XFI_TRAIN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_train), + "XFI TRAIN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr, + XFI_HSS_PCS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_hss_pcs), + "XFI HSS PCS Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr, + XFI_HSS_TX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_hss_tx), + "XFI HSS TX Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr, + XFI_HSS_RX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_hss_rx), + "XFI HSS RX Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr, + XFI_HSS_PLL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_hss_pll), + "XFI HSS PLL Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr, + XAUI2_AN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xaui_an), + "XAUI2 AN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr, + XAUI2_HSS_PCS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xaui_hss_pcs), + "XAUI2 HSS PCS Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr, + XFI2_AN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_an), + "XFI2 AN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr, + XFI2_TRAIN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_train), + "XFI2 TRAIN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr, + XFI2_HSS_PCS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_hss_pcs), + "XFI2 HSS PCS Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr, + XFI2_HSS_TX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_hss_tx), + "XFI2 HSS TX Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr, + XFI2_HSS_RX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_hss_rx), + "XFI2 HSS RX Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr, + XFI2_HSS_PLL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_hss_pll), + "XFI2 HSS PLL Registers"); + + status = ql_get_serdes_regs(qdev, mpi_coredump); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed Dump of Serdes Registers. 
Status = 0x%.08x\n", + status); + goto err; + } + + ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr, + CORE_SEG_NUM, + sizeof(mpi_coredump->core_regs_seg_hdr) + + sizeof(mpi_coredump->mpi_core_regs) + + sizeof(mpi_coredump->mpi_core_sh_regs), + "Core Registers"); + + /* Get the MPI Core Registers */ + status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0], + MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT); + if (status) + goto err; + /* Get the 16 MPI shadow registers */ + status = ql_get_mpi_shadow_regs(qdev, + &mpi_coredump->mpi_core_sh_regs[0]); + if (status) + goto err; + + /* Get the Test Logic Registers */ + ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr, + TEST_LOGIC_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->test_logic_regs), + "Test Logic Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0], + TEST_REGS_ADDR, TEST_REGS_CNT); + if (status) + goto err; + + /* Get the RMII Registers */ + ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr, + RMII_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->rmii_regs), + "RMII Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0], + RMII_REGS_ADDR, RMII_REGS_CNT); + if (status) + goto err; + + /* Get the FCMAC1 Registers */ + ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr, + FCMAC1_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->fcmac1_regs), + "FCMAC1 Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0], + FCMAC1_REGS_ADDR, FCMAC_REGS_CNT); + if (status) + goto err; + + /* Get the FCMAC2 Registers */ + + ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr, + FCMAC2_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->fcmac2_regs), + "FCMAC2 Registers"); + + status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0], + FCMAC2_REGS_ADDR, FCMAC_REGS_CNT); + if (status) + goto err; + + /* Get the FC1 MBX Registers */ + ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr, + FC1_MBOX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->fc1_mbx_regs), + "FC1 MBox Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0], + FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT); + if (status) + goto err; + + /* Get the IDE Registers */ + ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr, + IDE_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->ide_regs), + "IDE Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0], + IDE_REGS_ADDR, IDE_REGS_CNT); + if (status) + goto err; + + /* Get the NIC1 MBX Registers */ + ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr, + NIC1_MBOX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic1_mbx_regs), + "NIC1 MBox Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0], + NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); + if (status) + goto err; + + /* Get the SMBus Registers */ + ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr, + SMBUS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->smbus_regs), + "SMBus Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0], + SMBUS_REGS_ADDR, SMBUS_REGS_CNT); + if (status) + goto err; + + /* Get the FC2 MBX Registers */ + ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr, + FC2_MBOX_SEG_NUM, + 
sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->fc2_mbx_regs), + "FC2 MBox Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0], + FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT); + if (status) + goto err; + + /* Get the NIC2 MBX Registers */ + ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr, + NIC2_MBOX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic2_mbx_regs), + "NIC2 MBox Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0], + NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); + if (status) + goto err; + + /* Get the I2C Registers */ + ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr, + I2C_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->i2c_regs), + "I2C Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0], + I2C_REGS_ADDR, I2C_REGS_CNT); + if (status) + goto err; + + /* Get the MEMC Registers */ + ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr, + MEMC_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->memc_regs), + "MEMC Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0], + MEMC_REGS_ADDR, MEMC_REGS_CNT); + if (status) + goto err; + + /* Get the PBus Registers */ + ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr, + PBUS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->pbus_regs), + "PBUS Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0], + PBUS_REGS_ADDR, PBUS_REGS_CNT); + if (status) + goto err; + + /* Get the MDE Registers */ + ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr, + MDE_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->mde_regs), + "MDE Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0], + MDE_REGS_ADDR, MDE_REGS_CNT); + if (status) + goto err; + + ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, + MISC_NIC_INFO_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->misc_nic_info), + "MISC NIC INFO"); + mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; + mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; + mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; + mpi_coredump->misc_nic_info.function = qdev->func; + + /* Segment 31 */ + /* Get indexed register values. */ + ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, + INTR_STATES_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->intr_states), + "INTR States"); + ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); + + ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, + CAM_ENTRIES_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->cam_entries), + "CAM Entries"); + status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); + if (status) + goto err; + + ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, + ROUTING_WORDS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic_routing_words), + "Routing Words"); + status = ql_get_routing_entries(qdev, + &mpi_coredump->nic_routing_words[0]); + if (status) + goto err; + + /* Segment 34 (Rev C. 
step 23) */ + ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, + ETS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->ets), + "ETS Registers"); + status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); + if (status) + goto err; + + ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr, + PROBE_DUMP_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->probe_dump), + "Probe Dump"); + ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]); + + ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr, + ROUTING_INDEX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->routing_regs), + "Routing Regs"); + status = ql_get_routing_index_registers(qdev, + &mpi_coredump->routing_regs[0]); + if (status) + goto err; + + ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr, + MAC_PROTOCOL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->mac_prot_regs), + "MAC Prot Regs"); + ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]); + + /* Get the semaphore registers for all 5 functions */ + ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr, + SEM_REGS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->sem_regs), "Sem Registers"); + + ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]); + + /* Prevent the mpi restarting while we dump the memory.*/ + ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC); + + /* clear the pause */ + status = ql_unpause_mpi_risc(qdev); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed RISC unpause. Status = 0x%.08x\n", status); + goto err; + } + + /* Reset the RISC so we can dump RAM */ + status = ql_hard_reset_mpi_risc(qdev); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed RISC reset. Status = 0x%.08x\n", status); + goto err; + } + + ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr, + WCS_RAM_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->code_ram), + "WCS RAM"); + status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0], + CODE_RAM_ADDR, CODE_RAM_CNT); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed Dump of CODE RAM. Status = 0x%.08x\n", + status); + goto err; + } + + /* Insert the segment header */ + ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr, + MEMC_RAM_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->memc_ram), + "MEMC RAM"); + status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0], + MEMC_RAM_ADDR, MEMC_RAM_CNT); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed Dump of MEMC RAM. 
Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+err:
+ ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+ return status;
+
+}
+
+static void ql_get_core_dump(struct ql_adapter *qdev)
+{
+ if (!ql_own_firmware(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+ return;
+ }
+
+ if (!netif_running(qdev->ndev)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Force Coredump can only be done from an interface that is up\n");
+ return;
+ }
+ ql_queue_fw_error(qdev);
+}
+
+void ql_gen_reg_dump(struct ql_adapter *qdev,
+ struct ql_reg_dump *mpi_coredump)
+{
+ int i, status;
+
+
+ memset(&(mpi_coredump->mpi_global_header), 0,
+ sizeof(struct mpi_coredump_global_header));
+ mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+ mpi_coredump->mpi_global_header.headerSize =
+ sizeof(struct mpi_coredump_global_header);
+ mpi_coredump->mpi_global_header.imageSize =
+ sizeof(struct ql_reg_dump);
+ memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.idString));
+
+
+ /* segment 16 */
+ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+ MISC_NIC_INFO_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->misc_nic_info),
+ "MISC NIC INFO");
+ mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+ mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+ mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+ mpi_coredump->misc_nic_info.function = qdev->func;
+
+ /* Segment 16, Rev C. Step 18 */
+ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic_regs),
+ "NIC Registers");
+ /* Get generic reg dump */
+ for (i = 0; i < 64; i++)
+ mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
+
+ /* Segment 31 */
+ /* Get indexed register values. */
+ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->intr_states),
+ "INTR States");
+ ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+ CAM_ENTRIES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->cam_entries),
+ "CAM Entries");
+ status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+ if (status)
+ return;
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+ ROUTING_WORDS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic_routing_words),
+ "Routing Words");
+ status = ql_get_routing_entries(qdev,
+ &mpi_coredump->nic_routing_words[0]);
+ if (status)
+ return;
+
+ /* Segment 34 (Rev C. step 23) */
+ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->ets),
+ "ETS Registers");
+ status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+ if (status)
+ return;
+}
+
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+ /*
+ * If force coredump is set, the dump has already been taken
+ * and is stored in our internal buffer, so just start the
+ * spool of it to the log file and take a snapshot of the
+ * general regs to the user's buffer; otherwise take a
+ * complete dump to the user's buffer.
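+ * (With QL_FRC_COREDUMP clear, ql_core_dump() below fills the
+ * caller's buffer and the RISC is then soft-reset; with it set,
+ * ql_gen_reg_dump() snapshots the general registers into the
+ * caller's buffer and ql_get_core_dump() queues the spooled
+ * internal dump to the log.)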
+ */ + + if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) { + if (!ql_core_dump(qdev, buff)) + ql_soft_reset_mpi_risc(qdev); + else + netif_err(qdev, drv, qdev->ndev, "coredump failed!\n"); + } else { + ql_gen_reg_dump(qdev, buff); + ql_get_core_dump(qdev); + } +} + +/* Coredump to messages log file using separate worker thread */ +void ql_mpi_core_to_log(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, mpi_core_to_log.work); + u32 *tmp, count; + int i; + + count = sizeof(struct ql_mpi_coredump) / sizeof(u32); + tmp = (u32 *)qdev->mpi_coredump; + netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, + "Core is dumping to log file!\n"); + + for (i = 0; i < count; i += 8) { + pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x " + "%.08x %.08x %.08x\n", i, + tmp[i + 0], + tmp[i + 1], + tmp[i + 2], + tmp[i + 3], + tmp[i + 4], + tmp[i + 5], + tmp[i + 6], + tmp[i + 7]); + msleep(5); + } +} + +#ifdef QL_REG_DUMP +static void ql_dump_intr_states(struct ql_adapter *qdev) +{ + int i; + u32 value; + for (i = 0; i < qdev->intr_count; i++) { + ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); + value = ql_read32(qdev, INTR_EN); + pr_err("%s: Interrupt %d is %s\n", + qdev->ndev->name, i, + (value & INTR_EN_EN ? "enabled" : "disabled")); + } +} + +#define DUMP_XGMAC(qdev, reg) \ +do { \ + u32 data; \ + ql_read_xgmac_reg(qdev, reg, &data); \ + pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \ +} while (0) + +void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) +{ + if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { + pr_err("%s: Couldn't get xgmac sem\n", __func__); + return; + } + DUMP_XGMAC(qdev, PAUSE_SRC_LO); + DUMP_XGMAC(qdev, PAUSE_SRC_HI); + DUMP_XGMAC(qdev, GLOBAL_CFG); + DUMP_XGMAC(qdev, TX_CFG); + DUMP_XGMAC(qdev, RX_CFG); + DUMP_XGMAC(qdev, FLOW_CTL); + DUMP_XGMAC(qdev, PAUSE_OPCODE); + DUMP_XGMAC(qdev, PAUSE_TIMER); + DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO); + DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI); + DUMP_XGMAC(qdev, MAC_TX_PARAMS); + DUMP_XGMAC(qdev, MAC_RX_PARAMS); + DUMP_XGMAC(qdev, MAC_SYS_INT); + DUMP_XGMAC(qdev, MAC_SYS_INT_MASK); + DUMP_XGMAC(qdev, MAC_MGMT_INT); + DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK); + DUMP_XGMAC(qdev, EXT_ARB_MODE); + ql_sem_unlock(qdev, qdev->xg_sem_mask); +} + +static void ql_dump_ets_regs(struct ql_adapter *qdev) +{ +} + +static void ql_dump_cam_entries(struct ql_adapter *qdev) +{ + int i; + u32 value[3]; + + i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (i) + return; + for (i = 0; i < 4; i++) { + if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { + pr_err("%s: Failed read of mac index register\n", + __func__); + return; + } else { + if (value[0]) + pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n", + qdev->ndev->name, i, value[1], value[0], + value[2]); + } + } + for (i = 0; i < 32; i++) { + if (ql_get_mac_addr_reg + (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { + pr_err("%s: Failed read of mac index register\n", + __func__); + return; + } else { + if (value[0]) + pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n", + qdev->ndev->name, i, value[1], value[0]); + } + } + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); +} + +void ql_dump_routing_entries(struct ql_adapter *qdev) +{ + int i; + u32 value; + i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (i) + return; + for (i = 0; i < 16; i++) { + value = 0; + if (ql_get_routing_reg(qdev, i, &value)) { + pr_err("%s: Failed read of routing index register\n", + __func__); + return; + } else { + if (value) + 
pr_err("%s: Routing Mask %d = 0x%.08x\n", + qdev->ndev->name, i, value); + } + } + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); +} + +#define DUMP_REG(qdev, reg) \ + pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg)) + +void ql_dump_regs(struct ql_adapter *qdev) +{ + pr_err("reg dump for function #%d\n", qdev->func); + DUMP_REG(qdev, SYS); + DUMP_REG(qdev, RST_FO); + DUMP_REG(qdev, FSC); + DUMP_REG(qdev, CSR); + DUMP_REG(qdev, ICB_RID); + DUMP_REG(qdev, ICB_L); + DUMP_REG(qdev, ICB_H); + DUMP_REG(qdev, CFG); + DUMP_REG(qdev, BIOS_ADDR); + DUMP_REG(qdev, STS); + DUMP_REG(qdev, INTR_EN); + DUMP_REG(qdev, INTR_MASK); + DUMP_REG(qdev, ISR1); + DUMP_REG(qdev, ISR2); + DUMP_REG(qdev, ISR3); + DUMP_REG(qdev, ISR4); + DUMP_REG(qdev, REV_ID); + DUMP_REG(qdev, FRC_ECC_ERR); + DUMP_REG(qdev, ERR_STS); + DUMP_REG(qdev, RAM_DBG_ADDR); + DUMP_REG(qdev, RAM_DBG_DATA); + DUMP_REG(qdev, ECC_ERR_CNT); + DUMP_REG(qdev, SEM); + DUMP_REG(qdev, GPIO_1); + DUMP_REG(qdev, GPIO_2); + DUMP_REG(qdev, GPIO_3); + DUMP_REG(qdev, XGMAC_ADDR); + DUMP_REG(qdev, XGMAC_DATA); + DUMP_REG(qdev, NIC_ETS); + DUMP_REG(qdev, CNA_ETS); + DUMP_REG(qdev, FLASH_ADDR); + DUMP_REG(qdev, FLASH_DATA); + DUMP_REG(qdev, CQ_STOP); + DUMP_REG(qdev, PAGE_TBL_RID); + DUMP_REG(qdev, WQ_PAGE_TBL_LO); + DUMP_REG(qdev, WQ_PAGE_TBL_HI); + DUMP_REG(qdev, CQ_PAGE_TBL_LO); + DUMP_REG(qdev, CQ_PAGE_TBL_HI); + DUMP_REG(qdev, COS_DFLT_CQ1); + DUMP_REG(qdev, COS_DFLT_CQ2); + DUMP_REG(qdev, SPLT_HDR); + DUMP_REG(qdev, FC_PAUSE_THRES); + DUMP_REG(qdev, NIC_PAUSE_THRES); + DUMP_REG(qdev, FC_ETHERTYPE); + DUMP_REG(qdev, FC_RCV_CFG); + DUMP_REG(qdev, NIC_RCV_CFG); + DUMP_REG(qdev, FC_COS_TAGS); + DUMP_REG(qdev, NIC_COS_TAGS); + DUMP_REG(qdev, MGMT_RCV_CFG); + DUMP_REG(qdev, XG_SERDES_ADDR); + DUMP_REG(qdev, XG_SERDES_DATA); + DUMP_REG(qdev, PRB_MX_ADDR); + DUMP_REG(qdev, PRB_MX_DATA); + ql_dump_intr_states(qdev); + ql_dump_xgmac_control_regs(qdev); + ql_dump_ets_regs(qdev); + ql_dump_cam_entries(qdev); + ql_dump_routing_entries(qdev); +} +#endif + +#ifdef QL_STAT_DUMP + +#define DUMP_STAT(qdev, stat) \ + pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat) + +void ql_dump_stat(struct ql_adapter *qdev) +{ + pr_err("%s: Enter\n", __func__); + DUMP_STAT(qdev, tx_pkts); + DUMP_STAT(qdev, tx_bytes); + DUMP_STAT(qdev, tx_mcast_pkts); + DUMP_STAT(qdev, tx_bcast_pkts); + DUMP_STAT(qdev, tx_ucast_pkts); + DUMP_STAT(qdev, tx_ctl_pkts); + DUMP_STAT(qdev, tx_pause_pkts); + DUMP_STAT(qdev, tx_64_pkt); + DUMP_STAT(qdev, tx_65_to_127_pkt); + DUMP_STAT(qdev, tx_128_to_255_pkt); + DUMP_STAT(qdev, tx_256_511_pkt); + DUMP_STAT(qdev, tx_512_to_1023_pkt); + DUMP_STAT(qdev, tx_1024_to_1518_pkt); + DUMP_STAT(qdev, tx_1519_to_max_pkt); + DUMP_STAT(qdev, tx_undersize_pkt); + DUMP_STAT(qdev, tx_oversize_pkt); + DUMP_STAT(qdev, rx_bytes); + DUMP_STAT(qdev, rx_bytes_ok); + DUMP_STAT(qdev, rx_pkts); + DUMP_STAT(qdev, rx_pkts_ok); + DUMP_STAT(qdev, rx_bcast_pkts); + DUMP_STAT(qdev, rx_mcast_pkts); + DUMP_STAT(qdev, rx_ucast_pkts); + DUMP_STAT(qdev, rx_undersize_pkts); + DUMP_STAT(qdev, rx_oversize_pkts); + DUMP_STAT(qdev, rx_jabber_pkts); + DUMP_STAT(qdev, rx_undersize_fcerr_pkts); + DUMP_STAT(qdev, rx_drop_events); + DUMP_STAT(qdev, rx_fcerr_pkts); + DUMP_STAT(qdev, rx_align_err); + DUMP_STAT(qdev, rx_symbol_err); + DUMP_STAT(qdev, rx_mac_err); + DUMP_STAT(qdev, rx_ctl_pkts); + DUMP_STAT(qdev, rx_pause_pkts); + DUMP_STAT(qdev, rx_64_pkts); + DUMP_STAT(qdev, rx_65_to_127_pkts); + DUMP_STAT(qdev, rx_128_255_pkts); + DUMP_STAT(qdev, rx_256_511_pkts); + DUMP_STAT(qdev, 
rx_512_to_1023_pkts); + DUMP_STAT(qdev, rx_1024_to_1518_pkts); + DUMP_STAT(qdev, rx_1519_to_max_pkts); + DUMP_STAT(qdev, rx_len_err_pkts); +}; +#endif + +#ifdef QL_DEV_DUMP + +#define DUMP_QDEV_FIELD(qdev, type, field) \ + pr_err("qdev->%-24s = " type "\n", #field, qdev->field) +#define DUMP_QDEV_DMA_FIELD(qdev, field) \ + pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field) +#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \ + pr_err("%s[%d].%s = " type "\n", \ + #array, index, #field, qdev->array[index].field); +void ql_dump_qdev(struct ql_adapter *qdev) +{ + int i; + DUMP_QDEV_FIELD(qdev, "%lx", flags); + DUMP_QDEV_FIELD(qdev, "%p", vlgrp); + DUMP_QDEV_FIELD(qdev, "%p", pdev); + DUMP_QDEV_FIELD(qdev, "%p", ndev); + DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id); + DUMP_QDEV_FIELD(qdev, "%p", reg_base); + DUMP_QDEV_FIELD(qdev, "%p", doorbell_area); + DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size); + DUMP_QDEV_FIELD(qdev, "%x", msg_enable); + DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area); + DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma); + DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area); + DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma); + DUMP_QDEV_FIELD(qdev, "%d", intr_count); + if (qdev->msi_x_entry) + for (i = 0; i < qdev->intr_count; i++) { + DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector); + DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry); + } + for (i = 0; i < qdev->intr_count; i++) { + DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev); + DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr); + DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked); + DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask); + DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask); + DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask); + } + DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count); + DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count); + DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size); + DUMP_QDEV_FIELD(qdev, "%p", ring_mem); + DUMP_QDEV_FIELD(qdev, "%d", intr_count); + DUMP_QDEV_FIELD(qdev, "%p", tx_ring); + DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count); + DUMP_QDEV_FIELD(qdev, "%p", rx_ring); + DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue); + DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask); + DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up); + DUMP_QDEV_FIELD(qdev, "0x%08x", port_init); +} +#endif + +#ifdef QL_CB_DUMP +void ql_dump_wqicb(struct wqicb *wqicb) +{ + pr_err("Dumping wqicb stuff...\n"); + pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len)); + pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags)); + pr_err("wqicb->cq_id_rss = %d\n", + le16_to_cpu(wqicb->cq_id_rss)); + pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid)); + pr_err("wqicb->wq_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(wqicb->addr)); + pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr)); +} + +void ql_dump_tx_ring(struct tx_ring *tx_ring) +{ + if (tx_ring == NULL) + return; + pr_err("===================== Dumping tx_ring %d ===============\n", + tx_ring->wq_id); + pr_err("tx_ring->base = %p\n", tx_ring->wq_base); + pr_err("tx_ring->base_dma = 0x%llx\n", + (unsigned long long) tx_ring->wq_base_dma); + pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n", + tx_ring->cnsmr_idx_sh_reg, + tx_ring->cnsmr_idx_sh_reg + ? 
ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0); + pr_err("tx_ring->size = %d\n", tx_ring->wq_size); + pr_err("tx_ring->len = %d\n", tx_ring->wq_len); + pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg); + pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg); + pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx); + pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id); + pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id); + pr_err("tx_ring->q = %p\n", tx_ring->q); + pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count)); +} + +void ql_dump_ricb(struct ricb *ricb) +{ + int i; + pr_err("===================== Dumping ricb ===============\n"); + pr_err("Dumping ricb stuff...\n"); + + pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f); + pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n", + ricb->base_cq & RSS_L4K ? "RSS_L4K " : "", + ricb->flags & RSS_L6K ? "RSS_L6K " : "", + ricb->flags & RSS_LI ? "RSS_LI " : "", + ricb->flags & RSS_LB ? "RSS_LB " : "", + ricb->flags & RSS_LM ? "RSS_LM " : "", + ricb->flags & RSS_RI4 ? "RSS_RI4 " : "", + ricb->flags & RSS_RT4 ? "RSS_RT4 " : "", + ricb->flags & RSS_RI6 ? "RSS_RI6 " : "", + ricb->flags & RSS_RT6 ? "RSS_RT6 " : ""); + pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask)); + for (i = 0; i < 16; i++) + pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i, + le32_to_cpu(ricb->hash_cq_id[i])); + for (i = 0; i < 10; i++) + pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i, + le32_to_cpu(ricb->ipv6_hash_key[i])); + for (i = 0; i < 4; i++) + pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i, + le32_to_cpu(ricb->ipv4_hash_key[i])); +} + +void ql_dump_cqicb(struct cqicb *cqicb) +{ + pr_err("Dumping cqicb stuff...\n"); + + pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect); + pr_err("cqicb->flags = %x\n", cqicb->flags); + pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len)); + pr_err("cqicb->addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(cqicb->addr)); + pr_err("cqicb->prod_idx_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr)); + pr_err("cqicb->pkt_delay = 0x%.04x\n", + le16_to_cpu(cqicb->pkt_delay)); + pr_err("cqicb->irq_delay = 0x%.04x\n", + le16_to_cpu(cqicb->irq_delay)); + pr_err("cqicb->lbq_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(cqicb->lbq_addr)); + pr_err("cqicb->lbq_buf_size = 0x%.04x\n", + le16_to_cpu(cqicb->lbq_buf_size)); + pr_err("cqicb->lbq_len = 0x%.04x\n", + le16_to_cpu(cqicb->lbq_len)); + pr_err("cqicb->sbq_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(cqicb->sbq_addr)); + pr_err("cqicb->sbq_buf_size = 0x%.04x\n", + le16_to_cpu(cqicb->sbq_buf_size)); + pr_err("cqicb->sbq_len = 0x%.04x\n", + le16_to_cpu(cqicb->sbq_len)); +} + +void ql_dump_rx_ring(struct rx_ring *rx_ring) +{ + if (rx_ring == NULL) + return; + pr_err("===================== Dumping rx_ring %d ===============\n", + rx_ring->cq_id); + pr_err("Dumping rx_ring %d, type = %s%s%s\n", + rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", + rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", + rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : ""); + pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb); + pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base); + pr_err("rx_ring->cq_base_dma = %llx\n", + (unsigned long long) rx_ring->cq_base_dma); + pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size); + pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len); + pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n", + rx_ring->prod_idx_sh_reg, + rx_ring->prod_idx_sh_reg + ? 
ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); + pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n", + (unsigned long long) rx_ring->prod_idx_sh_reg_dma); + pr_err("rx_ring->cnsmr_idx_db_reg = %p\n", + rx_ring->cnsmr_idx_db_reg); + pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx); + pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry); + pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg); + + pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base); + pr_err("rx_ring->lbq_base_dma = %llx\n", + (unsigned long long) rx_ring->lbq_base_dma); + pr_err("rx_ring->lbq_base_indirect = %p\n", + rx_ring->lbq_base_indirect); + pr_err("rx_ring->lbq_base_indirect_dma = %llx\n", + (unsigned long long) rx_ring->lbq_base_indirect_dma); + pr_err("rx_ring->lbq = %p\n", rx_ring->lbq); + pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len); + pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size); + pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n", + rx_ring->lbq_prod_idx_db_reg); + pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx); + pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx); + pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); + pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); + pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size); + + pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base); + pr_err("rx_ring->sbq_base_dma = %llx\n", + (unsigned long long) rx_ring->sbq_base_dma); + pr_err("rx_ring->sbq_base_indirect = %p\n", + rx_ring->sbq_base_indirect); + pr_err("rx_ring->sbq_base_indirect_dma = %llx\n", + (unsigned long long) rx_ring->sbq_base_indirect_dma); + pr_err("rx_ring->sbq = %p\n", rx_ring->sbq); + pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len); + pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size); + pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n", + rx_ring->sbq_prod_idx_db_reg); + pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx); + pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx); + pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx); + pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt); + pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size); + pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); + pr_err("rx_ring->irq = %d\n", rx_ring->irq); + pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); + pr_err("rx_ring->qdev = %p\n", rx_ring->qdev); +} + +void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) +{ + void *ptr; + + pr_err("%s: Enter\n", __func__); + + ptr = kmalloc(size, GFP_ATOMIC); + if (ptr == NULL) { + pr_err("%s: Couldn't allocate a buffer\n", __func__); + return; + } + + if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { + pr_err("%s: Failed to upload control block!\n", __func__); + goto fail_it; + } + switch (bit) { + case CFG_DRQ: + ql_dump_wqicb((struct wqicb *)ptr); + break; + case CFG_DCQ: + ql_dump_cqicb((struct cqicb *)ptr); + break; + case CFG_DR: + ql_dump_ricb((struct ricb *)ptr); + break; + default: + pr_err("%s: Invalid bit value = %x\n", __func__, bit); + break; + } +fail_it: + kfree(ptr); +} +#endif + +#ifdef QL_OB_DUMP +void ql_dump_tx_desc(struct tx_buf_desc *tbd) +{ + pr_err("tbd->addr = 0x%llx\n", + le64_to_cpu((u64) tbd->addr)); + pr_err("tbd->len = %d\n", + le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); + pr_err("tbd->flags = %s %s\n", + tbd->len & TX_DESC_C ? "C" : ".", + tbd->len & TX_DESC_E ? 
"E" : "."); + tbd++; + pr_err("tbd->addr = 0x%llx\n", + le64_to_cpu((u64) tbd->addr)); + pr_err("tbd->len = %d\n", + le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); + pr_err("tbd->flags = %s %s\n", + tbd->len & TX_DESC_C ? "C" : ".", + tbd->len & TX_DESC_E ? "E" : "."); + tbd++; + pr_err("tbd->addr = 0x%llx\n", + le64_to_cpu((u64) tbd->addr)); + pr_err("tbd->len = %d\n", + le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); + pr_err("tbd->flags = %s %s\n", + tbd->len & TX_DESC_C ? "C" : ".", + tbd->len & TX_DESC_E ? "E" : "."); + +} + +void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) +{ + struct ob_mac_tso_iocb_req *ob_mac_tso_iocb = + (struct ob_mac_tso_iocb_req *)ob_mac_iocb; + struct tx_buf_desc *tbd; + u16 frame_len; + + pr_err("%s\n", __func__); + pr_err("opcode = %s\n", + (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO"); + pr_err("flags1 = %s %s %s %s %s\n", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : ""); + pr_err("flags2 = %s %s %s\n", + ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", + ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", + ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); + pr_err("flags3 = %s %s %s\n", + ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", + ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", + ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); + pr_err("tid = %x\n", ob_mac_iocb->tid); + pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx); + pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci); + if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) { + pr_err("frame_len = %d\n", + le32_to_cpu(ob_mac_tso_iocb->frame_len)); + pr_err("mss = %d\n", + le16_to_cpu(ob_mac_tso_iocb->mss)); + pr_err("prot_hdr_len = %d\n", + le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len)); + pr_err("hdr_offset = 0x%.04x\n", + le16_to_cpu(ob_mac_tso_iocb->net_trans_offset)); + frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len); + } else { + pr_err("frame_len = %d\n", + le16_to_cpu(ob_mac_iocb->frame_len)); + frame_len = le16_to_cpu(ob_mac_iocb->frame_len); + } + tbd = &ob_mac_iocb->tbd[0]; + ql_dump_tx_desc(tbd); +} + +void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) +{ + pr_err("%s\n", __func__); + pr_err("opcode = %d\n", ob_mac_rsp->opcode); + pr_err("flags = %s %s %s %s %s %s %s\n", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".", + ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : "."); + pr_err("tid = %x\n", ob_mac_rsp->tid); +} +#endif + +#ifdef QL_IB_DUMP +void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) +{ + pr_err("%s\n", __func__); + pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode); + pr_err("flags1 = %s%s%s%s%s%s\n", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? 
"B " : ""); + + if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) + pr_err("%s%s%s Multicast\n", + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); + + pr_err("flags2 = %s%s%s%s%s\n", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : ""); + + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) + pr_err("%s%s%s%s%s error\n", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : ""); + + pr_err("flags3 = %s%s\n", + ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "", + ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : ""); + + if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) + pr_err("RSS flags = %s%s%s%s\n", + ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == + IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "", + ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == + IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "", + ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == + IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "", + ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == + IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : ""); + + pr_err("data_len = %d\n", + le32_to_cpu(ib_mac_rsp->data_len)); + pr_err("data_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr)); + if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) + pr_err("rss = %x\n", + le32_to_cpu(ib_mac_rsp->rss)); + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) + pr_err("vlan_id = %x\n", + le16_to_cpu(ib_mac_rsp->vlan_id)); + + pr_err("flags4 = %s%s%s\n", + ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "", + ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "", + ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? 
"HL " : ""); + + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { + pr_err("hdr length = %d\n", + le32_to_cpu(ib_mac_rsp->hdr_len)); + pr_err("hdr addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr)); + } +} +#endif + +#ifdef QL_ALL_DUMP +void ql_dump_all(struct ql_adapter *qdev) +{ + int i; + + QL_DUMP_REGS(qdev); + QL_DUMP_QDEV(qdev); + for (i = 0; i < qdev->tx_ring_count; i++) { + QL_DUMP_TX_RING(&qdev->tx_ring[i]); + QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]); + } + for (i = 0; i < qdev->rx_ring_count; i++) { + QL_DUMP_RX_RING(&qdev->rx_ring[i]); + QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]); + } +} +#endif diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c new file mode 100644 index 000000000000..9b67bfea035f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -0,0 +1,688 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include "qlge.h" + +static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { + "Loopback test (offline)" +}; +#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) + +static int ql_update_ring_coalescing(struct ql_adapter *qdev) +{ + int i, status = 0; + struct rx_ring *rx_ring; + struct cqicb *cqicb; + + if (!netif_running(qdev->ndev)) + return status; + + /* Skip the default queue, and update the outbound handler + * queues if they changed. + */ + cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count]; + if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs || + le16_to_cpu(cqicb->pkt_delay) != + qdev->tx_max_coalesced_frames) { + for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + cqicb = (struct cqicb *)rx_ring; + cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); + cqicb->pkt_delay = + cpu_to_le16(qdev->tx_max_coalesced_frames); + cqicb->flags = FLAGS_LI; + status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), + CFG_LCQ, rx_ring->cq_id); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to load CQICB.\n"); + goto exit; + } + } + } + + /* Update the inbound (RSS) handler queues if they changed. */ + cqicb = (struct cqicb *)&qdev->rx_ring[0]; + if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs || + le16_to_cpu(cqicb->pkt_delay) != + qdev->rx_max_coalesced_frames) { + for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { + rx_ring = &qdev->rx_ring[i]; + cqicb = (struct cqicb *)rx_ring; + cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); + cqicb->pkt_delay = + cpu_to_le16(qdev->rx_max_coalesced_frames); + cqicb->flags = FLAGS_LI; + status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), + CFG_LCQ, rx_ring->cq_id); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to load CQICB.\n"); + goto exit; + } + } + } +exit: + return status; +} + +static void ql_update_stats(struct ql_adapter *qdev) +{ + u32 i; + u64 data; + u64 *iter = &qdev->nic_stats.tx_pkts; + + spin_lock(&qdev->stats_lock); + if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { + netif_err(qdev, drv, qdev->ndev, + "Couldn't get xgmac sem.\n"); + goto quit; + } + /* + * Get TX statistics. 
+	 */
+	for (i = 0x200; i < 0x280; i += 8) {
+		if (ql_read_xgmac_reg64(qdev, i, &data)) {
+			netif_err(qdev, drv, qdev->ndev,
+				  "Error reading status register 0x%.04x.\n",
+				  i);
+			goto end;
+		} else
+			*iter = data;
+		iter++;
+	}
+
+	/*
+	 * Get RX statistics.
+	 */
+	for (i = 0x300; i < 0x3d0; i += 8) {
+		if (ql_read_xgmac_reg64(qdev, i, &data)) {
+			netif_err(qdev, drv, qdev->ndev,
+				  "Error reading status register 0x%.04x.\n",
+				  i);
+			goto end;
+		} else
+			*iter = data;
+		iter++;
+	}
+
+	/*
+	 * Get Per-priority TX pause frame counter statistics.
+	 */
+	for (i = 0x500; i < 0x540; i += 8) {
+		if (ql_read_xgmac_reg64(qdev, i, &data)) {
+			netif_err(qdev, drv, qdev->ndev,
+				  "Error reading status register 0x%.04x.\n",
+				  i);
+			goto end;
+		} else
+			*iter = data;
+		iter++;
+	}
+
+	/*
+	 * Get Per-priority RX pause frame counter statistics.
+	 */
+	for (i = 0x568; i < 0x5a8; i += 8) {
+		if (ql_read_xgmac_reg64(qdev, i, &data)) {
+			netif_err(qdev, drv, qdev->ndev,
+				  "Error reading status register 0x%.04x.\n",
+				  i);
+			goto end;
+		} else
+			*iter = data;
+		iter++;
+	}
+
+	/*
+	 * Get RX NIC FIFO DROP statistics.
+	 */
+	if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Error reading status register 0x%.04x.\n", 0x5b8);
+		goto end;
+	} else
+		*iter = data;
+end:
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+quit:
+	spin_unlock(&qdev->stats_lock);
+
+	QL_DUMP_STAT(qdev);
+}
+
+static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
+	{"tx_pkts"},
+	{"tx_bytes"},
+	{"tx_mcast_pkts"},
+	{"tx_bcast_pkts"},
+	{"tx_ucast_pkts"},
+	{"tx_ctl_pkts"},
+	{"tx_pause_pkts"},
+	{"tx_64_pkts"},
+	{"tx_65_to_127_pkts"},
+	{"tx_128_to_255_pkts"},
+	{"tx_256_511_pkts"},
+	{"tx_512_to_1023_pkts"},
+	{"tx_1024_to_1518_pkts"},
+	{"tx_1519_to_max_pkts"},
+	{"tx_undersize_pkts"},
+	{"tx_oversize_pkts"},
+	{"rx_bytes"},
+	{"rx_bytes_ok"},
+	{"rx_pkts"},
+	{"rx_pkts_ok"},
+	{"rx_bcast_pkts"},
+	{"rx_mcast_pkts"},
+	{"rx_ucast_pkts"},
+	{"rx_undersize_pkts"},
+	{"rx_oversize_pkts"},
+	{"rx_jabber_pkts"},
+	{"rx_undersize_fcerr_pkts"},
+	{"rx_drop_events"},
+	{"rx_fcerr_pkts"},
+	{"rx_align_err"},
+	{"rx_symbol_err"},
+	{"rx_mac_err"},
+	{"rx_ctl_pkts"},
+	{"rx_pause_pkts"},
+	{"rx_64_pkts"},
+	{"rx_65_to_127_pkts"},
+	{"rx_128_255_pkts"},
+	{"rx_256_511_pkts"},
+	{"rx_512_to_1023_pkts"},
+	{"rx_1024_to_1518_pkts"},
+	{"rx_1519_to_max_pkts"},
+	{"rx_len_err_pkts"},
+	{"tx_cbfc_pause_frames0"},
+	{"tx_cbfc_pause_frames1"},
+	{"tx_cbfc_pause_frames2"},
+	{"tx_cbfc_pause_frames3"},
+	{"tx_cbfc_pause_frames4"},
+	{"tx_cbfc_pause_frames5"},
+	{"tx_cbfc_pause_frames6"},
+	{"tx_cbfc_pause_frames7"},
+	{"rx_cbfc_pause_frames0"},
+	{"rx_cbfc_pause_frames1"},
+	{"rx_cbfc_pause_frames2"},
+	{"rx_cbfc_pause_frames3"},
+	{"rx_cbfc_pause_frames4"},
+	{"rx_cbfc_pause_frames5"},
+	{"rx_cbfc_pause_frames6"},
+	{"rx_cbfc_pause_frames7"},
+	{"rx_nic_fifo_drop"},
+};
+
+static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
+		break;
+	}
+}
+
+static int ql_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_TEST:
+		return QLGE_TEST_LEN;
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(ql_stats_str_arr);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void
+ql_get_ethtool_stats(struct net_device *ndev,
+		     struct ethtool_stats *stats, u64 *data)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	struct nic_stats *s = &qdev->nic_stats;
+
+	ql_update_stats(qdev);
+
+	*data++ = s->tx_pkts;
+
*data++ = s->tx_bytes; + *data++ = s->tx_mcast_pkts; + *data++ = s->tx_bcast_pkts; + *data++ = s->tx_ucast_pkts; + *data++ = s->tx_ctl_pkts; + *data++ = s->tx_pause_pkts; + *data++ = s->tx_64_pkt; + *data++ = s->tx_65_to_127_pkt; + *data++ = s->tx_128_to_255_pkt; + *data++ = s->tx_256_511_pkt; + *data++ = s->tx_512_to_1023_pkt; + *data++ = s->tx_1024_to_1518_pkt; + *data++ = s->tx_1519_to_max_pkt; + *data++ = s->tx_undersize_pkt; + *data++ = s->tx_oversize_pkt; + *data++ = s->rx_bytes; + *data++ = s->rx_bytes_ok; + *data++ = s->rx_pkts; + *data++ = s->rx_pkts_ok; + *data++ = s->rx_bcast_pkts; + *data++ = s->rx_mcast_pkts; + *data++ = s->rx_ucast_pkts; + *data++ = s->rx_undersize_pkts; + *data++ = s->rx_oversize_pkts; + *data++ = s->rx_jabber_pkts; + *data++ = s->rx_undersize_fcerr_pkts; + *data++ = s->rx_drop_events; + *data++ = s->rx_fcerr_pkts; + *data++ = s->rx_align_err; + *data++ = s->rx_symbol_err; + *data++ = s->rx_mac_err; + *data++ = s->rx_ctl_pkts; + *data++ = s->rx_pause_pkts; + *data++ = s->rx_64_pkts; + *data++ = s->rx_65_to_127_pkts; + *data++ = s->rx_128_255_pkts; + *data++ = s->rx_256_511_pkts; + *data++ = s->rx_512_to_1023_pkts; + *data++ = s->rx_1024_to_1518_pkts; + *data++ = s->rx_1519_to_max_pkts; + *data++ = s->rx_len_err_pkts; + *data++ = s->tx_cbfc_pause_frames0; + *data++ = s->tx_cbfc_pause_frames1; + *data++ = s->tx_cbfc_pause_frames2; + *data++ = s->tx_cbfc_pause_frames3; + *data++ = s->tx_cbfc_pause_frames4; + *data++ = s->tx_cbfc_pause_frames5; + *data++ = s->tx_cbfc_pause_frames6; + *data++ = s->tx_cbfc_pause_frames7; + *data++ = s->rx_cbfc_pause_frames0; + *data++ = s->rx_cbfc_pause_frames1; + *data++ = s->rx_cbfc_pause_frames2; + *data++ = s->rx_cbfc_pause_frames3; + *data++ = s->rx_cbfc_pause_frames4; + *data++ = s->rx_cbfc_pause_frames5; + *data++ = s->rx_cbfc_pause_frames6; + *data++ = s->rx_cbfc_pause_frames7; + *data++ = s->rx_nic_fifo_drop; +} + +static int ql_get_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full; + ecmd->autoneg = AUTONEG_ENABLE; + ecmd->transceiver = XCVR_EXTERNAL; + if ((qdev->link_status & STS_LINK_TYPE_MASK) == + STS_LINK_TYPE_10GBASET) { + ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); + ecmd->port = PORT_TP; + } else { + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + } + + ethtool_cmd_speed_set(ecmd, SPEED_10000); + ecmd->duplex = DUPLEX_FULL; + + return 0; +} + +static void ql_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *drvinfo) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + strncpy(drvinfo->driver, qlge_driver_name, 32); + strncpy(drvinfo->version, qlge_driver_version, 32); + snprintf(drvinfo->fw_version, 32, "v%d.%d.%d", + (qdev->fw_rev_id & 0x00ff0000) >> 16, + (qdev->fw_rev_id & 0x0000ff00) >> 8, + (qdev->fw_rev_id & 0x000000ff)); + strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) + drvinfo->regdump_len = sizeof(struct ql_mpi_coredump); + else + drvinfo->regdump_len = sizeof(struct ql_reg_dump); + drvinfo->eedump_len = 0; +} + +static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + /* What we support. 
*/ + wol->supported = WAKE_MAGIC; + /* What we've currently got set. */ + wol->wolopts = qdev->wol; +} + +static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + qdev->wol = wol->wolopts; + + netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol); + if (!qdev->wol) { + u32 wol = 0; + status = ql_mb_wol_mode(qdev, wol); + netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n", + status == 0 ? "cleared successfully" : "clear failed", + wol); + } + + return 0; +} + +static int ql_set_phys_id(struct net_device *ndev, + enum ethtool_phys_id_state state) + +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + /* Save the current LED settings */ + if (ql_mb_get_led_cfg(qdev)) + return -EIO; + + /* Start blinking */ + ql_mb_set_led_cfg(qdev, QL_LED_BLINK); + return 0; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + if (ql_mb_set_led_cfg(qdev, qdev->led_config)) + return -EIO; + return 0; + + default: + return -EINVAL; + } +} + +static int ql_start_loopback(struct ql_adapter *qdev) +{ + if (netif_carrier_ok(qdev->ndev)) { + set_bit(QL_LB_LINK_UP, &qdev->flags); + netif_carrier_off(qdev->ndev); + } else + clear_bit(QL_LB_LINK_UP, &qdev->flags); + qdev->link_config |= CFG_LOOPBACK_PCS; + return ql_mb_set_port_cfg(qdev); +} + +static void ql_stop_loopback(struct ql_adapter *qdev) +{ + qdev->link_config &= ~CFG_LOOPBACK_PCS; + ql_mb_set_port_cfg(qdev); + if (test_bit(QL_LB_LINK_UP, &qdev->flags)) { + netif_carrier_on(qdev->ndev); + clear_bit(QL_LB_LINK_UP, &qdev->flags); + } +} + +static void ql_create_lb_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +void ql_check_lb_frame(struct ql_adapter *qdev, + struct sk_buff *skb) +{ + unsigned int frame_size = skb->len; + + if ((*(skb->data + 3) == 0xFF) && + (*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) { + atomic_dec(&qdev->lb_count); + return; + } +} + +static int ql_run_loopback_test(struct ql_adapter *qdev) +{ + int i; + netdev_tx_t rc; + struct sk_buff *skb; + unsigned int size = SMALL_BUF_MAP_SIZE; + + for (i = 0; i < 64; i++) { + skb = netdev_alloc_skb(qdev->ndev, size); + if (!skb) + return -ENOMEM; + + skb->queue_mapping = 0; + skb_put(skb, size); + ql_create_lb_frame(skb, size); + rc = ql_lb_send(skb, qdev->ndev); + if (rc != NETDEV_TX_OK) + return -EPIPE; + atomic_inc(&qdev->lb_count); + } + /* Give queue time to settle before testing results. */ + msleep(2); + ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128); + return atomic_read(&qdev->lb_count) ? 
-EIO : 0;
+}
+
+static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
+{
+	*data = ql_start_loopback(qdev);
+	if (*data)
+		goto out;
+	*data = ql_run_loopback_test(qdev);
+out:
+	ql_stop_loopback(qdev);
+	return *data;
+}
+
+static void ql_self_test(struct net_device *ndev,
+			 struct ethtool_test *eth_test, u64 *data)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		set_bit(QL_SELFTEST, &qdev->flags);
+		if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+			/* Offline tests */
+			if (ql_loopback_test(qdev, &data[0]))
+				eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		} else {
+			/* Online tests */
+			data[0] = 0;
+		}
+		clear_bit(QL_SELFTEST, &qdev->flags);
+		/* Give link time to come up after
+		 * port configuration changes.
+		 */
+		msleep_interruptible(4 * 1000);
+	} else {
+		netif_err(qdev, drv, qdev->ndev,
+			  "is down, Loopback test will fail.\n");
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+	}
+}
+
+static int ql_get_regs_len(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+		return sizeof(struct ql_mpi_coredump);
+	else
+		return sizeof(struct ql_reg_dump);
+}
+
+static void ql_get_regs(struct net_device *ndev,
+			struct ethtool_regs *regs, void *p)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	ql_get_dump(qdev, p);
+	qdev->core_is_dumped = 0;
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+		regs->len = sizeof(struct ql_mpi_coredump);
+	else
+		regs->len = sizeof(struct ql_reg_dump);
+}
+
+static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct ql_adapter *qdev = netdev_priv(dev);
+
+	c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
+	c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
+
+	/* This chip coalesces as follows:
+	 * If a packet arrives, hold off interrupts until
+	 * cqicb->int_delay expires, but if no other packets arrive don't
+	 * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
+	 * timer to coalesce on a frame basis. So, we have to take ethtool's
+	 * max_coalesced_frames value and convert it to a delay in microseconds.
+	 * We do this by using a basic throughput of 1,000,000 frames per
+	 * second @ (1024 bytes). This means one frame per usec. So it's a
+	 * simple one to one ratio.
+	 */
+	c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
+	c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
+
+	return 0;
+}
+
+static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	/* Validate user parameters. */
+	if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
+		return -EINVAL;
+	/* Don't wait more than 10 usec. */
+	if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+		return -EINVAL;
+	if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
+		return -EINVAL;
+	if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+		return -EINVAL;
+
+	/* Verify a change took place before updating the hardware.
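+	 * Every changed value means another CQICB download to the chip
+	 * in ql_update_ring_coalescing(), so return early when nothing
+	 * actually changed.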
*/ + if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs && + qdev->tx_coalesce_usecs == c->tx_coalesce_usecs && + qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames && + qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames) + return 0; + + qdev->rx_coalesce_usecs = c->rx_coalesce_usecs; + qdev->tx_coalesce_usecs = c->tx_coalesce_usecs; + qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames; + qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames; + + return ql_update_ring_coalescing(qdev); +} + +static void ql_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ql_adapter *qdev = netdev_priv(netdev); + + ql_mb_get_port_cfg(qdev); + if (qdev->link_config & CFG_PAUSE_STD) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int ql_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ql_adapter *qdev = netdev_priv(netdev); + int status = 0; + + if ((pause->rx_pause) && (pause->tx_pause)) + qdev->link_config |= CFG_PAUSE_STD; + else if (!pause->rx_pause && !pause->tx_pause) + qdev->link_config &= ~CFG_PAUSE_STD; + else + return -EINVAL; + + status = ql_mb_set_port_cfg(qdev); + return status; +} + +static u32 ql_get_msglevel(struct net_device *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + return qdev->msg_enable; +} + +static void ql_set_msglevel(struct net_device *ndev, u32 value) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + qdev->msg_enable = value; +} + +const struct ethtool_ops qlge_ethtool_ops = { + .get_settings = ql_get_settings, + .get_drvinfo = ql_get_drvinfo, + .get_wol = ql_get_wol, + .set_wol = ql_set_wol, + .get_regs_len = ql_get_regs_len, + .get_regs = ql_get_regs, + .get_msglevel = ql_get_msglevel, + .set_msglevel = ql_set_msglevel, + .get_link = ethtool_op_get_link, + .set_phys_id = ql_set_phys_id, + .self_test = ql_self_test, + .get_pauseparam = ql_get_pauseparam, + .set_pauseparam = ql_set_pauseparam, + .get_coalesce = ql_get_coalesce, + .set_coalesce = ql_set_coalesce, + .get_sset_count = ql_get_sset_count, + .get_strings = ql_get_strings, + .get_ethtool_stats = ql_get_ethtool_stats, +}; + diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c new file mode 100644 index 000000000000..f07e96ec8843 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -0,0 +1,4987 @@ +/* + * QLogic qlge NIC HBA Driver + * Copyright (c) 2003-2008 QLogic Corporation + * See LICENSE.qlge for copyright and licensing details. 
+ * Author: Linux qlge network device driver by + * Ron Mercer + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qlge.h" + +char qlge_driver_name[] = DRV_NAME; +const char qlge_driver_version[] = DRV_VERSION; + +MODULE_AUTHOR("Ron Mercer "); +MODULE_DESCRIPTION(DRV_STRING " "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static const u32 default_msg = + NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | +/* NETIF_MSG_TIMER | */ + NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP | + NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR | +/* NETIF_MSG_TX_QUEUED | */ +/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */ +/* NETIF_MSG_PKTDATA | */ + NETIF_MSG_HW | NETIF_MSG_WOL | 0; + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0664); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +#define MSIX_IRQ 0 +#define MSI_IRQ 1 +#define LEG_IRQ 2 +static int qlge_irq_type = MSIX_IRQ; +module_param(qlge_irq_type, int, 0664); +MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); + +static int qlge_mpi_coredump; +module_param(qlge_mpi_coredump, int, 0); +MODULE_PARM_DESC(qlge_mpi_coredump, + "Option to enable MPI firmware dump. " + "Default is OFF - Do Not allocate memory. "); + +static int qlge_force_coredump; +module_param(qlge_force_coredump, int, 0); +MODULE_PARM_DESC(qlge_force_coredump, + "Option to allow force of firmware core dump. " + "Default is OFF - Do not allow."); + +static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)}, + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, qlge_pci_tbl); + +static int ql_wol(struct ql_adapter *qdev); +static void qlge_set_multicast_list(struct net_device *ndev); + +/* This hardware semaphore causes exclusive access to + * resources shared between the NIC driver, MPI firmware, + * FCOE firmware and the FC driver. 
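+ *
+ * Ownership is claimed by writing (SEM_SET << shift) together with the
+ * resource's mask bits into the SEM register; reading SEM back tells us
+ * whether our set "stuck".  The usual calling pattern, as in
+ * ql_get_8012_flash_params() below, is:
+ *
+ *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+ *		return -ETIMEDOUT;
+ *	...access the flash registers...
+ *	ql_sem_unlock(qdev, SEM_FLASH_MASK);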
+ */ +static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) +{ + u32 sem_bits = 0; + + switch (sem_mask) { + case SEM_XGMAC0_MASK: + sem_bits = SEM_SET << SEM_XGMAC0_SHIFT; + break; + case SEM_XGMAC1_MASK: + sem_bits = SEM_SET << SEM_XGMAC1_SHIFT; + break; + case SEM_ICB_MASK: + sem_bits = SEM_SET << SEM_ICB_SHIFT; + break; + case SEM_MAC_ADDR_MASK: + sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT; + break; + case SEM_FLASH_MASK: + sem_bits = SEM_SET << SEM_FLASH_SHIFT; + break; + case SEM_PROBE_MASK: + sem_bits = SEM_SET << SEM_PROBE_SHIFT; + break; + case SEM_RT_IDX_MASK: + sem_bits = SEM_SET << SEM_RT_IDX_SHIFT; + break; + case SEM_PROC_REG_MASK: + sem_bits = SEM_SET << SEM_PROC_REG_SHIFT; + break; + default: + netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n"); + return -EINVAL; + } + + ql_write32(qdev, SEM, sem_bits | sem_mask); + return !(ql_read32(qdev, SEM) & sem_bits); +} + +int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) +{ + unsigned int wait_count = 30; + do { + if (!ql_sem_trylock(qdev, sem_mask)) + return 0; + udelay(100); + } while (--wait_count); + return -ETIMEDOUT; +} + +void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) +{ + ql_write32(qdev, SEM, sem_mask); + ql_read32(qdev, SEM); /* flush */ +} + +/* This function waits for a specific bit to come ready + * in a given register. It is used mostly by the initialize + * process, but is also used in kernel thread API such as + * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid. + */ +int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) +{ + u32 temp; + int count = UDELAY_COUNT; + + while (count) { + temp = ql_read32(qdev, reg); + + /* check for errors */ + if (temp & err_bit) { + netif_alert(qdev, probe, qdev->ndev, + "register 0x%.08x access error, value = 0x%.08x!.\n", + reg, temp); + return -EIO; + } else if (temp & bit) + return 0; + udelay(UDELAY_DELAY); + count--; + } + netif_alert(qdev, probe, qdev->ndev, + "Timed out waiting for reg %x to come ready.\n", reg); + return -ETIMEDOUT; +} + +/* The CFG register is used to download TX and RX control blocks + * to the chip. This function waits for an operation to complete. + */ +static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit) +{ + int count = UDELAY_COUNT; + u32 temp; + + while (count) { + temp = ql_read32(qdev, CFG); + if (temp & CFG_LE) + return -EIO; + if (!(temp & bit)) + return 0; + udelay(UDELAY_DELAY); + count--; + } + return -ETIMEDOUT; +} + + +/* Used to issue init control blocks to hw. Maps control block, + * sets address, triggers download, waits for completion. + */ +int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, + u16 q_id) +{ + u64 map; + int status = 0; + int direction; + u32 mask; + u32 value; + + direction = + (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? 
PCI_DMA_TODEVICE : + PCI_DMA_FROMDEVICE; + + map = pci_map_single(qdev->pdev, ptr, size, direction); + if (pci_dma_mapping_error(qdev->pdev, map)) { + netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n"); + return -ENOMEM; + } + + status = ql_sem_spinlock(qdev, SEM_ICB_MASK); + if (status) + return status; + + status = ql_wait_cfg(qdev, bit); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Timed out waiting for CFG to come ready.\n"); + goto exit; + } + + ql_write32(qdev, ICB_L, (u32) map); + ql_write32(qdev, ICB_H, (u32) (map >> 32)); + + mask = CFG_Q_MASK | (bit << 16); + value = bit | (q_id << CFG_Q_SHIFT); + ql_write32(qdev, CFG, (mask | value)); + + /* + * Wait for the bit to clear after signaling hw. + */ + status = ql_wait_cfg(qdev, bit); +exit: + ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ + pci_unmap_single(qdev->pdev, map, size, direction); + return status; +} + +/* Get a specific MAC address from the CAM. Used for debug and reg dump. */ +int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, + u32 *value) +{ + u32 offset = 0; + int status; + + switch (type) { + case MAC_ADDR_TYPE_MULTI_MAC: + case MAC_ADDR_TYPE_CAM_MAC: + { + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MR, 0); + if (status) + goto exit; + *value++ = ql_read32(qdev, MAC_ADDR_DATA); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MR, 0); + if (status) + goto exit; + *value++ = ql_read32(qdev, MAC_ADDR_DATA); + if (type == MAC_ADDR_TYPE_CAM_MAC) { + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ + status = + ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, + MAC_ADDR_MR, 0); + if (status) + goto exit; + *value++ = ql_read32(qdev, MAC_ADDR_DATA); + } + break; + } + case MAC_ADDR_TYPE_VLAN: + case MAC_ADDR_TYPE_MULTI_FLTR: + default: + netif_crit(qdev, ifup, qdev->ndev, + "Address type %d not yet supported.\n", type); + status = -EPERM; + } +exit: + return status; +} + +/* Set up a MAC, multicast or VLAN address for the + * inbound frame matching. 
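+ *
+ * Every CAM access below is the same handshake: wait for MAC_ADDR_MW
+ * to report ready, write the offset/index/type word to MAC_ADDR_IDX,
+ * then write 32 bits of payload to MAC_ADDR_DATA.  A unicast CAM entry
+ * takes three such round trips (two address words plus the
+ * routing/output word).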
+ */ +static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, + u16 index) +{ + u32 offset = 0; + int status = 0; + + switch (type) { + case MAC_ADDR_TYPE_MULTI_MAC: + { + u32 upper = (addr[0] << 8) | addr[1]; + u32 lower = (addr[2] << 24) | (addr[3] << 16) | + (addr[4] << 8) | (addr[5]); + + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | + (index << MAC_ADDR_IDX_SHIFT) | + type | MAC_ADDR_E); + ql_write32(qdev, MAC_ADDR_DATA, lower); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | + (index << MAC_ADDR_IDX_SHIFT) | + type | MAC_ADDR_E); + + ql_write32(qdev, MAC_ADDR_DATA, upper); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + break; + } + case MAC_ADDR_TYPE_CAM_MAC: + { + u32 cam_output; + u32 upper = (addr[0] << 8) | addr[1]; + u32 lower = + (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | + (addr[5]); + + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Adding %s address %pM at index %d in the CAM.\n", + type == MAC_ADDR_TYPE_MULTI_MAC ? + "MULTICAST" : "UNICAST", + addr, index); + + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + type); /* type */ + ql_write32(qdev, MAC_ADDR_DATA, lower); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + type); /* type */ + ql_write32(qdev, MAC_ADDR_DATA, upper); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + type); /* type */ + /* This field should also include the queue id + and possibly the function id. Right now we hardcode + the route field to NIC core. + */ + cam_output = (CAM_OUT_ROUTE_NIC | + (qdev-> + func << CAM_OUT_FUNC_SHIFT) | + (0 << CAM_OUT_CQ_ID_SHIFT)); + if (qdev->ndev->features & NETIF_F_HW_VLAN_RX) + cam_output |= CAM_OUT_RV; + /* route to NIC core */ + ql_write32(qdev, MAC_ADDR_DATA, cam_output); + break; + } + case MAC_ADDR_TYPE_VLAN: + { + u32 enable_bit = *((u32 *) &addr[0]); + /* For VLAN, the addr actually holds a bit that + * either enables or disables the vlan id we are + * addressing. It's either MAC_ADDR_E on or off. + * That's bit-27 we're talking about. + */ + netif_info(qdev, ifup, qdev->ndev, + "%s VLAN ID %d %s the CAM.\n", + enable_bit ? "Adding" : "Removing", + index, + enable_bit ? "to" : "from"); + + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + type | /* type */ + enable_bit); /* enable/disable */ + break; + } + case MAC_ADDR_TYPE_MULTI_FLTR: + default: + netif_crit(qdev, ifup, qdev->ndev, + "Address type %d not yet supported.\n", type); + status = -EPERM; + } +exit: + return status; +} + +/* Set or clear MAC address in hardware. We sometimes + * have to clear it to prevent wrong frame routing + * especially in a bonding environment. 
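+ * "Clearing" just loads a zero MAC into our CAM slot so the CAM-hit
+ * route stops matching the stale address; ql_link_off() below does
+ * this whenever the link drops.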
+ */ +static int ql_set_mac_addr(struct ql_adapter *qdev, int set) +{ + int status; + char zero_mac_addr[ETH_ALEN]; + char *addr; + + if (set) { + addr = &qdev->current_mac_addr[0]; + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Set Mac addr %pM\n", addr); + } else { + memset(zero_mac_addr, 0, ETH_ALEN); + addr = &zero_mac_addr[0]; + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Clearing MAC address\n"); + } + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return status; + status = ql_set_mac_addr_reg(qdev, (u8 *) addr, + MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + netif_err(qdev, ifup, qdev->ndev, + "Failed to init mac address.\n"); + return status; +} + +void ql_link_on(struct ql_adapter *qdev) +{ + netif_err(qdev, link, qdev->ndev, "Link is up.\n"); + netif_carrier_on(qdev->ndev); + ql_set_mac_addr(qdev, 1); +} + +void ql_link_off(struct ql_adapter *qdev) +{ + netif_err(qdev, link, qdev->ndev, "Link is down.\n"); + netif_carrier_off(qdev->ndev); + ql_set_mac_addr(qdev, 0); +} + +/* Get a specific frame routing value from the CAM. + * Used for debug and reg dump. + */ +int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value) +{ + int status = 0; + + status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); + if (status) + goto exit; + + ql_write32(qdev, RT_IDX, + RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT)); + status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0); + if (status) + goto exit; + *value = ql_read32(qdev, RT_DATA); +exit: + return status; +} + +/* The NIC function for this chip has 16 routing indexes. Each one can be used + * to route different frame types to various inbound queues. We send broadcast/ + * multicast/error frames to the default queue for slow handling, + * and CAM hit/RSS frames to the fast handling queues. + */ +static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, + int enable) +{ + int status = -EINVAL; /* Return error if no mask match. */ + u32 value = 0; + + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "%s %s mask %s the routing reg.\n", + enable ? "Adding" : "Removing", + index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" : + index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" : + index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" : + index == RT_IDX_BCAST_SLOT ? "BROADCAST" : + index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" : + index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" : + index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" : + index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" : + index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" : + index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" : + index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" : + index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" : + index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" : + index == RT_IDX_UNUSED013 ? "UNUSED13" : + index == RT_IDX_UNUSED014 ? "UNUSED14" : + index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" : + "(Bad index != RT_IDX)", + enable ? "to" : "from"); + + switch (mask) { + case RT_IDX_CAM_HIT: + { + value = RT_IDX_DST_CAM_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_VALID: /* Promiscuous Mode frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. 
*/ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_IP_CSUM_ERR_SLOT << + RT_IDX_IDX_SHIFT); /* index */ + break; + } + case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_TCP_UDP_CSUM_ERR_SLOT << + RT_IDX_IDX_SHIFT); /* index */ + break; + } + case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_MCAST: /* Pass up All Multicast frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */ + { + value = RT_IDX_DST_RSS | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case 0: /* Clear the E-bit on an entry. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (index << RT_IDX_IDX_SHIFT);/* index */ + break; + } + default: + netif_err(qdev, ifup, qdev->ndev, + "Mask type %d not yet supported.\n", mask); + status = -EPERM; + goto exit; + } + + if (value) { + status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); + if (status) + goto exit; + value |= (enable ? RT_IDX_E : 0); + ql_write32(qdev, RT_IDX, value); + ql_write32(qdev, RT_DATA, enable ? mask : 0); + } +exit: + return status; +} + +static void ql_enable_interrupts(struct ql_adapter *qdev) +{ + ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI); +} + +static void ql_disable_interrupts(struct ql_adapter *qdev) +{ + ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); +} + +/* If we're running with multiple MSI-X vectors then we enable on the fly. + * Otherwise, we may have multiple outstanding workers and don't want to + * enable until the last one finishes. In this case, the irq_cnt gets + * incremented every time we queue a worker and decremented every time + * a worker finishes. Once it hits zero we enable the interrupt. + */ +u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) +{ + u32 var = 0; + unsigned long hw_flags = 0; + struct intr_context *ctx = qdev->intr_context + intr; + + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) { + /* Always enable if we're MSIX multi interrupts and + * it's not the default (zeroeth) interrupt. + */ + ql_write32(qdev, INTR_EN, + ctx->intr_en_mask); + var = ql_read32(qdev, STS); + return var; + } + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (atomic_dec_and_test(&ctx->irq_cnt)) { + ql_write32(qdev, INTR_EN, + ctx->intr_en_mask); + var = ql_read32(qdev, STS); + } + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return var; +} + +static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) +{ + u32 var = 0; + struct intr_context *ctx; + + /* HW disables for us if we're MSIX multi interrupts and + * it's not the default (zeroeth) interrupt. 
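+	 * For legacy/MSI and vector zero, irq_cnt acts as a disable
+	 * depth: every disable increments it, and the enable side only
+	 * touches the hardware again once atomic_dec_and_test() brings
+	 * it back to zero.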
+ */ + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) + return 0; + + ctx = qdev->intr_context + intr; + spin_lock(&qdev->hw_lock); + if (!atomic_read(&ctx->irq_cnt)) { + ql_write32(qdev, INTR_EN, + ctx->intr_dis_mask); + var = ql_read32(qdev, STS); + } + atomic_inc(&ctx->irq_cnt); + spin_unlock(&qdev->hw_lock); + return var; +} + +static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) +{ + int i; + for (i = 0; i < qdev->intr_count; i++) { + /* The enable call does a atomic_dec_and_test + * and enables only if the result is zero. + * So we precharge it here. + */ + if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) || + i == 0)) + atomic_set(&qdev->intr_context[i].irq_cnt, 1); + ql_enable_completion_interrupt(qdev, i); + } + +} + +static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str) +{ + int status, i; + u16 csum = 0; + __le16 *flash = (__le16 *)&qdev->flash; + + status = strncmp((char *)&qdev->flash, str, 4); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n"); + return status; + } + + for (i = 0; i < size; i++) + csum += le16_to_cpu(*flash++); + + if (csum) + netif_err(qdev, ifup, qdev->ndev, + "Invalid flash checksum, csum = 0x%.04x.\n", csum); + + return csum; +} + +static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data) +{ + int status = 0; + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, + FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); + if (status) + goto exit; + /* set up for reg read */ + ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset); + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, + FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); + if (status) + goto exit; + /* This data is stored on flash as an array of + * __le32. Since ql_read32() returns cpu endian + * we need to swap it back. + */ + *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA)); +exit: + return status; +} + +static int ql_get_8000_flash_params(struct ql_adapter *qdev) +{ + u32 i, size; + int status; + __le32 *p = (__le32 *)&qdev->flash; + u32 offset; + u8 mac_addr[6]; + + /* Get flash offset for function and adjust + * for dword access. + */ + if (!qdev->port) + offset = FUNC0_FLASH_OFFSET / sizeof(u32); + else + offset = FUNC1_FLASH_OFFSET / sizeof(u32); + + if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) + return -ETIMEDOUT; + + size = sizeof(struct flash_params_8000) / sizeof(u32); + for (i = 0; i < size; i++, p++) { + status = ql_read_flash_word(qdev, i+offset, p); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Error reading flash.\n"); + goto exit; + } + } + + status = ql_validate_flash(qdev, + sizeof(struct flash_params_8000) / sizeof(u16), + "8000"); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); + status = -EINVAL; + goto exit; + } + + /* Extract either manufacturer or BOFM modified + * MAC address. 
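+	 * data_type1 == 2 appears to mark a management-assigned (BOFM)
+	 * override stored in mac_addr1; anything else falls back to the
+	 * factory address in mac_addr.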
+	 */
+	if (qdev->flash.flash_params_8000.data_type1 == 2)
+		memcpy(mac_addr,
+		       qdev->flash.flash_params_8000.mac_addr1,
+		       qdev->ndev->addr_len);
+	else
+		memcpy(mac_addr,
+		       qdev->flash.flash_params_8000.mac_addr,
+		       qdev->ndev->addr_len);
+
+	if (!is_valid_ether_addr(mac_addr)) {
+		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
+		status = -EINVAL;
+		goto exit;
+	}
+
+	memcpy(qdev->ndev->dev_addr,
+	       mac_addr,
+	       qdev->ndev->addr_len);
+
+exit:
+	ql_sem_unlock(qdev, SEM_FLASH_MASK);
+	return status;
+}
+
+static int ql_get_8012_flash_params(struct ql_adapter *qdev)
+{
+	int i;
+	int status;
+	__le32 *p = (__le32 *)&qdev->flash;
+	u32 offset = 0;
+	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
+
+	/* Second function's parameters follow the first
+	 * function's.
+	 */
+	if (qdev->port)
+		offset = size;
+
+	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+		return -ETIMEDOUT;
+
+	for (i = 0; i < size; i++, p++) {
+		status = ql_read_flash_word(qdev, i+offset, p);
+		if (status) {
+			netif_err(qdev, ifup, qdev->ndev,
+				  "Error reading flash.\n");
+			goto exit;
+		}
+
+	}
+
+	status = ql_validate_flash(qdev,
+				   sizeof(struct flash_params_8012) / sizeof(u16),
+				   "8012");
+	if (status) {
+		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
+		status = -EINVAL;
+		goto exit;
+	}
+
+	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
+		status = -EINVAL;
+		goto exit;
+	}
+
+	memcpy(qdev->ndev->dev_addr,
+	       qdev->flash.flash_params_8012.mac_addr,
+	       qdev->ndev->addr_len);
+
+exit:
+	ql_sem_unlock(qdev, SEM_FLASH_MASK);
+	return status;
+}
+
+/* XGMAC registers are located behind the xgmac_addr and xgmac_data
+ * register pair. Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+	int status;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		return status;
+	/* write the data to the data reg */
+	ql_write32(qdev, XGMAC_DATA, data);
+	/* trigger the write */
+	ql_write32(qdev, XGMAC_ADDR, reg);
+	return status;
+}
+
+/* XGMAC registers are located behind the xgmac_addr and xgmac_data
+ * register pair. Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+	int status = 0;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, XGMAC_DATA);
+exit:
+	return status;
+}
+
+/* This is used for reading the 64-bit statistics regs. */
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
+{
+	int status = 0;
+	u32 hi = 0;
+	u32 lo = 0;
+
+	status = ql_read_xgmac_reg(qdev, reg, &lo);
+	if (status)
+		goto exit;
+
+	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
+	if (status)
+		goto exit;
+
+	*data = (u64) lo | ((u64) hi << 32);
+
+exit:
+	return status;
+}
+
+static int ql_8000_port_initialize(struct ql_adapter *qdev)
+{
+	int status;
+	/*
+	 * Get MPI firmware version for driver banner
+	 * and ethtool info.
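+	 *
+	 * Unlike the 8012 path below, the 8000's MAC is owned by the
+	 * MPI firmware, so bring-up here is mailbox traffic rather than
+	 * direct XGMAC register writes.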
+	 */
+	status = ql_mb_about_fw(qdev);
+	if (status)
+		goto exit;
+	status = ql_mb_get_fw_state(qdev);
+	if (status)
+		goto exit;
+	/* Wake up a worker to get/set the TX/RX frame sizes. */
+	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
+exit:
+	return status;
+}
+
+/* Take the MAC Core out of reset.
+ * Enable statistics counting.
+ * Take the transmitter/receiver out of reset.
+ * This functionality may be done in the MPI firmware at a
+ * later date.
+ */
+static int ql_8012_port_initialize(struct ql_adapter *qdev)
+{
+	int status = 0;
+	u32 data;
+
+	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
+		/* Another function has the semaphore, so
+		 * wait for the port init bit to come ready.
+		 */
+		netif_info(qdev, link, qdev->ndev,
+			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
+		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
+		if (status) {
+			netif_crit(qdev, link, qdev->ndev,
+				   "Port initialize timed out.\n");
+		}
+		return status;
+	}
+
+	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
+	/* Set the core reset. */
+	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+	if (status)
+		goto end;
+	data |= GLOBAL_CFG_RESET;
+	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+	if (status)
+		goto end;
+
+	/* Clear the core reset and turn on jumbo for receiver. */
+	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
+	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
+	data |= GLOBAL_CFG_TX_STAT_EN;
+	data |= GLOBAL_CFG_RX_STAT_EN;
+	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+	if (status)
+		goto end;
+
+	/* Enable transmitter, and clear its reset. */
+	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
+	if (status)
+		goto end;
+	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
+	data |= TX_CFG_EN;	/* Enable the transmitter. */
+	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
+	if (status)
+		goto end;
+
+	/* Enable receiver and clear its reset. */
+	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
+	if (status)
+		goto end;
+	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
+	data |= RX_CFG_EN;	/* Enable the receiver. */
+	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
+	if (status)
+		goto end;
+
+	/* Turn on jumbo. */
+	status =
+	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
+	if (status)
+		goto end;
+	status =
+	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
+	if (status)
+		goto end;
+
+	/* Signal to the world that the port is enabled. */
+	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
+end:
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+	return status;
+}
+
+static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
+{
+	return PAGE_SIZE << qdev->lbq_buf_order;
+}
+
+/* Get the next large buffer. */
+static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+{
+	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
+	rx_ring->lbq_curr_idx++;
+	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
+		rx_ring->lbq_curr_idx = 0;
+	rx_ring->lbq_free_cnt++;
+	return lbq_desc;
+}
+
+static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+					  struct rx_ring *rx_ring)
+{
+	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+
+	pci_dma_sync_single_for_cpu(qdev->pdev,
+				    dma_unmap_addr(lbq_desc, mapaddr),
+				    rx_ring->lbq_buf_size,
+				    PCI_DMA_FROMDEVICE);
+
+	/* If it's the last chunk of our master page then
+	 * we unmap it.
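+	 * Earlier chunks each took a get_page() reference in
+	 * ql_get_next_chunk(), so the page itself survives this unmap
+	 * until the last user drops its reference.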
+ */ + if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) + == ql_lbq_block_size(qdev)) + pci_unmap_page(qdev->pdev, + lbq_desc->p.pg_chunk.map, + ql_lbq_block_size(qdev), + PCI_DMA_FROMDEVICE); + return lbq_desc; +} + +/* Get the next small buffer. */ +static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) +{ + struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx]; + rx_ring->sbq_curr_idx++; + if (rx_ring->sbq_curr_idx == rx_ring->sbq_len) + rx_ring->sbq_curr_idx = 0; + rx_ring->sbq_free_cnt++; + return sbq_desc; +} + +/* Update an rx ring index. */ +static void ql_update_cq(struct rx_ring *rx_ring) +{ + rx_ring->cnsmr_idx++; + rx_ring->curr_entry++; + if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) { + rx_ring->cnsmr_idx = 0; + rx_ring->curr_entry = rx_ring->cq_base; + } +} + +static void ql_write_cq_idx(struct rx_ring *rx_ring) +{ + ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); +} + +static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, + struct bq_desc *lbq_desc) +{ + if (!rx_ring->pg_chunk.page) { + u64 map; + rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP | + GFP_ATOMIC, + qdev->lbq_buf_order); + if (unlikely(!rx_ring->pg_chunk.page)) { + netif_err(qdev, drv, qdev->ndev, + "page allocation failed.\n"); + return -ENOMEM; + } + rx_ring->pg_chunk.offset = 0; + map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, + 0, ql_lbq_block_size(qdev), + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(qdev->pdev, map)) { + __free_pages(rx_ring->pg_chunk.page, + qdev->lbq_buf_order); + netif_err(qdev, drv, qdev->ndev, + "PCI mapping failed.\n"); + return -ENOMEM; + } + rx_ring->pg_chunk.map = map; + rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page); + } + + /* Copy the current master pg_chunk info + * to the current descriptor. + */ + lbq_desc->p.pg_chunk = rx_ring->pg_chunk; + + /* Adjust the master page chunk for next + * buffer get. + */ + rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; + if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { + rx_ring->pg_chunk.page = NULL; + lbq_desc->p.pg_chunk.last_flag = 1; + } else { + rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; + get_page(rx_ring->pg_chunk.page); + lbq_desc->p.pg_chunk.last_flag = 0; + } + return 0; +} +/* Process (refill) a large buffer queue. 
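+ * (Editor's note, added for clarity: refill happens in batches of 16
+ * and only once at least 32 free slots have accumulated, so the
+ * producer-index doorbell write at the end is amortized over a batch.)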
*/ +static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + u32 clean_idx = rx_ring->lbq_clean_idx; + u32 start_idx = clean_idx; + struct bq_desc *lbq_desc; + u64 map; + int i; + + while (rx_ring->lbq_free_cnt > 32) { + for (i = 0; i < 16; i++) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "lbq: try cleaning clean_idx = %d.\n", + clean_idx); + lbq_desc = &rx_ring->lbq[clean_idx]; + if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { + netif_err(qdev, ifup, qdev->ndev, + "Could not get a page chunk.\n"); + return; + } + + map = lbq_desc->p.pg_chunk.map + + lbq_desc->p.pg_chunk.offset; + dma_unmap_addr_set(lbq_desc, mapaddr, map); + dma_unmap_len_set(lbq_desc, maplen, + rx_ring->lbq_buf_size); + *lbq_desc->addr = cpu_to_le64(map); + + pci_dma_sync_single_for_device(qdev->pdev, map, + rx_ring->lbq_buf_size, + PCI_DMA_FROMDEVICE); + clean_idx++; + if (clean_idx == rx_ring->lbq_len) + clean_idx = 0; + } + + rx_ring->lbq_clean_idx = clean_idx; + rx_ring->lbq_prod_idx += 16; + if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) + rx_ring->lbq_prod_idx = 0; + rx_ring->lbq_free_cnt -= 16; + } + + if (start_idx != clean_idx) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "lbq: updating prod idx = %d.\n", + rx_ring->lbq_prod_idx); + ql_write_db_reg(rx_ring->lbq_prod_idx, + rx_ring->lbq_prod_idx_db_reg); + } +} + +/* Process (refill) a small buffer queue. */ +static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + u32 clean_idx = rx_ring->sbq_clean_idx; + u32 start_idx = clean_idx; + struct bq_desc *sbq_desc; + u64 map; + int i; + + while (rx_ring->sbq_free_cnt > 16) { + for (i = 0; i < 16; i++) { + sbq_desc = &rx_ring->sbq[clean_idx]; + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "sbq: try cleaning clean_idx = %d.\n", + clean_idx); + if (sbq_desc->p.skb == NULL) { + netif_printk(qdev, rx_status, KERN_DEBUG, + qdev->ndev, + "sbq: getting new skb for index %d.\n", + sbq_desc->index); + sbq_desc->p.skb = + netdev_alloc_skb(qdev->ndev, + SMALL_BUFFER_SIZE); + if (sbq_desc->p.skb == NULL) { + netif_err(qdev, probe, qdev->ndev, + "Couldn't get an skb.\n"); + rx_ring->sbq_clean_idx = clean_idx; + return; + } + skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); + map = pci_map_single(qdev->pdev, + sbq_desc->p.skb->data, + rx_ring->sbq_buf_size, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(qdev->pdev, map)) { + netif_err(qdev, ifup, qdev->ndev, + "PCI mapping failed.\n"); + rx_ring->sbq_clean_idx = clean_idx; + dev_kfree_skb_any(sbq_desc->p.skb); + sbq_desc->p.skb = NULL; + return; + } + dma_unmap_addr_set(sbq_desc, mapaddr, map); + dma_unmap_len_set(sbq_desc, maplen, + rx_ring->sbq_buf_size); + *sbq_desc->addr = cpu_to_le64(map); + } + + clean_idx++; + if (clean_idx == rx_ring->sbq_len) + clean_idx = 0; + } + rx_ring->sbq_clean_idx = clean_idx; + rx_ring->sbq_prod_idx += 16; + if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) + rx_ring->sbq_prod_idx = 0; + rx_ring->sbq_free_cnt -= 16; + } + + if (start_idx != clean_idx) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "sbq: updating prod idx = %d.\n", + rx_ring->sbq_prod_idx); + ql_write_db_reg(rx_ring->sbq_prod_idx, + rx_ring->sbq_prod_idx_db_reg); + } +} + +static void ql_update_buffer_queues(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + ql_update_sbq(qdev, rx_ring); + ql_update_lbq(qdev, rx_ring); +} + +/* Unmaps tx buffers. Can be called from send() if a pci mapping + * fails at some stage, or from the interrupt when a tx completes. 
+ */
+static void ql_unmap_send(struct ql_adapter *qdev,
+			  struct tx_ring_desc *tx_ring_desc, int mapped)
+{
+	int i;
+	for (i = 0; i < mapped; i++) {
+		if (i == 0 || (i == 7 && mapped > 7)) {
+			/*
+			 * Unmap the skb->data area, or the
+			 * external sglist (AKA the Outbound
+			 * Address List (OAL)).
+			 * If it's the zeroth element, then it's
+			 * the skb->data area. If it's the 7th
+			 * element and there are more than 6 frags,
+			 * then it's an OAL.
+			 */
+			if (i == 7) {
+				netif_printk(qdev, tx_done, KERN_DEBUG,
+					     qdev->ndev,
+					     "unmapping OAL area.\n");
+			}
+			pci_unmap_single(qdev->pdev,
+					 dma_unmap_addr(&tx_ring_desc->map[i],
+							mapaddr),
+					 dma_unmap_len(&tx_ring_desc->map[i],
+						       maplen),
+					 PCI_DMA_TODEVICE);
+		} else {
+			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
+				     "unmapping frag %d.\n", i);
+			pci_unmap_page(qdev->pdev,
+				       dma_unmap_addr(&tx_ring_desc->map[i],
+						      mapaddr),
+				       dma_unmap_len(&tx_ring_desc->map[i],
+						     maplen), PCI_DMA_TODEVICE);
+		}
+	}
+
+}
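
[Editorial note: a hypothetical debug helper, not part of this patch, that dumps the per-descriptor DMA bookkeeping ql_unmap_send() above walks; map_cnt and the map[] array are fields the driver already maintains.]

/* Hypothetical only: print every DMA mapping recorded for one
 * outbound descriptor, in the same order ql_unmap_send() tears
 * them down.
 */
static void ql_dump_tx_maps(struct ql_adapter *qdev,
			    struct tx_ring_desc *tx_ring_desc)
{
	int i;

	for (i = 0; i < tx_ring_desc->map_cnt; i++)
		netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
			     "map[%d]: addr = %llx, len = %d.\n", i,
			     (unsigned long long)dma_unmap_addr(&tx_ring_desc->map[i], mapaddr),
			     dma_unmap_len(&tx_ring_desc->map[i], maplen));
}
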
+/* Map the buffers for this transmit.  This will return
+ * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
+ */
+static int ql_map_send(struct ql_adapter *qdev,
+		       struct ob_mac_iocb_req *mac_iocb_ptr,
+		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
+{
+	int len = skb_headlen(skb);
+	dma_addr_t map;
+	int frag_idx, err, map_idx = 0;
+	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
+	int frag_cnt = skb_shinfo(skb)->nr_frags;
+
+	if (frag_cnt) {
+		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+			     "frag_cnt = %d.\n", frag_cnt);
+	}
+	/*
+	 * Map the skb buffer first.
+	 */
+	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+	err = pci_dma_mapping_error(qdev->pdev, map);
+	if (err) {
+		netif_err(qdev, tx_queued, qdev->ndev,
+			  "PCI mapping failed with error: %d\n", err);
+
+		return NETDEV_TX_BUSY;
+	}
+
+	tbd->len = cpu_to_le32(len);
+	tbd->addr = cpu_to_le64(map);
+	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
+	map_idx++;
+
+	/*
+	 * This loop fills the remainder of the 8 address descriptors
+	 * in the IOCB. If there are more than 7 fragments, then the
+	 * eighth address desc will point to an external list (OAL).
+	 * When this happens, the remainder of the frags will be stored
+	 * in this list.
+	 */
+	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
+		tbd++;
+		if (frag_idx == 6 && frag_cnt > 7) {
+			/* Let's tack on an sglist.
+			 * Our control block will now
+			 * look like this:
+			 * iocb->seg[0] = skb->data
+			 * iocb->seg[1] = frag[0]
+			 * iocb->seg[2] = frag[1]
+			 * iocb->seg[3] = frag[2]
+			 * iocb->seg[4] = frag[3]
+			 * iocb->seg[5] = frag[4]
+			 * iocb->seg[6] = frag[5]
+			 * iocb->seg[7] = ptr to OAL (external sglist)
+			 * oal->seg[0] = frag[6]
+			 * oal->seg[1] = frag[7]
+			 * oal->seg[2] = frag[8]
+			 * oal->seg[3] = frag[9]
+			 * oal->seg[4] = frag[10]
+			 * etc...
+			 */
+			/* Tack on the OAL in the eighth segment of IOCB. */
+			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
+					     sizeof(struct oal),
+					     PCI_DMA_TODEVICE);
+			err = pci_dma_mapping_error(qdev->pdev, map);
+			if (err) {
+				netif_err(qdev, tx_queued, qdev->ndev,
+					  "PCI mapping outbound address list with error: %d\n",
+					  err);
+				goto map_error;
+			}
+
+			tbd->addr = cpu_to_le64(map);
+			/*
+			 * The length is the number of fragments
+			 * that remain to be mapped times the length
+			 * of our sglist (OAL).
+			 */
+			tbd->len =
+			    cpu_to_le32((sizeof(struct tx_buf_desc) *
+					 (frag_cnt - frag_idx)) | TX_DESC_C);
+			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
+					   map);
+			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+					  sizeof(struct oal));
+			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
+			map_idx++;
+		}
+
+		map =
+		    pci_map_page(qdev->pdev, frag->page,
+				 frag->page_offset, frag->size,
+				 PCI_DMA_TODEVICE);
+
+		err = pci_dma_mapping_error(qdev->pdev, map);
+		if (err) {
+			netif_err(qdev, tx_queued, qdev->ndev,
+				  "PCI mapping frags failed with error: %d.\n",
+				  err);
+			goto map_error;
+		}
+
+		tbd->addr = cpu_to_le64(map);
+		tbd->len = cpu_to_le32(frag->size);
+		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+				  frag->size);
+
+	}
+	/* Save the number of segments we've mapped. */
+	tx_ring_desc->map_cnt = map_idx;
+	/* Terminate the last segment. */
+	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
+	return NETDEV_TX_OK;
+
+map_error:
+	/*
+	 * If the first frag mapping failed, then map_idx will be zero.
+	 * This causes the unmap of the skb->data area. Otherwise
+	 * we pass in the number of frags that mapped successfully
+	 * so they can be unmapped.
+	 */
+	ql_unmap_send(qdev, tx_ring_desc, map_idx);
+	return NETDEV_TX_BUSY;
+}
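
[Editorial note: to make the inline-versus-OAL split above concrete, a sketch under the stated layout; the helper name ql_tx_segs_used is hypothetical and not part of this patch. Up to seven fragments ride in the IOCB beside skb->data; from eight fragments on, slot seven becomes the OAL pointer and fragments six and up spill into the external list.]

/* Hypothetical helper: total tx_buf_desc entries a send consumes,
 * following the layout built by ql_map_send() above.
 */
static int ql_tx_segs_used(int frag_cnt)
{
	if (frag_cnt <= 7)		/* skb->data + all frags inline */
		return 1 + frag_cnt;
	/* skb->data + frags 0-5 + OAL pointer, then the rest in the OAL:
	 * e.g. frag_cnt = 8 uses 8 IOCB slots plus 2 OAL entries. */
	return 8 + (frag_cnt - 6);
}
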
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
+					struct rx_ring *rx_ring,
+					struct ib_mac_iocb_rsp *ib_mac_rsp,
+					u32 length,
+					u16 vlan_id)
+{
+	struct sk_buff *skb;
+	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+	struct skb_frag_struct *rx_frag;
+	int nr_frags;
+	struct napi_struct *napi = &rx_ring->napi;
+
+	napi->dev = qdev->ndev;
+
+	skb = napi_get_frags(napi);
+	if (!skb) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Couldn't get an skb, exiting.\n");
+		rx_ring->rx_dropped++;
+		put_page(lbq_desc->p.pg_chunk.page);
+		return;
+	}
+	prefetch(lbq_desc->p.pg_chunk.va);
+	rx_frag = skb_shinfo(skb)->frags;
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	rx_frag += nr_frags;
+	rx_frag->page = lbq_desc->p.pg_chunk.page;
+	rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
+	rx_frag->size = length;
+
+	skb->len += length;
+	skb->data_len += length;
+	skb->truesize += length;
+	skb_shinfo(skb)->nr_frags++;
+
+	rx_ring->rx_packets++;
+	rx_ring->rx_bytes += length;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb_record_rx_queue(skb, rx_ring->cq_id);
+	if (vlan_id != 0xffff)
+		__vlan_hwaccel_put_tag(skb, vlan_id);
+	napi_gro_frags(napi);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_page(struct ql_adapter *qdev,
+					struct rx_ring *rx_ring,
+					struct ib_mac_iocb_rsp *ib_mac_rsp,
+					u32 length,
+					u16 vlan_id)
+{
+	struct net_device *ndev = qdev->ndev;
+	struct sk_buff *skb = NULL;
+	void *addr;
+	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+	struct napi_struct *napi = &rx_ring->napi;
+
+	skb = netdev_alloc_skb(ndev, length);
+	if (!skb) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Couldn't get an skb, need to unwind.\n");
+		rx_ring->rx_dropped++;
+		put_page(lbq_desc->p.pg_chunk.page);
+		return;
+	}
+
+	addr = lbq_desc->p.pg_chunk.va;
+	prefetch(addr);
+
+	/* Frame error, so drop the packet. */
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+		netif_info(qdev, drv, qdev->ndev,
+			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
+		rx_ring->rx_errors++;
+		goto err_out;
+	}
+
+	/* The max framesize filter on this chip is set higher than
+	 * MTU since FCoE uses 2k frames.
+	 */
+	if (skb->len > ndev->mtu + ETH_HLEN) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Frame too long, dropping.\n");
+		rx_ring->rx_dropped++;
+		goto err_out;
+	}
+	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+		     length);
+	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+				lbq_desc->p.pg_chunk.offset+ETH_HLEN,
+				length-ETH_HLEN);
+	skb->len += length-ETH_HLEN;
+	skb->data_len += length-ETH_HLEN;
+	skb->truesize += length-ETH_HLEN;
+
+	rx_ring->rx_packets++;
+	rx_ring->rx_bytes += skb->len;
+	skb->protocol = eth_type_trans(skb, ndev);
+	skb_checksum_none_assert(skb);
+
+	if ((ndev->features & NETIF_F_RXCSUM) &&
+		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+		/* TCP frame. */
+		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+				     "TCP checksum done!\n");
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+			/* Unfragmented ipv4 UDP frame. */
+			struct iphdr *iph = (struct iphdr *) skb->data;
+			if (!(iph->frag_off &
+				cpu_to_be16(IP_MF|IP_OFFSET))) {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				netif_printk(qdev, rx_status, KERN_DEBUG,
+					     qdev->ndev,
+					     "UDP checksum done!\n");
+			}
+		}
+	}
+
+	skb_record_rx_queue(skb, rx_ring->cq_id);
+	if (vlan_id != 0xffff)
+		__vlan_hwaccel_put_tag(skb, vlan_id);
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+		napi_gro_receive(napi, skb);
+	else
+		netif_receive_skb(skb);
+	return;
+err_out:
+	dev_kfree_skb_any(skb);
+	put_page(lbq_desc->p.pg_chunk.page);
+}
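
[Editorial note: the header-copy-plus-page-chain pattern above recurs in several receive paths; a sketch of the common step, for illustration only. The helper name ql_attach_page_frag is hypothetical and not part of this patch.]

/* Hypothetical helper: append a page chunk as the next fragment and
 * keep the skb byte accounting (len/data_len/truesize) consistent,
 * exactly as the open-coded sequences above do.
 */
static void ql_attach_page_frag(struct sk_buff *skb, struct page *page,
				unsigned int offset, unsigned int len)
{
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
}
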
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+					struct rx_ring *rx_ring,
+					struct ib_mac_iocb_rsp *ib_mac_rsp,
+					u32 length,
+					u16 vlan_id)
+{
+	struct net_device *ndev = qdev->ndev;
+	struct sk_buff *skb = NULL;
+	struct sk_buff *new_skb = NULL;
+	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+
+	skb = sbq_desc->p.skb;
+	/* Allocate new_skb and copy */
+	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
+	if (new_skb == NULL) {
+		netif_err(qdev, probe, qdev->ndev,
+			  "No skb available, drop the packet.\n");
+		rx_ring->rx_dropped++;
+		return;
+	}
+	skb_reserve(new_skb, NET_IP_ALIGN);
+	memcpy(skb_put(new_skb, length), skb->data, length);
+	skb = new_skb;
+
+	/* Frame error, so drop the packet. */
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+		netif_info(qdev, drv, qdev->ndev,
+			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
+		dev_kfree_skb_any(skb);
+		rx_ring->rx_errors++;
+		return;
+	}
+
+	/* loopback self test for ethtool */
+	if (test_bit(QL_SELFTEST, &qdev->flags)) {
+		ql_check_lb_frame(qdev, skb);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	/* The max framesize filter on this chip is set higher than
+	 * MTU since FCoE uses 2k frames.
+	 */
+	if (skb->len > ndev->mtu + ETH_HLEN) {
+		dev_kfree_skb_any(skb);
+		rx_ring->rx_dropped++;
+		return;
+	}
+
+	prefetch(skb->data);
+	skb->dev = ndev;
+	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "%s Multicast.\n",
+			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+	}
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "Promiscuous Packet.\n");
+
+	rx_ring->rx_packets++;
+	rx_ring->rx_bytes += skb->len;
+	skb->protocol = eth_type_trans(skb, ndev);
+	skb_checksum_none_assert(skb);
+
+	/* If rx checksum is on, and there are no
+	 * csum or frame errors.
+	 */
+	if ((ndev->features & NETIF_F_RXCSUM) &&
+		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+		/* TCP frame. */
+		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+				     "TCP checksum done!\n");
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+			/* Unfragmented ipv4 UDP frame. */
+			struct iphdr *iph = (struct iphdr *) skb->data;
+			if (!(iph->frag_off &
+				ntohs(IP_MF|IP_OFFSET))) {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				netif_printk(qdev, rx_status, KERN_DEBUG,
+					     qdev->ndev,
+					     "UDP checksum done!\n");
+			}
+		}
+	}
+
+	skb_record_rx_queue(skb, rx_ring->cq_id);
+	if (vlan_id != 0xffff)
+		__vlan_hwaccel_put_tag(skb, vlan_id);
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+		napi_gro_receive(&rx_ring->napi, skb);
+	else
+		netif_receive_skb(skb);
+}
+
+static void ql_realign_skb(struct sk_buff *skb, int len)
+{
+	void *temp_addr = skb->data;
+
+	/* Undo the skb_reserve(skb,32) we did before
+	 * giving to hardware, and realign data on
+	 * a 2-byte boundary.
+	 */
+	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
+	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
+	skb_copy_to_linear_data(skb, temp_addr,
+		(unsigned int)len);
+}
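
[Editorial note: with the usual values (QLGE_SB_PAD = 32, per the skb_reserve comment above, and NET_IP_ALIGN = 2) the realign shifts skb->data back by 30 bytes, leaving the IP header 4-byte aligned behind the 14-byte Ethernet header. A one-line sketch of the arithmetic, illustration only; ql_realigned_data is hypothetical:]

/* Hypothetical: where the realigned data pointer lands. */
static inline unsigned char *ql_realigned_data(unsigned char *data)
{
	return data - (QLGE_SB_PAD - NET_IP_ALIGN);
}

+/*
+ * This function builds an skb for the given inbound
+ * completion.  It will be rewritten for readability in the near
+ * future, but for now it works well.
+ */
+static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
+				       struct rx_ring *rx_ring,
+				       struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	struct bq_desc *lbq_desc;
+	struct bq_desc *sbq_desc;
+	struct sk_buff *skb = NULL;
+	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+
+	/*
+	 * Handle the header buffer if present.
+	 */
+	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
+			ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "Header of %d bytes in small buffer.\n", hdr_len);
+		/*
+		 * Headers fit nicely into a small buffer.
+		 */
+		sbq_desc = ql_get_curr_sbuf(rx_ring);
+		pci_unmap_single(qdev->pdev,
+				dma_unmap_addr(sbq_desc, mapaddr),
+				dma_unmap_len(sbq_desc, maplen),
+				PCI_DMA_FROMDEVICE);
+		skb = sbq_desc->p.skb;
+		ql_realign_skb(skb, hdr_len);
+		skb_put(skb, hdr_len);
+		sbq_desc->p.skb = NULL;
+	}
+
+	/*
+	 * Handle the data buffer(s).
+	 */
+	if (unlikely(!length)) {	/* Is there data too?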
*/ + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "No Data buffer in this packet.\n"); + return skb; + } + + if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Headers in small, data of %d bytes in small, combine them.\n", + length); + /* + * Data is less than small buffer size so it's + * stuffed in a small buffer. + * For this case we append the data + * from the "data" small buffer to the "header" small + * buffer. + */ + sbq_desc = ql_get_curr_sbuf(rx_ring); + pci_dma_sync_single_for_cpu(qdev->pdev, + dma_unmap_addr + (sbq_desc, mapaddr), + dma_unmap_len + (sbq_desc, maplen), + PCI_DMA_FROMDEVICE); + memcpy(skb_put(skb, length), + sbq_desc->p.skb->data, length); + pci_dma_sync_single_for_device(qdev->pdev, + dma_unmap_addr + (sbq_desc, + mapaddr), + dma_unmap_len + (sbq_desc, + maplen), + PCI_DMA_FROMDEVICE); + } else { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "%d bytes in a single small buffer.\n", + length); + sbq_desc = ql_get_curr_sbuf(rx_ring); + skb = sbq_desc->p.skb; + ql_realign_skb(skb, length); + skb_put(skb, length); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(sbq_desc, + mapaddr), + dma_unmap_len(sbq_desc, + maplen), + PCI_DMA_FROMDEVICE); + sbq_desc->p.skb = NULL; + } + } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Header in small, %d bytes in large. Chain large to small!\n", + length); + /* + * The data is in a single large buffer. We + * chain it to the header buffer's skb and let + * it rip. + */ + lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Chaining page at offset = %d, for %d bytes to skb.\n", + lbq_desc->p.pg_chunk.offset, length); + skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + length); + skb->len += length; + skb->data_len += length; + skb->truesize += length; + } else { + /* + * The headers and data are in a single large buffer. We + * copy it to a new skb and let it go. This can happen with + * jumbo mtu on a non-TCP/UDP frame. + */ + lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + skb = netdev_alloc_skb(qdev->ndev, length); + if (skb == NULL) { + netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev, + "No skb available, drop the packet.\n"); + return NULL; + } + pci_unmap_page(qdev->pdev, + dma_unmap_addr(lbq_desc, + mapaddr), + dma_unmap_len(lbq_desc, maplen), + PCI_DMA_FROMDEVICE); + skb_reserve(skb, NET_IP_ALIGN); + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", + length); + skb_fill_page_desc(skb, 0, + lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + length); + skb->len += length; + skb->data_len += length; + skb->truesize += length; + length -= length; + __pskb_pull_tail(skb, + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? + VLAN_ETH_HLEN : ETH_HLEN); + } + } else { + /* + * The data is in a chain of large buffers + * pointed to by a small buffer. We loop + * thru and chain them to the our small header + * buffer's skb. + * frags: There are 18 max frags and our small + * buffer will hold 32 of them. The thing is, + * we'll use 3 max for our 9000 byte jumbo + * frames. If the MTU goes up we could + * eventually be in trouble. 
+	 */
+		int size, i = 0;
+		sbq_desc = ql_get_curr_sbuf(rx_ring);
+		pci_unmap_single(qdev->pdev,
+				dma_unmap_addr(sbq_desc, mapaddr),
+				dma_unmap_len(sbq_desc, maplen),
+				PCI_DMA_FROMDEVICE);
+		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
+			/*
+			 * This is a non-TCP/UDP IP frame, so
+			 * the headers aren't split into a small
+			 * buffer. We have to use the small buffer
+			 * that contains our sg list as our skb to
+			 * send upstairs. Copy the sg list here to
+			 * a local buffer and use it to find the
+			 * pages to chain.
+			 */
+			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+				     "%d bytes of headers & data in chain of large.\n",
+				     length);
+			skb = sbq_desc->p.skb;
+			sbq_desc->p.skb = NULL;
+			skb_reserve(skb, NET_IP_ALIGN);
+		}
+		while (length > 0) {
+			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+			size = (length < rx_ring->lbq_buf_size) ? length :
+				rx_ring->lbq_buf_size;
+
+			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+				     "Adding page %d to skb for %d bytes.\n",
+				     i, size);
+			skb_fill_page_desc(skb, i,
+					   lbq_desc->p.pg_chunk.page,
+					   lbq_desc->p.pg_chunk.offset,
+					   size);
+			skb->len += size;
+			skb->data_len += size;
+			skb->truesize += size;
+			length -= size;
+			i++;
+		}
+		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+				VLAN_ETH_HLEN : ETH_HLEN);
+	}
+	return skb;
+}
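
[Editorial note: the chain loop above consumes one large buffer per lbq_buf_size bytes; with a hypothetical 4 KiB large-buffer size, a 9000-byte jumbo frame chains three pages, matching the "3 max" figure in the comment earlier. A one-line sketch, illustration only; ql_lbufs_for_len is hypothetical:]

/* Hypothetical: number of large buffers a frame of this length
 * will chain, mirroring the while (length > 0) loop above.
 */
static inline int ql_lbufs_for_len(struct rx_ring *rx_ring, u32 length)
{
	return DIV_ROUND_UP(length, rx_ring->lbq_buf_size);
}
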
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
+					struct rx_ring *rx_ring,
+					struct ib_mac_iocb_rsp *ib_mac_rsp,
+					u16 vlan_id)
+{
+	struct net_device *ndev = qdev->ndev;
+	struct sk_buff *skb = NULL;
+
+	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
+	if (unlikely(!skb)) {
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "No skb available, drop packet.\n");
+		rx_ring->rx_dropped++;
+		return;
+	}
+
+	/* Frame error, so drop the packet. */
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+		netif_info(qdev, drv, qdev->ndev,
+			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
+		dev_kfree_skb_any(skb);
+		rx_ring->rx_errors++;
+		return;
+	}
+
+	/* The max framesize filter on this chip is set higher than
+	 * MTU since FCoE uses 2k frames.
+	 */
+	if (skb->len > ndev->mtu + ETH_HLEN) {
+		dev_kfree_skb_any(skb);
+		rx_ring->rx_dropped++;
+		return;
+	}
+
+	/* loopback self test for ethtool */
+	if (test_bit(QL_SELFTEST, &qdev->flags)) {
+		ql_check_lb_frame(qdev, skb);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	prefetch(skb->data);
+	skb->dev = ndev;
+	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
+			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+		rx_ring->rx_multicast++;
+	}
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "Promiscuous Packet.\n");
+	}
+
+	skb->protocol = eth_type_trans(skb, ndev);
+	skb_checksum_none_assert(skb);
+
+	/* If rx checksum is on, and there are no
+	 * csum or frame errors.
+	 */
+	if ((ndev->features & NETIF_F_RXCSUM) &&
+		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+		/* TCP frame. */
+		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+				     "TCP checksum done!\n");
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+			/* Unfragmented ipv4 UDP frame. */
+			struct iphdr *iph = (struct iphdr *) skb->data;
+			if (!(iph->frag_off &
+				ntohs(IP_MF|IP_OFFSET))) {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+					     "UDP checksum done!\n");
+			}
+		}
+	}
+
+	rx_ring->rx_packets++;
+	rx_ring->rx_bytes += skb->len;
+	skb_record_rx_queue(skb, rx_ring->cq_id);
+	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+		__vlan_hwaccel_put_tag(skb, vlan_id);
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+		napi_gro_receive(&rx_ring->napi, skb);
+	else
+		netif_receive_skb(skb);
+}
+
+/* Process an inbound completion from an rx ring. */
+static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
+					struct rx_ring *rx_ring,
+					struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+			((le16_to_cpu(ib_mac_rsp->vlan_id) &
+			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
+
+	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+		/* The data and headers are split into
+		 * separate buffers.
+		 */
+		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+						vlan_id);
+	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+		/* The data fit in a single small buffer.
+		 * Allocate a new skb, copy the data and
+		 * return the buffer to the free pool.
+		 */
+		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
+						length, vlan_id);
+	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
+		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
+		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
+		/* TCP packet in a page chunk that's been checksummed.
+		 * Tack it on to our GRO skb and let it go.
+		 */
+		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
+						length, vlan_id);
+	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+		/* Non-TCP packet in a page chunk. Allocate an
+		 * skb, tack it on frags, and send it up.
+		 */
+		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
+						length, vlan_id);
+	} else {
+		/* Non-TCP/UDP large frames that span multiple buffers
+		 * can be processed correctly by the split frame logic.
+		 */
+		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+						vlan_id);
+	}
+
+	return (unsigned long)length;
+}
+
+/* Process an outbound completion from an rx ring.
*/ +static void ql_process_mac_tx_intr(struct ql_adapter *qdev, + struct ob_mac_iocb_rsp *mac_rsp) +{ + struct tx_ring *tx_ring; + struct tx_ring_desc *tx_ring_desc; + + QL_DUMP_OB_MAC_RSP(mac_rsp); + tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; + tx_ring_desc = &tx_ring->q[mac_rsp->tid]; + ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); + tx_ring->tx_bytes += (tx_ring_desc->skb)->len; + tx_ring->tx_packets++; + dev_kfree_skb(tx_ring_desc->skb); + tx_ring_desc->skb = NULL; + + if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | + OB_MAC_IOCB_RSP_S | + OB_MAC_IOCB_RSP_L | + OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) { + if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) { + netif_warn(qdev, tx_done, qdev->ndev, + "Total descriptor length did not match transfer length.\n"); + } + if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) { + netif_warn(qdev, tx_done, qdev->ndev, + "Frame too short to be valid, not sent.\n"); + } + if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) { + netif_warn(qdev, tx_done, qdev->ndev, + "Frame too long, but sent anyway.\n"); + } + if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) { + netif_warn(qdev, tx_done, qdev->ndev, + "PCI backplane error. Frame not sent.\n"); + } + } + atomic_inc(&tx_ring->tx_count); +} + +/* Fire up a handler to reset the MPI processor. */ +void ql_queue_fw_error(struct ql_adapter *qdev) +{ + ql_link_off(qdev); + queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); +} + +void ql_queue_asic_error(struct ql_adapter *qdev) +{ + ql_link_off(qdev); + ql_disable_interrupts(qdev); + /* Clear adapter up bit to signal the recovery + * process that it shouldn't kill the reset worker + * thread + */ + clear_bit(QL_ADAPTER_UP, &qdev->flags); + /* Set asic recovery bit to indicate reset process that we are + * in fatal error recovery process rather than normal close + */ + set_bit(QL_ASIC_RECOVERY, &qdev->flags); + queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); +} + +static void ql_process_chip_ae_intr(struct ql_adapter *qdev, + struct ib_ae_iocb_rsp *ib_ae_rsp) +{ + switch (ib_ae_rsp->event) { + case MGMT_ERR_EVENT: + netif_err(qdev, rx_err, qdev->ndev, + "Management Processor Fatal Error.\n"); + ql_queue_fw_error(qdev); + return; + + case CAM_LOOKUP_ERR_EVENT: + netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n"); + netdev_err(qdev->ndev, "This event shouldn't occur.\n"); + ql_queue_asic_error(qdev); + return; + + case SOFT_ECC_ERROR_EVENT: + netdev_err(qdev->ndev, "Soft ECC error detected.\n"); + ql_queue_asic_error(qdev); + break; + + case PCI_ERR_ANON_BUF_RD: + netdev_err(qdev->ndev, "PCI error occurred when reading " + "anonymous buffers from rx_ring %d.\n", + ib_ae_rsp->q_id); + ql_queue_asic_error(qdev); + break; + + default: + netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n", + ib_ae_rsp->event); + ql_queue_asic_error(qdev); + break; + } +} + +static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) +{ + struct ql_adapter *qdev = rx_ring->qdev; + u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + struct ob_mac_iocb_rsp *net_rsp = NULL; + int count = 0; + + struct tx_ring *tx_ring; + /* While there are entries in the completion queue. 
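+ * (Editor's note, added: the rmb() in the loop below keeps the CPU
+ * from reading a completion entry's contents before the producer
+ * index that published it.)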
*/ + while (prod != rx_ring->cnsmr_idx) { + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "cq_id = %d, prod = %d, cnsmr = %d.\n.", + rx_ring->cq_id, prod, rx_ring->cnsmr_idx); + + net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; + rmb(); + switch (net_rsp->opcode) { + + case OPCODE_OB_MAC_TSO_IOCB: + case OPCODE_OB_MAC_IOCB: + ql_process_mac_tx_intr(qdev, net_rsp); + break; + default: + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Hit default case, not handled! dropping the packet, opcode = %x.\n", + net_rsp->opcode); + } + count++; + ql_update_cq(rx_ring); + prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + } + if (!net_rsp) + return 0; + ql_write_cq_idx(rx_ring); + tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; + if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { + if (atomic_read(&tx_ring->queue_stopped) && + (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) + /* + * The queue got stopped because the tx_ring was full. + * Wake it up, because it's now at least 25% empty. + */ + netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); + } + + return count; +} + +static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) +{ + struct ql_adapter *qdev = rx_ring->qdev; + u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + struct ql_net_rsp_iocb *net_rsp; + int count = 0; + + /* While there are entries in the completion queue. */ + while (prod != rx_ring->cnsmr_idx) { + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "cq_id = %d, prod = %d, cnsmr = %d.\n.", + rx_ring->cq_id, prod, rx_ring->cnsmr_idx); + + net_rsp = rx_ring->curr_entry; + rmb(); + switch (net_rsp->opcode) { + case OPCODE_IB_MAC_IOCB: + ql_process_mac_rx_intr(qdev, rx_ring, + (struct ib_mac_iocb_rsp *) + net_rsp); + break; + + case OPCODE_IB_AE_IOCB: + ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) + net_rsp); + break; + default: + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Hit default case, not handled! dropping the packet, opcode = %x.\n", + net_rsp->opcode); + break; + } + count++; + ql_update_cq(rx_ring); + prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + if (count == budget) + break; + } + ql_update_buffer_queues(qdev, rx_ring); + ql_write_cq_idx(rx_ring); + return count; +} + +static int ql_napi_poll_msix(struct napi_struct *napi, int budget) +{ + struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); + struct ql_adapter *qdev = rx_ring->qdev; + struct rx_ring *trx_ring; + int i, work_done = 0; + struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id); + + /* Service the TX rings first. They start + * right after the RSS rings. */ + for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { + trx_ring = &qdev->rx_ring[i]; + /* If this TX completion ring belongs to this vector and + * it's not empty then service it. + */ + if ((ctx->irq_mask & (1 << trx_ring->cq_id)) && + (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) != + trx_ring->cnsmr_idx)) { + netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, + "%s: Servicing TX completion ring %d.\n", + __func__, trx_ring->cq_id); + ql_clean_outbound_rx_ring(trx_ring); + } + } + + /* + * Now service the RSS ring if it's active. 
+ */ + if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != + rx_ring->cnsmr_idx) { + netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, + "%s: Servicing RX completion ring %d.\n", + __func__, rx_ring->cq_id); + work_done = ql_clean_inbound_rx_ring(rx_ring, budget); + } + + if (work_done < budget) { + napi_complete(napi); + ql_enable_completion_interrupt(qdev, rx_ring->irq); + } + return work_done; +} + +static void qlge_vlan_mode(struct net_device *ndev, u32 features) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + if (features & NETIF_F_HW_VLAN_RX) { + netif_printk(qdev, ifup, KERN_DEBUG, ndev, + "Turning on VLAN in NIC_RCV_CFG.\n"); + ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | + NIC_RCV_CFG_VLAN_MATCH_AND_NON); + } else { + netif_printk(qdev, ifup, KERN_DEBUG, ndev, + "Turning off VLAN in NIC_RCV_CFG.\n"); + ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); + } +} + +static u32 qlge_fix_features(struct net_device *ndev, u32 features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_RX) + features |= NETIF_F_HW_VLAN_TX; + else + features &= ~NETIF_F_HW_VLAN_TX; + + return features; +} + +static int qlge_set_features(struct net_device *ndev, u32 features) +{ + u32 changed = ndev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_RX) + qlge_vlan_mode(ndev, features); + + return 0; +} + +static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) +{ + u32 enable_bit = MAC_ADDR_E; + + if (ql_set_mac_addr_reg + (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init vlan address.\n"); + } +} + +static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status; + + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return; + + __qlge_vlan_rx_add_vid(qdev, vid); + set_bit(vid, qdev->active_vlans); + + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); +} + +static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) +{ + u32 enable_bit = 0; + + if (ql_set_mac_addr_reg + (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to clear vlan address.\n"); + } +} + +static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status; + + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return; + + __qlge_vlan_rx_kill_vid(qdev, vid); + clear_bit(vid, qdev->active_vlans); + + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); +} + +static void qlge_restore_vlan(struct ql_adapter *qdev) +{ + int status; + u16 vid; + + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return; + + for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID) + __qlge_vlan_rx_add_vid(qdev, vid); + + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); +} + +/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */ +static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) +{ + struct rx_ring *rx_ring = dev_id; + napi_schedule(&rx_ring->napi); + return IRQ_HANDLED; +} + +/* This handles a fatal error, MPI activity, and the default + * rx_ring in an MSI-X multiple vector environment. + * In MSI/Legacy environment it also process the rest of + * the rx_rings. 
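+ * (Editor's note, added: the irq_cnt test at the top of the handler
+ * is what lets a shared legacy interrupt bail out with IRQ_NONE when
+ * the assertion was raised for another device on the same line.)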
+ */ +static irqreturn_t qlge_isr(int irq, void *dev_id) +{ + struct rx_ring *rx_ring = dev_id; + struct ql_adapter *qdev = rx_ring->qdev; + struct intr_context *intr_context = &qdev->intr_context[0]; + u32 var; + int work_done = 0; + + spin_lock(&qdev->hw_lock); + if (atomic_read(&qdev->intr_context[0].irq_cnt)) { + netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, + "Shared Interrupt, Not ours!\n"); + spin_unlock(&qdev->hw_lock); + return IRQ_NONE; + } + spin_unlock(&qdev->hw_lock); + + var = ql_disable_completion_interrupt(qdev, intr_context->intr); + + /* + * Check for fatal error. + */ + if (var & STS_FE) { + ql_queue_asic_error(qdev); + netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var); + var = ql_read32(qdev, ERR_STS); + netdev_err(qdev->ndev, "Resetting chip. " + "Error Status Register = 0x%x\n", var); + return IRQ_HANDLED; + } + + /* + * Check MPI processor activity. + */ + if ((var & STS_PI) && + (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { + /* + * We've got an async event or mailbox completion. + * Handle it and clear the source of the interrupt. + */ + netif_err(qdev, intr, qdev->ndev, + "Got MPI processor interrupt.\n"); + ql_disable_completion_interrupt(qdev, intr_context->intr); + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + queue_delayed_work_on(smp_processor_id(), + qdev->workqueue, &qdev->mpi_work, 0); + work_done++; + } + + /* + * Get the bit-mask that shows the active queues for this + * pass. Compare it to the queues that this irq services + * and call napi if there's a match. + */ + var = ql_read32(qdev, ISR1); + if (var & intr_context->irq_mask) { + netif_info(qdev, intr, qdev->ndev, + "Waking handler for rx_ring[0].\n"); + ql_disable_completion_interrupt(qdev, intr_context->intr); + napi_schedule(&rx_ring->napi); + work_done++; + } + ql_enable_completion_interrupt(qdev, intr_context->intr); + return work_done ? 
IRQ_HANDLED : IRQ_NONE;
+}
+
+static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+
+	if (skb_is_gso(skb)) {
+		int err;
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+
+		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
+		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+		mac_iocb_ptr->total_hdrs_len =
+		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
+		mac_iocb_ptr->net_trans_offset =
+		    cpu_to_le16(skb_network_offset(skb) |
+				skb_transport_offset(skb)
+				<< OB_MAC_TRANSPORT_HDR_SHIFT);
+		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
+		if (likely(skb->protocol == htons(ETH_P_IP))) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->check = 0;
+			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
+			tcp_hdr(skb)->check =
+			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+					     &ipv6_hdr(skb)->daddr,
+					     0, IPPROTO_TCP, 0);
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static void ql_hw_csum_setup(struct sk_buff *skb,
+			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+	int len;
+	struct iphdr *iph = ip_hdr(skb);
+	__sum16 *check;
+	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+	mac_iocb_ptr->net_trans_offset =
+		cpu_to_le16(skb_network_offset(skb) |
+		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
+
+	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
+	if (likely(iph->protocol == IPPROTO_TCP)) {
+		check = &(tcp_hdr(skb)->check);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
+		mac_iocb_ptr->total_hdrs_len =
+		    cpu_to_le16(skb_transport_offset(skb) +
+				(tcp_hdr(skb)->doff << 2));
+	} else {
+		check = &(udp_hdr(skb)->check);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
+		mac_iocb_ptr->total_hdrs_len =
+		    cpu_to_le16(skb_transport_offset(skb) +
+				sizeof(struct udphdr));
+	}
+	*check = ~csum_tcpudp_magic(iph->saddr,
+				    iph->daddr, len, iph->protocol, 0);
+}
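
[Editorial note: a sketch of what the checksum offload above expects, illustration only; ql_csum_seed_example is hypothetical and not part of this patch. The driver seeds the L4 checksum field with the complemented pseudo-header sum, and the chip then folds in the one's-complement sum of the payload starting at net_trans_offset.]

/* Hypothetical: the seed ql_hw_csum_setup() stores into the TCP/UDP
 * checksum field for an IPv4 frame whose L4 segment is l4_len bytes.
 */
static __sum16 ql_csum_seed_example(struct iphdr *iph, int l4_len)
{
	return ~csum_tcpudp_magic(iph->saddr, iph->daddr,
				  l4_len, iph->protocol, 0);
}

+static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct tx_ring_desc *tx_ring_desc;
+	struct ob_mac_iocb_req *mac_iocb_ptr;
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int tso;
+	struct tx_ring *tx_ring;
+	u32 tx_ring_idx = (u32) skb->queue_mapping;
+
+	tx_ring = &qdev->tx_ring[tx_ring_idx];
+
+	if (skb_padto(skb, ETH_ZLEN))
+		return NETDEV_TX_OK;
+
+	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+		netif_info(qdev, tx_queued, qdev->ndev,
+			   "%s: shutting down tx queue %d due to lack of resources.\n",
+			   __func__, tx_ring_idx);
+		netif_stop_subqueue(ndev, tx_ring->wq_id);
+		atomic_inc(&tx_ring->queue_stopped);
+		tx_ring->tx_errors++;
+		return NETDEV_TX_BUSY;
+	}
+	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
+	mac_iocb_ptr = tx_ring_desc->queue_entry;
+	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
+
+	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
+	mac_iocb_ptr->tid = tx_ring_desc->index;
+	/* We use the upper 32-bits to store the tx queue for this IO.
+	 * When we get the completion we can use it to establish the context.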
+ */ + mac_iocb_ptr->txq_idx = tx_ring_idx; + tx_ring_desc->skb = skb; + + mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); + + if (vlan_tx_tag_present(skb)) { + netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, + "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); + mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; + mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); + } + tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) { + ql_hw_csum_setup(skb, + (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); + } + if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != + NETDEV_TX_OK) { + netif_err(qdev, tx_queued, qdev->ndev, + "Could not map the segments.\n"); + tx_ring->tx_errors++; + return NETDEV_TX_BUSY; + } + QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); + tx_ring->prod_idx++; + if (tx_ring->prod_idx == tx_ring->wq_len) + tx_ring->prod_idx = 0; + wmb(); + + ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); + netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, + "tx queued, slot %d, len %d\n", + tx_ring->prod_idx, skb->len); + + atomic_dec(&tx_ring->tx_count); + return NETDEV_TX_OK; +} + + +static void ql_free_shadow_space(struct ql_adapter *qdev) +{ + if (qdev->rx_ring_shadow_reg_area) { + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->rx_ring_shadow_reg_area, + qdev->rx_ring_shadow_reg_dma); + qdev->rx_ring_shadow_reg_area = NULL; + } + if (qdev->tx_ring_shadow_reg_area) { + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->tx_ring_shadow_reg_area, + qdev->tx_ring_shadow_reg_dma); + qdev->tx_ring_shadow_reg_area = NULL; + } +} + +static int ql_alloc_shadow_space(struct ql_adapter *qdev) +{ + qdev->rx_ring_shadow_reg_area = + pci_alloc_consistent(qdev->pdev, + PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); + if (qdev->rx_ring_shadow_reg_area == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Allocation of RX shadow space failed.\n"); + return -ENOMEM; + } + memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE); + qdev->tx_ring_shadow_reg_area = + pci_alloc_consistent(qdev->pdev, PAGE_SIZE, + &qdev->tx_ring_shadow_reg_dma); + if (qdev->tx_ring_shadow_reg_area == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Allocation of TX shadow space failed.\n"); + goto err_wqp_sh_area; + } + memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE); + return 0; + +err_wqp_sh_area: + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->rx_ring_shadow_reg_area, + qdev->rx_ring_shadow_reg_dma); + return -ENOMEM; +} + +static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) +{ + struct tx_ring_desc *tx_ring_desc; + int i; + struct ob_mac_iocb_req *mac_iocb_ptr; + + mac_iocb_ptr = tx_ring->wq_base; + tx_ring_desc = tx_ring->q; + for (i = 0; i < tx_ring->wq_len; i++) { + tx_ring_desc->index = i; + tx_ring_desc->skb = NULL; + tx_ring_desc->queue_entry = mac_iocb_ptr; + mac_iocb_ptr++; + tx_ring_desc++; + } + atomic_set(&tx_ring->tx_count, tx_ring->wq_len); + atomic_set(&tx_ring->queue_stopped, 0); +} + +static void ql_free_tx_resources(struct ql_adapter *qdev, + struct tx_ring *tx_ring) +{ + if (tx_ring->wq_base) { + pci_free_consistent(qdev->pdev, tx_ring->wq_size, + tx_ring->wq_base, tx_ring->wq_base_dma); + tx_ring->wq_base = NULL; + } + kfree(tx_ring->q); + tx_ring->q = NULL; +} + +static int ql_alloc_tx_resources(struct ql_adapter *qdev, + struct tx_ring *tx_ring) +{ + tx_ring->wq_base = + pci_alloc_consistent(qdev->pdev, 
tx_ring->wq_size, + &tx_ring->wq_base_dma); + + if ((tx_ring->wq_base == NULL) || + tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { + netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); + return -ENOMEM; + } + tx_ring->q = + kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); + if (tx_ring->q == NULL) + goto err; + + return 0; +err: + pci_free_consistent(qdev->pdev, tx_ring->wq_size, + tx_ring->wq_base, tx_ring->wq_base_dma); + return -ENOMEM; +} + +static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + struct bq_desc *lbq_desc; + + uint32_t curr_idx, clean_idx; + + curr_idx = rx_ring->lbq_curr_idx; + clean_idx = rx_ring->lbq_clean_idx; + while (curr_idx != clean_idx) { + lbq_desc = &rx_ring->lbq[curr_idx]; + + if (lbq_desc->p.pg_chunk.last_flag) { + pci_unmap_page(qdev->pdev, + lbq_desc->p.pg_chunk.map, + ql_lbq_block_size(qdev), + PCI_DMA_FROMDEVICE); + lbq_desc->p.pg_chunk.last_flag = 0; + } + + put_page(lbq_desc->p.pg_chunk.page); + lbq_desc->p.pg_chunk.page = NULL; + + if (++curr_idx == rx_ring->lbq_len) + curr_idx = 0; + + } +} + +static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + int i; + struct bq_desc *sbq_desc; + + for (i = 0; i < rx_ring->sbq_len; i++) { + sbq_desc = &rx_ring->sbq[i]; + if (sbq_desc == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "sbq_desc %d is NULL.\n", i); + return; + } + if (sbq_desc->p.skb) { + pci_unmap_single(qdev->pdev, + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(sbq_desc->p.skb); + sbq_desc->p.skb = NULL; + } + } +} + +/* Free all large and small rx buffers associated + * with the completion queues for this device. + */ +static void ql_free_rx_buffers(struct ql_adapter *qdev) +{ + int i; + struct rx_ring *rx_ring; + + for (i = 0; i < qdev->rx_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + if (rx_ring->lbq) + ql_free_lbq_buffers(qdev, rx_ring); + if (rx_ring->sbq) + ql_free_sbq_buffers(qdev, rx_ring); + } +} + +static void ql_alloc_rx_buffers(struct ql_adapter *qdev) +{ + struct rx_ring *rx_ring; + int i; + + for (i = 0; i < qdev->rx_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + if (rx_ring->type != TX_Q) + ql_update_buffer_queues(qdev, rx_ring); + } +} + +static void ql_init_lbq_ring(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + int i; + struct bq_desc *lbq_desc; + __le64 *bq = rx_ring->lbq_base; + + memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc)); + for (i = 0; i < rx_ring->lbq_len; i++) { + lbq_desc = &rx_ring->lbq[i]; + memset(lbq_desc, 0, sizeof(*lbq_desc)); + lbq_desc->index = i; + lbq_desc->addr = bq; + bq++; + } +} + +static void ql_init_sbq_ring(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + int i; + struct bq_desc *sbq_desc; + __le64 *bq = rx_ring->sbq_base; + + memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc)); + for (i = 0; i < rx_ring->sbq_len; i++) { + sbq_desc = &rx_ring->sbq[i]; + memset(sbq_desc, 0, sizeof(*sbq_desc)); + sbq_desc->index = i; + sbq_desc->addr = bq; + bq++; + } +} + +static void ql_free_rx_resources(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + /* Free the small buffer queue. */ + if (rx_ring->sbq_base) { + pci_free_consistent(qdev->pdev, + rx_ring->sbq_size, + rx_ring->sbq_base, rx_ring->sbq_base_dma); + rx_ring->sbq_base = NULL; + } + + /* Free the small buffer queue control blocks. */ + kfree(rx_ring->sbq); + rx_ring->sbq = NULL; + + /* Free the large buffer queue. 
*/ + if (rx_ring->lbq_base) { + pci_free_consistent(qdev->pdev, + rx_ring->lbq_size, + rx_ring->lbq_base, rx_ring->lbq_base_dma); + rx_ring->lbq_base = NULL; + } + + /* Free the large buffer queue control blocks. */ + kfree(rx_ring->lbq); + rx_ring->lbq = NULL; + + /* Free the rx queue. */ + if (rx_ring->cq_base) { + pci_free_consistent(qdev->pdev, + rx_ring->cq_size, + rx_ring->cq_base, rx_ring->cq_base_dma); + rx_ring->cq_base = NULL; + } +} + +/* Allocate queues and buffers for this completions queue based + * on the values in the parameter structure. */ +static int ql_alloc_rx_resources(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + + /* + * Allocate the completion queue for this rx_ring. + */ + rx_ring->cq_base = + pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, + &rx_ring->cq_base_dma); + + if (rx_ring->cq_base == NULL) { + netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); + return -ENOMEM; + } + + if (rx_ring->sbq_len) { + /* + * Allocate small buffer queue. + */ + rx_ring->sbq_base = + pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, + &rx_ring->sbq_base_dma); + + if (rx_ring->sbq_base == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Small buffer queue allocation failed.\n"); + goto err_mem; + } + + /* + * Allocate small buffer queue control blocks. + */ + rx_ring->sbq = + kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc), + GFP_KERNEL); + if (rx_ring->sbq == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Small buffer queue control block allocation failed.\n"); + goto err_mem; + } + + ql_init_sbq_ring(qdev, rx_ring); + } + + if (rx_ring->lbq_len) { + /* + * Allocate large buffer queue. + */ + rx_ring->lbq_base = + pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, + &rx_ring->lbq_base_dma); + + if (rx_ring->lbq_base == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Large buffer queue allocation failed.\n"); + goto err_mem; + } + /* + * Allocate large buffer queue control blocks. + */ + rx_ring->lbq = + kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc), + GFP_KERNEL); + if (rx_ring->lbq == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Large buffer queue control block allocation failed.\n"); + goto err_mem; + } + + ql_init_lbq_ring(qdev, rx_ring); + } + + return 0; + +err_mem: + ql_free_rx_resources(qdev, rx_ring); + return -ENOMEM; +} + +static void ql_tx_ring_clean(struct ql_adapter *qdev) +{ + struct tx_ring *tx_ring; + struct tx_ring_desc *tx_ring_desc; + int i, j; + + /* + * Loop through all queues and free + * any resources. + */ + for (j = 0; j < qdev->tx_ring_count; j++) { + tx_ring = &qdev->tx_ring[j]; + for (i = 0; i < tx_ring->wq_len; i++) { + tx_ring_desc = &tx_ring->q[i]; + if (tx_ring_desc && tx_ring_desc->skb) { + netif_err(qdev, ifdown, qdev->ndev, + "Freeing lost SKB %p, from queue %d, index %d.\n", + tx_ring_desc->skb, j, + tx_ring_desc->index); + ql_unmap_send(qdev, tx_ring_desc, + tx_ring_desc->map_cnt); + dev_kfree_skb(tx_ring_desc->skb); + tx_ring_desc->skb = NULL; + } + } + } +} + +static void ql_free_mem_resources(struct ql_adapter *qdev) +{ + int i; + + for (i = 0; i < qdev->tx_ring_count; i++) + ql_free_tx_resources(qdev, &qdev->tx_ring[i]); + for (i = 0; i < qdev->rx_ring_count; i++) + ql_free_rx_resources(qdev, &qdev->rx_ring[i]); + ql_free_shadow_space(qdev); +} + +static int ql_alloc_mem_resources(struct ql_adapter *qdev) +{ + int i; + + /* Allocate space for our shadow registers and such. 
*/ + if (ql_alloc_shadow_space(qdev)) + return -ENOMEM; + + for (i = 0; i < qdev->rx_ring_count; i++) { + if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { + netif_err(qdev, ifup, qdev->ndev, + "RX resource allocation failed.\n"); + goto err_mem; + } + } + /* Allocate tx queue resources */ + for (i = 0; i < qdev->tx_ring_count; i++) { + if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { + netif_err(qdev, ifup, qdev->ndev, + "TX resource allocation failed.\n"); + goto err_mem; + } + } + return 0; + +err_mem: + ql_free_mem_resources(qdev); + return -ENOMEM; +} + +/* Set up the rx ring control block and pass it to the chip. + * The control block is defined as + * "Completion Queue Initialization Control Block", or cqicb. + */ +static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + struct cqicb *cqicb = &rx_ring->cqicb; + void *shadow_reg = qdev->rx_ring_shadow_reg_area + + (rx_ring->cq_id * RX_RING_SHADOW_SPACE); + u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + + (rx_ring->cq_id * RX_RING_SHADOW_SPACE); + void __iomem *doorbell_area = + qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); + int err = 0; + u16 bq_len; + u64 tmp; + __le64 *base_indirect_ptr; + int page_entries; + + /* Set up the shadow registers for this ring. */ + rx_ring->prod_idx_sh_reg = shadow_reg; + rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; + *rx_ring->prod_idx_sh_reg = 0; + shadow_reg += sizeof(u64); + shadow_reg_dma += sizeof(u64); + rx_ring->lbq_base_indirect = shadow_reg; + rx_ring->lbq_base_indirect_dma = shadow_reg_dma; + shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); + shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); + rx_ring->sbq_base_indirect = shadow_reg; + rx_ring->sbq_base_indirect_dma = shadow_reg_dma; + + /* PCI doorbell mem area + 0x00 for consumer index register */ + rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area; + rx_ring->cnsmr_idx = 0; + rx_ring->curr_entry = rx_ring->cq_base; + + /* PCI doorbell mem area + 0x04 for valid register */ + rx_ring->valid_db_reg = doorbell_area + 0x04; + + /* PCI doorbell mem area + 0x18 for large buffer consumer */ + rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18); + + /* PCI doorbell mem area + 0x1c */ + rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c); + + memset((void *)cqicb, 0, sizeof(struct cqicb)); + cqicb->msix_vect = rx_ring->irq; + + bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; + cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); + + cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma); + + cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma); + + /* + * Set up the control block load flags. + */ + cqicb->flags = FLAGS_LC | /* Load queue base address */ + FLAGS_LV | /* Load MSI-X vector */ + FLAGS_LI; /* Load irq delay values */ + if (rx_ring->lbq_len) { + cqicb->flags |= FLAGS_LL; /* Load lbq values */ + tmp = (u64)rx_ring->lbq_base_dma; + base_indirect_ptr = rx_ring->lbq_base_indirect; + page_entries = 0; + do { + *base_indirect_ptr = cpu_to_le64(tmp); + tmp += DB_PAGE_SIZE; + base_indirect_ptr++; + page_entries++; + } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); + cqicb->lbq_addr = + cpu_to_le64(rx_ring->lbq_base_indirect_dma); + bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : + (u16) rx_ring->lbq_buf_size; + cqicb->lbq_buf_size = cpu_to_le16(bq_len); + bq_len = (rx_ring->lbq_len == 65536) ? 
0 : + (u16) rx_ring->lbq_len; + cqicb->lbq_len = cpu_to_le16(bq_len); + rx_ring->lbq_prod_idx = 0; + rx_ring->lbq_curr_idx = 0; + rx_ring->lbq_clean_idx = 0; + rx_ring->lbq_free_cnt = rx_ring->lbq_len; + } + if (rx_ring->sbq_len) { + cqicb->flags |= FLAGS_LS; /* Load sbq values */ + tmp = (u64)rx_ring->sbq_base_dma; + base_indirect_ptr = rx_ring->sbq_base_indirect; + page_entries = 0; + do { + *base_indirect_ptr = cpu_to_le64(tmp); + tmp += DB_PAGE_SIZE; + base_indirect_ptr++; + page_entries++; + } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len)); + cqicb->sbq_addr = + cpu_to_le64(rx_ring->sbq_base_indirect_dma); + cqicb->sbq_buf_size = + cpu_to_le16((u16)(rx_ring->sbq_buf_size)); + bq_len = (rx_ring->sbq_len == 65536) ? 0 : + (u16) rx_ring->sbq_len; + cqicb->sbq_len = cpu_to_le16(bq_len); + rx_ring->sbq_prod_idx = 0; + rx_ring->sbq_curr_idx = 0; + rx_ring->sbq_clean_idx = 0; + rx_ring->sbq_free_cnt = rx_ring->sbq_len; + } + switch (rx_ring->type) { + case TX_Q: + cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); + cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); + break; + case RX_Q: + /* Inbound completion handling rx_rings run in + * separate NAPI contexts. + */ + netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, + 64); + cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); + cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); + break; + default: + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Invalid rx_ring->type = %d.\n", rx_ring->type); + } + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Initializing rx work queue.\n"); + err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), + CFG_LCQ, rx_ring->cq_id); + if (err) { + netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n"); + return err; + } + return err; +} + +static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) +{ + struct wqicb *wqicb = (struct wqicb *)tx_ring; + void __iomem *doorbell_area = + qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); + void *shadow_reg = qdev->tx_ring_shadow_reg_area + + (tx_ring->wq_id * sizeof(u64)); + u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + + (tx_ring->wq_id * sizeof(u64)); + int err = 0; + + /* + * Assign doorbell registers for this tx_ring. + */ + /* TX PCI doorbell mem area for tx producer index */ + tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area; + tx_ring->prod_idx = 0; + /* TX PCI doorbell mem area + 0x04 */ + tx_ring->valid_db_reg = doorbell_area + 0x04; + + /* + * Assign shadow registers for this tx_ring. 
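+	 * The chip DMAs the ring's current consumer index into this u64
+	 * slot (see wqicb->cnsmr_idx_addr below), so completions can be
+	 * reaped without an MMIO read.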
+ */ + tx_ring->cnsmr_idx_sh_reg = shadow_reg; + tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; + + wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); + wqicb->flags = cpu_to_le16(Q_FLAGS_LC | + Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); + wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); + wqicb->rid = 0; + wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma); + + wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); + + ql_init_tx_ring(qdev, tx_ring); + + err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, + (u16) tx_ring->wq_id); + if (err) { + netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n"); + return err; + } + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Successfully loaded WQICB.\n"); + return err; +} + +static void ql_disable_msix(struct ql_adapter *qdev) +{ + if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { + pci_disable_msix(qdev->pdev); + clear_bit(QL_MSIX_ENABLED, &qdev->flags); + kfree(qdev->msi_x_entry); + qdev->msi_x_entry = NULL; + } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) { + pci_disable_msi(qdev->pdev); + clear_bit(QL_MSI_ENABLED, &qdev->flags); + } +} + +/* We start by trying to get the number of vectors + * stored in qdev->intr_count. If we don't get that + * many then we reduce the count and try again. + */ +static void ql_enable_msix(struct ql_adapter *qdev) +{ + int i, err; + + /* Get the MSIX vectors. */ + if (qlge_irq_type == MSIX_IRQ) { + /* Try to alloc space for the msix struct, + * if it fails then go to MSI/legacy. + */ + qdev->msi_x_entry = kcalloc(qdev->intr_count, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!qdev->msi_x_entry) { + qlge_irq_type = MSI_IRQ; + goto msi; + } + + for (i = 0; i < qdev->intr_count; i++) + qdev->msi_x_entry[i].entry = i; + + /* Loop to get our vectors. We start with + * what we want and settle for what we get. + */ + do { + err = pci_enable_msix(qdev->pdev, + qdev->msi_x_entry, qdev->intr_count); + if (err > 0) + qdev->intr_count = err; + } while (err > 0); + + if (err < 0) { + kfree(qdev->msi_x_entry); + qdev->msi_x_entry = NULL; + netif_warn(qdev, ifup, qdev->ndev, + "MSI-X Enable failed, trying MSI.\n"); + qdev->intr_count = 1; + qlge_irq_type = MSI_IRQ; + } else if (err == 0) { + set_bit(QL_MSIX_ENABLED, &qdev->flags); + netif_info(qdev, ifup, qdev->ndev, + "MSI-X Enabled, got %d vectors.\n", + qdev->intr_count); + return; + } + } +msi: + qdev->intr_count = 1; + if (qlge_irq_type == MSI_IRQ) { + if (!pci_enable_msi(qdev->pdev)) { + set_bit(QL_MSI_ENABLED, &qdev->flags); + netif_info(qdev, ifup, qdev->ndev, + "Running with MSI interrupts.\n"); + return; + } + } + qlge_irq_type = LEG_IRQ; + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Running with legacy interrupts.\n"); +} + +/* Each vector services 1 RSS ring and and 1 or more + * TX completion rings. This function loops through + * the TX completion rings and assigns the vector that + * will service it. An example would be if there are + * 2 vectors (so 2 RSS rings) and 8 TX completion rings. + * This would mean that vector 0 would service RSS ring 0 + * and TX completion rings 0,1,2 and 3. Vector 1 would + * service RSS ring 1 and TX completion rings 4,5,6 and 7. 
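+ * In that example tx_rings_per_vector = 8 / 2 = 4, so TX completion
+ * ring i (as an index into rx_ring[]) is served by vector
+ * (i - rss_ring_count) / tx_rings_per_vector -- a restatement of the
+ * loop below.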
+ */ +static void ql_set_tx_vect(struct ql_adapter *qdev) +{ + int i, j, vect; + u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; + + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { + /* Assign irq vectors to TX rx_rings.*/ + for (vect = 0, j = 0, i = qdev->rss_ring_count; + i < qdev->rx_ring_count; i++) { + if (j == tx_rings_per_vector) { + vect++; + j = 0; + } + qdev->rx_ring[i].irq = vect; + j++; + } + } else { + /* For single vector all rings have an irq + * of zero. + */ + for (i = 0; i < qdev->rx_ring_count; i++) + qdev->rx_ring[i].irq = 0; + } +} + +/* Set the interrupt mask for this vector. Each vector + * will service 1 RSS ring and 1 or more TX completion + * rings. This function sets up a bit mask per vector + * that indicates which rings it services. + */ +static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx) +{ + int j, vect = ctx->intr; + u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; + + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { + /* Add the RSS ring serviced by this vector + * to the mask. + */ + ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id); + /* Add the TX ring(s) serviced by this vector + * to the mask. */ + for (j = 0; j < tx_rings_per_vector; j++) { + ctx->irq_mask |= + (1 << qdev->rx_ring[qdev->rss_ring_count + + (vect * tx_rings_per_vector) + j].cq_id); + } + } else { + /* For single vector we just shift each queue's + * ID into the mask. + */ + for (j = 0; j < qdev->rx_ring_count; j++) + ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id); + } +} + +/* + * Here we build the intr_context structures based on + * our rx_ring count and intr vector count. + * The intr_context structure is used to hook each vector + * to possibly different handlers. + */ +static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) +{ + int i = 0; + struct intr_context *intr_context = &qdev->intr_context[0]; + + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { + /* Each rx_ring has it's + * own intr_context since we have separate + * vectors for each queue. + */ + for (i = 0; i < qdev->intr_count; i++, intr_context++) { + qdev->rx_ring[i].irq = i; + intr_context->intr = i; + intr_context->qdev = qdev; + /* Set up this vector's bit-mask that indicates + * which queues it services. + */ + ql_set_irq_mask(qdev, intr_context); + /* + * We set up each vectors enable/disable/read bits so + * there's no bit/mask calculations in the critical path. + */ + intr_context->intr_en_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | + INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD + | i; + intr_context->intr_dis_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | + INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | + INTR_EN_IHD | i; + intr_context->intr_read_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | + INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD | + i; + if (i == 0) { + /* The first vector/queue handles + * broadcast/multicast, fatal errors, + * and firmware events. This in addition + * to normal inbound NAPI processing. + */ + intr_context->handler = qlge_isr; + sprintf(intr_context->name, "%s-rx-%d", + qdev->ndev->name, i); + } else { + /* + * Inbound queues handle unicast frames only. + */ + intr_context->handler = qlge_msix_rx_isr; + sprintf(intr_context->name, "%s-rx-%d", + qdev->ndev->name, i); + } + } + } else { + /* + * All rx_rings use the same intr_context since + * there is only one vector. 
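+		 * (ql_set_irq_mask() below folds every ring's cq_id into
+		 * the single irq_mask, and qlge_isr services them all.)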
+ */ + intr_context->intr = 0; + intr_context->qdev = qdev; + /* + * We set up each vectors enable/disable/read bits so + * there's no bit/mask calculations in the critical path. + */ + intr_context->intr_en_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE; + intr_context->intr_dis_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | + INTR_EN_TYPE_DISABLE; + intr_context->intr_read_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; + /* + * Single interrupt means one handler for all rings. + */ + intr_context->handler = qlge_isr; + sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name); + /* Set up this vector's bit-mask that indicates + * which queues it services. In this case there is + * a single vector so it will service all RSS and + * TX completion rings. + */ + ql_set_irq_mask(qdev, intr_context); + } + /* Tell the TX completion rings which MSIx vector + * they will be using. + */ + ql_set_tx_vect(qdev); +} + +static void ql_free_irq(struct ql_adapter *qdev) +{ + int i; + struct intr_context *intr_context = &qdev->intr_context[0]; + + for (i = 0; i < qdev->intr_count; i++, intr_context++) { + if (intr_context->hooked) { + if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { + free_irq(qdev->msi_x_entry[i].vector, + &qdev->rx_ring[i]); + netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev, + "freeing msix interrupt %d.\n", i); + } else { + free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); + netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev, + "freeing msi interrupt %d.\n", i); + } + } + } + ql_disable_msix(qdev); +} + +static int ql_request_irq(struct ql_adapter *qdev) +{ + int i; + int status = 0; + struct pci_dev *pdev = qdev->pdev; + struct intr_context *intr_context = &qdev->intr_context[0]; + + ql_resolve_queues_to_irqs(qdev); + + for (i = 0; i < qdev->intr_count; i++, intr_context++) { + atomic_set(&intr_context->irq_cnt, 0); + if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { + status = request_irq(qdev->msi_x_entry[i].vector, + intr_context->handler, + 0, + intr_context->name, + &qdev->rx_ring[i]); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed request for MSIX interrupt %d.\n", + i); + goto err_irq; + } else { + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Hooked intr %d, queue type %s, with name %s.\n", + i, + qdev->rx_ring[i].type == DEFAULT_Q ? + "DEFAULT_Q" : + qdev->rx_ring[i].type == TX_Q ? + "TX_Q" : + qdev->rx_ring[i].type == RX_Q ? + "RX_Q" : "", + intr_context->name); + } + } else { + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "trying msi or legacy interrupts.\n"); + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "%s: irq = %d.\n", __func__, pdev->irq); + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "%s: context->name = %s.\n", __func__, + intr_context->name); + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "%s: dev_id = 0x%p.\n", __func__, + &qdev->rx_ring[0]); + status = + request_irq(pdev->irq, qlge_isr, + test_bit(QL_MSI_ENABLED, + &qdev-> + flags) ? 0 : IRQF_SHARED, + intr_context->name, &qdev->rx_ring[0]); + if (status) + goto err_irq; + + netif_err(qdev, ifup, qdev->ndev, + "Hooked intr %d, queue type %s, with name %s.\n", + i, + qdev->rx_ring[0].type == DEFAULT_Q ? + "DEFAULT_Q" : + qdev->rx_ring[0].type == TX_Q ? "TX_Q" : + qdev->rx_ring[0].type == RX_Q ? 
"RX_Q" : "", + intr_context->name); + } + intr_context->hooked = 1; + } + return status; +err_irq: + netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n"); + ql_free_irq(qdev); + return status; +} + +static int ql_start_rss(struct ql_adapter *qdev) +{ + static const u8 init_hash_seed[] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa + }; + struct ricb *ricb = &qdev->ricb; + int status = 0; + int i; + u8 *hash_id = (u8 *) ricb->hash_cq_id; + + memset((void *)ricb, 0, sizeof(*ricb)); + + ricb->base_cq = RSS_L4K; + ricb->flags = + (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6); + ricb->mask = cpu_to_le16((u16)(0x3ff)); + + /* + * Fill out the Indirection Table. + */ + for (i = 0; i < 1024; i++) + hash_id[i] = (i & (qdev->rss_ring_count - 1)); + + memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); + memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); + + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n"); + + status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); + return status; + } + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Successfully loaded RICB.\n"); + return status; +} + +static int ql_clear_routing_entries(struct ql_adapter *qdev) +{ + int i, status = 0; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + /* Clear all the entries in the routing table. */ + for (i = 0; i < 16; i++) { + status = ql_set_routing_reg(qdev, i, 0, 0); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register for CAM packets.\n"); + break; + } + } + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +/* Initialize the frame-to-queue routing. */ +static int ql_route_initialize(struct ql_adapter *qdev) +{ + int status = 0; + + /* Clear all the entries in the routing table. */ + status = ql_clear_routing_entries(qdev); + if (status) + return status; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + + status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT, + RT_IDX_IP_CSUM_ERR, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register " + "for IP CSUM error packets.\n"); + goto exit; + } + status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT, + RT_IDX_TU_CSUM_ERR, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register " + "for TCP/UDP CSUM error packets.\n"); + goto exit; + } + status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register for broadcast packets.\n"); + goto exit; + } + /* If we have more than one inbound queue, then turn on RSS in the + * routing block. 
+ */ + if (qdev->rss_ring_count > 1) { + status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, + RT_IDX_RSS_MATCH, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register for MATCH RSS packets.\n"); + goto exit; + } + } + + status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, + RT_IDX_CAM_HIT, 1); + if (status) + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register for CAM packets.\n"); +exit: + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +int ql_cam_route_initialize(struct ql_adapter *qdev) +{ + int status, set; + + /* If check if the link is up and use to + * determine if we are setting or clearing + * the MAC address in the CAM. + */ + set = ql_read32(qdev, STS); + set &= qdev->port_link_up; + status = ql_set_mac_addr(qdev, set); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n"); + return status; + } + + status = ql_route_initialize(qdev); + if (status) + netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n"); + + return status; +} + +static int ql_adapter_initialize(struct ql_adapter *qdev) +{ + u32 value, mask; + int i; + int status = 0; + + /* + * Set up the System register to halt on errors. + */ + value = SYS_EFE | SYS_FAE; + mask = value << 16; + ql_write32(qdev, SYS, mask | value); + + /* Set the default queue, and VLAN behavior. */ + value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV; + mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16); + ql_write32(qdev, NIC_RCV_CFG, (mask | value)); + + /* Set the MPI interrupt to enabled. */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); + + /* Enable the function, set pagesize, enable error checking. */ + value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | + FSC_EC | FSC_VM_PAGE_4K; + value |= SPLT_SETTING; + + /* Set/clear header splitting. */ + mask = FSC_VM_PAGESIZE_MASK | + FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); + ql_write32(qdev, FSC, mask | value); + + ql_write32(qdev, SPLT_HDR, SPLT_LEN); + + /* Set RX packet routing to use port/pci function on which the + * packet arrived on in addition to usual frame routing. + * This is helpful on bonding where both interfaces can have + * the same MAC address. + */ + ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); + /* Reroute all packets to our Interface. + * They may have been routed to MPI firmware + * due to WOL. + */ + value = ql_read32(qdev, MGMT_RCV_CFG); + value &= ~MGMT_RCV_CFG_RM; + mask = 0xffff0000; + + /* Sticky reg needs clearing due to WOL. */ + ql_write32(qdev, MGMT_RCV_CFG, mask); + ql_write32(qdev, MGMT_RCV_CFG, mask | value); + + /* Default WOL is enable on Mezz cards */ + if (qdev->pdev->subsystem_device == 0x0068 || + qdev->pdev->subsystem_device == 0x0180) + qdev->wol = WAKE_MAGIC; + + /* Start up the rx queues. */ + for (i = 0; i < qdev->rx_ring_count; i++) { + status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to start rx ring[%d].\n", i); + return status; + } + } + + /* If there is more than one inbound completion queue + * then download a RICB to configure RSS. + */ + if (qdev->rss_ring_count > 1) { + status = ql_start_rss(qdev); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n"); + return status; + } + } + + /* Start up the tx queues. 
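+	 * (ql_start_tx_ring() builds each ring's wqicb and downloads it
+	 * to the chip via ql_write_cfg(..., CFG_LRQ, ...).)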
+	 */
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
+		if (status) {
+			netif_err(qdev, ifup, qdev->ndev,
+				  "Failed to start tx ring[%d].\n", i);
+			return status;
+		}
+	}
+
+	/* Initialize the port and set the max frame size. */
+	status = qdev->nic_ops->port_initialize(qdev);
+	if (status)
+		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
+
+	/* Set up the MAC address and frame routing filter. */
+	status = ql_cam_route_initialize(qdev);
+	if (status) {
+		netif_err(qdev, ifup, qdev->ndev,
+			  "Failed to init CAM/Routing tables.\n");
+		return status;
+	}
+
+	/* Start NAPI for the RSS queues. */
+	for (i = 0; i < qdev->rss_ring_count; i++) {
+		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+			     "Enabling NAPI for rx_ring[%d].\n", i);
+		napi_enable(&qdev->rx_ring[i].napi);
+	}
+
+	return status;
+}
+
+/* Issue a soft reset to the chip. */
+static int ql_adapter_reset(struct ql_adapter *qdev)
+{
+	u32 value;
+	int status = 0;
+	unsigned long end_jiffies;
+
+	/* Clear all the entries in the routing table. */
+	status = ql_clear_routing_entries(qdev);
+	if (status) {
+		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
+		return status;
+	}
+
+	end_jiffies = jiffies +
+		max((unsigned long)1, usecs_to_jiffies(30));
+
+	/* If the recovery bit is set, skip the mailbox commands and just
+	 * clear the bit; otherwise this is a normal reset.
+	 */
+	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
+		/* Stop management traffic. */
+		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+
+		/* Wait for the NIC and MGMNT FIFOs to empty. */
+		ql_wait_fifo_empty(qdev);
+	} else
+		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
+
+	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+
+	do {
+		value = ql_read32(qdev, RST_FO);
+		if ((value & RST_FO_FR) == 0)
+			break;
+		cpu_relax();
+	} while (time_before(jiffies, end_jiffies));
+
+	if (value & RST_FO_FR) {
+		netif_err(qdev, ifdown, qdev->ndev,
+			  "Timed out waiting for the chip reset to complete!\n");
+		status = -ETIMEDOUT;
+	}
+
+	/* Resume management traffic. */
+	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
+	return status;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	netif_info(qdev, probe, qdev->ndev,
+		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
+		   "XG Roll = %d, XG Rev = %d.\n",
+		   qdev->func,
+		   qdev->port,
+		   qdev->chip_rev_id & 0x0000000f,
+		   qdev->chip_rev_id >> 4 & 0x0000000f,
+		   qdev->chip_rev_id >> 8 & 0x0000000f,
+		   qdev->chip_rev_id >> 12 & 0x0000000f);
+	netif_info(qdev, probe, qdev->ndev,
+		   "MAC address %pM\n", ndev->dev_addr);
+}
+
+static int ql_wol(struct ql_adapter *qdev)
+{
+	int status = 0;
+	u32 wol = MB_WOL_DISABLE;
+
+	/* The CAM is still intact after a reset, but if we
+	 * are doing WOL, then we may need to program the
+	 * routing regs. We would also need to issue the mailbox
+	 * commands to instruct the MPI what to do per the ethtool
+	 * settings.
+	 */
+
+	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
+			WAKE_MCAST | WAKE_BCAST)) {
+		netif_err(qdev, ifdown, qdev->ndev,
+			  "Unsupported WOL parameter.
qdev->wol = 0x%x.\n", + qdev->wol); + return -EINVAL; + } + + if (qdev->wol & WAKE_MAGIC) { + status = ql_mb_wol_set_magic(qdev, 1); + if (status) { + netif_err(qdev, ifdown, qdev->ndev, + "Failed to set magic packet on %s.\n", + qdev->ndev->name); + return status; + } else + netif_info(qdev, drv, qdev->ndev, + "Enabled magic packet successfully on %s.\n", + qdev->ndev->name); + + wol |= MB_WOL_MAGIC_PKT; + } + + if (qdev->wol) { + wol |= MB_WOL_MODE_ON; + status = ql_mb_wol_mode(qdev, wol); + netif_err(qdev, drv, qdev->ndev, + "WOL %s (wol code 0x%x) on %s\n", + (status == 0) ? "Successfully set" : "Failed", + wol, qdev->ndev->name); + } + + return status; +} + +static void ql_cancel_all_work_sync(struct ql_adapter *qdev) +{ + + /* Don't kill the reset worker thread if we + * are in the process of recovery. + */ + if (test_bit(QL_ADAPTER_UP, &qdev->flags)) + cancel_delayed_work_sync(&qdev->asic_reset_work); + cancel_delayed_work_sync(&qdev->mpi_reset_work); + cancel_delayed_work_sync(&qdev->mpi_work); + cancel_delayed_work_sync(&qdev->mpi_idc_work); + cancel_delayed_work_sync(&qdev->mpi_core_to_log); + cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); +} + +static int ql_adapter_down(struct ql_adapter *qdev) +{ + int i, status = 0; + + ql_link_off(qdev); + + ql_cancel_all_work_sync(qdev); + + for (i = 0; i < qdev->rss_ring_count; i++) + napi_disable(&qdev->rx_ring[i].napi); + + clear_bit(QL_ADAPTER_UP, &qdev->flags); + + ql_disable_interrupts(qdev); + + ql_tx_ring_clean(qdev); + + /* Call netif_napi_del() from common point. + */ + for (i = 0; i < qdev->rss_ring_count; i++) + netif_napi_del(&qdev->rx_ring[i].napi); + + status = ql_adapter_reset(qdev); + if (status) + netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", + qdev->func); + ql_free_rx_buffers(qdev); + + return status; +} + +static int ql_adapter_up(struct ql_adapter *qdev) +{ + int err = 0; + + err = ql_adapter_initialize(qdev); + if (err) { + netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); + goto err_init; + } + set_bit(QL_ADAPTER_UP, &qdev->flags); + ql_alloc_rx_buffers(qdev); + /* If the port is initialized and the + * link is up the turn on the carrier. + */ + if ((ql_read32(qdev, STS) & qdev->port_init) && + (ql_read32(qdev, STS) & qdev->port_link_up)) + ql_link_on(qdev); + /* Restore rx mode. */ + clear_bit(QL_ALLMULTI, &qdev->flags); + clear_bit(QL_PROMISCUOUS, &qdev->flags); + qlge_set_multicast_list(qdev->ndev); + + /* Restore vlan setting. */ + qlge_restore_vlan(qdev); + + ql_enable_interrupts(qdev); + ql_enable_all_completion_interrupts(qdev); + netif_tx_start_all_queues(qdev->ndev); + + return 0; +err_init: + ql_adapter_reset(qdev); + return err; +} + +static void ql_release_adapter_resources(struct ql_adapter *qdev) +{ + ql_free_mem_resources(qdev); + ql_free_irq(qdev); +} + +static int ql_get_adapter_resources(struct ql_adapter *qdev) +{ + int status = 0; + + if (ql_alloc_mem_resources(qdev)) { + netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); + return -ENOMEM; + } + status = ql_request_irq(qdev); + return status; +} + +static int qlge_close(struct net_device *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + /* If we hit pci_channel_io_perm_failure + * failure condition, then we already + * brought the adapter down. + */ + if (test_bit(QL_EEH_FATAL, &qdev->flags)) { + netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n"); + clear_bit(QL_EEH_FATAL, &qdev->flags); + return 0; + } + + /* + * Wait for device to recover from a reset. 
+ * (Rarely happens, but possible.) + */ + while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) + msleep(1); + ql_adapter_down(qdev); + ql_release_adapter_resources(qdev); + return 0; +} + +static int ql_configure_rings(struct ql_adapter *qdev) +{ + int i; + struct rx_ring *rx_ring; + struct tx_ring *tx_ring; + int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); + unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? + LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; + + qdev->lbq_buf_order = get_order(lbq_buf_len); + + /* In a perfect world we have one RSS ring for each CPU + * and each has it's own vector. To do that we ask for + * cpu_cnt vectors. ql_enable_msix() will adjust the + * vector count to what we actually get. We then + * allocate an RSS ring for each. + * Essentially, we are doing min(cpu_count, msix_vector_count). + */ + qdev->intr_count = cpu_cnt; + ql_enable_msix(qdev); + /* Adjust the RSS ring count to the actual vector count. */ + qdev->rss_ring_count = qdev->intr_count; + qdev->tx_ring_count = cpu_cnt; + qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count; + + for (i = 0; i < qdev->tx_ring_count; i++) { + tx_ring = &qdev->tx_ring[i]; + memset((void *)tx_ring, 0, sizeof(*tx_ring)); + tx_ring->qdev = qdev; + tx_ring->wq_id = i; + tx_ring->wq_len = qdev->tx_ring_size; + tx_ring->wq_size = + tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); + + /* + * The completion queue ID for the tx rings start + * immediately after the rss rings. + */ + tx_ring->cq_id = qdev->rss_ring_count + i; + } + + for (i = 0; i < qdev->rx_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + memset((void *)rx_ring, 0, sizeof(*rx_ring)); + rx_ring->qdev = qdev; + rx_ring->cq_id = i; + rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ + if (i < qdev->rss_ring_count) { + /* + * Inbound (RSS) queues. + */ + rx_ring->cq_len = qdev->rx_ring_size; + rx_ring->cq_size = + rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); + rx_ring->lbq_len = NUM_LARGE_BUFFERS; + rx_ring->lbq_size = + rx_ring->lbq_len * sizeof(__le64); + rx_ring->lbq_buf_size = (u16)lbq_buf_len; + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "lbq_buf_size %d, order = %d\n", + rx_ring->lbq_buf_size, + qdev->lbq_buf_order); + rx_ring->sbq_len = NUM_SMALL_BUFFERS; + rx_ring->sbq_size = + rx_ring->sbq_len * sizeof(__le64); + rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; + rx_ring->type = RX_Q; + } else { + /* + * Outbound queue handles outbound completions only. + */ + /* outbound cq is same size as tx_ring it services. */ + rx_ring->cq_len = qdev->tx_ring_size; + rx_ring->cq_size = + rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); + rx_ring->lbq_len = 0; + rx_ring->lbq_size = 0; + rx_ring->lbq_buf_size = 0; + rx_ring->sbq_len = 0; + rx_ring->sbq_size = 0; + rx_ring->sbq_buf_size = 0; + rx_ring->type = TX_Q; + } + } + return 0; +} + +static int qlge_open(struct net_device *ndev) +{ + int err = 0; + struct ql_adapter *qdev = netdev_priv(ndev); + + err = ql_adapter_reset(qdev); + if (err) + return err; + + err = ql_configure_rings(qdev); + if (err) + return err; + + err = ql_get_adapter_resources(qdev); + if (err) + goto error_up; + + err = ql_adapter_up(qdev); + if (err) + goto error_up; + + return err; + +error_up: + ql_release_adapter_resources(qdev); + return err; +} + +static int ql_change_rx_buffers(struct ql_adapter *qdev) +{ + struct rx_ring *rx_ring; + int i, status; + u32 lbq_buf_len; + + /* Wait for an outstanding reset to complete. 
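+	 * (The wait below is bounded at about three seconds by the
+	 * i = 3 / ssleep(1) loop; on timeout we return -ETIMEDOUT rather
+	 * than touch a half-reset device.)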
*/ + if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { + int i = 3; + while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { + netif_err(qdev, ifup, qdev->ndev, + "Waiting for adapter UP...\n"); + ssleep(1); + } + + if (!i) { + netif_err(qdev, ifup, qdev->ndev, + "Timed out waiting for adapter UP\n"); + return -ETIMEDOUT; + } + } + + status = ql_adapter_down(qdev); + if (status) + goto error; + + /* Get the new rx buffer size. */ + lbq_buf_len = (qdev->ndev->mtu > 1500) ? + LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; + qdev->lbq_buf_order = get_order(lbq_buf_len); + + for (i = 0; i < qdev->rss_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + /* Set the new size. */ + rx_ring->lbq_buf_size = lbq_buf_len; + } + + status = ql_adapter_up(qdev); + if (status) + goto error; + + return status; +error: + netif_alert(qdev, ifup, qdev->ndev, + "Driver up/down cycle failed, closing device.\n"); + set_bit(QL_ADAPTER_UP, &qdev->flags); + dev_close(qdev->ndev); + return status; +} + +static int qlge_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status; + + if (ndev->mtu == 1500 && new_mtu == 9000) { + netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n"); + } else if (ndev->mtu == 9000 && new_mtu == 1500) { + netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n"); + } else + return -EINVAL; + + queue_delayed_work(qdev->workqueue, + &qdev->mpi_port_cfg_work, 3*HZ); + + ndev->mtu = new_mtu; + + if (!netif_running(qdev->ndev)) { + return 0; + } + + status = ql_change_rx_buffers(qdev); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Changing MTU failed.\n"); + } + + return status; +} + +static struct net_device_stats *qlge_get_stats(struct net_device + *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + struct rx_ring *rx_ring = &qdev->rx_ring[0]; + struct tx_ring *tx_ring = &qdev->tx_ring[0]; + unsigned long pkts, mcast, dropped, errors, bytes; + int i; + + /* Get RX stats. */ + pkts = mcast = dropped = errors = bytes = 0; + for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { + pkts += rx_ring->rx_packets; + bytes += rx_ring->rx_bytes; + dropped += rx_ring->rx_dropped; + errors += rx_ring->rx_errors; + mcast += rx_ring->rx_multicast; + } + ndev->stats.rx_packets = pkts; + ndev->stats.rx_bytes = bytes; + ndev->stats.rx_dropped = dropped; + ndev->stats.rx_errors = errors; + ndev->stats.multicast = mcast; + + /* Get TX stats. */ + pkts = errors = bytes = 0; + for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { + pkts += tx_ring->tx_packets; + bytes += tx_ring->tx_bytes; + errors += tx_ring->tx_errors; + } + ndev->stats.tx_packets = pkts; + ndev->stats.tx_bytes = bytes; + ndev->stats.tx_errors = errors; + return &ndev->stats; +} + +static void qlge_set_multicast_list(struct net_device *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + struct netdev_hw_addr *ha; + int i, status; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return; + /* + * Set or clear promiscuous mode if a + * transition is taking place. 
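+	 * Only write the routing register when IFF_PROMISC disagrees
+	 * with our QL_PROMISCUOUS flag, so repeated calls stay cheap.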
+	 */
+	if (ndev->flags & IFF_PROMISC) {
+		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
+				netif_err(qdev, hw, qdev->ndev,
+					  "Failed to set promiscuous mode.\n");
+			} else {
+				set_bit(QL_PROMISCUOUS, &qdev->flags);
+			}
+		}
+	} else {
+		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
+				netif_err(qdev, hw, qdev->ndev,
+					  "Failed to clear promiscuous mode.\n");
+			} else {
+				clear_bit(QL_PROMISCUOUS, &qdev->flags);
+			}
+		}
+	}
+
+	/*
+	 * Set or clear all multicast mode if a
+	 * transition is taking place.
+	 */
+	if ((ndev->flags & IFF_ALLMULTI) ||
+	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
+		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
+				netif_err(qdev, hw, qdev->ndev,
+					  "Failed to set all-multi mode.\n");
+			} else {
+				set_bit(QL_ALLMULTI, &qdev->flags);
+			}
+		}
+	} else {
+		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
+				netif_err(qdev, hw, qdev->ndev,
+					  "Failed to clear all-multi mode.\n");
+			} else {
+				clear_bit(QL_ALLMULTI, &qdev->flags);
+			}
+		}
+	}
+
+	if (!netdev_mc_empty(ndev)) {
+		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+		if (status)
+			goto exit;
+		i = 0;
+		netdev_for_each_mc_addr(ha, ndev) {
+			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
+						MAC_ADDR_TYPE_MULTI_MAC, i)) {
+				netif_err(qdev, hw, qdev->ndev,
+					  "Failed to load multicast address.\n");
+				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+				goto exit;
+			}
+			i++;
+		}
+		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+		if (ql_set_routing_reg
+		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
+			netif_err(qdev, hw, qdev->ndev,
+				  "Failed to set multicast match mode.\n");
+		} else {
+			set_bit(QL_ALLMULTI, &qdev->flags);
+		}
+	}
+exit:
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+}
+
+static int qlge_set_mac_address(struct net_device *ndev, void *p)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	struct sockaddr *addr = p;
+	int status;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+	/* Update local copy of current mac address. */
+	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return status;
+	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
+				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+	if (status)
+		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	return status;
+}
+
+static void qlge_tx_timeout(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	ql_queue_asic_error(qdev);
+}
+
+static void ql_asic_reset_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+		container_of(work, struct ql_adapter, asic_reset_work.work);
+	int status;
+
+	rtnl_lock();
+	status = ql_adapter_down(qdev);
+	if (status)
+		goto error;
+
+	status = ql_adapter_up(qdev);
+	if (status)
+		goto error;
+
+	/* Restore rx mode.
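+	 * (The down/up cycle cleared QL_ALLMULTI and QL_PROMISCUOUS, so
+	 * rebuild them from ndev->flags via qlge_set_multicast_list().)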
*/ + clear_bit(QL_ALLMULTI, &qdev->flags); + clear_bit(QL_PROMISCUOUS, &qdev->flags); + qlge_set_multicast_list(qdev->ndev); + + rtnl_unlock(); + return; +error: + netif_alert(qdev, ifup, qdev->ndev, + "Driver up/down cycle failed, closing device\n"); + + set_bit(QL_ADAPTER_UP, &qdev->flags); + dev_close(qdev->ndev); + rtnl_unlock(); +} + +static const struct nic_operations qla8012_nic_ops = { + .get_flash = ql_get_8012_flash_params, + .port_initialize = ql_8012_port_initialize, +}; + +static const struct nic_operations qla8000_nic_ops = { + .get_flash = ql_get_8000_flash_params, + .port_initialize = ql_8000_port_initialize, +}; + +/* Find the pcie function number for the other NIC + * on this chip. Since both NIC functions share a + * common firmware we have the lowest enabled function + * do any common work. Examples would be resetting + * after a fatal firmware error, or doing a firmware + * coredump. + */ +static int ql_get_alt_pcie_func(struct ql_adapter *qdev) +{ + int status = 0; + u32 temp; + u32 nic_func1, nic_func2; + + status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, + &temp); + if (status) + return status; + + nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) & + MPI_TEST_NIC_FUNC_MASK); + nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) & + MPI_TEST_NIC_FUNC_MASK); + + if (qdev->func == nic_func1) + qdev->alt_func = nic_func2; + else if (qdev->func == nic_func2) + qdev->alt_func = nic_func1; + else + status = -EIO; + + return status; +} + +static int ql_get_board_info(struct ql_adapter *qdev) +{ + int status; + qdev->func = + (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; + if (qdev->func > 3) + return -EIO; + + status = ql_get_alt_pcie_func(qdev); + if (status) + return status; + + qdev->port = (qdev->func < qdev->alt_func) ? 
0 : 1; + if (qdev->port) { + qdev->xg_sem_mask = SEM_XGMAC1_MASK; + qdev->port_link_up = STS_PL1; + qdev->port_init = STS_PI1; + qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI; + qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO; + } else { + qdev->xg_sem_mask = SEM_XGMAC0_MASK; + qdev->port_link_up = STS_PL0; + qdev->port_init = STS_PI0; + qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; + qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; + } + qdev->chip_rev_id = ql_read32(qdev, REV_ID); + qdev->device_id = qdev->pdev->device; + if (qdev->device_id == QLGE_DEVICE_ID_8012) + qdev->nic_ops = &qla8012_nic_ops; + else if (qdev->device_id == QLGE_DEVICE_ID_8000) + qdev->nic_ops = &qla8000_nic_ops; + return status; +} + +static void ql_release_all(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + + if (qdev->workqueue) { + destroy_workqueue(qdev->workqueue); + qdev->workqueue = NULL; + } + + if (qdev->reg_base) + iounmap(qdev->reg_base); + if (qdev->doorbell_area) + iounmap(qdev->doorbell_area); + vfree(qdev->mpi_coredump); + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int __devinit ql_init_device(struct pci_dev *pdev, + struct net_device *ndev, int cards_found) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int err = 0; + + memset((void *)qdev, 0, sizeof(*qdev)); + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "PCI device enable failed.\n"); + return err; + } + + qdev->ndev = ndev; + qdev->pdev = pdev; + pci_set_drvdata(pdev, ndev); + + /* Set PCIe read request size */ + err = pcie_set_readrq(pdev, 4096); + if (err) { + dev_err(&pdev->dev, "Set readrq failed.\n"); + goto err_out1; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "PCI region request failed.\n"); + return err; + } + + pci_set_master(pdev); + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + set_bit(QL_DMA64, &qdev->flags); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + if (err) { + dev_err(&pdev->dev, "No usable DMA configuration.\n"); + goto err_out2; + } + + /* Set PCIe reset type for EEH to fundamental. 
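+	 * (pdev->needs_freset tells the EEH code that this device needs a
+	 * fundamental reset, not just a hot reset, to recover.)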
*/ + pdev->needs_freset = 1; + pci_save_state(pdev); + qdev->reg_base = + ioremap_nocache(pci_resource_start(pdev, 1), + pci_resource_len(pdev, 1)); + if (!qdev->reg_base) { + dev_err(&pdev->dev, "Register mapping failed.\n"); + err = -ENOMEM; + goto err_out2; + } + + qdev->doorbell_area_size = pci_resource_len(pdev, 3); + qdev->doorbell_area = + ioremap_nocache(pci_resource_start(pdev, 3), + pci_resource_len(pdev, 3)); + if (!qdev->doorbell_area) { + dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); + err = -ENOMEM; + goto err_out2; + } + + err = ql_get_board_info(qdev); + if (err) { + dev_err(&pdev->dev, "Register access failed.\n"); + err = -EIO; + goto err_out2; + } + qdev->msg_enable = netif_msg_init(debug, default_msg); + spin_lock_init(&qdev->hw_lock); + spin_lock_init(&qdev->stats_lock); + + if (qlge_mpi_coredump) { + qdev->mpi_coredump = + vmalloc(sizeof(struct ql_mpi_coredump)); + if (qdev->mpi_coredump == NULL) { + dev_err(&pdev->dev, "Coredump alloc failed.\n"); + err = -ENOMEM; + goto err_out2; + } + if (qlge_force_coredump) + set_bit(QL_FRC_COREDUMP, &qdev->flags); + } + /* make sure the EEPROM is good */ + err = qdev->nic_ops->get_flash(qdev); + if (err) { + dev_err(&pdev->dev, "Invalid FLASH.\n"); + goto err_out2; + } + + memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); + /* Keep local copy of current mac address. */ + memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); + + /* Set up the default ring sizes. */ + qdev->tx_ring_size = NUM_TX_RING_ENTRIES; + qdev->rx_ring_size = NUM_RX_RING_ENTRIES; + + /* Set up the coalescing parameters. */ + qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; + qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; + qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; + qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; + + /* + * Set up the operating parameters. 
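+	 * One single-threaded workqueue carries all of the delayed work
+	 * items below; ql_cancel_all_work_sync() is the matching teardown.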
+ */ + qdev->workqueue = create_singlethread_workqueue(ndev->name); + INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); + INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); + INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); + INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); + INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); + INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); + init_completion(&qdev->ide_completion); + mutex_init(&qdev->mpi_mutex); + + if (!cards_found) { + dev_info(&pdev->dev, "%s\n", DRV_STRING); + dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n", + DRV_NAME, DRV_VERSION); + } + return 0; +err_out2: + ql_release_all(pdev); +err_out1: + pci_disable_device(pdev); + return err; +} + +static const struct net_device_ops qlge_netdev_ops = { + .ndo_open = qlge_open, + .ndo_stop = qlge_close, + .ndo_start_xmit = qlge_send, + .ndo_change_mtu = qlge_change_mtu, + .ndo_get_stats = qlge_get_stats, + .ndo_set_multicast_list = qlge_set_multicast_list, + .ndo_set_mac_address = qlge_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = qlge_tx_timeout, + .ndo_fix_features = qlge_fix_features, + .ndo_set_features = qlge_set_features, + .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, +}; + +static void ql_timer(unsigned long data) +{ + struct ql_adapter *qdev = (struct ql_adapter *)data; + u32 var = 0; + + var = ql_read32(qdev, STS); + if (pci_channel_offline(qdev->pdev)) { + netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); + return; + } + + mod_timer(&qdev->timer, jiffies + (5*HZ)); +} + +static int __devinit qlge_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_entry) +{ + struct net_device *ndev = NULL; + struct ql_adapter *qdev = NULL; + static int cards_found = 0; + int err = 0; + + ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), + min(MAX_CPUS, (int)num_online_cpus())); + if (!ndev) + return -ENOMEM; + + err = ql_init_device(pdev, ndev, cards_found); + if (err < 0) { + free_netdev(ndev); + return err; + } + + qdev = netdev_priv(ndev); + SET_NETDEV_DEV(ndev, &pdev->dev); + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | + NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM; + ndev->features = ndev->hw_features | + NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + + if (test_bit(QL_DMA64, &qdev->flags)) + ndev->features |= NETIF_F_HIGHDMA; + + /* + * Set up net_device structure. 
+ */ + ndev->tx_queue_len = qdev->tx_ring_size; + ndev->irq = pdev->irq; + + ndev->netdev_ops = &qlge_netdev_ops; + SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); + ndev->watchdog_timeo = 10 * HZ; + + err = register_netdev(ndev); + if (err) { + dev_err(&pdev->dev, "net device registration failed.\n"); + ql_release_all(pdev); + pci_disable_device(pdev); + return err; + } + /* Start up the timer to trigger EEH if + * the bus goes dead + */ + init_timer_deferrable(&qdev->timer); + qdev->timer.data = (unsigned long)qdev; + qdev->timer.function = ql_timer; + qdev->timer.expires = jiffies + (5*HZ); + add_timer(&qdev->timer); + ql_link_off(qdev); + ql_display_dev_info(ndev); + atomic_set(&qdev->lb_count, 0); + cards_found++; + return 0; +} + +netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev) +{ + return qlge_send(skb, ndev); +} + +int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget) +{ + return ql_clean_inbound_rx_ring(rx_ring, budget); +} + +static void __devexit qlge_remove(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + del_timer_sync(&qdev->timer); + ql_cancel_all_work_sync(qdev); + unregister_netdev(ndev); + ql_release_all(pdev); + pci_disable_device(pdev); + free_netdev(ndev); +} + +/* Clean up resources without touching hardware. */ +static void ql_eeh_close(struct net_device *ndev) +{ + int i; + struct ql_adapter *qdev = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + netif_carrier_off(ndev); + netif_stop_queue(ndev); + } + + /* Disabling the timer */ + del_timer_sync(&qdev->timer); + ql_cancel_all_work_sync(qdev); + + for (i = 0; i < qdev->rss_ring_count; i++) + netif_napi_del(&qdev->rx_ring[i].napi); + + clear_bit(QL_ADAPTER_UP, &qdev->flags); + ql_tx_ring_clean(qdev); + ql_free_rx_buffers(qdev); + ql_release_adapter_resources(qdev); +} + +/* + * This callback is called by the PCI subsystem whenever + * a PCI bus error is detected. + */ +static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, + enum pci_channel_state state) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + netif_device_detach(ndev); + if (netif_running(ndev)) + ql_eeh_close(ndev); + pci_disable_device(pdev); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + dev_err(&pdev->dev, + "%s: pci_channel_io_perm_failure.\n", __func__); + ql_eeh_close(ndev); + set_bit(QL_EEH_FATAL, &qdev->flags); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/* + * This callback is called after the PCI buss has been reset. + * Basically, this tries to restart the card from scratch. + * This is a shortened version of the device probe/discovery code, + * it resembles the first-half of the () routine. 
+ */ +static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + + pdev->error_state = pci_channel_io_normal; + + pci_restore_state(pdev); + if (pci_enable_device(pdev)) { + netif_err(qdev, ifup, qdev->ndev, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + if (ql_adapter_reset(qdev)) { + netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n"); + set_bit(QL_EEH_FATAL, &qdev->flags); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +static void qlge_io_resume(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + int err = 0; + + if (netif_running(ndev)) { + err = qlge_open(ndev); + if (err) { + netif_err(qdev, ifup, qdev->ndev, + "Device initialization failed after reset.\n"); + return; + } + } else { + netif_err(qdev, ifup, qdev->ndev, + "Device was not running prior to EEH.\n"); + } + mod_timer(&qdev->timer, jiffies + (5*HZ)); + netif_device_attach(ndev); +} + +static struct pci_error_handlers qlge_err_handler = { + .error_detected = qlge_io_error_detected, + .slot_reset = qlge_io_slot_reset, + .resume = qlge_io_resume, +}; + +static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + int err; + + netif_device_detach(ndev); + del_timer_sync(&qdev->timer); + + if (netif_running(ndev)) { + err = ql_adapter_down(qdev); + if (!err) + return err; + } + + ql_wol(qdev); + err = pci_save_state(pdev); + if (err) + return err; + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +#ifdef CONFIG_PM +static int qlge_resume(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + int err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + err = pci_enable_device(pdev); + if (err) { + netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(ndev)) { + err = ql_adapter_up(qdev); + if (err) + return err; + } + + mod_timer(&qdev->timer, jiffies + (5*HZ)); + netif_device_attach(ndev); + + return 0; +} +#endif /* CONFIG_PM */ + +static void qlge_shutdown(struct pci_dev *pdev) +{ + qlge_suspend(pdev, PMSG_SUSPEND); +} + +static struct pci_driver qlge_driver = { + .name = DRV_NAME, + .id_table = qlge_pci_tbl, + .probe = qlge_probe, + .remove = __devexit_p(qlge_remove), +#ifdef CONFIG_PM + .suspend = qlge_suspend, + .resume = qlge_resume, +#endif + .shutdown = qlge_shutdown, + .err_handler = &qlge_err_handler +}; + +static int __init qlge_init_module(void) +{ + return pci_register_driver(&qlge_driver); +} + +static void __exit qlge_exit(void) +{ + pci_unregister_driver(&qlge_driver); +} + +module_init(qlge_init_module); +module_exit(qlge_exit); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c new file mode 100644 index 000000000000..ff2bf8a4e247 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c @@ -0,0 +1,1284 @@ +#include "qlge.h" + +int ql_unpause_mpi_risc(struct ql_adapter *qdev) +{ + u32 tmp; + + /* Un-pause the RISC */ + tmp = ql_read32(qdev, CSR); + if 
(!(tmp & CSR_RP))
+		return -EIO;
+
+	ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
+	return 0;
+}
+
+int ql_pause_mpi_risc(struct ql_adapter *qdev)
+{
+	u32 tmp;
+	int count = UDELAY_COUNT;
+
+	/* Pause the RISC */
+	ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
+	do {
+		tmp = ql_read32(qdev, CSR);
+		if (tmp & CSR_RP)
+			break;
+		mdelay(UDELAY_DELAY);
+		count--;
+	} while (count);
+	return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
+{
+	u32 tmp;
+	int count = UDELAY_COUNT;
+
+	/* Reset the RISC */
+	ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+	do {
+		tmp = ql_read32(qdev, CSR);
+		if (tmp & CSR_RR) {
+			ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+			break;
+		}
+		mdelay(UDELAY_DELAY);
+		count--;
+	} while (count);
+	return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+	int status;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, PROC_DATA);
+exit:
+	return status;
+}
+
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+	int status = 0;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* write the data to the data reg */
+	ql_write32(qdev, PROC_DATA, data);
+	/* trigger the write */
+	ql_write32(qdev, PROC_ADDR, reg);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+exit:
+	return status;
+}
+
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+{
+	int status;
+
+	status = ql_write_mpi_reg(qdev, 0x00001010, 1);
+	return status;
+}
+
+/* Determine if we are in charge of the firmware. If
+ * we are the lower of the 2 NIC pcie functions, or if
+ * we are the higher function and the lower function
+ * is not enabled.
+ */
+int ql_own_firmware(struct ql_adapter *qdev)
+{
+	u32 temp;
+
+	/* If we are the lower of the 2 NIC functions
+	 * on the chip, then we are responsible for
+	 * core dump and firmware reset after an error.
+	 */
+	if (qdev->func < qdev->alt_func)
+		return 1;
+
+	/* If we are the higher of the 2 NIC functions
+	 * on the chip and the lower function is not
+	 * enabled, then we are responsible for
+	 * core dump and firmware reset after an error.
+	 */
+	temp = ql_read32(qdev, STS);
+	if (!(temp & (1 << (8 + qdev->alt_func))))
+		return 1;
+
+	return 0;
+}
+
+static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int i, status;
+
+	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+	if (status)
+		return -EBUSY;
+	for (i = 0; i < mbcp->out_count; i++) {
+		status =
+		    ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
+				    &mbcp->mbox_out[i]);
+		if (status) {
+			netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
+			break;
+		}
+	}
+	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);	/* does flush too */
+	return status;
+}
+
+/* Wait for a single mailbox command to complete.
+ * Returns zero on success.
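+ * Completion is signalled by STS_PI in the status register; the loop
+ * below polls for it up to 100 times with 100 ms delays (about 10 s).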
+ */ +static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev) +{ + int count = 100; + u32 value; + + do { + value = ql_read32(qdev, STS); + if (value & STS_PI) + return 0; + mdelay(UDELAY_DELAY); /* 100ms */ + } while (--count); + return -ETIMEDOUT; +} + +/* Execute a single mailbox command. + * Caller must hold PROC_ADDR semaphore. + */ +static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int i, status; + + /* + * Make sure there's nothing pending. + * This shouldn't happen. + */ + if (ql_read32(qdev, CSR) & CSR_HRI) + return -EIO; + + status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); + if (status) + return status; + + /* + * Fill the outbound mailboxes. + */ + for (i = 0; i < mbcp->in_count; i++) { + status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i, + mbcp->mbox_in[i]); + if (status) + goto end; + } + /* + * Wake up the MPI firmware. + */ + ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT); +end: + ql_sem_unlock(qdev, SEM_PROC_REG_MASK); + return status; +} + +/* We are being asked by firmware to accept + * a change to the port. This is only + * a change to max frame sizes (Tx/Rx), pause + * parameters, or loopback mode. We wake up a worker + * to handler processing this since a mailbox command + * will need to be sent to ACK the request. + */ +static int ql_idc_req_aen(struct ql_adapter *qdev) +{ + int status; + struct mbox_params *mbcp = &qdev->idc_mbc; + + netif_err(qdev, drv, qdev->ndev, "Enter!\n"); + /* Get the status data and start up a thread to + * handle the request. + */ + mbcp = &qdev->idc_mbc; + mbcp->out_count = 4; + status = ql_get_mb_sts(qdev, mbcp); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Could not read MPI, resetting ASIC!\n"); + ql_queue_asic_error(qdev); + } else { + /* Begin polled mode early so + * we don't get another interrupt + * when we leave mpi_worker. + */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0); + } + return status; +} + +/* Process an inter-device event completion. + * If good, signal the caller's completion. + */ +static int ql_idc_cmplt_aen(struct ql_adapter *qdev) +{ + int status; + struct mbox_params *mbcp = &qdev->idc_mbc; + mbcp->out_count = 4; + status = ql_get_mb_sts(qdev, mbcp); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Could not read MPI, resetting RISC!\n"); + ql_queue_fw_error(qdev); + } else + /* Wake up the sleeping mpi_idc_work thread that is + * waiting for this event. + */ + complete(&qdev->ide_completion); + + return status; +} + +static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + mbcp->out_count = 2; + + status = ql_get_mb_sts(qdev, mbcp); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "%s: Could not get mailbox status.\n", __func__); + return; + } + + qdev->link_status = mbcp->mbox_out[1]; + netif_err(qdev, drv, qdev->ndev, "Link Up.\n"); + + /* If we're coming back from an IDC event + * then set up the CAM and frame routing. + */ + if (test_bit(QL_CAM_RT_SET, &qdev->flags)) { + status = ql_cam_route_initialize(qdev); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init CAM/Routing tables.\n"); + return; + } else + clear_bit(QL_CAM_RT_SET, &qdev->flags); + } + + /* Queue up a worker to check the frame + * size information, and fix it if it's not + * to our liking. 
+ */ + if (!test_bit(QL_PORT_CFG, &qdev->flags)) { + netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n"); + set_bit(QL_PORT_CFG, &qdev->flags); + /* Begin polled mode early so + * we don't get another interrupt + * when we leave mpi_worker dpc. + */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + queue_delayed_work(qdev->workqueue, + &qdev->mpi_port_cfg_work, 0); + } + + ql_link_on(qdev); +} + +static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + + mbcp->out_count = 3; + + status = ql_get_mb_sts(qdev, mbcp); + if (status) + netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n"); + + ql_link_off(qdev); +} + +static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + + mbcp->out_count = 5; + + status = ql_get_mb_sts(qdev, mbcp); + if (status) + netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n"); + else + netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n"); + + return status; +} + +static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + + mbcp->out_count = 1; + + status = ql_get_mb_sts(qdev, mbcp); + if (status) + netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n"); + else + netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n"); + + return status; +} + +static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + + mbcp->out_count = 6; + + status = ql_get_mb_sts(qdev, mbcp); + if (status) + netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n"); + else { + int i; + netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n"); + for (i = 0; i < mbcp->out_count; i++) + netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n", + i, mbcp->mbox_out[i]); + + } + + return status; +} + +static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + + mbcp->out_count = 2; + + status = ql_get_mb_sts(qdev, mbcp); + if (status) { + netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n"); + } else { + netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n", + mbcp->mbox_out[1]); + qdev->fw_rev_id = mbcp->mbox_out[1]; + status = ql_cam_route_initialize(qdev); + if (status) + netif_err(qdev, ifup, qdev->ndev, + "Failed to init CAM/Routing tables.\n"); + } +} + +/* Process an async event and clear it unless it's an + * error condition. + * This can get called iteratively from the mpi_work thread + * when events arrive via an interrupt. + * It also gets called when a mailbox command is polling for + * its completion. */ +static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + int orig_count = mbcp->out_count; + + /* Just get mailbox zero for now. */ + mbcp->out_count = 1; + status = ql_get_mb_sts(qdev, mbcp); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Could not read MPI, resetting ASIC!\n"); + ql_queue_asic_error(qdev); + goto end; + } + + switch (mbcp->mbox_out[0]) { + + /* This case is only active when we arrive here + * as a result of issuing a mailbox command to + * the firmware. + */ + case MB_CMD_STS_INTRMDT: + case MB_CMD_STS_GOOD: + case MB_CMD_STS_INVLD_CMD: + case MB_CMD_STS_XFC_ERR: + case MB_CMD_STS_CSUM_ERR: + case MB_CMD_STS_ERR: + case MB_CMD_STS_PARAM_ERR: + /* We can only get mailbox status if we're polling from an + * unfinished command. Get the rest of the status data and + * return back to the caller. + * We only end up here when we're polling for a mailbox + * command completion.
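+ * In that case the remaining out_count mailboxes are read here on + * behalf of the original caller.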
+ */ + mbcp->out_count = orig_count; + status = ql_get_mb_sts(qdev, mbcp); + return status; + + /* We are being asked by firmware to accept + * a change to the port. This is only + * a change to max frame sizes (Tx/Rx), pause + * parameters, or loopback mode. + */ + case AEN_IDC_REQ: + status = ql_idc_req_aen(qdev); + break; + + /* Process an inbound IDC event. + * This will happen when we're trying to + * change tx/rx max frame size, change pause + * parameters or loopback mode. + */ + case AEN_IDC_CMPLT: + case AEN_IDC_EXT: + status = ql_idc_cmplt_aen(qdev); + break; + + case AEN_LINK_UP: + ql_link_up(qdev, mbcp); + break; + + case AEN_LINK_DOWN: + ql_link_down(qdev, mbcp); + break; + + case AEN_FW_INIT_DONE: + /* If we're in the process of executing the firmware, + * then convert the status to normal mailbox status. + */ + if (mbcp->mbox_in[0] == MB_CMD_EX_FW) { + mbcp->out_count = orig_count; + status = ql_get_mb_sts(qdev, mbcp); + mbcp->mbox_out[0] = MB_CMD_STS_GOOD; + return status; + } + ql_init_fw_done(qdev, mbcp); + break; + + case AEN_AEN_SFP_IN: + ql_sfp_in(qdev, mbcp); + break; + + case AEN_AEN_SFP_OUT: + ql_sfp_out(qdev, mbcp); + break; + + /* This event can arrive at boot time or after an + * MPI reset if the firmware failed to initialize. + */ + case AEN_FW_INIT_FAIL: + /* If we're in the process of executing the firmware, + * then convert the status to normal mailbox status. + */ + if (mbcp->mbox_in[0] == MB_CMD_EX_FW) { + mbcp->out_count = orig_count; + status = ql_get_mb_sts(qdev, mbcp); + mbcp->mbox_out[0] = MB_CMD_STS_ERR; + return status; + } + netif_err(qdev, drv, qdev->ndev, + "Firmware initialization failed.\n"); + status = -EIO; + ql_queue_fw_error(qdev); + break; + + case AEN_SYS_ERR: + netif_err(qdev, drv, qdev->ndev, "System Error.\n"); + ql_queue_fw_error(qdev); + status = -EIO; + break; + + case AEN_AEN_LOST: + ql_aen_lost(qdev, mbcp); + break; + + case AEN_DCBX_CHG: + /* Need to support AEN 8110 */ + break; + default: + netif_err(qdev, drv, qdev->ndev, + "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); + /* Clear the MPI firmware status. */ + } +end: + ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); + /* Restore the original mailbox count to + * what the caller asked for. This can get + * changed when a mailbox command is waiting + * for a response and an AEN arrives and + * is handled. + * */ + mbcp->out_count = orig_count; + return status; +} + +/* Execute a single mailbox command. + * mbcp is a pointer to an array of u32. Each + * element in the array contains the value for its + * respective mailbox register. + */ +static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + unsigned long count; + + mutex_lock(&qdev->mpi_mutex); + + /* Begin polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + + /* Load the mailbox registers and wake up MPI RISC. */ + status = ql_exec_mb_cmd(qdev, mbcp); + if (status) + goto end; + + + /* If we're generating a system error, then there's nothing + * to wait for. + */ + if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR) + goto end; + + /* Wait for the command to complete. We loop + * here because some AEN might arrive while + * we're waiting for the mailbox command to + * complete. If more than 5 seconds expire we can + * assume something is wrong. */ + count = jiffies + HZ * MAILBOX_TIMEOUT; + do { + /* Wait for the interrupt to come in. */ + status = ql_wait_mbx_cmd_cmplt(qdev); + if (status) + continue; + + /* Process the event.
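+ * (Each call into ql_mpi_handler() consumes one event.)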
If it's an AEN, it + * will be handled in-line or a worker + * will be spawned. If it's our completion + * we will catch it below. + */ + status = ql_mpi_handler(qdev, mbcp); + if (status) + goto end; + + /* It's either the completion of our mailbox + * command or an AEN. If it's our + * completion then get out. + */ + if (((mbcp->mbox_out[0] & 0x0000f000) == + MB_CMD_STS_GOOD) || + ((mbcp->mbox_out[0] & 0x0000f000) == + MB_CMD_STS_INTRMDT)) + goto done; + } while (time_before(jiffies, count)); + + netif_err(qdev, drv, qdev->ndev, + "Timed out waiting for mailbox complete.\n"); + status = -ETIMEDOUT; + goto end; + +done: + + /* Now we can clear the interrupt condition + * and look at our status. + */ + ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); + + if (((mbcp->mbox_out[0] & 0x0000f000) != + MB_CMD_STS_GOOD) && + ((mbcp->mbox_out[0] & 0x0000f000) != + MB_CMD_STS_INTRMDT)) { + status = -EIO; + } +end: + /* End polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); + mutex_unlock(&qdev->mpi_mutex); + return status; +} + +/* Get MPI firmware version. This will be used for + * driver banner and for ethtool info. + * Returns zero on success. + */ +int ql_mb_about_fw(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status = 0; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 3; + + mbcp->mbox_in[0] = MB_CMD_ABOUT_FW; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed about firmware command\n"); + status = -EIO; + } + + /* Store the firmware version */ + qdev->fw_rev_id = mbcp->mbox_out[1]; + + return status; +} + +/* Get functional state for MPI firmware. + * Returns zero on success. + */ +int ql_mb_get_fw_state(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status = 0; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 2; + + mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed Get Firmware State.\n"); + status = -EIO; + } + + /* If bit zero is set in mbx 1 then the firmware is + * running, but not initialized. This should never + * happen. + */ + if (mbcp->mbox_out[1] & 1) { + netif_err(qdev, drv, qdev->ndev, + "Firmware waiting for initialization.\n"); + status = -EIO; + } + + return status; +} + +/* Send an ACK mailbox command to the firmware to + * let it continue with the change. + */ +static int ql_mb_idc_ack(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status = 0; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 5; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_IDC_ACK; + mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1]; + mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2]; + mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3]; + mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4]; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n"); + status = -EIO; + } + return status; +} + +/* Set link settings and maximum frame size settings + * for the current port. + * Most likely will block.
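+ * The new values are taken from qdev->link_config and + * qdev->max_frame_size (mailboxes 1 and 2 of the command below).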
+ */ +int ql_mb_set_port_cfg(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status = 0; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 3; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG; + mbcp->mbox_in[1] = qdev->link_config; + mbcp->mbox_in[2] = qdev->max_frame_size; + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) { + netif_err(qdev, drv, qdev->ndev, + "Port Config sent, wait for IDC.\n"); + } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed Set Port Configuration.\n"); + status = -EIO; + } + return status; +} + +static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr, + u32 size) +{ + int status = 0; + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 9; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM; + mbcp->mbox_in[1] = LSW(addr); + mbcp->mbox_in[2] = MSW(req_dma); + mbcp->mbox_in[3] = LSW(req_dma); + mbcp->mbox_in[4] = MSW(size); + mbcp->mbox_in[5] = LSW(size); + mbcp->mbox_in[6] = MSW(MSD(req_dma)); + mbcp->mbox_in[7] = LSW(MSD(req_dma)); + mbcp->mbox_in[8] = MSW(addr); + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n"); + status = -EIO; + } + return status; +} + +/* Issue a mailbox command to dump RISC RAM. */ +int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, + u32 ram_addr, int word_count) +{ + int status; + char *my_buf; + dma_addr_t buf_dma; + + my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32), + &buf_dma); + if (!my_buf) + return -EIO; + + status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count); + if (!status) + memcpy(buf, my_buf, word_count * sizeof(u32)); + + pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf, + buf_dma); + return status; +} + +/* Get link settings and maximum frame size settings + * for the current port. + * Most likely will block. 
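+ * On success the values are cached in qdev->link_config and + * qdev->max_frame_size for later comparison against the MTU.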
+ */ +int ql_mb_get_port_cfg(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status = 0; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 3; + + mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed Get Port Configuration.\n"); + status = -EIO; + } else { + netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, + "Passed Get Port Configuration.\n"); + qdev->link_config = mbcp->mbox_out[1]; + qdev->max_frame_size = mbcp->mbox_out[2]; + } + return status; +} + +int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 2; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE; + mbcp->mbox_in[1] = wol; + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n"); + status = -EIO; + } + return status; +} + +int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + u8 *addr = qdev->ndev->dev_addr; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 8; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC; + if (enable_wol) { + mbcp->mbox_in[1] = (u32)addr[0]; + mbcp->mbox_in[2] = (u32)addr[1]; + mbcp->mbox_in[3] = (u32)addr[2]; + mbcp->mbox_in[4] = (u32)addr[3]; + mbcp->mbox_in[5] = (u32)addr[4]; + mbcp->mbox_in[6] = (u32)addr[5]; + mbcp->mbox_in[7] = 0; + } else { + mbcp->mbox_in[1] = 0; + mbcp->mbox_in[2] = 1; + mbcp->mbox_in[3] = 1; + mbcp->mbox_in[4] = 1; + mbcp->mbox_in[5] = 1; + mbcp->mbox_in[6] = 1; + mbcp->mbox_in[7] = 0; + } + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n"); + status = -EIO; + } + return status; +} + +/* IDC - Inter Device Communication... + * Some firmware commands require consent of adjacent FCoE + * function. This function waits for the OK, or a + * counter-request for a little more time. + * The firmware will complete the request if the other + * function doesn't respond. + */ +static int ql_idc_wait(struct ql_adapter *qdev) +{ + int status = -ETIMEDOUT; + long wait_time = 1 * HZ; + struct mbox_params *mbcp = &qdev->idc_mbc; + do { + /* Wait here for the command to complete + * via the IDC process. + */ + wait_time = + wait_for_completion_timeout(&qdev->ide_completion, + wait_time); + if (!wait_time) { + netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n"); + break; + } + /* Now examine the response from the IDC process. + * We might have a good completion or a request for + * more wait time.
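+ * A time extension request carries the extra wait in bits 11:8 of + * mailbox 1; it is added to wait_time below.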
+ */ + if (mbcp->mbox_out[0] == AEN_IDC_EXT) { + netif_err(qdev, drv, qdev->ndev, + "IDC Time Extension from function.\n"); + wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f; + } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) { + netif_err(qdev, drv, qdev->ndev, "IDC Success.\n"); + status = 0; + break; + } else { + netif_err(qdev, drv, qdev->ndev, + "IDC: Invalid State 0x%.04x.\n", + mbcp->mbox_out[0]); + status = -EIO; + break; + } + } while (wait_time); + + return status; +} + +int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 2; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG; + mbcp->mbox_in[1] = led_config; + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed to set LED Configuration.\n"); + status = -EIO; + } + + return status; +} + +int ql_mb_get_led_cfg(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 2; + + mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed to get LED Configuration.\n"); + status = -EIO; + } else + qdev->led_config = mbcp->mbox_out[1]; + + return status; +} + +int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 2; + + mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL; + mbcp->mbox_in[1] = control; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { + netif_err(qdev, drv, qdev->ndev, + "Command not supported by firmware.\n"); + status = -EINVAL; + } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { + /* This indicates that the firmware is + * already in the state we are trying to + * change it to. + */ + netif_err(qdev, drv, qdev->ndev, + "Command parameters make no change.\n"); + } + return status; +} + +/* Returns a negative error code or the mailbox command status. 
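+ * + * A minimal caller sketch, mirroring ql_wait_fifo_empty() below: + * + * u32 empty; + * status = ql_mb_get_mgmnt_traffic_ctl(qdev, &empty); + * if (!status && (empty & MB_GET_MPI_TFK_FIFO_EMPTY)) + * the management FIFO has drained.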
*/ +static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + *control = 0; + + mbcp->in_count = 1; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) { + *control = mbcp->mbox_in[1]; + return status; + } + + if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { + netif_err(qdev, drv, qdev->ndev, + "Command not supported by firmware.\n"); + status = -EINVAL; + } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { + netif_err(qdev, drv, qdev->ndev, + "Failed to get MPI traffic control.\n"); + status = -EIO; + } + return status; +} + +int ql_wait_fifo_empty(struct ql_adapter *qdev) +{ + int count = 5; + u32 mgmnt_fifo_empty; + u32 nic_fifo_empty; + + do { + nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE; + ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty); + mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY; + if (nic_fifo_empty && mgmnt_fifo_empty) + return 0; + msleep(100); + } while (count-- > 0); + return -ETIMEDOUT; +} + +/* API called in work thread context to set new TX/RX + * maximum frame size values to match MTU. + */ +static int ql_set_port_cfg(struct ql_adapter *qdev) +{ + int status; + status = ql_mb_set_port_cfg(qdev); + if (status) + return status; + status = ql_idc_wait(qdev); + return status; +} + +/* The following routines are worker threads that process + * events that may sleep waiting for completion. + */ + +/* This thread gets the maximum TX and RX frame size values + * from the firmware and, if necessary, changes them to match + * the MTU setting. + */ +void ql_mpi_port_cfg_work(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, mpi_port_cfg_work.work); + int status; + + status = ql_mb_get_port_cfg(qdev); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Bug: Failed to get port config data.\n"); + goto err; + } + + if (qdev->link_config & CFG_JUMBO_FRAME_SIZE && + qdev->max_frame_size == + CFG_DEFAULT_MAX_FRAME_SIZE) + goto end; + + qdev->link_config |= CFG_JUMBO_FRAME_SIZE; + qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE; + status = ql_set_port_cfg(qdev); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Bug: Failed to set port config data.\n"); + goto err; + } +end: + clear_bit(QL_PORT_CFG, &qdev->flags); + return; +err: + ql_queue_fw_error(qdev); + goto end; +} + +/* Process an inter-device request. This is issued by + * the firmware in response to another function requesting + * a change to the port. We set a flag to indicate a change + * has been made and then send a mailbox command ACKing + * the change request. + */ +void ql_mpi_idc_work(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, mpi_idc_work.work); + int status; + struct mbox_params *mbcp = &qdev->idc_mbc; + u32 aen; + int timeout; + + aen = mbcp->mbox_out[1] >> 16; + timeout = (mbcp->mbox_out[1] >> 8) & 0xf; + + switch (aen) { + default: + netif_err(qdev, drv, qdev->ndev, + "Bug: Unhandled IDC action.\n"); + break; + case MB_CMD_PORT_RESET: + case MB_CMD_STOP_FW: + ql_link_off(qdev); + case MB_CMD_SET_PORT_CFG: + /* Signal the resulting link up AEN + * that the frame routing and mac addr + * need to be set.
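+ * (QL_CAM_RT_SET is consumed by ql_link_up(), which rebuilds the + * CAM and routing tables and then clears the bit.)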
+ * */ + set_bit(QL_CAM_RT_SET, &qdev->flags); + /* Do ACK if required */ + if (timeout) { + status = ql_mb_idc_ack(qdev); + if (status) + netif_err(qdev, drv, qdev->ndev, + "Bug: No pending IDC!\n"); + } else { + netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, + "IDC ACK not required\n"); + status = 0; /* success */ + } + break; + + /* These sub-commands issued by another (FCoE) + * function are requesting to do an operation + * on the shared resource (MPI environment). + * We currently don't issue these so we just + * ACK the request. + */ + case MB_CMD_IOP_RESTART_MPI: + case MB_CMD_IOP_PREP_LINK_DOWN: + /* Drop the link, reload the routing + * table when link comes up. + */ + ql_link_off(qdev); + set_bit(QL_CAM_RT_SET, &qdev->flags); + /* Fall through. */ + case MB_CMD_IOP_DVR_START: + case MB_CMD_IOP_FLASH_ACC: + case MB_CMD_IOP_CORE_DUMP_MPI: + case MB_CMD_IOP_PREP_UPDATE_MPI: + case MB_CMD_IOP_COMP_UPDATE_MPI: + case MB_CMD_IOP_NONE: /* an IDC without params */ + /* Do ACK if required */ + if (timeout) { + status = ql_mb_idc_ack(qdev); + if (status) + netif_err(qdev, drv, qdev->ndev, + "Bug: No pending IDC!\n"); + } else { + netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, + "IDC ACK not required\n"); + status = 0; /* success */ + } + break; + } +} + +void ql_mpi_work(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, mpi_work.work); + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int err = 0; + + mutex_lock(&qdev->mpi_mutex); + /* Begin polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + + while (ql_read32(qdev, STS) & STS_PI) { + memset(mbcp, 0, sizeof(struct mbox_params)); + mbcp->out_count = 1; + /* Don't continue if an async event + * did not complete properly. + */ + err = ql_mpi_handler(qdev, mbcp); + if (err) + break; + } + + /* End polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); + mutex_unlock(&qdev->mpi_mutex); + ql_enable_completion_interrupt(qdev, 0); +} + +void ql_mpi_reset_work(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, mpi_reset_work.work); + cancel_delayed_work_sync(&qdev->mpi_work); + cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); + cancel_delayed_work_sync(&qdev->mpi_idc_work); + /* If we're not the dominant NIC function, + * then there is nothing to do. + */ + if (!ql_own_firmware(qdev)) { + netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); + return; + } + + if (!ql_core_dump(qdev, qdev->mpi_coredump)) { + netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); + qdev->core_is_dumped = 1; + queue_delayed_work(qdev->workqueue, + &qdev->mpi_core_to_log, 5 * HZ); + } + ql_soft_reset_mpi_risc(qdev); +} diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile deleted file mode 100644 index 861a0590b1f4..000000000000 --- a/drivers/net/netxen/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2003 - 2009 NetXen, Inc. -# Copyright (C) 2009 - QLogic Corporation. -# All rights reserved. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, -# MA 02111-1307, USA. -# -# The full GNU General Public License is included in this distribution -# in the file called "COPYING". -# -# - - -obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o - -netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \ - netxen_nic_ethtool.o netxen_nic_ctx.o diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h deleted file mode 100644 index 196b660e1d91..000000000000 --- a/drivers/net/netxen/netxen_nic.h +++ /dev/null @@ -1,1441 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". - * - */ - -#ifndef _NETXEN_NIC_H_ -#define _NETXEN_NIC_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include -#include - -#include "netxen_nic_hdr.h" -#include "netxen_nic_hw.h" - -#define _NETXEN_NIC_LINUX_MAJOR 4 -#define _NETXEN_NIC_LINUX_MINOR 0 -#define _NETXEN_NIC_LINUX_SUBVERSION 76 -#define NETXEN_NIC_LINUX_VERSIONID "4.0.76" - -#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) -#define _major(v) (((v) >> 24) & 0xff) -#define _minor(v) (((v) >> 16) & 0xff) -#define _build(v) ((v) & 0xffff) - -/* version in image has weird encoding: - * 7:0 - major - * 15:8 - minor - * 31:16 - build (little endian) - */ -#define NETXEN_DECODE_VERSION(v) \ - NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) - -#define NETXEN_NUM_FLASH_SECTORS (64) -#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024) -#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \ - * NETXEN_FLASH_SECTOR_SIZE) - -#define RCV_DESC_RINGSIZE(rds_ring) \ - (sizeof(struct rcv_desc) * (rds_ring)->num_desc) -#define RCV_BUFF_RINGSIZE(rds_ring) \ - (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc) -#define STATUS_DESC_RINGSIZE(sds_ring) \ - (sizeof(struct status_desc) * (sds_ring)->num_desc) -#define TX_BUFF_RINGSIZE(tx_ring) \ - (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc) -#define TX_DESC_RINGSIZE(tx_ring) \ - (sizeof(struct cmd_desc_type0) * tx_ring->num_desc) - -#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) - -#define NETXEN_RCV_PRODUCER_OFFSET 0 -#define NETXEN_RCV_PEG_DB_ID 2 -#define NETXEN_HOST_DUMMY_DMA_SIZE 1024 -#define FLASH_SUCCESS 0 - -#define ADDR_IN_WINDOW1(off) \ - ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 
1 : 0 - -#define ADDR_IN_RANGE(addr, low, high) \ - (((addr) < (high)) && ((addr) >= (low))) - -/* - * normalize a 64MB crb address to 32MB PCI window - * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1 - */ -#define NETXEN_CRB_NORMAL(reg) \ - ((reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST) - -#define NETXEN_CRB_NORMALIZE(adapter, reg) \ - pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg)) - -#define DB_NORMALIZE(adapter, off) \ - (adapter->ahw.db_base + (off)) - -#define NX_P2_C0 0x24 -#define NX_P2_C1 0x25 -#define NX_P3_A0 0x30 -#define NX_P3_A2 0x30 -#define NX_P3_B0 0x40 -#define NX_P3_B1 0x41 -#define NX_P3_B2 0x42 -#define NX_P3P_A0 0x50 - -#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1) -#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0) -#define NX_IS_REVISION_P3P(REVISION) (REVISION >= NX_P3P_A0) - -#define FIRST_PAGE_GROUP_START 0 -#define FIRST_PAGE_GROUP_END 0x100000 - -#define SECOND_PAGE_GROUP_START 0x6000000 -#define SECOND_PAGE_GROUP_END 0x68BC000 - -#define THIRD_PAGE_GROUP_START 0x70E4000 -#define THIRD_PAGE_GROUP_END 0x8000000 - -#define FIRST_PAGE_GROUP_SIZE FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START -#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START -#define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START - -#define P2_MAX_MTU (8000) -#define P3_MAX_MTU (9600) -#define NX_ETHERMTU 1500 -#define NX_MAX_ETHERHDR 32 /* This contains some padding */ - -#define NX_P2_RX_BUF_MAX_LEN 1760 -#define NX_P3_RX_BUF_MAX_LEN (NX_MAX_ETHERHDR + NX_ETHERMTU) -#define NX_P2_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P2_MAX_MTU) -#define NX_P3_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P3_MAX_MTU) -#define NX_CT_DEFAULT_RX_BUF_LEN 2048 -#define NX_LRO_BUFFER_EXTRA 2048 - -#define NX_RX_LRO_BUFFER_LENGTH (8060) - -/* - * Maximum number of ring contexts - */ -#define MAX_RING_CTX 1 - -/* Opcodes to be used with the commands */ -#define TX_ETHER_PKT 0x01 -#define TX_TCP_PKT 0x02 -#define TX_UDP_PKT 0x03 -#define TX_IP_PKT 0x04 -#define TX_TCP_LSO 0x05 -#define TX_TCP_LSO6 0x06 -#define TX_IPSEC 0x07 -#define TX_IPSEC_CMD 0x0a -#define TX_TCPV6_PKT 0x0b -#define TX_UDPV6_PKT 0x0c - -/* The following opcodes are for internal consumption. */ -#define NETXEN_CONTROL_OP 0x10 -#define PEGNET_REQUEST 0x11 - -#define MAX_NUM_CARDS 4 - -#define NETXEN_MAX_FRAGS_PER_TX 14 -#define MAX_TSO_HEADER_DESC 2 -#define MGMT_CMD_DESC_RESV 4 -#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ - + MGMT_CMD_DESC_RESV) -#define NX_MAX_TX_TIMEOUTS 2 - -/* - * Following are the states of the Phantom. Phantom will set them and - * Host will read to check if the fields are correct. 
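- * The handshake: firmware posts PHAN_INITIALIZE_START/COMPLETE/FAILED - * (defined below), and the host answers with PHAN_INITIALIZE_ACK.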
- */ -#define PHAN_INITIALIZE_START 0xff00 -#define PHAN_INITIALIZE_FAILED 0xffff -#define PHAN_INITIALIZE_COMPLETE 0xff01 - -/* Host writes the following to notify that it has done the init-handshake */ -#define PHAN_INITIALIZE_ACK 0xf00f - -#define NUM_RCV_DESC_RINGS 3 -#define NUM_STS_DESC_RINGS 4 - -#define RCV_RING_NORMAL 0 -#define RCV_RING_JUMBO 1 -#define RCV_RING_LRO 2 - -#define MIN_CMD_DESCRIPTORS 64 -#define MIN_RCV_DESCRIPTORS 64 -#define MIN_JUMBO_DESCRIPTORS 32 - -#define MAX_CMD_DESCRIPTORS 1024 -#define MAX_RCV_DESCRIPTORS_1G 4096 -#define MAX_RCV_DESCRIPTORS_10G 8192 -#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512 -#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024 -#define MAX_LRO_RCV_DESCRIPTORS 8 - -#define DEFAULT_RCV_DESCRIPTORS_1G 2048 -#define DEFAULT_RCV_DESCRIPTORS_10G 4096 - -#define NETXEN_CTX_SIGNATURE 0xdee0 -#define NETXEN_CTX_SIGNATURE_V2 0x0002dee0 -#define NETXEN_CTX_RESET 0xbad0 -#define NETXEN_CTX_D3_RESET 0xacc0 -#define NETXEN_RCV_PRODUCER(ringid) (ringid) - -#define PHAN_PEG_RCV_INITIALIZED 0xff01 -#define PHAN_PEG_RCV_START_INITIALIZE 0xff00 - -#define get_next_index(index, length) \ - (((index) + 1) & ((length) - 1)) - -#define get_index_range(index,length,count) \ - (((index) + (count)) & ((length) - 1)) - -#define MPORT_SINGLE_FUNCTION_MODE 0x1111 -#define MPORT_MULTI_FUNCTION_MODE 0x2222 - -#define NX_MAX_PCI_FUNC 8 - -/* - * NetXen host-peg signal message structure - * - * Bit 0-1 : peg_id => 0x2 for tx and 01 for rx - * Bit 2 : priv_id => must be 1 - * Bit 3-17 : count => for doorbell - * Bit 18-27 : ctx_id => Context id - * Bit 28-31 : opcode - */ - -typedef u32 netxen_ctx_msg; - -#define netxen_set_msg_peg_id(config_word, val) \ - ((config_word) &= ~3, (config_word) |= val & 3) -#define netxen_set_msg_privid(config_word) \ - ((config_word) |= 1 << 2) -#define netxen_set_msg_count(config_word, val) \ - ((config_word) &= ~(0x7fff<<3), (config_word) |= (val & 0x7fff) << 3) -#define netxen_set_msg_ctxid(config_word, val) \ - ((config_word) &= ~(0x3ff<<18), (config_word) |= (val & 0x3ff) << 18) -#define netxen_set_msg_opcode(config_word, val) \ - ((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28) - -struct netxen_rcv_ring { - __le64 addr; - __le32 size; - __le32 rsrvd; -}; - -struct netxen_sts_ring { - __le64 addr; - __le32 size; - __le16 msi_index; - __le16 rsvd; -} ; - -struct netxen_ring_ctx { - - /* one command ring */ - __le64 cmd_consumer_offset; - __le64 cmd_ring_addr; - __le32 cmd_ring_size; - __le32 rsrvd; - - /* three receive rings */ - struct netxen_rcv_ring rcv_rings[NUM_RCV_DESC_RINGS]; - - __le64 sts_ring_addr; - __le32 sts_ring_size; - - __le32 ctx_id; - - __le64 rsrvd_2[3]; - __le32 sts_ring_count; - __le32 rsrvd_3; - struct netxen_sts_ring sts_rings[NUM_STS_DESC_RINGS]; - -} __attribute__ ((aligned(64))); - -/* - * Following data structures describe the descriptors that will be used. - * Added fields of tcpHdrSize and ipHdrSize; the driver needs to set these - * only when we are doing LSO (above the 1500 size packet).
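- * (total_hdr_length in cmd_desc_type0 below carries the MAC+IP+TCP - * header size for LSO frames.)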
- */ - -/* - * The size of the reference handle has been changed to 16 bits to pass the MSS fields - * for the LSO packet - */ - -#define FLAGS_CHECKSUM_ENABLED 0x01 -#define FLAGS_LSO_ENABLED 0x02 -#define FLAGS_IPSEC_SA_ADD 0x04 -#define FLAGS_IPSEC_SA_DELETE 0x08 -#define FLAGS_VLAN_TAGGED 0x10 -#define FLAGS_VLAN_OOB 0x40 - -#define netxen_set_tx_vlan_tci(cmd_desc, v) \ - (cmd_desc)->vlan_TCI = cpu_to_le16(v); - -#define netxen_set_cmd_desc_port(cmd_desc, var) \ - ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) -#define netxen_set_cmd_desc_ctxid(cmd_desc, var) \ - ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0)) - -#define netxen_set_tx_port(_desc, _port) \ - (_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0) - -#define netxen_set_tx_flags_opcode(_desc, _flags, _opcode) \ - (_desc)->flags_opcode = \ - cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)) - -#define netxen_set_tx_frags_len(_desc, _frags, _len) \ - (_desc)->nfrags__length = \ - cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)) - -struct cmd_desc_type0 { - u8 tcp_hdr_offset; /* For LSO only */ - u8 ip_hdr_offset; /* For LSO only */ - __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */ - __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */ - - __le64 addr_buffer2; - - __le16 reference_handle; - __le16 mss; - u8 port_ctxid; /* 7:4 ctxid 3:0 port */ - u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ - __le16 conn_id; /* IPSec offload only */ - - __le64 addr_buffer3; - __le64 addr_buffer1; - - __le16 buffer_length[4]; - - __le64 addr_buffer4; - - __le32 reserved2; - __le16 reserved; - __le16 vlan_TCI; - -} __attribute__ ((aligned(64))); - -/* Note: sizeof(rcv_desc) should always be a multiple of 2 */ -struct rcv_desc { - __le16 reference_handle; - __le16 reserved; - __le32 buffer_length; /* allocated buffer length (usually 2K) */ - __le64 addr_buffer; -}; - -/* opcode field in status_desc */ -#define NETXEN_NIC_SYN_OFFLOAD 0x03 -#define NETXEN_NIC_RXPKT_DESC 0x04 -#define NETXEN_OLD_RXPKT_DESC 0x3f -#define NETXEN_NIC_RESPONSE_DESC 0x05 -#define NETXEN_NIC_LRO_DESC 0x12 - -/* for status field in status_desc */ -#define STATUS_NEED_CKSUM (1) -#define STATUS_CKSUM_OK (2) - -/* owner bits of status_desc */ -#define STATUS_OWNER_HOST (0x1ULL << 56) -#define STATUS_OWNER_PHANTOM (0x2ULL << 56) - -/* Status descriptor: - 0-3 port, 4-7 status, 8-11 type, 12-27 total_length - 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset - 53-55 desc_cnt, 56-57 owner, 58-63 opcode - */ -#define netxen_get_sts_port(sts_data) \ - ((sts_data) & 0x0F) -#define netxen_get_sts_status(sts_data) \ - (((sts_data) >> 4) & 0x0F) -#define netxen_get_sts_type(sts_data) \ - (((sts_data) >> 8) & 0x0F) -#define netxen_get_sts_totallength(sts_data) \ - (((sts_data) >> 12) & 0xFFFF) -#define netxen_get_sts_refhandle(sts_data) \ - (((sts_data) >> 28) & 0xFFFF) -#define netxen_get_sts_prot(sts_data) \ - (((sts_data) >> 44) & 0x0F) -#define netxen_get_sts_pkt_offset(sts_data) \ - (((sts_data) >> 48) & 0x1F) -#define netxen_get_sts_desc_cnt(sts_data) \ - (((sts_data) >> 53) & 0x7) -#define netxen_get_sts_opcode(sts_data) \ - (((sts_data) >> 58) & 0x03F) - -#define netxen_get_lro_sts_refhandle(sts_data) \ - ((sts_data) & 0x0FFFF) -#define netxen_get_lro_sts_length(sts_data) \ - (((sts_data) >> 16) & 0x0FFFF) -#define netxen_get_lro_sts_l2_hdr_offset(sts_data) \ - (((sts_data) >> 32) & 0x0FF) -#define netxen_get_lro_sts_l4_hdr_offset(sts_data) \ - (((sts_data) >> 40) & 0x0FF) -#define netxen_get_lro_sts_timestamp(sts_data)
\ - (((sts_data) >> 48) & 0x1) -#define netxen_get_lro_sts_type(sts_data) \ - (((sts_data) >> 49) & 0x7) -#define netxen_get_lro_sts_push_flag(sts_data) \ - (((sts_data) >> 52) & 0x1) -#define netxen_get_lro_sts_seq_number(sts_data) \ - ((sts_data) & 0x0FFFFFFFF) - - -struct status_desc { - __le64 status_desc_data[2]; -} __attribute__ ((aligned(16))); - -/* UNIFIED ROMIMAGE *************************/ -#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0 -#define NX_UNI_DIR_SECT_BOOTLD 0x6 -#define NX_UNI_DIR_SECT_FW 0x7 - -/*Offsets */ -#define NX_UNI_CHIP_REV_OFF 10 -#define NX_UNI_FLAGS_OFF 11 -#define NX_UNI_BIOS_VERSION_OFF 12 -#define NX_UNI_BOOTLD_IDX_OFF 27 -#define NX_UNI_FIRMWARE_IDX_OFF 29 - -struct uni_table_desc{ - uint32_t findex; - uint32_t num_entries; - uint32_t entry_size; - uint32_t reserved[5]; -}; - -struct uni_data_desc{ - uint32_t findex; - uint32_t size; - uint32_t reserved[5]; -}; - -/* UNIFIED ROMIMAGE *************************/ - -/* The version of the main data structure */ -#define NETXEN_BDINFO_VERSION 1 - -/* Magic number to let user know flash is programmed */ -#define NETXEN_BDINFO_MAGIC 0x12345678 - -/* Max number of Gig ports on a Phantom board */ -#define NETXEN_MAX_PORTS 4 - -#define NETXEN_BRDTYPE_P1_BD 0x0000 -#define NETXEN_BRDTYPE_P1_SB 0x0001 -#define NETXEN_BRDTYPE_P1_SMAX 0x0002 -#define NETXEN_BRDTYPE_P1_SOCK 0x0003 - -#define NETXEN_BRDTYPE_P2_SOCK_31 0x0008 -#define NETXEN_BRDTYPE_P2_SOCK_35 0x0009 -#define NETXEN_BRDTYPE_P2_SB35_4G 0x000a -#define NETXEN_BRDTYPE_P2_SB31_10G 0x000b -#define NETXEN_BRDTYPE_P2_SB31_2G 0x000c - -#define NETXEN_BRDTYPE_P2_SB31_10G_IMEZ 0x000d -#define NETXEN_BRDTYPE_P2_SB31_10G_HMEZ 0x000e -#define NETXEN_BRDTYPE_P2_SB31_10G_CX4 0x000f - -#define NETXEN_BRDTYPE_P3_REF_QG 0x0021 -#define NETXEN_BRDTYPE_P3_HMEZ 0x0022 -#define NETXEN_BRDTYPE_P3_10G_CX4_LP 0x0023 -#define NETXEN_BRDTYPE_P3_4_GB 0x0024 -#define NETXEN_BRDTYPE_P3_IMEZ 0x0025 -#define NETXEN_BRDTYPE_P3_10G_SFP_PLUS 0x0026 -#define NETXEN_BRDTYPE_P3_10000_BASE_T 0x0027 -#define NETXEN_BRDTYPE_P3_XG_LOM 0x0028 -#define NETXEN_BRDTYPE_P3_4_GB_MM 0x0029 -#define NETXEN_BRDTYPE_P3_10G_SFP_CT 0x002a -#define NETXEN_BRDTYPE_P3_10G_SFP_QT 0x002b -#define NETXEN_BRDTYPE_P3_10G_CX4 0x0031 -#define NETXEN_BRDTYPE_P3_10G_XFP 0x0032 -#define NETXEN_BRDTYPE_P3_10G_TP 0x0080 - -/* Flash memory map */ -#define NETXEN_CRBINIT_START 0 /* crbinit section */ -#define NETXEN_BRDCFG_START 0x4000 /* board config */ -#define NETXEN_INITCODE_START 0x6000 /* pegtune code */ -#define NETXEN_BOOTLD_START 0x10000 /* bootld */ -#define NETXEN_IMAGE_START 0x43000 /* compressed image */ -#define NETXEN_SECONDARY_START 0x200000 /* backup images */ -#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ -#define NETXEN_USER_START 0x3E8000 /* Firmware info */ -#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ -#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ - -#define NX_OLD_MAC_ADDR_OFFSET (NETXEN_USER_START) -#define NX_FW_VERSION_OFFSET (NETXEN_USER_START+0x408) -#define NX_FW_SIZE_OFFSET (NETXEN_USER_START+0x40c) -#define NX_FW_MAC_ADDR_OFFSET (NETXEN_USER_START+0x418) -#define NX_FW_SERIAL_NUM_OFFSET (NETXEN_USER_START+0x81c) -#define NX_BIOS_VERSION_OFFSET (NETXEN_USER_START+0x83c) - -#define NX_HDR_VERSION_OFFSET (NETXEN_BRDCFG_START) -#define NX_BRDTYPE_OFFSET (NETXEN_BRDCFG_START+0x8) -#define NX_FW_MAGIC_OFFSET (NETXEN_BRDCFG_START+0x128) - -#define NX_FW_MIN_SIZE (0x3fffff) -#define NX_P2_MN_ROMIMAGE 0 -#define NX_P3_CT_ROMIMAGE 1 -#define NX_P3_MN_ROMIMAGE 2
-#define NX_UNIFIED_ROMIMAGE 3 -#define NX_FLASH_ROMIMAGE 4 -#define NX_UNKNOWN_ROMIMAGE 0xff - -#define NX_P2_MN_ROMIMAGE_NAME "nxromimg.bin" -#define NX_P3_CT_ROMIMAGE_NAME "nx3fwct.bin" -#define NX_P3_MN_ROMIMAGE_NAME "nx3fwmn.bin" -#define NX_UNIFIED_ROMIMAGE_NAME "phanfw.bin" -#define NX_FLASH_ROMIMAGE_NAME "flash" - -extern char netxen_nic_driver_name[]; - -/* Number of status descriptors to handle per interrupt */ -#define MAX_STATUS_HANDLE (64) - -/* - * netxen_skb_frag{} is to contain mapping info for each SG list. This - * has to be freed when DMA is complete. This is part of netxen_tx_buffer{}. - */ -struct netxen_skb_frag { - u64 dma; - u64 length; -}; - -struct netxen_recv_crb { - u32 crb_rcv_producer[NUM_RCV_DESC_RINGS]; - u32 crb_sts_consumer[NUM_STS_DESC_RINGS]; - u32 sw_int_mask[NUM_STS_DESC_RINGS]; -}; - -/* Following defines are for the state of the buffers */ -#define NETXEN_BUFFER_FREE 0 -#define NETXEN_BUFFER_BUSY 1 - -/* - * There will be one netxen_buffer per skb packet. These will be - * used to save the dma info for pci_unmap_page() - */ -struct netxen_cmd_buffer { - struct sk_buff *skb; - struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1]; - u32 frag_count; -}; - -/* In rx_buffer, we do not need multiple fragments as it is a single buffer */ -struct netxen_rx_buffer { - struct list_head list; - struct sk_buff *skb; - u64 dma; - u16 ref_handle; - u16 state; -}; - -/* Board types */ -#define NETXEN_NIC_GBE 0x01 -#define NETXEN_NIC_XGBE 0x02 - -/* - * One hardware_context{} per adapter - * contains interrupt info as well as shared hardware info. - */ -struct netxen_hardware_context { - void __iomem *pci_base0; - void __iomem *pci_base1; - void __iomem *pci_base2; - void __iomem *db_base; - void __iomem *ocm_win_crb; - - unsigned long db_len; - unsigned long pci_len0; - - u32 ocm_win; - u32 crb_win; - - rwlock_t crb_lock; - spinlock_t mem_lock; - - u8 cut_through; - u8 revision_id; - u8 pci_func; - u8 linkup; - u16 port_type; - u16 board_type; -}; - -#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ -#define ETHERNET_FCS_SIZE 4 - -struct netxen_adapter_stats { - u64 xmitcalled; - u64 xmitfinished; - u64 rxdropped; - u64 txdropped; - u64 csummed; - u64 rx_pkts; - u64 lro_pkts; - u64 rxbytes; - u64 txbytes; -}; - -/* - * Rcv Descriptor Context. One such per Rcv Descriptor. There may - * be one Rcv Descriptor for normal packets, one for jumbo and maybe others. - */ -struct nx_host_rds_ring { - u32 producer; - u32 num_desc; - u32 dma_size; - u32 skb_size; - u32 flags; - void __iomem *crb_rcv_producer; - struct rcv_desc *desc_head; - struct netxen_rx_buffer *rx_buf_arr; - struct list_head free_list; - spinlock_t lock; - dma_addr_t phys_addr; -}; - -struct nx_host_sds_ring { - u32 consumer; - u32 num_desc; - void __iomem *crb_sts_consumer; - void __iomem *crb_intr_mask; - - struct status_desc *desc_head; - struct netxen_adapter *adapter; - struct napi_struct napi; - struct list_head free_list[NUM_RCV_DESC_RINGS]; - - int irq; - - dma_addr_t phys_addr; - char name[IFNAMSIZ+4]; -}; - -struct nx_host_tx_ring { - u32 producer; - __le32 *hw_consumer; - u32 sw_consumer; - void __iomem *crb_cmd_producer; - void __iomem *crb_cmd_consumer; - u32 num_desc; - - struct netdev_queue *txq; - - struct netxen_cmd_buffer *cmd_buf_arr; - struct cmd_desc_type0 *desc_head; - dma_addr_t phys_addr; -}; - -/* - * Receive context. There is one such structure per instance of the - * receive processing. Any state information that is relevant to - * the receive must be in this structure.
The global data may be - * present elsewhere. - */ -struct netxen_recv_context { - u32 state; - u16 context_id; - u16 virt_port; - - struct nx_host_rds_ring *rds_rings; - struct nx_host_sds_ring *sds_rings; - - struct netxen_ring_ctx *hwctx; - dma_addr_t phys_addr; -}; - -/* New HW context creation */ - -#define NX_OS_CRB_RETRY_COUNT 4000 -#define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \ - (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16)) - -#define NX_CDRP_CLEAR 0x00000000 -#define NX_CDRP_CMD_BIT 0x80000000 - -/* - * All responses must have the NX_CDRP_CMD_BIT cleared - * in the crb NX_CDRP_CRB_OFFSET. - */ -#define NX_CDRP_FORM_RSP(rsp) (rsp) -#define NX_CDRP_IS_RSP(rsp) (((rsp) & NX_CDRP_CMD_BIT) == 0) - -#define NX_CDRP_RSP_OK 0x00000001 -#define NX_CDRP_RSP_FAIL 0x00000002 -#define NX_CDRP_RSP_TIMEOUT 0x00000003 - -/* - * All commands must have the NX_CDRP_CMD_BIT set in - * the crb NX_CDRP_CRB_OFFSET. - */ -#define NX_CDRP_FORM_CMD(cmd) (NX_CDRP_CMD_BIT | (cmd)) -#define NX_CDRP_IS_CMD(cmd) (((cmd) & NX_CDRP_CMD_BIT) != 0) - -#define NX_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001 -#define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002 -#define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003 -#define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004 -#define NX_CDRP_CMD_READ_MAX_RX_CTX 0x00000005 -#define NX_CDRP_CMD_READ_MAX_TX_CTX 0x00000006 -#define NX_CDRP_CMD_CREATE_RX_CTX 0x00000007 -#define NX_CDRP_CMD_DESTROY_RX_CTX 0x00000008 -#define NX_CDRP_CMD_CREATE_TX_CTX 0x00000009 -#define NX_CDRP_CMD_DESTROY_TX_CTX 0x0000000a -#define NX_CDRP_CMD_SETUP_STATISTICS 0x0000000e -#define NX_CDRP_CMD_GET_STATISTICS 0x0000000f -#define NX_CDRP_CMD_DELETE_STATISTICS 0x00000010 -#define NX_CDRP_CMD_SET_MTU 0x00000012 -#define NX_CDRP_CMD_READ_PHY 0x00000013 -#define NX_CDRP_CMD_WRITE_PHY 0x00000014 -#define NX_CDRP_CMD_READ_HW_REG 0x00000015 -#define NX_CDRP_CMD_GET_FLOW_CTL 0x00000016 -#define NX_CDRP_CMD_SET_FLOW_CTL 0x00000017 -#define NX_CDRP_CMD_READ_MAX_MTU 0x00000018 -#define NX_CDRP_CMD_READ_MAX_LRO 0x00000019 -#define NX_CDRP_CMD_CONFIGURE_TOE 0x0000001a -#define NX_CDRP_CMD_FUNC_ATTRIB 0x0000001b -#define NX_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c -#define NX_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d -#define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e -#define NX_CDRP_CMD_CONFIG_GBE_PORT 0x0000001f -#define NX_CDRP_CMD_MAX 0x00000020 - -#define NX_RCODE_SUCCESS 0 -#define NX_RCODE_NO_HOST_MEM 1 -#define NX_RCODE_NO_HOST_RESOURCE 2 -#define NX_RCODE_NO_CARD_CRB 3 -#define NX_RCODE_NO_CARD_MEM 4 -#define NX_RCODE_NO_CARD_RESOURCE 5 -#define NX_RCODE_INVALID_ARGS 6 -#define NX_RCODE_INVALID_ACTION 7 -#define NX_RCODE_INVALID_STATE 8 -#define NX_RCODE_NOT_SUPPORTED 9 -#define NX_RCODE_NOT_PERMITTED 10 -#define NX_RCODE_NOT_READY 11 -#define NX_RCODE_DOES_NOT_EXIST 12 -#define NX_RCODE_ALREADY_EXISTS 13 -#define NX_RCODE_BAD_SIGNATURE 14 -#define NX_RCODE_CMD_NOT_IMPL 15 -#define NX_RCODE_CMD_INVALID 16 -#define NX_RCODE_TIMEOUT 17 -#define NX_RCODE_CMD_FAILED 18 -#define NX_RCODE_MAX_EXCEEDED 19 -#define NX_RCODE_MAX 20 - -#define NX_DESTROY_CTX_RESET 0 -#define NX_DESTROY_CTX_D3_RESET 1 -#define NX_DESTROY_CTX_MAX 2 - -/* - * Capabilities - */ -#define NX_CAP_BIT(class, bit) (1 << bit) -#define NX_CAP0_LEGACY_CONTEXT NX_CAP_BIT(0, 0) -#define NX_CAP0_MULTI_CONTEXT NX_CAP_BIT(0, 1) -#define NX_CAP0_LEGACY_MN NX_CAP_BIT(0, 2) -#define NX_CAP0_LEGACY_MS NX_CAP_BIT(0, 3) -#define NX_CAP0_CUT_THROUGH NX_CAP_BIT(0, 4) -#define NX_CAP0_LRO NX_CAP_BIT(0, 5) -#define NX_CAP0_LSO NX_CAP_BIT(0, 
6) -#define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7) -#define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8) -#define NX_CAP0_HW_LRO NX_CAP_BIT(0, 10) - -/* - * Context state - */ -#define NX_HOST_CTX_STATE_FREED 0 -#define NX_HOST_CTX_STATE_ALLOCATED 1 -#define NX_HOST_CTX_STATE_ACTIVE 2 -#define NX_HOST_CTX_STATE_DISABLED 3 -#define NX_HOST_CTX_STATE_QUIESCED 4 -#define NX_HOST_CTX_STATE_MAX 5 - -/* - * Rx context - */ - -typedef struct { - __le64 host_phys_addr; /* Ring base addr */ - __le32 ring_size; /* Ring entries */ - __le16 msi_index; - __le16 rsvd; /* Padding */ -} nx_hostrq_sds_ring_t; - -typedef struct { - __le64 host_phys_addr; /* Ring base addr */ - __le64 buff_size; /* Packet buffer size */ - __le32 ring_size; /* Ring entries */ - __le32 ring_kind; /* Class of ring */ -} nx_hostrq_rds_ring_t; - -typedef struct { - __le64 host_rsp_dma_addr; /* Response dma'd here */ - __le32 capabilities[4]; /* Flag bit vector */ - __le32 host_int_crb_mode; /* Interrupt crb usage */ - __le32 host_rds_crb_mode; /* RDS crb usage */ - /* These ring offsets are relative to data[0] below */ - __le32 rds_ring_offset; /* Offset to RDS config */ - __le32 sds_ring_offset; /* Offset to SDS config */ - __le16 num_rds_rings; /* Count of RDS rings */ - __le16 num_sds_rings; /* Count of SDS rings */ - __le16 rsvd1; /* Padding */ - __le16 rsvd2; /* Padding */ - u8 reserved[128]; /* reserve space for future expansion*/ - /* MUST BE 64-bit aligned. - The following is packed: - - N hostrq_rds_rings - - N hostrq_sds_rings */ - char data[0]; -} nx_hostrq_rx_ctx_t; - -typedef struct { - __le32 host_producer_crb; /* Crb to use */ - __le32 rsvd1; /* Padding */ -} nx_cardrsp_rds_ring_t; - -typedef struct { - __le32 host_consumer_crb; /* Crb to use */ - __le32 interrupt_crb; /* Crb to use */ -} nx_cardrsp_sds_ring_t; - -typedef struct { - /* These ring offsets are relative to data[0] below */ - __le32 rds_ring_offset; /* Offset to RDS config */ - __le32 sds_ring_offset; /* Offset to SDS config */ - __le32 host_ctx_state; /* Starting State */ - __le32 num_fn_per_port; /* How many PCI fn share the port */ - __le16 num_rds_rings; /* Count of RDS rings */ - __le16 num_sds_rings; /* Count of SDS rings */ - __le16 context_id; /* Handle for context */ - u8 phys_port; /* Physical id of port */ - u8 virt_port; /* Virtual/Logical id of port */ - u8 reserved[128]; /* save space for future expansion */ - /* MUST BE 64-bit aligned. 
- The following is packed: - - N cardrsp_rds_rings - - N cardrsp_sds_rings */ - char data[0]; -} nx_cardrsp_rx_ctx_t; - -#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \ - (sizeof(HOSTRQ_RX) + \ - (rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) + \ - (sds_rings)*(sizeof(nx_hostrq_sds_ring_t))) - -#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \ - (sizeof(CARDRSP_RX) + \ - (rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) + \ - (sds_rings)*(sizeof(nx_cardrsp_sds_ring_t))) - -/* - * Tx context - */ - -typedef struct { - __le64 host_phys_addr; /* Ring base addr */ - __le32 ring_size; /* Ring entries */ - __le32 rsvd; /* Padding */ -} nx_hostrq_cds_ring_t; - -typedef struct { - __le64 host_rsp_dma_addr; /* Response dma'd here */ - __le64 cmd_cons_dma_addr; /* */ - __le64 dummy_dma_addr; /* */ - __le32 capabilities[4]; /* Flag bit vector */ - __le32 host_int_crb_mode; /* Interrupt crb usage */ - __le32 rsvd1; /* Padding */ - __le16 rsvd2; /* Padding */ - __le16 interrupt_ctl; - __le16 msi_index; - __le16 rsvd3; /* Padding */ - nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */ - u8 reserved[128]; /* future expansion */ -} nx_hostrq_tx_ctx_t; - -typedef struct { - __le32 host_producer_crb; /* Crb to use */ - __le32 interrupt_crb; /* Crb to use */ -} nx_cardrsp_cds_ring_t; - -typedef struct { - __le32 host_ctx_state; /* Starting state */ - __le16 context_id; /* Handle for context */ - u8 phys_port; /* Physical id of port */ - u8 virt_port; /* Virtual/Logical id of port */ - nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */ - u8 reserved[128]; /* future expansion */ -} nx_cardrsp_tx_ctx_t; - -#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX)) -#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX)) - -/* CRB */ - -#define NX_HOST_RDS_CRB_MODE_UNIQUE 0 -#define NX_HOST_RDS_CRB_MODE_SHARED 1 -#define NX_HOST_RDS_CRB_MODE_CUSTOM 2 -#define NX_HOST_RDS_CRB_MODE_MAX 3 - -#define NX_HOST_INT_CRB_MODE_UNIQUE 0 -#define NX_HOST_INT_CRB_MODE_SHARED 1 -#define NX_HOST_INT_CRB_MODE_NORX 2 -#define NX_HOST_INT_CRB_MODE_NOTX 3 -#define NX_HOST_INT_CRB_MODE_NORXTX 4 - - -/* MAC */ - -#define MC_COUNT_P2 16 -#define MC_COUNT_P3 38 - -#define NETXEN_MAC_NOOP 0 -#define NETXEN_MAC_ADD 1 -#define NETXEN_MAC_DEL 2 - -typedef struct nx_mac_list_s { - struct list_head list; - uint8_t mac_addr[ETH_ALEN+2]; -} nx_mac_list_t; - -struct nx_vlan_ip_list { - struct list_head list; - u32 ip_addr; -}; - -/* - * Interrupt coalescing defaults. The defaults are for 1500 MTU. They are - * adjusted based on the configured MTU.
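- * The NETXEN_DEFAULT_INTR_COALESCE_* values below are the packet-count - * and microsecond thresholds used at the 1500 MTU.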
- */ -#define NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US 3 -#define NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS 256 -#define NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS 64 -#define NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US 4 - -#define NETXEN_NIC_INTR_DEFAULT 0x04 - -typedef union { - struct { - uint16_t rx_packets; - uint16_t rx_time_us; - uint16_t tx_packets; - uint16_t tx_time_us; - } data; - uint64_t word; -} nx_nic_intr_coalesce_data_t; - -typedef struct { - uint16_t stats_time_us; - uint16_t rate_sample_time; - uint16_t flags; - uint16_t rsvd_1; - uint32_t low_threshold; - uint32_t high_threshold; - nx_nic_intr_coalesce_data_t normal; - nx_nic_intr_coalesce_data_t low; - nx_nic_intr_coalesce_data_t high; - nx_nic_intr_coalesce_data_t irq; -} nx_nic_intr_coalesce_t; - -#define NX_HOST_REQUEST 0x13 -#define NX_NIC_REQUEST 0x14 - -#define NX_MAC_EVENT 0x1 - -#define NX_IP_UP 2 -#define NX_IP_DOWN 3 - -/* - * Driver --> Firmware - */ -#define NX_NIC_H2C_OPCODE_START 0 -#define NX_NIC_H2C_OPCODE_CONFIG_RSS 1 -#define NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL 2 -#define NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3 -#define NX_NIC_H2C_OPCODE_CONFIG_LED 4 -#define NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5 -#define NX_NIC_H2C_OPCODE_CONFIG_L2_MAC 6 -#define NX_NIC_H2C_OPCODE_LRO_REQUEST 7 -#define NX_NIC_H2C_OPCODE_GET_SNMP_STATS 8 -#define NX_NIC_H2C_OPCODE_PROXY_START_REQUEST 9 -#define NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST 10 -#define NX_NIC_H2C_OPCODE_PROXY_SET_MTU 11 -#define NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12 -#define NX_NIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13 -#define NX_NIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14 -#define NX_NIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15 -#define NX_NIC_H2C_OPCODE_GET_NET_STATS 16 -#define NX_NIC_H2C_OPCODE_PROXY_UPDATE_P2V 17 -#define NX_NIC_H2C_OPCODE_CONFIG_IPADDR 18 -#define NX_NIC_H2C_OPCODE_CONFIG_LOOPBACK 19 -#define NX_NIC_H2C_OPCODE_PROXY_STOP_DONE 20 -#define NX_NIC_H2C_OPCODE_GET_LINKEVENT 21 -#define NX_NIC_C2C_OPCODE 22 -#define NX_NIC_H2C_OPCODE_CONFIG_BRIDGING 23 -#define NX_NIC_H2C_OPCODE_CONFIG_HW_LRO 24 -#define NX_NIC_H2C_OPCODE_LAST 25 - -/* - * Firmware --> Driver - */ - -#define NX_NIC_C2H_OPCODE_START 128 -#define NX_NIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129 -#define NX_NIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130 -#define NX_NIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131 -#define NX_NIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132 -#define NX_NIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133 -#define NX_NIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134 -#define NX_NIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135 -#define NX_NIC_C2H_OPCODE_GET_SNMP_STATS 136 -#define NX_NIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137 -#define NX_NIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138 -#define NX_NIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139 -#define NX_NIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140 -#define NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141 -#define NX_NIC_C2H_OPCODE_LAST 142 - -#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ -#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ -#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ - -#define NX_NIC_LRO_REQUEST_FIRST 0 -#define NX_NIC_LRO_REQUEST_ADD_FLOW 1 -#define NX_NIC_LRO_REQUEST_DELETE_FLOW 2 -#define NX_NIC_LRO_REQUEST_TIMER 3 -#define NX_NIC_LRO_REQUEST_CLEANUP 4 -#define NX_NIC_LRO_REQUEST_ADD_FLOW_SCHEDULED 5 -#define NX_TOE_LRO_REQUEST_ADD_FLOW 6 -#define NX_TOE_LRO_REQUEST_ADD_FLOW_RESPONSE 7 -#define NX_TOE_LRO_REQUEST_DELETE_FLOW 8 -#define NX_TOE_LRO_REQUEST_DELETE_FLOW_RESPONSE 9 -#define 
NX_TOE_LRO_REQUEST_TIMER 10 -#define NX_NIC_LRO_REQUEST_LAST 11 - -#define NX_FW_CAPABILITY_LINK_NOTIFICATION (1 << 5) -#define NX_FW_CAPABILITY_SWITCHING (1 << 6) -#define NX_FW_CAPABILITY_PEXQ (1 << 7) -#define NX_FW_CAPABILITY_BDG (1 << 8) -#define NX_FW_CAPABILITY_FVLANTX (1 << 9) -#define NX_FW_CAPABILITY_HW_LRO (1 << 10) -#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11) - -/* module types */ -#define LINKEVENT_MODULE_NOT_PRESENT 1 -#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2 -#define LINKEVENT_MODULE_OPTICAL_SRLR 3 -#define LINKEVENT_MODULE_OPTICAL_LRM 4 -#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5 -#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6 -#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7 -#define LINKEVENT_MODULE_TWINAX 8 - -#define LINKSPEED_10GBPS 10000 -#define LINKSPEED_1GBPS 1000 -#define LINKSPEED_100MBPS 100 -#define LINKSPEED_10MBPS 10 - -#define LINKSPEED_ENCODED_10MBPS 0 -#define LINKSPEED_ENCODED_100MBPS 1 -#define LINKSPEED_ENCODED_1GBPS 2 - -#define LINKEVENT_AUTONEG_DISABLED 0 -#define LINKEVENT_AUTONEG_ENABLED 1 - -#define LINKEVENT_HALF_DUPLEX 0 -#define LINKEVENT_FULL_DUPLEX 1 - -#define LINKEVENT_LINKSPEED_MBPS 0 -#define LINKEVENT_LINKSPEED_ENCODED 1 - -#define AUTO_FW_RESET_ENABLED 0xEF10AF12 -#define AUTO_FW_RESET_DISABLED 0xDCBAAF12 - -/* firmware response header: - * 63:58 - message type - * 57:56 - owner - * 55:53 - desc count - * 52:48 - reserved - * 47:40 - completion id - * 39:32 - opcode - * 31:16 - error code - * 15:00 - reserved - */ -#define netxen_get_nic_msgtype(msg_hdr) \ - ((msg_hdr >> 58) & 0x3F) -#define netxen_get_nic_msg_compid(msg_hdr) \ - ((msg_hdr >> 40) & 0xFF) -#define netxen_get_nic_msg_opcode(msg_hdr) \ - ((msg_hdr >> 32) & 0xFF) -#define netxen_get_nic_msg_errcode(msg_hdr) \ - ((msg_hdr >> 16) & 0xFFFF) - -typedef struct { - union { - struct { - u64 hdr; - u64 body[7]; - }; - u64 words[8]; - }; -} nx_fw_msg_t; - -typedef struct { - __le64 qhdr; - __le64 req_hdr; - __le64 words[6]; -} nx_nic_req_t; - -typedef struct { - u8 op; - u8 tag; - u8 mac_addr[6]; -} nx_mac_req_t; - -#define MAX_PENDING_DESC_BLOCK_SIZE 64 - -#define NETXEN_NIC_MSI_ENABLED 0x02 -#define NETXEN_NIC_MSIX_ENABLED 0x04 -#define NETXEN_NIC_LRO_ENABLED 0x08 -#define NETXEN_NIC_LRO_DISABLED 0x00 -#define NETXEN_NIC_BRIDGE_ENABLED 0X10 -#define NETXEN_NIC_DIAG_ENABLED 0x20 -#define NETXEN_IS_MSI_FAMILY(adapter) \ - ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) - -#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS -#define NETXEN_MSIX_TBL_SPACE 8192 -#define NETXEN_PCI_REG_MSIX_TBL 0x44 - -#define NETXEN_DB_MAPSIZE_BYTES 0x1000 - -#define NETXEN_NETDEV_WEIGHT 128 -#define NETXEN_ADAPTER_UP_MAGIC 777 -#define NETXEN_NIC_PEG_TUNE 0 - -#define __NX_FW_ATTACHED 0 -#define __NX_DEV_UP 1 -#define __NX_RESETTING 2 - -struct netxen_dummy_dma { - void *addr; - dma_addr_t phys_addr; -}; - -struct netxen_adapter { - struct netxen_hardware_context ahw; - - struct net_device *netdev; - struct pci_dev *pdev; - struct list_head mac_list; - struct list_head vlan_ip_list; - - spinlock_t tx_clean_lock; - - u16 num_txd; - u16 num_rxd; - u16 num_jumbo_rxd; - u16 num_lro_rxd; - - u8 max_rds_rings; - u8 max_sds_rings; - u8 driver_mismatch; - u8 msix_supported; - u8 __pad; - u8 pci_using_dac; - u8 portnum; - u8 physical_port; - - u8 mc_enabled; - u8 max_mc_count; - u8 rss_supported; - u8 link_changed; - u8 fw_wait_cnt; - u8 fw_fail_cnt; - u8 tx_timeo_cnt; - u8 need_fw_reset; - - u8 has_link_events; - u8 fw_type; - u16 tx_context_id; - u16 mtu; - u16 is_up; 
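As a worked example of the firmware response header layout documented above, the netxen_get_nic_msg_*() accessors simply slice the 64-bit word. For a made-up header carrying msgtype 1, completion id 5, opcode 0x81 and error code 0:

	u64 hdr = (1ULL << 58) | (5ULL << 40) | (0x81ULL << 32);

	netxen_get_nic_msgtype(hdr);	/* 0x01 */
	netxen_get_nic_msg_compid(hdr);	/* 0x05 */
	netxen_get_nic_msg_opcode(hdr);	/* 0x81, NX_NIC_C2H_OPCODE_CONFIG_RSS_RESPONSE */
	netxen_get_nic_msg_errcode(hdr);	/* 0x0000, success */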
- - u16 link_speed; - u16 link_duplex; - u16 link_autoneg; - u16 module_type; - - u32 capabilities; - u32 flags; - u32 irq; - u32 temp; - - u32 int_vec_bit; - u32 heartbit; - - u8 mac_addr[ETH_ALEN]; - - struct netxen_adapter_stats stats; - - struct netxen_recv_context recv_ctx; - struct nx_host_tx_ring *tx_ring; - - int (*macaddr_set) (struct netxen_adapter *, u8 *); - int (*set_mtu) (struct netxen_adapter *, int); - int (*set_promisc) (struct netxen_adapter *, u32); - void (*set_multi) (struct net_device *); - int (*phy_read) (struct netxen_adapter *, u32 reg, u32 *); - int (*phy_write) (struct netxen_adapter *, u32 reg, u32 val); - int (*init_port) (struct netxen_adapter *, int); - int (*stop_port) (struct netxen_adapter *); - - u32 (*crb_read)(struct netxen_adapter *, ulong); - int (*crb_write)(struct netxen_adapter *, ulong, u32); - - int (*pci_mem_read)(struct netxen_adapter *, u64, u64 *); - int (*pci_mem_write)(struct netxen_adapter *, u64, u64); - - int (*pci_set_window)(struct netxen_adapter *, u64, u32 *); - - u32 (*io_read)(struct netxen_adapter *, void __iomem *); - void (*io_write)(struct netxen_adapter *, void __iomem *, u32); - - void __iomem *tgt_mask_reg; - void __iomem *pci_int_reg; - void __iomem *tgt_status_reg; - void __iomem *crb_int_state_reg; - void __iomem *isr_int_vec; - - struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER]; - - struct netxen_dummy_dma dummy_dma; - - struct delayed_work fw_work; - - struct work_struct tx_timeout_task; - - nx_nic_intr_coalesce_t coal; - - unsigned long state; - __le32 file_prd_off; /*File fw product offset*/ - u32 fw_version; - const struct firmware *fw; -}; - -int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val); -int nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val); - -#define NXRD32(adapter, off) \ - (adapter->crb_read(adapter, off)) -#define NXWR32(adapter, off, val) \ - (adapter->crb_write(adapter, off, val)) -#define NXRDIO(adapter, addr) \ - (adapter->io_read(adapter, addr)) -#define NXWRIO(adapter, addr, val) \ - (adapter->io_write(adapter, addr, val)) - -int netxen_pcie_sem_lock(struct netxen_adapter *, int, u32); -void netxen_pcie_sem_unlock(struct netxen_adapter *, int); - -#define netxen_rom_lock(a) \ - netxen_pcie_sem_lock((a), 2, NETXEN_ROM_LOCK_ID) -#define netxen_rom_unlock(a) \ - netxen_pcie_sem_unlock((a), 2) -#define netxen_phy_lock(a) \ - netxen_pcie_sem_lock((a), 3, NETXEN_PHY_LOCK_ID) -#define netxen_phy_unlock(a) \ - netxen_pcie_sem_unlock((a), 3) -#define netxen_api_lock(a) \ - netxen_pcie_sem_lock((a), 5, 0) -#define netxen_api_unlock(a) \ - netxen_pcie_sem_unlock((a), 5) -#define netxen_sw_lock(a) \ - netxen_pcie_sem_lock((a), 6, 0) -#define netxen_sw_unlock(a) \ - netxen_pcie_sem_unlock((a), 6) -#define crb_win_lock(a) \ - netxen_pcie_sem_lock((a), 7, NETXEN_CRB_WIN_LOCK_ID) -#define crb_win_unlock(a) \ - netxen_pcie_sem_unlock((a), 7) - -int netxen_nic_get_board_info(struct netxen_adapter *adapter); -int netxen_nic_wol_supported(struct netxen_adapter *adapter); - -/* Functions from netxen_nic_init.c */ -int netxen_init_dummy_dma(struct netxen_adapter *adapter); -void netxen_free_dummy_dma(struct netxen_adapter *adapter); - -int netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter); -int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val); -int netxen_load_firmware(struct netxen_adapter *adapter); -int netxen_need_fw_reset(struct netxen_adapter *adapter); -void netxen_request_firmware(struct netxen_adapter *adapter); -void 
netxen_release_firmware(struct netxen_adapter *adapter); -int netxen_pinit_from_rom(struct netxen_adapter *adapter); - -int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); -int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, - u8 *bytes, size_t size); -int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr, - u8 *bytes, size_t size); -int netxen_flash_unlock(struct netxen_adapter *adapter); -int netxen_backup_crbinit(struct netxen_adapter *adapter); -int netxen_flash_erase_secondary(struct netxen_adapter *adapter); -int netxen_flash_erase_primary(struct netxen_adapter *adapter); -void netxen_halt_pegs(struct netxen_adapter *adapter); - -int netxen_rom_se(struct netxen_adapter *adapter, int addr); - -int netxen_alloc_sw_resources(struct netxen_adapter *adapter); -void netxen_free_sw_resources(struct netxen_adapter *adapter); - -void netxen_setup_hwops(struct netxen_adapter *adapter); -void __iomem *netxen_get_ioaddr(struct netxen_adapter *, u32); - -int netxen_alloc_hw_resources(struct netxen_adapter *adapter); -void netxen_free_hw_resources(struct netxen_adapter *adapter); - -void netxen_release_rx_buffers(struct netxen_adapter *adapter); -void netxen_release_tx_buffers(struct netxen_adapter *adapter); - -int netxen_init_firmware(struct netxen_adapter *adapter); -void netxen_nic_clear_stats(struct netxen_adapter *adapter); -void netxen_watchdog_task(struct work_struct *work); -void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, - struct nx_host_rds_ring *rds_ring); -int netxen_process_cmd_ring(struct netxen_adapter *adapter); -int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max); - -void netxen_p3_free_mac_list(struct netxen_adapter *adapter); -int netxen_config_intr_coalesce(struct netxen_adapter *adapter); -int netxen_config_rss(struct netxen_adapter *adapter, int enable); -int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd); -int netxen_linkevent_request(struct netxen_adapter *adapter, int enable); -void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup); -void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *); -void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64); - -int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter, - u32 speed, u32 duplex, u32 autoneg); -int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); -int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); -int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable); -int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable); -int netxen_send_lro_cleanup(struct netxen_adapter *adapter); - -void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, - struct nx_host_tx_ring *tx_ring); - -/* Functions from netxen_nic_main.c */ -int netxen_nic_reset_context(struct netxen_adapter *); - -/* - * NetXen Board information - */ - -#define NETXEN_MAX_SHORT_NAME 32 -struct netxen_brdinfo { - int brdtype; /* type of board */ - long ports; /* max no of physical ports */ - char short_name[NETXEN_MAX_SHORT_NAME]; -}; - -static const struct netxen_brdinfo netxen_boards[] = { - {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"}, - {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"}, - {NETXEN_BRDTYPE_P2_SB31_10G_IMEZ, 2, "XGb IMEZ"}, - {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"}, - {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"}, - {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"}, - {NETXEN_BRDTYPE_P3_REF_QG, 4, "Reference Quad Gig 
"}, - {NETXEN_BRDTYPE_P3_HMEZ, 2, "Dual XGb HMEZ"}, - {NETXEN_BRDTYPE_P3_10G_CX4_LP, 2, "Dual XGb CX4 LP"}, - {NETXEN_BRDTYPE_P3_4_GB, 4, "Quad Gig LP"}, - {NETXEN_BRDTYPE_P3_IMEZ, 2, "Dual XGb IMEZ"}, - {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, - {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, - {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, - {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"}, - {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"}, - {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"}, - {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, - {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} -}; - -#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) - -static inline void get_brd_name_by_type(u32 type, char *name) -{ - int i, found = 0; - for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { - if (netxen_boards[i].brdtype == type) { - strcpy(name, netxen_boards[i].short_name); - found = 1; - break; - } - - } - if (!found) - name = "Unknown"; -} - -static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring) -{ - smp_mb(); - return find_diff_among(tx_ring->producer, - tx_ring->sw_consumer, tx_ring->num_desc); - -} - -int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac); -int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac); -extern void netxen_change_ringparam(struct netxen_adapter *adapter); -extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, - int *valp); - -extern const struct ethtool_ops netxen_nic_ethtool_ops; - -#endif /* __NETXEN_NIC_H_ */ diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c deleted file mode 100644 index a925392abd6f..000000000000 --- a/drivers/net/netxen/netxen_nic_ctx.c +++ /dev/null @@ -1,793 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". 
- * - */ - -#include "netxen_nic_hw.h" -#include "netxen_nic.h" - -#define NXHAL_VERSION 1 - -static u32 -netxen_poll_rsp(struct netxen_adapter *adapter) -{ - u32 rsp = NX_CDRP_RSP_OK; - int timeout = 0; - - do { - /* give atleast 1ms for firmware to respond */ - msleep(1); - - if (++timeout > NX_OS_CRB_RETRY_COUNT) - return NX_CDRP_RSP_TIMEOUT; - - rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET); - } while (!NX_CDRP_IS_RSP(rsp)); - - return rsp; -} - -static u32 -netxen_issue_cmd(struct netxen_adapter *adapter, - u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd) -{ - u32 rsp; - u32 signature = 0; - u32 rcode = NX_RCODE_SUCCESS; - - signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version); - - /* Acquire semaphore before accessing CRB */ - if (netxen_api_lock(adapter)) - return NX_RCODE_TIMEOUT; - - NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature); - - NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1); - - NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2); - - NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3); - - NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd)); - - rsp = netxen_poll_rsp(adapter); - - if (rsp == NX_CDRP_RSP_TIMEOUT) { - printk(KERN_ERR "%s: card response timeout.\n", - netxen_nic_driver_name); - - rcode = NX_RCODE_TIMEOUT; - } else if (rsp == NX_CDRP_RSP_FAIL) { - rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET); - - printk(KERN_ERR "%s: failed card response code:0x%x\n", - netxen_nic_driver_name, rcode); - } - - /* Release semaphore */ - netxen_api_unlock(adapter); - - return rcode; -} - -int -nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) -{ - u32 rcode = NX_RCODE_SUCCESS; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) - rcode = netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - recv_ctx->context_id, - mtu, - 0, - NX_CDRP_CMD_SET_MTU); - - if (rcode != NX_RCODE_SUCCESS) - return -EIO; - - return 0; -} - -int -nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter, - u32 speed, u32 duplex, u32 autoneg) -{ - - return netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - speed, - duplex, - autoneg, - NX_CDRP_CMD_CONFIG_GBE_PORT); - -} - -static int -nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter) -{ - void *addr; - nx_hostrq_rx_ctx_t *prq; - nx_cardrsp_rx_ctx_t *prsp; - nx_hostrq_rds_ring_t *prq_rds; - nx_hostrq_sds_ring_t *prq_sds; - nx_cardrsp_rds_ring_t *prsp_rds; - nx_cardrsp_sds_ring_t *prsp_sds; - struct nx_host_rds_ring *rds_ring; - struct nx_host_sds_ring *sds_ring; - - dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; - u64 phys_addr; - - int i, nrds_rings, nsds_rings; - size_t rq_size, rsp_size; - u32 cap, reg, val; - - int err; - - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - nrds_rings = adapter->max_rds_rings; - nsds_rings = adapter->max_sds_rings; - - rq_size = - SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings); - rsp_size = - SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings); - - addr = pci_alloc_consistent(adapter->pdev, - rq_size, &hostrq_phys_addr); - if (addr == NULL) - return -ENOMEM; - prq = addr; - - addr = pci_alloc_consistent(adapter->pdev, - rsp_size, &cardrsp_phys_addr); - if (addr == NULL) { - err = -ENOMEM; - goto out_free_rq; - } - prsp = addr; - - prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); - - cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN); - cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS); - - prq->capabilities[0] = cpu_to_le32(cap); - prq->host_int_crb_mode = - 
cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); - prq->host_rds_crb_mode = - cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE); - - prq->num_rds_rings = cpu_to_le16(nrds_rings); - prq->num_sds_rings = cpu_to_le16(nsds_rings); - prq->rds_ring_offset = cpu_to_le32(0); - - val = le32_to_cpu(prq->rds_ring_offset) + - (sizeof(nx_hostrq_rds_ring_t) * nrds_rings); - prq->sds_ring_offset = cpu_to_le32(val); - - prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + - le32_to_cpu(prq->rds_ring_offset)); - - for (i = 0; i < nrds_rings; i++) { - - rds_ring = &recv_ctx->rds_rings[i]; - - prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); - prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); - prq_rds[i].ring_kind = cpu_to_le32(i); - prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); - } - - prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + - le32_to_cpu(prq->sds_ring_offset)); - - for (i = 0; i < nsds_rings; i++) { - - sds_ring = &recv_ctx->sds_rings[i]; - - prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); - prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); - prq_sds[i].msi_index = cpu_to_le16(i); - } - - phys_addr = hostrq_phys_addr; - err = netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - (u32)(phys_addr >> 32), - (u32)(phys_addr & 0xffffffff), - rq_size, - NX_CDRP_CMD_CREATE_RX_CTX); - if (err) { - printk(KERN_WARNING - "Failed to create rx ctx in firmware%d\n", err); - goto out_free_rsp; - } - - - prsp_rds = ((nx_cardrsp_rds_ring_t *) - &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); - - for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { - rds_ring = &recv_ctx->rds_rings[i]; - - reg = le32_to_cpu(prsp_rds[i].host_producer_crb); - rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter, - NETXEN_NIC_REG(reg - 0x200)); - } - - prsp_sds = ((nx_cardrsp_sds_ring_t *) - &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); - - for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { - sds_ring = &recv_ctx->sds_rings[i]; - - reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); - sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter, - NETXEN_NIC_REG(reg - 0x200)); - - reg = le32_to_cpu(prsp_sds[i].interrupt_crb); - sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter, - NETXEN_NIC_REG(reg - 0x200)); - } - - recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); - recv_ctx->context_id = le16_to_cpu(prsp->context_id); - recv_ctx->virt_port = prsp->virt_port; - -out_free_rsp: - pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr); -out_free_rq: - pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr); - return err; -} - -static void -nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter) -{ - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - if (netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - recv_ctx->context_id, - NX_DESTROY_CTX_RESET, - 0, - NX_CDRP_CMD_DESTROY_RX_CTX)) { - - printk(KERN_WARNING - "%s: Failed to destroy rx ctx in firmware\n", - netxen_nic_driver_name); - } -} - -static int -nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter) -{ - nx_hostrq_tx_ctx_t *prq; - nx_hostrq_cds_ring_t *prq_cds; - nx_cardrsp_tx_ctx_t *prsp; - void *rq_addr, *rsp_addr; - size_t rq_size, rsp_size; - u32 temp; - int err = 0; - u64 offset, phys_addr; - dma_addr_t rq_phys_addr, rsp_phys_addr; - struct nx_host_tx_ring *tx_ring = adapter->tx_ring; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t); - rq_addr = pci_alloc_consistent(adapter->pdev, - rq_size, 
&rq_phys_addr); - if (!rq_addr) - return -ENOMEM; - - rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t); - rsp_addr = pci_alloc_consistent(adapter->pdev, - rsp_size, &rsp_phys_addr); - if (!rsp_addr) { - err = -ENOMEM; - goto out_free_rq; - } - - memset(rq_addr, 0, rq_size); - prq = rq_addr; - - memset(rsp_addr, 0, rsp_size); - prsp = rsp_addr; - - prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); - - temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO); - prq->capabilities[0] = cpu_to_le32(temp); - - prq->host_int_crb_mode = - cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); - - prq->interrupt_ctl = 0; - prq->msi_index = 0; - - prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr); - - offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx); - prq->cmd_cons_dma_addr = cpu_to_le64(offset); - - prq_cds = &prq->cds_ring; - - prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); - prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); - - phys_addr = rq_phys_addr; - err = netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - (u32)(phys_addr >> 32), - ((u32)phys_addr & 0xffffffff), - rq_size, - NX_CDRP_CMD_CREATE_TX_CTX); - - if (err == NX_RCODE_SUCCESS) { - temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); - tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, - NETXEN_NIC_REG(temp - 0x200)); -#if 0 - adapter->tx_state = - le32_to_cpu(prsp->host_ctx_state); -#endif - adapter->tx_context_id = - le16_to_cpu(prsp->context_id); - } else { - printk(KERN_WARNING - "Failed to create tx ctx in firmware%d\n", err); - err = -EIO; - } - - pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr); - -out_free_rq: - pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr); - - return err; -} - -static void -nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter) -{ - if (netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - adapter->tx_context_id, - NX_DESTROY_CTX_RESET, - 0, - NX_CDRP_CMD_DESTROY_TX_CTX)) { - - printk(KERN_WARNING - "%s: Failed to destroy tx ctx in firmware\n", - netxen_nic_driver_name); - } -} - -int -nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val) -{ - u32 rcode; - - rcode = netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - reg, - 0, - 0, - NX_CDRP_CMD_READ_PHY); - - if (rcode != NX_RCODE_SUCCESS) - return -EIO; - - return NXRD32(adapter, NX_ARG1_CRB_OFFSET); -} - -int -nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val) -{ - u32 rcode; - - rcode = netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - reg, - val, - 0, - NX_CDRP_CMD_WRITE_PHY); - - if (rcode != NX_RCODE_SUCCESS) - return -EIO; - - return 0; -} - -static u64 ctx_addr_sig_regs[][3] = { - {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)}, - {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)}, - {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)}, - {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)} -}; - -#define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0]) -#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2]) -#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1]) - -#define lower32(x) ((u32)((x) & 0xffffffff)) -#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff)) - -static struct netxen_recv_crb recv_crb_registers[] = { - /* Instance 0 */ - { - /* crb_rcv_producer: */ - { - NETXEN_NIC_REG(0x100), - /* Jumbo frames */ - 
NETXEN_NIC_REG(0x110), - /* LRO */ - NETXEN_NIC_REG(0x120) - }, - /* crb_sts_consumer: */ - { - NETXEN_NIC_REG(0x138), - NETXEN_NIC_REG_2(0x000), - NETXEN_NIC_REG_2(0x004), - NETXEN_NIC_REG_2(0x008), - }, - /* sw_int_mask */ - { - CRB_SW_INT_MASK_0, - NETXEN_NIC_REG_2(0x044), - NETXEN_NIC_REG_2(0x048), - NETXEN_NIC_REG_2(0x04c), - }, - }, - /* Instance 1 */ - { - /* crb_rcv_producer: */ - { - NETXEN_NIC_REG(0x144), - /* Jumbo frames */ - NETXEN_NIC_REG(0x154), - /* LRO */ - NETXEN_NIC_REG(0x164) - }, - /* crb_sts_consumer: */ - { - NETXEN_NIC_REG(0x17c), - NETXEN_NIC_REG_2(0x020), - NETXEN_NIC_REG_2(0x024), - NETXEN_NIC_REG_2(0x028), - }, - /* sw_int_mask */ - { - CRB_SW_INT_MASK_1, - NETXEN_NIC_REG_2(0x064), - NETXEN_NIC_REG_2(0x068), - NETXEN_NIC_REG_2(0x06c), - }, - }, - /* Instance 2 */ - { - /* crb_rcv_producer: */ - { - NETXEN_NIC_REG(0x1d8), - /* Jumbo frames */ - NETXEN_NIC_REG(0x1f8), - /* LRO */ - NETXEN_NIC_REG(0x208) - }, - /* crb_sts_consumer: */ - { - NETXEN_NIC_REG(0x220), - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - }, - /* sw_int_mask */ - { - CRB_SW_INT_MASK_2, - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - }, - }, - /* Instance 3 */ - { - /* crb_rcv_producer: */ - { - NETXEN_NIC_REG(0x22c), - /* Jumbo frames */ - NETXEN_NIC_REG(0x23c), - /* LRO */ - NETXEN_NIC_REG(0x24c) - }, - /* crb_sts_consumer: */ - { - NETXEN_NIC_REG(0x264), - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - }, - /* sw_int_mask */ - { - CRB_SW_INT_MASK_3, - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - }, - }, -}; - -static int -netxen_init_old_ctx(struct netxen_adapter *adapter) -{ - struct netxen_recv_context *recv_ctx; - struct nx_host_rds_ring *rds_ring; - struct nx_host_sds_ring *sds_ring; - struct nx_host_tx_ring *tx_ring; - int ring; - int port = adapter->portnum; - struct netxen_ring_ctx *hwctx; - u32 signature; - - tx_ring = adapter->tx_ring; - recv_ctx = &adapter->recv_ctx; - hwctx = recv_ctx->hwctx; - - hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr); - hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc); - - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - - hwctx->rcv_rings[ring].addr = - cpu_to_le64(rds_ring->phys_addr); - hwctx->rcv_rings[ring].size = - cpu_to_le32(rds_ring->num_desc); - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - - if (ring == 0) { - hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr); - hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc); - } - hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr); - hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc); - hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring); - } - hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings); - - signature = (adapter->max_sds_rings > 1) ? 
- NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE; - - NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port), - lower32(recv_ctx->phys_addr)); - NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port), - upper32(recv_ctx->phys_addr)); - NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), - signature | port); - return 0; -} - -int netxen_alloc_hw_resources(struct netxen_adapter *adapter) -{ - void *addr; - int err = 0; - int ring; - struct netxen_recv_context *recv_ctx; - struct nx_host_rds_ring *rds_ring; - struct nx_host_sds_ring *sds_ring; - struct nx_host_tx_ring *tx_ring; - - struct pci_dev *pdev = adapter->pdev; - struct net_device *netdev = adapter->netdev; - int port = adapter->portnum; - - recv_ctx = &adapter->recv_ctx; - tx_ring = adapter->tx_ring; - - addr = pci_alloc_consistent(pdev, - sizeof(struct netxen_ring_ctx) + sizeof(uint32_t), - &recv_ctx->phys_addr); - if (addr == NULL) { - dev_err(&pdev->dev, "failed to allocate hw context\n"); - return -ENOMEM; - } - - memset(addr, 0, sizeof(struct netxen_ring_ctx)); - recv_ctx->hwctx = addr; - recv_ctx->hwctx->ctx_id = cpu_to_le32(port); - recv_ctx->hwctx->cmd_consumer_offset = - cpu_to_le64(recv_ctx->phys_addr + - sizeof(struct netxen_ring_ctx)); - tx_ring->hw_consumer = - (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx)); - - /* cmd desc ring */ - addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring), - &tx_ring->phys_addr); - - if (addr == NULL) { - dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n", - netdev->name); - err = -ENOMEM; - goto err_out_free; - } - - tx_ring->desc_head = addr; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - addr = pci_alloc_consistent(adapter->pdev, - RCV_DESC_RINGSIZE(rds_ring), - &rds_ring->phys_addr); - if (addr == NULL) { - dev_err(&pdev->dev, - "%s: failed to allocate rds ring [%d]\n", - netdev->name, ring); - err = -ENOMEM; - goto err_out_free; - } - rds_ring->desc_head = addr; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - rds_ring->crb_rcv_producer = - netxen_get_ioaddr(adapter, - recv_crb_registers[port].crb_rcv_producer[ring]); - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - - addr = pci_alloc_consistent(adapter->pdev, - STATUS_DESC_RINGSIZE(sds_ring), - &sds_ring->phys_addr); - if (addr == NULL) { - dev_err(&pdev->dev, - "%s: failed to allocate sds ring [%d]\n", - netdev->name, ring); - err = -ENOMEM; - goto err_out_free; - } - sds_ring->desc_head = addr; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - sds_ring->crb_sts_consumer = - netxen_get_ioaddr(adapter, - recv_crb_registers[port].crb_sts_consumer[ring]); - - sds_ring->crb_intr_mask = - netxen_get_ioaddr(adapter, - recv_crb_registers[port].sw_int_mask[ring]); - } - } - - - if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state)) - goto done; - err = nx_fw_cmd_create_rx_ctx(adapter); - if (err) - goto err_out_free; - err = nx_fw_cmd_create_tx_ctx(adapter); - if (err) - goto err_out_free; - } else { - err = netxen_init_old_ctx(adapter); - if (err) - goto err_out_free; - } - -done: - return 0; - -err_out_free: - netxen_free_hw_resources(adapter); - return err; -} - -void netxen_free_hw_resources(struct netxen_adapter *adapter) -{ - struct netxen_recv_context *recv_ctx; - struct nx_host_rds_ring *rds_ring; - struct nx_host_sds_ring *sds_ring; - struct nx_host_tx_ring *tx_ring; - int ring; - - int port = adapter->portnum; - - if 
(!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state)) - goto done; - - nx_fw_cmd_destroy_rx_ctx(adapter); - nx_fw_cmd_destroy_tx_ctx(adapter); - } else { - netxen_api_lock(adapter); - NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), - NETXEN_CTX_D3_RESET | port); - netxen_api_unlock(adapter); - } - - /* Allow dma queues to drain after context reset */ - msleep(20); - -done: - recv_ctx = &adapter->recv_ctx; - - if (recv_ctx->hwctx != NULL) { - pci_free_consistent(adapter->pdev, - sizeof(struct netxen_ring_ctx) + - sizeof(uint32_t), - recv_ctx->hwctx, - recv_ctx->phys_addr); - recv_ctx->hwctx = NULL; - } - - tx_ring = adapter->tx_ring; - if (tx_ring->desc_head != NULL) { - pci_free_consistent(adapter->pdev, - TX_DESC_RINGSIZE(tx_ring), - tx_ring->desc_head, tx_ring->phys_addr); - tx_ring->desc_head = NULL; - } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - - if (rds_ring->desc_head != NULL) { - pci_free_consistent(adapter->pdev, - RCV_DESC_RINGSIZE(rds_ring), - rds_ring->desc_head, - rds_ring->phys_addr); - rds_ring->desc_head = NULL; - } - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - - if (sds_ring->desc_head != NULL) { - pci_free_consistent(adapter->pdev, - STATUS_DESC_RINGSIZE(sds_ring), - sds_ring->desc_head, - sds_ring->phys_addr); - sds_ring->desc_head = NULL; - } - } -} - diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c deleted file mode 100644 index b34fb74d07e3..000000000000 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ /dev/null @@ -1,835 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". 
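Each pci_alloc_consistent() in netxen_alloc_hw_resources() is paired here with a pci_free_consistent() taking the same size, virtual address and DMA handle. The pattern in miniature (pdev and size are placeholders; the driver memsets the buffer itself after allocation):

	void *va;
	dma_addr_t pa;

	va = pci_alloc_consistent(pdev, size, &pa);	/* coherent DMA buffer */
	if (va == NULL)
		return -ENOMEM;
	/* ... hand pa to the card, access the ring through va ... */
	pci_free_consistent(pdev, size, va, pa);	/* must match the alloc */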
- * - */ - -#include -#include -#include -#include -#include -#include - -#include "netxen_nic.h" -#include "netxen_nic_hw.h" - -struct netxen_nic_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; -}; - -#define NETXEN_NIC_STAT(m) sizeof(((struct netxen_adapter *)0)->m), \ - offsetof(struct netxen_adapter, m) - -#define NETXEN_NIC_PORT_WINDOW 0x10000 -#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF - -static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = { - {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)}, - {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)}, - {"rx_dropped", NETXEN_NIC_STAT(stats.rxdropped)}, - {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)}, - {"csummed", NETXEN_NIC_STAT(stats.csummed)}, - {"rx_pkts", NETXEN_NIC_STAT(stats.rx_pkts)}, - {"lro_pkts", NETXEN_NIC_STAT(stats.lro_pkts)}, - {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)}, - {"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)}, -}; - -#define NETXEN_NIC_STATS_LEN ARRAY_SIZE(netxen_nic_gstrings_stats) - -static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = { - "Register_Test_on_offline", - "Link_Test_on_offline" -}; - -#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test) - -#define NETXEN_NIC_REGS_COUNT 30 -#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32)) -#define NETXEN_MAX_EEPROM_LEN 1024 - -static int netxen_nic_get_eeprom_len(struct net_device *dev) -{ - return NETXEN_FLASH_TOTAL_SIZE; -} - -static void -netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u32 fw_major = 0; - u32 fw_minor = 0; - u32 fw_build = 0; - - strncpy(drvinfo->driver, netxen_nic_driver_name, 32); - strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32); - fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); - fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); - fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); - sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); - - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->regdump_len = NETXEN_NIC_REGS_LEN; - drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev); -} - -static int -netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - int check_sfp_module = 0; - - /* read which mode */ - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); - - ecmd->advertising = (ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); - - ecmd->port = PORT_TP; - - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->duplex = adapter->link_duplex; - ecmd->autoneg = adapter->link_autoneg; - - } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - u32 val; - - val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); - if (val == NETXEN_PORT_MODE_802_3_AP) { - ecmd->supported = SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_1000baseT_Full; - } else { - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; - } - - if (netif_running(dev) && adapter->has_link_events) { - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->autoneg = adapter->link_autoneg; - ecmd->duplex = adapter->link_duplex; - goto skip; - } - - ecmd->port = PORT_TP; - - if 
(NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - u16 pcifn = adapter->ahw.pci_func; - - val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn)); - ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ * - P3_LINK_SPEED_VAL(pcifn, val)); - } else - ethtool_cmd_speed_set(ecmd, SPEED_10000); - - ecmd->duplex = DUPLEX_FULL; - ecmd->autoneg = AUTONEG_DISABLE; - } else - return -EIO; - -skip: - ecmd->phy_address = adapter->physical_port; - ecmd->transceiver = XCVR_EXTERNAL; - - switch (adapter->ahw.board_type) { - case NETXEN_BRDTYPE_P2_SB35_4G: - case NETXEN_BRDTYPE_P2_SB31_2G: - case NETXEN_BRDTYPE_P3_REF_QG: - case NETXEN_BRDTYPE_P3_4_GB: - case NETXEN_BRDTYPE_P3_4_GB_MM: - - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_Autoneg; - case NETXEN_BRDTYPE_P2_SB31_10G_CX4: - case NETXEN_BRDTYPE_P3_10G_CX4: - case NETXEN_BRDTYPE_P3_10G_CX4_LP: - case NETXEN_BRDTYPE_P3_10000_BASE_T: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->autoneg = (adapter->ahw.board_type == - NETXEN_BRDTYPE_P2_SB31_10G_CX4) ? - (AUTONEG_DISABLE) : (adapter->link_autoneg); - break; - case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: - case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: - case NETXEN_BRDTYPE_P3_IMEZ: - case NETXEN_BRDTYPE_P3_XG_LOM: - case NETXEN_BRDTYPE_P3_HMEZ: - ecmd->supported |= SUPPORTED_MII; - ecmd->advertising |= ADVERTISED_MII; - ecmd->port = PORT_MII; - ecmd->autoneg = AUTONEG_DISABLE; - break; - case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: - case NETXEN_BRDTYPE_P3_10G_SFP_CT: - case NETXEN_BRDTYPE_P3_10G_SFP_QT: - ecmd->advertising |= ADVERTISED_TP; - ecmd->supported |= SUPPORTED_TP; - check_sfp_module = netif_running(dev) && - adapter->has_link_events; - case NETXEN_BRDTYPE_P2_SB31_10G: - case NETXEN_BRDTYPE_P3_10G_XFP: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; - break; - case NETXEN_BRDTYPE_P3_10G_TP: - if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); - ecmd->advertising |= - (ADVERTISED_FIBRE | ADVERTISED_TP); - ecmd->port = PORT_FIBRE; - check_sfp_module = netif_running(dev) && - adapter->has_link_events; - } else { - ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg); - ecmd->advertising |= - (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; - } - break; - default: - printk(KERN_ERR "netxen-nic: Unsupported board model %d\n", - adapter->ahw.board_type); - return -EIO; - } - - if (check_sfp_module) { - switch (adapter->module_type) { - case LINKEVENT_MODULE_OPTICAL_UNKNOWN: - case LINKEVENT_MODULE_OPTICAL_SRLR: - case LINKEVENT_MODULE_OPTICAL_LRM: - case LINKEVENT_MODULE_OPTICAL_SFP_1G: - ecmd->port = PORT_FIBRE; - break; - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: - case LINKEVENT_MODULE_TWINAX: - ecmd->port = PORT_TP; - break; - default: - ecmd->port = -1; - } - } - - return 0; -} - -static int -netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(ecmd); - int ret; - - if (adapter->ahw.port_type != NETXEN_NIC_GBE) - return -EOPNOTSUPP; - - if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG)) - return -EOPNOTSUPP; - - ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex, - ecmd->autoneg); - if (ret == NX_RCODE_NOT_SUPPORTED) - return -EOPNOTSUPP; - else if (ret) - return -EIO; - - 
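The board-type switch in netxen_nic_get_settings() above relies on deliberate case fall-through: the copper gigabit boards first add the Autoneg bits and then fall into the CX4/10GBase-T cases to pick up the TP bits as well. The idiom, with hypothetical names:

	switch (board_type) {
	case BOARD_COPPER_1G:		/* hypothetical board ids */
		caps |= SUPPORTED_Autoneg;
		/* fall through */
	case BOARD_COPPER_10G:
		caps |= SUPPORTED_TP;
		break;
	}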
adapter->link_speed = speed; - adapter->link_duplex = ecmd->duplex; - adapter->link_autoneg = ecmd->autoneg; - - if (!netif_running(dev)) - return 0; - - dev->netdev_ops->ndo_stop(dev); - return dev->netdev_ops->ndo_open(dev); -} - -static int netxen_nic_get_regs_len(struct net_device *dev) -{ - return NETXEN_NIC_REGS_LEN; -} - -static void -netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - struct nx_host_sds_ring *sds_ring; - u32 *regs_buff = p; - int ring, i = 0; - int port = adapter->physical_port; - - memset(p, 0, NETXEN_NIC_REGS_LEN); - - regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | - (adapter->pdev)->device; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return; - - regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE); - regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE); - regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1); - regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg); - regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); - regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE); - regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); - regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); - regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2); - - regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c); - regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c); - regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c); - regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c); - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - - regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c); - i += 2; - - regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3); - regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); - - } else { - i++; - - regs_buff[i++] = NXRD32(adapter, - NETXEN_NIU_XGE_CONFIG_0+(0x10000*port)); - regs_buff[i++] = NXRD32(adapter, - NETXEN_NIU_XGE_CONFIG_1+(0x10000*port)); - - regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE); - regs_buff[i++] = NXRDIO(adapter, - adapter->tx_ring->crb_cmd_consumer); - } - - regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer); - - regs_buff[i++] = NXRDIO(adapter, - recv_ctx->rds_rings[0].crb_rcv_producer); - regs_buff[i++] = NXRDIO(adapter, - recv_ctx->rds_rings[1].crb_rcv_producer); - - regs_buff[i++] = adapter->max_sds_rings; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &(recv_ctx->sds_rings[ring]); - regs_buff[i++] = NXRDIO(adapter, - sds_ring->crb_sts_consumer); - } -} - -static u32 netxen_nic_test_link(struct net_device *dev) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u32 val, port; - - port = adapter->physical_port; - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - val = NXRD32(adapter, CRB_XG_STATE_P3); - val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); - return (val == XG_LINK_UP_P3) ? 0 : 1; - } else { - val = NXRD32(adapter, CRB_XG_STATE); - val = (val >> port*8) & 0xff; - return (val == XG_LINK_UP) ? 
0 : 1; - } -} - -static int -netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, - u8 * bytes) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - int offset; - int ret; - - if (eeprom->len == 0) - return -EINVAL; - - eeprom->magic = (adapter->pdev)->vendor | - ((adapter->pdev)->device << 16); - offset = eeprom->offset; - - ret = netxen_rom_fast_read_words(adapter, offset, bytes, - eeprom->len); - if (ret < 0) - return ret; - - return 0; -} - -static void -netxen_nic_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - - ring->rx_pending = adapter->num_rxd; - ring->rx_jumbo_pending = adapter->num_jumbo_rxd; - ring->rx_jumbo_pending += adapter->num_lro_rxd; - ring->tx_pending = adapter->num_txd; - - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G; - ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G; - } else { - ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G; - ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G; - } - - ring->tx_max_pending = MAX_CMD_DESCRIPTORS; - - ring->rx_mini_max_pending = 0; - ring->rx_mini_pending = 0; -} - -static u32 -netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) -{ - u32 num_desc; - num_desc = max(val, min); - num_desc = min(num_desc, max); - num_desc = roundup_pow_of_two(num_desc); - - if (val != num_desc) { - printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n", - netxen_nic_driver_name, r_name, num_desc, val); - } - - return num_desc; -} - -static int -netxen_nic_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G; - u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G; - u16 num_rxd, num_jumbo_rxd, num_txd; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return -EOPNOTSUPP; - - if (ring->rx_mini_pending) - return -EOPNOTSUPP; - - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - max_rcv_desc = MAX_RCV_DESCRIPTORS_1G; - max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G; - } - - num_rxd = netxen_validate_ringparam(ring->rx_pending, - MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx"); - - num_jumbo_rxd = netxen_validate_ringparam(ring->rx_jumbo_pending, - MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo"); - - num_txd = netxen_validate_ringparam(ring->tx_pending, - MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx"); - - if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd && - num_jumbo_rxd == adapter->num_jumbo_rxd) - return 0; - - adapter->num_rxd = num_rxd; - adapter->num_jumbo_rxd = num_jumbo_rxd; - adapter->num_txd = num_txd; - - return netxen_nic_reset_context(adapter); -} - -static void -netxen_nic_get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *pause) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - __u32 val; - int port = adapter->physical_port; - - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) - return; - /* get flow control settings */ - val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); - pause->rx_pause = netxen_gb_get_rx_flowctl(val); - val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); - switch (port) { - case 0: - pause->tx_pause = !(netxen_gb_get_gb0_mask(val)); - break; - case 1: - pause->tx_pause = !(netxen_gb_get_gb1_mask(val)); - break; - case 2: - pause->tx_pause = !(netxen_gb_get_gb2_mask(val)); - break; - case 3: - default: - 
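netxen_validate_ringparam() above clamps the request into [min, max] and then rounds up to a power of two; since the MIN_*/MAX_* descriptor limits in this driver are themselves powers of two, the roundup cannot escape the clamp. For a request of 900 RX descriptors, for example:

	num_desc = max(900U, MIN_RCV_DESCRIPTORS);	/* -> 900 */
	num_desc = min(num_desc, max_rcv_desc);		/* -> 900 */
	num_desc = roundup_pow_of_two(num_desc);	/* -> 1024, logged, then used */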
pause->tx_pause = !(netxen_gb_get_gb3_mask(val)); - break; - } - } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) - return; - pause->rx_pause = 1; - val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); - if (port == 0) - pause->tx_pause = !(netxen_xg_get_xg0_mask(val)); - else - pause->tx_pause = !(netxen_xg_get_xg1_mask(val)); - } else { - printk(KERN_ERR"%s: Unknown board type: %x\n", - netxen_nic_driver_name, adapter->ahw.port_type); - } -} - -static int -netxen_nic_set_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *pause) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - __u32 val; - int port = adapter->physical_port; - /* read mode */ - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) - return -EIO; - /* set flow control */ - val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); - - if (pause->rx_pause) - netxen_gb_rx_flowctl(val); - else - netxen_gb_unset_rx_flowctl(val); - - NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), - val); - /* set autoneg */ - val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); - switch (port) { - case 0: - if (pause->tx_pause) - netxen_gb_unset_gb0_mask(val); - else - netxen_gb_set_gb0_mask(val); - break; - case 1: - if (pause->tx_pause) - netxen_gb_unset_gb1_mask(val); - else - netxen_gb_set_gb1_mask(val); - break; - case 2: - if (pause->tx_pause) - netxen_gb_unset_gb2_mask(val); - else - netxen_gb_set_gb2_mask(val); - break; - case 3: - default: - if (pause->tx_pause) - netxen_gb_unset_gb3_mask(val); - else - netxen_gb_set_gb3_mask(val); - break; - } - NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val); - } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) - return -EIO; - val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); - if (port == 0) { - if (pause->tx_pause) - netxen_xg_unset_xg0_mask(val); - else - netxen_xg_set_xg0_mask(val); - } else { - if (pause->tx_pause) - netxen_xg_unset_xg1_mask(val); - else - netxen_xg_set_xg1_mask(val); - } - NXWR32(adapter, NETXEN_NIU_XG_PAUSE_CTL, val); - } else { - printk(KERN_ERR "%s: Unknown board type: %x\n", - netxen_nic_driver_name, - adapter->ahw.port_type); - } - return 0; -} - -static int netxen_nic_reg_test(struct net_device *dev) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u32 data_read, data_written; - - data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0)); - if ((data_read & 0xffff) != adapter->pdev->vendor) - return 1; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - return 0; - - data_written = (u32)0xa5a5a5a5; - - NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written); - data_read = NXRD32(adapter, CRB_SCRATCHPAD_TEST); - if (data_written != data_read) - return 1; - - return 0; -} - -static int netxen_get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_TEST: - return NETXEN_NIC_TEST_LEN; - case ETH_SS_STATS: - return NETXEN_NIC_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static void -netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, - u64 * data) -{ - memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN); - if ((data[0] = netxen_nic_reg_test(dev))) - eth_test->flags |= ETH_TEST_FL_FAILED; - /* link test */ - if ((data[1] = (u64) netxen_nic_test_link(dev))) - eth_test->flags |= ETH_TEST_FL_FAILED; -} - -static void -netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data) -{ - int index; - - switch (stringset) { - case 
ETH_SS_TEST: - memcpy(data, *netxen_nic_gstrings_test, - NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN); - break; - case ETH_SS_STATS: - for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { - memcpy(data + index * ETH_GSTRING_LEN, - netxen_nic_gstrings_stats[index].stat_string, - ETH_GSTRING_LEN); - } - break; - } -} - -static void -netxen_nic_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 * data) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - int index; - - for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { - char *p = - (char *)adapter + - netxen_nic_gstrings_stats[index].stat_offset; - data[index] = - (netxen_nic_gstrings_stats[index].sizeof_stat == - sizeof(u64)) ? *(u64 *) p : *(u32 *) p; - } -} - -static void -netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u32 wol_cfg = 0; - - wol->supported = 0; - wol->wolopts = 0; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return; - - wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); - if (wol_cfg & (1UL << adapter->portnum)) - wol->supported |= WAKE_MAGIC; - - wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); - if (wol_cfg & (1UL << adapter->portnum)) - wol->wolopts |= WAKE_MAGIC; -} - -static int -netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u32 wol_cfg = 0; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return -EOPNOTSUPP; - - if (wol->wolopts & ~WAKE_MAGIC) - return -EOPNOTSUPP; - - wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); - if (!(wol_cfg & (1 << adapter->portnum))) - return -EOPNOTSUPP; - - wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); - if (wol->wolopts & WAKE_MAGIC) - wol_cfg |= 1UL << adapter->portnum; - else - wol_cfg &= ~(1UL << adapter->portnum); - NXWR32(adapter, NETXEN_WOL_CONFIG, wol_cfg); - - return 0; -} - -/* - * Set the coalescing parameters. Currently only normal is supported. - * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the - * firmware coalescing to default. - */ -static int netxen_set_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - - if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) - return -EINVAL; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return -EINVAL; - - /* - * Return Error if unsupported values or - * unsupported parameters are set. 
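In practice only the four "normal" knobs survive the validation that follows: a request such as

	ethtool -C eth0 rx-usecs 3 rx-frames 256 tx-usecs 4 tx-frames 64

lands in adapter->coal.normal.data, any adaptive, irq or rate-sampled field being nonzero is rejected with -EINVAL, and rx-usecs 0 or rx-frames 0 falls back to the NETXEN_DEFAULT_INTR_COALESCE_* values.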
- */ - if (ethcoal->rx_coalesce_usecs > 0xffff || - ethcoal->rx_max_coalesced_frames > 0xffff || - ethcoal->tx_coalesce_usecs > 0xffff || - ethcoal->tx_max_coalesced_frames > 0xffff || - ethcoal->rx_coalesce_usecs_irq || - ethcoal->rx_max_coalesced_frames_irq || - ethcoal->tx_coalesce_usecs_irq || - ethcoal->tx_max_coalesced_frames_irq || - ethcoal->stats_block_coalesce_usecs || - ethcoal->use_adaptive_rx_coalesce || - ethcoal->use_adaptive_tx_coalesce || - ethcoal->pkt_rate_low || - ethcoal->rx_coalesce_usecs_low || - ethcoal->rx_max_coalesced_frames_low || - ethcoal->tx_coalesce_usecs_low || - ethcoal->tx_max_coalesced_frames_low || - ethcoal->pkt_rate_high || - ethcoal->rx_coalesce_usecs_high || - ethcoal->rx_max_coalesced_frames_high || - ethcoal->tx_coalesce_usecs_high || - ethcoal->tx_max_coalesced_frames_high) - return -EINVAL; - - if (!ethcoal->rx_coalesce_usecs || - !ethcoal->rx_max_coalesced_frames) { - adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; - adapter->coal.normal.data.rx_time_us = - NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; - adapter->coal.normal.data.rx_packets = - NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; - } else { - adapter->coal.flags = 0; - adapter->coal.normal.data.rx_time_us = - ethcoal->rx_coalesce_usecs; - adapter->coal.normal.data.rx_packets = - ethcoal->rx_max_coalesced_frames; - } - adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs; - adapter->coal.normal.data.tx_packets = - ethcoal->tx_max_coalesced_frames; - - netxen_config_intr_coalesce(adapter); - - return 0; -} - -static int netxen_get_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - - if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) - return -EINVAL; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return -EINVAL; - - ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us; - ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us; - ethcoal->rx_max_coalesced_frames = - adapter->coal.normal.data.rx_packets; - ethcoal->tx_max_coalesced_frames = - adapter->coal.normal.data.tx_packets; - - return 0; -} - -const struct ethtool_ops netxen_nic_ethtool_ops = { - .get_settings = netxen_nic_get_settings, - .set_settings = netxen_nic_set_settings, - .get_drvinfo = netxen_nic_get_drvinfo, - .get_regs_len = netxen_nic_get_regs_len, - .get_regs = netxen_nic_get_regs, - .get_link = ethtool_op_get_link, - .get_eeprom_len = netxen_nic_get_eeprom_len, - .get_eeprom = netxen_nic_get_eeprom, - .get_ringparam = netxen_nic_get_ringparam, - .set_ringparam = netxen_nic_set_ringparam, - .get_pauseparam = netxen_nic_get_pauseparam, - .set_pauseparam = netxen_nic_set_pauseparam, - .get_wol = netxen_nic_get_wol, - .set_wol = netxen_nic_set_wol, - .self_test = netxen_nic_diag_test, - .get_strings = netxen_nic_get_strings, - .get_ethtool_stats = netxen_nic_get_ethtool_stats, - .get_sset_count = netxen_get_sset_count, - .get_coalesce = netxen_get_intr_coalesce, - .set_coalesce = netxen_set_intr_coalesce, -}; diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h deleted file mode 100644 index dc1967c1f312..000000000000 --- a/drivers/net/netxen/netxen_nic_hdr.h +++ /dev/null @@ -1,1050 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. 
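The const ops table just above is the driver's entire ethtool surface; it is attached to the netdev in netxen_nic_main.c (moved by this same patch) with the idiom of the period, something like:

	SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
	/* i.e. netdev->ethtool_ops = &netxen_nic_ethtool_ops; */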
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". - * - */ - -#ifndef __NETXEN_NIC_HDR_H_ -#define __NETXEN_NIC_HDR_H_ - -#include -#include - -/* - * The basic unit of access when reading/writing control registers. - */ - -typedef __le32 netxen_crbword_t; /* single word in CRB space */ - -enum { - NETXEN_HW_H0_CH_HUB_ADR = 0x05, - NETXEN_HW_H1_CH_HUB_ADR = 0x0E, - NETXEN_HW_H2_CH_HUB_ADR = 0x03, - NETXEN_HW_H3_CH_HUB_ADR = 0x01, - NETXEN_HW_H4_CH_HUB_ADR = 0x06, - NETXEN_HW_H5_CH_HUB_ADR = 0x07, - NETXEN_HW_H6_CH_HUB_ADR = 0x08 -}; - -/* Hub 0 */ -enum { - NETXEN_HW_MN_CRB_AGT_ADR = 0x15, - NETXEN_HW_MS_CRB_AGT_ADR = 0x25 -}; - -/* Hub 1 */ -enum { - NETXEN_HW_PS_CRB_AGT_ADR = 0x73, - NETXEN_HW_SS_CRB_AGT_ADR = 0x20, - NETXEN_HW_RPMX3_CRB_AGT_ADR = 0x0b, - NETXEN_HW_QMS_CRB_AGT_ADR = 0x00, - NETXEN_HW_SQGS0_CRB_AGT_ADR = 0x01, - NETXEN_HW_SQGS1_CRB_AGT_ADR = 0x02, - NETXEN_HW_SQGS2_CRB_AGT_ADR = 0x03, - NETXEN_HW_SQGS3_CRB_AGT_ADR = 0x04, - NETXEN_HW_C2C0_CRB_AGT_ADR = 0x58, - NETXEN_HW_C2C1_CRB_AGT_ADR = 0x59, - NETXEN_HW_C2C2_CRB_AGT_ADR = 0x5a, - NETXEN_HW_RPMX2_CRB_AGT_ADR = 0x0a, - NETXEN_HW_RPMX4_CRB_AGT_ADR = 0x0c, - NETXEN_HW_RPMX7_CRB_AGT_ADR = 0x0f, - NETXEN_HW_RPMX9_CRB_AGT_ADR = 0x12, - NETXEN_HW_SMB_CRB_AGT_ADR = 0x18 -}; - -/* Hub 2 */ -enum { - NETXEN_HW_NIU_CRB_AGT_ADR = 0x31, - NETXEN_HW_I2C0_CRB_AGT_ADR = 0x19, - NETXEN_HW_I2C1_CRB_AGT_ADR = 0x29, - - NETXEN_HW_SN_CRB_AGT_ADR = 0x10, - NETXEN_HW_I2Q_CRB_AGT_ADR = 0x20, - NETXEN_HW_LPC_CRB_AGT_ADR = 0x22, - NETXEN_HW_ROMUSB_CRB_AGT_ADR = 0x21, - NETXEN_HW_QM_CRB_AGT_ADR = 0x66, - NETXEN_HW_SQG0_CRB_AGT_ADR = 0x60, - NETXEN_HW_SQG1_CRB_AGT_ADR = 0x61, - NETXEN_HW_SQG2_CRB_AGT_ADR = 0x62, - NETXEN_HW_SQG3_CRB_AGT_ADR = 0x63, - NETXEN_HW_RPMX1_CRB_AGT_ADR = 0x09, - NETXEN_HW_RPMX5_CRB_AGT_ADR = 0x0d, - NETXEN_HW_RPMX6_CRB_AGT_ADR = 0x0e, - NETXEN_HW_RPMX8_CRB_AGT_ADR = 0x11 -}; - -/* Hub 3 */ -enum { - NETXEN_HW_PH_CRB_AGT_ADR = 0x1A, - NETXEN_HW_SRE_CRB_AGT_ADR = 0x50, - NETXEN_HW_EG_CRB_AGT_ADR = 0x51, - NETXEN_HW_RPMX0_CRB_AGT_ADR = 0x08 -}; - -/* Hub 4 */ -enum { - NETXEN_HW_PEGN0_CRB_AGT_ADR = 0x40, - NETXEN_HW_PEGN1_CRB_AGT_ADR, - NETXEN_HW_PEGN2_CRB_AGT_ADR, - NETXEN_HW_PEGN3_CRB_AGT_ADR, - NETXEN_HW_PEGNI_CRB_AGT_ADR, - NETXEN_HW_PEGND_CRB_AGT_ADR, - NETXEN_HW_PEGNC_CRB_AGT_ADR, - NETXEN_HW_PEGR0_CRB_AGT_ADR, - NETXEN_HW_PEGR1_CRB_AGT_ADR, - NETXEN_HW_PEGR2_CRB_AGT_ADR, - NETXEN_HW_PEGR3_CRB_AGT_ADR, - NETXEN_HW_PEGN4_CRB_AGT_ADR -}; - -/* Hub 5 */ -enum { - NETXEN_HW_PEGS0_CRB_AGT_ADR = 0x40, - NETXEN_HW_PEGS1_CRB_AGT_ADR, - NETXEN_HW_PEGS2_CRB_AGT_ADR, - NETXEN_HW_PEGS3_CRB_AGT_ADR, - NETXEN_HW_PEGSI_CRB_AGT_ADR, - NETXEN_HW_PEGSD_CRB_AGT_ADR, - NETXEN_HW_PEGSC_CRB_AGT_ADR -}; - -/* Hub 6 */ -enum { - NETXEN_HW_CAS0_CRB_AGT_ADR = 0x46, 
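The hub and agent numbers enumerated here are folded into single CRB base values by the NETXEN_HW_CRB_HUB_AGT_ADR_* macros below, each of the form (hub << 7) | agent, occupying CRB address bits [31:20]. Worked through for the MN agent on hub 0:

	NETXEN_HW_CRB_HUB_AGT_ADR_MN
		== (NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR
		== (0x05 << 7) | 0x15
		== 0x295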
- NETXEN_HW_CAS1_CRB_AGT_ADR = 0x47, - NETXEN_HW_CAS2_CRB_AGT_ADR = 0x48, - NETXEN_HW_CAS3_CRB_AGT_ADR = 0x49, - NETXEN_HW_NCM_CRB_AGT_ADR = 0x16, - NETXEN_HW_TMR_CRB_AGT_ADR = 0x17, - NETXEN_HW_XDMA_CRB_AGT_ADR = 0x05, - NETXEN_HW_OCM0_CRB_AGT_ADR = 0x06, - NETXEN_HW_OCM1_CRB_AGT_ADR = 0x07 -}; - -/* Floaters - non existent modules */ -#define NETXEN_HW_EFC_RPMX0_CRB_AGT_ADR 0x67 - -/* This field defines PCI/X adr [25:20] of agents on the CRB */ -enum { - NETXEN_HW_PX_MAP_CRB_PH = 0, - NETXEN_HW_PX_MAP_CRB_PS, - NETXEN_HW_PX_MAP_CRB_MN, - NETXEN_HW_PX_MAP_CRB_MS, - NETXEN_HW_PX_MAP_CRB_PGR1, - NETXEN_HW_PX_MAP_CRB_SRE, - NETXEN_HW_PX_MAP_CRB_NIU, - NETXEN_HW_PX_MAP_CRB_QMN, - NETXEN_HW_PX_MAP_CRB_SQN0, - NETXEN_HW_PX_MAP_CRB_SQN1, - NETXEN_HW_PX_MAP_CRB_SQN2, - NETXEN_HW_PX_MAP_CRB_SQN3, - NETXEN_HW_PX_MAP_CRB_QMS, - NETXEN_HW_PX_MAP_CRB_SQS0, - NETXEN_HW_PX_MAP_CRB_SQS1, - NETXEN_HW_PX_MAP_CRB_SQS2, - NETXEN_HW_PX_MAP_CRB_SQS3, - NETXEN_HW_PX_MAP_CRB_PGN0, - NETXEN_HW_PX_MAP_CRB_PGN1, - NETXEN_HW_PX_MAP_CRB_PGN2, - NETXEN_HW_PX_MAP_CRB_PGN3, - NETXEN_HW_PX_MAP_CRB_PGND, - NETXEN_HW_PX_MAP_CRB_PGNI, - NETXEN_HW_PX_MAP_CRB_PGS0, - NETXEN_HW_PX_MAP_CRB_PGS1, - NETXEN_HW_PX_MAP_CRB_PGS2, - NETXEN_HW_PX_MAP_CRB_PGS3, - NETXEN_HW_PX_MAP_CRB_PGSD, - NETXEN_HW_PX_MAP_CRB_PGSI, - NETXEN_HW_PX_MAP_CRB_SN, - NETXEN_HW_PX_MAP_CRB_PGR2, - NETXEN_HW_PX_MAP_CRB_EG, - NETXEN_HW_PX_MAP_CRB_PH2, - NETXEN_HW_PX_MAP_CRB_PS2, - NETXEN_HW_PX_MAP_CRB_CAM, - NETXEN_HW_PX_MAP_CRB_CAS0, - NETXEN_HW_PX_MAP_CRB_CAS1, - NETXEN_HW_PX_MAP_CRB_CAS2, - NETXEN_HW_PX_MAP_CRB_C2C0, - NETXEN_HW_PX_MAP_CRB_C2C1, - NETXEN_HW_PX_MAP_CRB_TIMR, - NETXEN_HW_PX_MAP_CRB_PGR3, - NETXEN_HW_PX_MAP_CRB_RPMX1, - NETXEN_HW_PX_MAP_CRB_RPMX2, - NETXEN_HW_PX_MAP_CRB_RPMX3, - NETXEN_HW_PX_MAP_CRB_RPMX4, - NETXEN_HW_PX_MAP_CRB_RPMX5, - NETXEN_HW_PX_MAP_CRB_RPMX6, - NETXEN_HW_PX_MAP_CRB_RPMX7, - NETXEN_HW_PX_MAP_CRB_XDMA, - NETXEN_HW_PX_MAP_CRB_I2Q, - NETXEN_HW_PX_MAP_CRB_ROMUSB, - NETXEN_HW_PX_MAP_CRB_CAS3, - NETXEN_HW_PX_MAP_CRB_RPMX0, - NETXEN_HW_PX_MAP_CRB_RPMX8, - NETXEN_HW_PX_MAP_CRB_RPMX9, - NETXEN_HW_PX_MAP_CRB_OCM0, - NETXEN_HW_PX_MAP_CRB_OCM1, - NETXEN_HW_PX_MAP_CRB_SMB, - NETXEN_HW_PX_MAP_CRB_I2C0, - NETXEN_HW_PX_MAP_CRB_I2C1, - NETXEN_HW_PX_MAP_CRB_LPC, - NETXEN_HW_PX_MAP_CRB_PGNC, - NETXEN_HW_PX_MAP_CRB_PGR0 -}; - -/* This field defines CRB adr [31:20] of the agents */ - -#define NETXEN_HW_CRB_HUB_AGT_ADR_MN \ - ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PH \ - ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_PH_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_MS \ - ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MS_CRB_AGT_ADR) - -#define NETXEN_HW_CRB_HUB_AGT_ADR_PS \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_PS_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SS \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SS_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX3_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_QMS \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_QMS_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS0 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS1 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS2 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS3 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS3_CRB_AGT_ADR) -#define 
NETXEN_HW_CRB_HUB_AGT_ADR_C2C0 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C1 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX4_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX7_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX9_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SMB \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SMB_CRB_AGT_ADR) - -#define NETXEN_HW_CRB_HUB_AGT_ADR_NIU \ - ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_NIU_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C0 \ - ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C1 \ - ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C1_CRB_AGT_ADR) - -#define NETXEN_HW_CRB_HUB_AGT_ADR_SRE \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SRE_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_EG \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_EG_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_QMN \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_QM_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN0 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN1 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN2 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN3 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG3_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX5_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX6_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX8_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS0 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS1 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS2 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS3 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS3_CRB_AGT_ADR) - -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNI \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNI_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGND \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGND_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN0 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN1 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN2 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN4 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN4_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \ - 
((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR1 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR2 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR3 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR3_CRB_AGT_ADR) - -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSI \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSI_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSD \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSD_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS0 \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS1 \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS2 \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS3 \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS3_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSC \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSC_CRB_AGT_ADR) - -#define NETXEN_HW_CRB_HUB_AGT_ADR_CAM \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_NCM_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_TIMR \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_TMR_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_XDMA \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_XDMA_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SN \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_SN_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_I2Q \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_I2Q_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_ROMUSB_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM0 \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM1 \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR) - -#define NETXEN_SRE_MISC (NETXEN_CRB_SRE + 0x0002c) -#define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034) -#define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014) -#define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000) -#define NETXEN_SRE_L2RE_CTL (NETXEN_CRB_SRE + 0x05000) -#define NETXEN_SRE_BUF_CTL (NETXEN_CRB_SRE + 0x01000) - -#define NETXEN_DMA_BASE(U) (NETXEN_CRB_PCIX_MD + 0x20000 + ((U)<<16)) -#define NETXEN_DMA_COMMAND(U) (NETXEN_DMA_BASE(U) + 0x00008) - -#define NETXEN_I2Q_CLR_PCI_HI (NETXEN_CRB_I2Q + 0x00034) - -#define PEG_NETWORK_BASE(N) (NETXEN_CRB_PEG_NET_0 + (((N)&3) << 20)) -#define CRB_REG_EX_PC 0x3c - -#define ROMUSB_GLB (NETXEN_CRB_ROMUSB + 0x00000) -#define ROMUSB_ROM (NETXEN_CRB_ROMUSB + 0x10000) - -#define NETXEN_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) -#define NETXEN_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) -#define NETXEN_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c) -#define NETXEN_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) -#define NETXEN_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044) -#define NETXEN_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) -#define NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8) - -#define NETXEN_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) - -#define NETXEN_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) -#define NETXEN_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) -#define 
NETXEN_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) -#define NETXEN_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) -#define NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) -#define NETXEN_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) - -/* Lock IDs for ROM lock */ -#define ROM_LOCK_DRIVER 0x0d417340 - -/****************************************************************************** -* -* Definitions specific to M25P flash -* -******************************************************************************* -* Instructions -*/ -#define M25P_INSTR_WREN 0x06 -#define M25P_INSTR_WRDI 0x04 -#define M25P_INSTR_RDID 0x9f -#define M25P_INSTR_RDSR 0x05 -#define M25P_INSTR_WRSR 0x01 -#define M25P_INSTR_READ 0x03 -#define M25P_INSTR_FAST_READ 0x0b -#define M25P_INSTR_PP 0x02 -#define M25P_INSTR_SE 0xd8 -#define M25P_INSTR_BE 0xc7 -#define M25P_INSTR_DP 0xb9 -#define M25P_INSTR_RES 0xab - -/* all are 1MB windows */ - -#define NETXEN_PCI_CRB_WINDOWSIZE 0x00100000 -#define NETXEN_PCI_CRB_WINDOW(A) \ - (NETXEN_PCI_CRBSPACE + (A)*NETXEN_PCI_CRB_WINDOWSIZE) - -#define NETXEN_CRB_NIU NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_NIU) -#define NETXEN_CRB_SRE NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SRE) -#define NETXEN_CRB_ROMUSB \ - NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) -#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) -#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0) -#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB) -#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) - -#define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH) -#define NETXEN_CRB_PCIX_HOST2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH2) -#define NETXEN_CRB_PEG_NET_0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN0) -#define NETXEN_CRB_PEG_NET_1 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN1) -#define NETXEN_CRB_PEG_NET_2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN2) -#define NETXEN_CRB_PEG_NET_3 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN3) -#define NETXEN_CRB_PEG_NET_4 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SQS2) -#define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND) -#define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI) -#define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN) -#define NETXEN_CRB_QDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SN) - -#define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS) -#define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD - -#define ISR_INT_VECTOR (NETXEN_PCIX_PS_REG(PCIX_INT_VECTOR)) -#define ISR_INT_MASK (NETXEN_PCIX_PS_REG(PCIX_INT_MASK)) -#define ISR_INT_MASK_SLOW (NETXEN_PCIX_PS_REG(PCIX_INT_MASK)) -#define ISR_INT_TARGET_STATUS (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS)) -#define ISR_INT_TARGET_MASK (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK)) -#define ISR_INT_TARGET_STATUS_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F1)) -#define ISR_INT_TARGET_MASK_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F1)) -#define ISR_INT_TARGET_STATUS_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F2)) -#define ISR_INT_TARGET_MASK_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) -#define ISR_INT_TARGET_STATUS_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) -#define ISR_INT_TARGET_MASK_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) -#define ISR_INT_TARGET_STATUS_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F4)) -#define ISR_INT_TARGET_MASK_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F4)) -#define ISR_INT_TARGET_STATUS_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F5)) -#define 
ISR_INT_TARGET_MASK_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F5)) -#define ISR_INT_TARGET_STATUS_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F6)) -#define ISR_INT_TARGET_MASK_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F6)) -#define ISR_INT_TARGET_STATUS_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) -#define ISR_INT_TARGET_MASK_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) - -#define NETXEN_PCI_MAPSIZE 128 -#define NETXEN_PCI_DDR_NET (0x00000000UL) -#define NETXEN_PCI_QDR_NET (0x04000000UL) -#define NETXEN_PCI_DIRECT_CRB (0x04400000UL) -#define NETXEN_PCI_CAMQM (0x04800000UL) -#define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL) -#define NETXEN_PCI_OCM0 (0x05000000UL) -#define NETXEN_PCI_OCM0_MAX (0x050fffffUL) -#define NETXEN_PCI_OCM1 (0x05100000UL) -#define NETXEN_PCI_OCM1_MAX (0x051fffffUL) -#define NETXEN_PCI_CRBSPACE (0x06000000UL) -#define NETXEN_PCI_128MB_SIZE (0x08000000UL) -#define NETXEN_PCI_32MB_SIZE (0x02000000UL) -#define NETXEN_PCI_2MB_SIZE (0x00200000UL) - -#define NETXEN_PCI_MN_2M (0) -#define NETXEN_PCI_MS_2M (0x80000) -#define NETXEN_PCI_OCM0_2M (0x000c0000UL) -#define NETXEN_PCI_CAMQM_2M_BASE (0x000ff800UL) -#define NETXEN_PCI_CAMQM_2M_END (0x04800800UL) - -#define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM) - -#define NETXEN_ADDR_DDR_NET (0x0000000000000000ULL) -#define NETXEN_ADDR_DDR_NET_MAX (0x000000000fffffffULL) -#define NETXEN_ADDR_OCM0 (0x0000000200000000ULL) -#define NETXEN_ADDR_OCM0_MAX (0x00000002000fffffULL) -#define NETXEN_ADDR_OCM1 (0x0000000200400000ULL) -#define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL) -#define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL) -#define NETXEN_ADDR_QDR_NET_MAX_P2 (0x00000003003fffffULL) -#define NETXEN_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL) - -/* - * Register offsets for MN - */ -#define NETXEN_MIU_CONTROL (0x000) -#define NETXEN_MIU_MN_CONTROL (NETXEN_CRB_DDR_NET+NETXEN_MIU_CONTROL) - - /* 200ms delay in each loop */ -#define NETXEN_NIU_PHY_WAITLEN 200000 - /* 10 seconds before we give up */ -#define NETXEN_NIU_PHY_WAITMAX 50 -#define NETXEN_NIU_MAX_GBE_PORTS 4 -#define NETXEN_NIU_MAX_XG_PORTS 2 - -#define NETXEN_NIU_MODE (NETXEN_CRB_NIU + 0x00000) - -#define NETXEN_NIU_XG_SINGLE_TERM (NETXEN_CRB_NIU + 0x00004) -#define NETXEN_NIU_XG_DRIVE_HI (NETXEN_CRB_NIU + 0x00008) -#define NETXEN_NIU_XG_DRIVE_LO (NETXEN_CRB_NIU + 0x0000c) -#define NETXEN_NIU_XG_DTX (NETXEN_CRB_NIU + 0x00010) -#define NETXEN_NIU_XG_DEQ (NETXEN_CRB_NIU + 0x00014) -#define NETXEN_NIU_XG_WORD_ALIGN (NETXEN_CRB_NIU + 0x00018) -#define NETXEN_NIU_XG_RESET (NETXEN_CRB_NIU + 0x0001c) -#define NETXEN_NIU_XG_POWER_DOWN (NETXEN_CRB_NIU + 0x00020) -#define NETXEN_NIU_XG_RESET_PLL (NETXEN_CRB_NIU + 0x00024) -#define NETXEN_NIU_XG_SERDES_LOOPBACK (NETXEN_CRB_NIU + 0x00028) -#define NETXEN_NIU_XG_DO_BYTE_ALIGN (NETXEN_CRB_NIU + 0x0002c) -#define NETXEN_NIU_XG_TX_ENABLE (NETXEN_CRB_NIU + 0x00030) -#define NETXEN_NIU_XG_RX_ENABLE (NETXEN_CRB_NIU + 0x00034) -#define NETXEN_NIU_XG_STATUS (NETXEN_CRB_NIU + 0x00038) -#define NETXEN_NIU_XG_PAUSE_THRESHOLD (NETXEN_CRB_NIU + 0x0003c) -#define NETXEN_NIU_INT_MASK (NETXEN_CRB_NIU + 0x00040) -#define NETXEN_NIU_ACTIVE_INT (NETXEN_CRB_NIU + 0x00044) -#define NETXEN_NIU_MASKABLE_INT (NETXEN_CRB_NIU + 0x00048) - -#define NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER (NETXEN_CRB_NIU + 0x0004c) - -#define NETXEN_NIU_GB_SERDES_RESET (NETXEN_CRB_NIU + 0x00050) -#define NETXEN_NIU_GB0_GMII_MODE (NETXEN_CRB_NIU + 0x00054) -#define NETXEN_NIU_GB0_MII_MODE (NETXEN_CRB_NIU + 0x00058) -#define NETXEN_NIU_GB1_GMII_MODE (NETXEN_CRB_NIU + 0x0005c) 
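/*
 * The GBn GMII/MII mode selects around this point follow a fixed 8-byte
 * stride per GbE port (up to NETXEN_NIU_MAX_GBE_PORTS of them): port n
 * has its GMII select at NETXEN_CRB_NIU + 0x54 + 8*n and its MII select
 * at NETXEN_CRB_NIU + 0x58 + 8*n.  A generic accessor -- hypothetical,
 * not part of this header -- would therefore read:
 *
 *   #define NETXEN_NIU_GB_GMII_MODE(n) (NETXEN_CRB_NIU + 0x54 + 8*(n))
 *   #define NETXEN_NIU_GB_MII_MODE(n)  (NETXEN_CRB_NIU + 0x58 + 8*(n))
 */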
-#define NETXEN_NIU_GB1_MII_MODE (NETXEN_CRB_NIU + 0x00060) -#define NETXEN_NIU_GB2_GMII_MODE (NETXEN_CRB_NIU + 0x00064) -#define NETXEN_NIU_GB2_MII_MODE (NETXEN_CRB_NIU + 0x00068) -#define NETXEN_NIU_GB3_GMII_MODE (NETXEN_CRB_NIU + 0x0006c) -#define NETXEN_NIU_GB3_MII_MODE (NETXEN_CRB_NIU + 0x00070) -#define NETXEN_NIU_REMOTE_LOOPBACK (NETXEN_CRB_NIU + 0x00074) -#define NETXEN_NIU_GB0_HALF_DUPLEX (NETXEN_CRB_NIU + 0x00078) -#define NETXEN_NIU_GB1_HALF_DUPLEX (NETXEN_CRB_NIU + 0x0007c) -#define NETXEN_NIU_RESET_SYS_FIFOS (NETXEN_CRB_NIU + 0x00088) -#define NETXEN_NIU_GB_CRC_DROP (NETXEN_CRB_NIU + 0x0008c) -#define NETXEN_NIU_GB_DROP_WRONGADDR (NETXEN_CRB_NIU + 0x00090) -#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094) -#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098) -#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc) -#define NETXEN_NIU_FRAME_COUNT_SELECT (NETXEN_CRB_NIU + 0x000ac) -#define NETXEN_NIU_FRAME_COUNT (NETXEN_CRB_NIU + 0x000b0) -#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128) -#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c) - -#define NETXEN_NIU_FULL_LEVEL_XG (NETXEN_CRB_NIU + 0x00450) - -#define NETXEN_NIU_XG1_RESET (NETXEN_CRB_NIU + 0x0011c) -#define NETXEN_NIU_XG1_POWER_DOWN (NETXEN_CRB_NIU + 0x00120) -#define NETXEN_NIU_XG1_RESET_PLL (NETXEN_CRB_NIU + 0x00124) - -#define NETXEN_MAC_ADDR_CNTL_REG (NETXEN_CRB_NIU + 0x1000) - -#define NETXEN_MULTICAST_ADDR_HI_0 (NETXEN_CRB_NIU + 0x1010) -#define NETXEN_MULTICAST_ADDR_HI_1 (NETXEN_CRB_NIU + 0x1014) -#define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018) -#define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c) - -#define NETXEN_UNICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1080) -#define NETXEN_MULTICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1100) - -#define NETXEN_NIU_GB_MAC_CONFIG_0(I) \ - (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000) -#define NETXEN_NIU_GB_MAC_CONFIG_1(I) \ - (NETXEN_CRB_NIU + 0x30004 + (I)*0x10000) -#define NETXEN_NIU_GB_MAC_IPG_IFG(I) \ - (NETXEN_CRB_NIU + 0x30008 + (I)*0x10000) -#define NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I) \ - (NETXEN_CRB_NIU + 0x3000c + (I)*0x10000) -#define NETXEN_NIU_GB_MAX_FRAME_SIZE(I) \ - (NETXEN_CRB_NIU + 0x30010 + (I)*0x10000) -#define NETXEN_NIU_GB_TEST_REG(I) \ - (NETXEN_CRB_NIU + 0x3001c + (I)*0x10000) -#define NETXEN_NIU_GB_MII_MGMT_CONFIG(I) \ - (NETXEN_CRB_NIU + 0x30020 + (I)*0x10000) -#define NETXEN_NIU_GB_MII_MGMT_COMMAND(I) \ - (NETXEN_CRB_NIU + 0x30024 + (I)*0x10000) -#define NETXEN_NIU_GB_MII_MGMT_ADDR(I) \ - (NETXEN_CRB_NIU + 0x30028 + (I)*0x10000) -#define NETXEN_NIU_GB_MII_MGMT_CTRL(I) \ - (NETXEN_CRB_NIU + 0x3002c + (I)*0x10000) -#define NETXEN_NIU_GB_MII_MGMT_STATUS(I) \ - (NETXEN_CRB_NIU + 0x30030 + (I)*0x10000) -#define NETXEN_NIU_GB_MII_MGMT_INDICATE(I) \ - (NETXEN_CRB_NIU + 0x30034 + (I)*0x10000) -#define NETXEN_NIU_GB_INTERFACE_CTRL(I) \ - (NETXEN_CRB_NIU + 0x30038 + (I)*0x10000) -#define NETXEN_NIU_GB_INTERFACE_STATUS(I) \ - (NETXEN_CRB_NIU + 0x3003c + (I)*0x10000) -#define NETXEN_NIU_GB_STATION_ADDR_0(I) \ - (NETXEN_CRB_NIU + 0x30040 + (I)*0x10000) -#define NETXEN_NIU_GB_STATION_ADDR_1(I) \ - (NETXEN_CRB_NIU + 0x30044 + (I)*0x10000) - -#define NETXEN_NIU_XGE_CONFIG_0 (NETXEN_CRB_NIU + 0x70000) -#define NETXEN_NIU_XGE_CONFIG_1 (NETXEN_CRB_NIU + 0x70004) -#define NETXEN_NIU_XGE_IPG (NETXEN_CRB_NIU + 0x70008) -#define NETXEN_NIU_XGE_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x7000c) -#define NETXEN_NIU_XGE_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x70010) -#define NETXEN_NIU_XGE_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x70014) -#define 
NETXEN_NIU_XGE_STATUS (NETXEN_CRB_NIU + 0x70018) -#define NETXEN_NIU_XGE_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x7001c) -#define NETXEN_NIU_XGE_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x70020) -#define NETXEN_NIU_XGE_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x70024) -#define NETXEN_NIU_XGE_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x70028) -#define NETXEN_NIU_XGE_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x7002c) -#define NETXEN_NIU_XGE_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x70030) -#define NETXEN_NIU_XGE_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x70034) -#define NETXEN_NIU_XGE_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x70038) -#define NETXEN_NIU_XGE_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x7003c) -#define NETXEN_NIU_XGE_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x70040) -#define NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70044) -#define NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70048) -#define NETXEN_NIU_XGE_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x7004c) -#define NETXEN_NIU_XGE_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x70050) -#define NETXEN_NIU_XGE_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x70054) -#define NETXEN_NIU_XGE_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x70058) -#define NETXEN_NIU_XG1_CONFIG_0 (NETXEN_CRB_NIU + 0x80000) -#define NETXEN_NIU_XG1_CONFIG_1 (NETXEN_CRB_NIU + 0x80004) -#define NETXEN_NIU_XG1_IPG (NETXEN_CRB_NIU + 0x80008) -#define NETXEN_NIU_XG1_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x8000c) -#define NETXEN_NIU_XG1_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x80010) -#define NETXEN_NIU_XG1_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x80014) -#define NETXEN_NIU_XG1_STATUS (NETXEN_CRB_NIU + 0x80018) -#define NETXEN_NIU_XG1_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x8001c) -#define NETXEN_NIU_XG1_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x80020) -#define NETXEN_NIU_XG1_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x80024) -#define NETXEN_NIU_XG1_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x80028) -#define NETXEN_NIU_XG1_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x8002c) -#define NETXEN_NIU_XG1_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x80030) -#define NETXEN_NIU_XG1_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x80034) -#define NETXEN_NIU_XG1_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x80038) -#define NETXEN_NIU_XG1_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x8003c) -#define NETXEN_NIU_XG1_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x80040) -#define NETXEN_NIU_XG1_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80044) -#define NETXEN_NIU_XG1_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80048) -#define NETXEN_NIU_XG1_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x8004c) -#define NETXEN_NIU_XG1_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x80050) -#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054) -#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058) - -/* P3 802.3ap */ -#define NETXEN_NIU_AP_MAC_CONFIG_0(I) (NETXEN_CRB_NIU+0xa0000+(I)*0x10000) -#define NETXEN_NIU_AP_MAC_CONFIG_1(I) (NETXEN_CRB_NIU+0xa0004+(I)*0x10000) -#define NETXEN_NIU_AP_MAC_IPG_IFG(I) (NETXEN_CRB_NIU+0xa0008+(I)*0x10000) -#define NETXEN_NIU_AP_HALF_DUPLEX_CTRL(I) (NETXEN_CRB_NIU+0xa000c+(I)*0x10000) -#define NETXEN_NIU_AP_MAX_FRAME_SIZE(I) (NETXEN_CRB_NIU+0xa0010+(I)*0x10000) -#define NETXEN_NIU_AP_TEST_REG(I) (NETXEN_CRB_NIU+0xa001c+(I)*0x10000) -#define NETXEN_NIU_AP_MII_MGMT_CONFIG(I) (NETXEN_CRB_NIU+0xa0020+(I)*0x10000) -#define NETXEN_NIU_AP_MII_MGMT_COMMAND(I) (NETXEN_CRB_NIU+0xa0024+(I)*0x10000) -#define NETXEN_NIU_AP_MII_MGMT_ADDR(I) (NETXEN_CRB_NIU+0xa0028+(I)*0x10000) -#define NETXEN_NIU_AP_MII_MGMT_CTRL(I) (NETXEN_CRB_NIU+0xa002c+(I)*0x10000) -#define NETXEN_NIU_AP_MII_MGMT_STATUS(I) (NETXEN_CRB_NIU+0xa0030+(I)*0x10000) -#define NETXEN_NIU_AP_MII_MGMT_INDICATE(I) 
(NETXEN_CRB_NIU+0xa0034+(I)*0x10000) -#define NETXEN_NIU_AP_INTERFACE_CTRL(I) (NETXEN_CRB_NIU+0xa0038+(I)*0x10000) -#define NETXEN_NIU_AP_INTERFACE_STATUS(I) (NETXEN_CRB_NIU+0xa003c+(I)*0x10000) -#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000) -#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000) - - -#define TEST_AGT_CTRL (0x00) - -#define TA_CTL_START 1 -#define TA_CTL_ENABLE 2 -#define TA_CTL_WRITE 4 -#define TA_CTL_BUSY 8 - -/* - * Register offsets for MN - */ -#define MIU_TEST_AGT_BASE (0x90) - -#define MIU_TEST_AGT_ADDR_LO (0x04) -#define MIU_TEST_AGT_ADDR_HI (0x08) -#define MIU_TEST_AGT_WRDATA_LO (0x10) -#define MIU_TEST_AGT_WRDATA_HI (0x14) -#define MIU_TEST_AGT_RDDATA_LO (0x18) -#define MIU_TEST_AGT_RDDATA_HI (0x1c) - -#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 -#define MIU_TEST_AGT_UPPER_ADDR(off) (0) - -/* - * Register offsets for MS - */ -#define SIU_TEST_AGT_BASE (0x60) - -#define SIU_TEST_AGT_ADDR_LO (0x04) -#define SIU_TEST_AGT_ADDR_HI (0x18) -#define SIU_TEST_AGT_WRDATA_LO (0x08) -#define SIU_TEST_AGT_WRDATA_HI (0x0c) -#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i))) -#define SIU_TEST_AGT_RDDATA_LO (0x10) -#define SIU_TEST_AGT_RDDATA_HI (0x14) -#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i))) - -#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 -#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) - -/* XG Link status */ -#define XG_LINK_UP 0x10 -#define XG_LINK_DOWN 0x20 - -#define XG_LINK_UP_P3 0x01 -#define XG_LINK_DOWN_P3 0x02 -#define XG_LINK_STATE_P3_MASK 0xf -#define XG_LINK_STATE_P3(pcifn,val) \ - (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) - -#define P3_LINK_SPEED_MHZ 100 -#define P3_LINK_SPEED_MASK 0xff -#define P3_LINK_SPEED_REG(pcifn) \ - (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) -#define P3_LINK_SPEED_VAL(pcifn, reg) \ - (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK) - -#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) -#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) -#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) -#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154)) -#define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158)) -#define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100)) -#define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120)) -#define NETXEN_CRB_WIN_LOCK_ID (NETXEN_CAM_RAM(0x124)) - -#define NIC_CRB_BASE (NETXEN_CAM_RAM(0x200)) -#define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700)) -#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X)) -#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X)) - -#define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18)) -#define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c)) -#define NX_ARG2_CRB_OFFSET (NETXEN_NIC_REG(0x20)) -#define NX_ARG3_CRB_OFFSET (NETXEN_NIC_REG(0x24)) -#define NX_SIGN_CRB_OFFSET (NETXEN_NIC_REG(0x28)) - -#define CRB_HOST_DUMMY_BUF_ADDR_HI (NETXEN_NIC_REG(0x3c)) -#define CRB_HOST_DUMMY_BUF_ADDR_LO (NETXEN_NIC_REG(0x40)) - -#define CRB_CMDPEG_STATE (NETXEN_NIC_REG(0x50)) -#define CRB_RCVPEG_STATE (NETXEN_NIC_REG(0x13c)) - -#define CRB_XG_STATE (NETXEN_NIC_REG(0x94)) -#define CRB_XG_STATE_P3 (NETXEN_NIC_REG(0x98)) -#define CRB_PF_LINK_SPEED_1 (NETXEN_NIC_REG(0xe8)) -#define CRB_PF_LINK_SPEED_2 (NETXEN_NIC_REG(0xec)) - -#define CRB_MPORT_MODE (NETXEN_NIC_REG(0xc4)) -#define CRB_DMA_SHIFT (NETXEN_NIC_REG(0xcc)) -#define CRB_INT_VECTOR (NETXEN_NIC_REG(0xd4)) - -#define CRB_CMD_PRODUCER_OFFSET (NETXEN_NIC_REG(0x08)) -#define CRB_CMD_CONSUMER_OFFSET (NETXEN_NIC_REG(0x0c)) -#define CRB_CMD_PRODUCER_OFFSET_1 (NETXEN_NIC_REG(0x1ac)) -#define CRB_CMD_CONSUMER_OFFSET_1 
(NETXEN_NIC_REG(0x1b0)) -#define CRB_CMD_PRODUCER_OFFSET_2 (NETXEN_NIC_REG(0x1b8)) -#define CRB_CMD_CONSUMER_OFFSET_2 (NETXEN_NIC_REG(0x1bc)) -#define CRB_CMD_PRODUCER_OFFSET_3 (NETXEN_NIC_REG(0x1d0)) -#define CRB_CMD_CONSUMER_OFFSET_3 (NETXEN_NIC_REG(0x1d4)) -#define CRB_TEMP_STATE (NETXEN_NIC_REG(0x1b4)) - -#define CRB_V2P_0 (NETXEN_NIC_REG(0x290)) -#define CRB_V2P(port) (CRB_V2P_0+((port)*4)) -#define CRB_DRIVER_VERSION (NETXEN_NIC_REG(0x2a0)) - -#define CRB_SW_INT_MASK_0 (NETXEN_NIC_REG(0x1d8)) -#define CRB_SW_INT_MASK_1 (NETXEN_NIC_REG(0x1e0)) -#define CRB_SW_INT_MASK_2 (NETXEN_NIC_REG(0x1e4)) -#define CRB_SW_INT_MASK_3 (NETXEN_NIC_REG(0x1e8)) - -#define CRB_FW_CAPABILITIES_1 (NETXEN_CAM_RAM(0x128)) -#define CRB_MAC_BLOCK_START (NETXEN_CAM_RAM(0x1c0)) - -/* - * capabilities register, can be used to selectively enable/disable features - * for backward compatibility - */ -#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8) -#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270) - -#define INTR_SCHEME_PERPORT 0x1 -#define MSI_MODE_MULTIFUNC 0x1 - -/* used for ethtool tests */ -#define CRB_SCRATCHPAD_TEST NETXEN_NIC_REG(0x280) - -/* - * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address - * which can be read by the Phantom host to get producer/consumer indexes from - * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following - * registers will be used for the addresses of the ring's shared memory - * on the Phantom. - */ - -#define nx_get_temp_val(x) ((x) >> 16) -#define nx_get_temp_state(x) ((x) & 0xffff) -#define nx_encode_temp(val, state) (((val) << 16) | (state)) - -/* - * Temperature control. - */ -enum { - NX_TEMP_NORMAL = 0x1, /* Normal operating range */ - NX_TEMP_WARN, /* Sound alert, temperature getting high */ - NX_TEMP_PANIC /* Fatal error, hardware has shut down. */ -}; - -/* Lock IDs for PHY lock */ -#define PHY_LOCK_DRIVER 0x44524956 - -/* Used for PS PCI Memory access */ -#define PCIX_PS_OP_ADDR_LO (0x10000) -/* via CRB (PS side only) */ -#define PCIX_PS_OP_ADDR_HI (0x10004) - -#define PCIX_INT_VECTOR (0x10100) -#define PCIX_INT_MASK (0x10104) - -#define PCIX_CRB_WINDOW (0x10210) -#define PCIX_CRB_WINDOW_F0 (0x10210) -#define PCIX_CRB_WINDOW_F1 (0x10230) -#define PCIX_CRB_WINDOW_F2 (0x10250) -#define PCIX_CRB_WINDOW_F3 (0x10270) -#define PCIX_CRB_WINDOW_F4 (0x102ac) -#define PCIX_CRB_WINDOW_F5 (0x102bc) -#define PCIX_CRB_WINDOW_F6 (0x102cc) -#define PCIX_CRB_WINDOW_F7 (0x102dc) -#define PCIE_CRB_WINDOW_REG(func) (((func) < 4) ? \ - (PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\ - (PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4)))) - -#define PCIX_MN_WINDOW (0x10200) -#define PCIX_MN_WINDOW_F0 (0x10200) -#define PCIX_MN_WINDOW_F1 (0x10220) -#define PCIX_MN_WINDOW_F2 (0x10240) -#define PCIX_MN_WINDOW_F3 (0x10260) -#define PCIX_MN_WINDOW_F4 (0x102a0) -#define PCIX_MN_WINDOW_F5 (0x102b0) -#define PCIX_MN_WINDOW_F6 (0x102c0) -#define PCIX_MN_WINDOW_F7 (0x102d0) -#define PCIE_MN_WINDOW_REG(func) (((func) < 4) ? \ - (PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\ - (PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4)))) - -#define PCIX_SN_WINDOW (0x10208) -#define PCIX_SN_WINDOW_F0 (0x10208) -#define PCIX_SN_WINDOW_F1 (0x10228) -#define PCIX_SN_WINDOW_F2 (0x10248) -#define PCIX_SN_WINDOW_F3 (0x10268) -#define PCIX_SN_WINDOW_F4 (0x102a8) -#define PCIX_SN_WINDOW_F5 (0x102b8) -#define PCIX_SN_WINDOW_F6 (0x102c8) -#define PCIX_SN_WINDOW_F7 (0x102d8) -#define PCIE_SN_WINDOW_REG(func) (((func) < 4) ? 
\ - (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\ - (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4)))) - -#define PCIX_OCM_WINDOW (0x10800) -#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func)) - -#define PCIX_TARGET_STATUS (0x10118) -#define PCIX_TARGET_STATUS_F1 (0x10160) -#define PCIX_TARGET_STATUS_F2 (0x10164) -#define PCIX_TARGET_STATUS_F3 (0x10168) -#define PCIX_TARGET_STATUS_F4 (0x10360) -#define PCIX_TARGET_STATUS_F5 (0x10364) -#define PCIX_TARGET_STATUS_F6 (0x10368) -#define PCIX_TARGET_STATUS_F7 (0x1036c) - -#define PCIX_TARGET_MASK (0x10128) -#define PCIX_TARGET_MASK_F1 (0x10170) -#define PCIX_TARGET_MASK_F2 (0x10174) -#define PCIX_TARGET_MASK_F3 (0x10178) -#define PCIX_TARGET_MASK_F4 (0x10370) -#define PCIX_TARGET_MASK_F5 (0x10374) -#define PCIX_TARGET_MASK_F6 (0x10378) -#define PCIX_TARGET_MASK_F7 (0x1037c) - -#define PCIX_MSI_F0 (0x13000) -#define PCIX_MSI_F1 (0x13004) -#define PCIX_MSI_F2 (0x13008) -#define PCIX_MSI_F3 (0x1300c) -#define PCIX_MSI_F4 (0x13010) -#define PCIX_MSI_F5 (0x13014) -#define PCIX_MSI_F6 (0x13018) -#define PCIX_MSI_F7 (0x1301c) -#define PCIX_MSI_F(i) (0x13000+((i)*4)) - -#define PCIX_PS_MEM_SPACE (0x90000) - -#define NETXEN_PCIX_PH_REG(reg) (NETXEN_CRB_PCIE + (reg)) -#define NETXEN_PCIX_PS_REG(reg) (NETXEN_CRB_PCIX_MD + (reg)) - -#define NETXEN_PCIE_REG(reg) (NETXEN_CRB_PCIE + (reg)) - -#define PCIE_MAX_DMA_XFER_SIZE (0x1404c) - -#define PCIE_DCR 0x00d8 - -#define PCIE_SEM0_LOCK (0x1c000) -#define PCIE_SEM0_UNLOCK (0x1c004) -#define PCIE_SEM1_LOCK (0x1c008) -#define PCIE_SEM1_UNLOCK (0x1c00c) -#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */ -#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */ -#define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */ -#define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */ -#define PCIE_SEM4_LOCK (0x1c020) -#define PCIE_SEM4_UNLOCK (0x1c024) -#define PCIE_SEM5_LOCK (0x1c028) /* API lock */ -#define PCIE_SEM5_UNLOCK (0x1c02c) /* API unlock */ -#define PCIE_SEM6_LOCK (0x1c030) /* sw lock */ -#define PCIE_SEM6_UNLOCK (0x1c034) /* sw unlock */ -#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */ -#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/ -#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N)) -#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N)) - -#define PCIE_SETUP_FUNCTION (0x12040) -#define PCIE_SETUP_FUNCTION2 (0x12048) -#define PCIE_MISCCFG_RC (0x1206c) -#define PCIE_TGT_SPLIT_CHICKEN (0x12080) -#define PCIE_CHICKEN3 (0x120c8) - -#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC)) -#define PCIE_MAX_MASTER_SPLIT (0x14048) - -#define NETXEN_PORT_MODE_NONE 0 -#define NETXEN_PORT_MODE_XG 1 -#define NETXEN_PORT_MODE_GB 2 -#define NETXEN_PORT_MODE_802_3_AP 3 -#define NETXEN_PORT_MODE_AUTO_NEG 4 -#define NETXEN_PORT_MODE_AUTO_NEG_1G 5 -#define NETXEN_PORT_MODE_AUTO_NEG_XG 6 -#define NETXEN_PORT_MODE_ADDR (NETXEN_CAM_RAM(0x24)) -#define NETXEN_WOL_PORT_MODE (NETXEN_CAM_RAM(0x198)) - -#define NETXEN_WOL_CONFIG_NV (NETXEN_CAM_RAM(0x184)) -#define NETXEN_WOL_CONFIG (NETXEN_CAM_RAM(0x188)) - -#define NX_PEG_TUNE_MN_PRESENT 0x1 -#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c)) - -#define NETXEN_DMA_WATCHDOG_CTRL (NETXEN_CAM_RAM(0x14)) -#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0)) -#define NETXEN_PEG_HALT_STATUS1 (NETXEN_CAM_RAM(0xa8)) -#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac)) -#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138)) -#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140)) - -/* Device State */ -#define NX_DEV_COLD 1 -#define NX_DEV_INITALIZING 2 -#define NX_DEV_READY 3 -#define 
NX_DEV_NEED_RESET 4 -#define NX_DEV_NEED_QUISCENT 5 -#define NX_DEV_NEED_AER 6 -#define NX_DEV_FAILED 7 - -#define NX_RCODE_DRIVER_INFO 0x20000000 -#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000 -#define NX_RCODE_FATAL_ERROR 0x80000000 -#define NX_FWERROR_PEGNUM(code) ((code) & 0xff) -#define NX_FWERROR_CODE(code) ((code >> 8) & 0xfffff) - -#define FW_POLL_DELAY (2 * HZ) -#define FW_FAIL_THRESH 3 -#define FW_POLL_THRESH 10 - -#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) -#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) - -/* - * PCI Interrupt Vector Values. - */ -#define PCIX_INT_VECTOR_BIT_F0 0x0080 -#define PCIX_INT_VECTOR_BIT_F1 0x0100 -#define PCIX_INT_VECTOR_BIT_F2 0x0200 -#define PCIX_INT_VECTOR_BIT_F3 0x0400 -#define PCIX_INT_VECTOR_BIT_F4 0x0800 -#define PCIX_INT_VECTOR_BIT_F5 0x1000 -#define PCIX_INT_VECTOR_BIT_F6 0x2000 -#define PCIX_INT_VECTOR_BIT_F7 0x4000 - -struct netxen_legacy_intr_set { - uint32_t int_vec_bit; - uint32_t tgt_status_reg; - uint32_t tgt_mask_reg; - uint32_t pci_int_reg; -}; - -#define NX_LEGACY_INTR_CONFIG \ -{ \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \ -} - -#endif /* __NETXEN_NIC_HDR_H_ */ diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c deleted file mode 100644 index 3f89e57cae50..000000000000 --- a/drivers/net/netxen/netxen_nic_hw.c +++ /dev/null @@ -1,1976 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". - * - */ - -#include <linux/slab.h> -#include "netxen_nic.h" -#include "netxen_nic_hw.h" - -#include <net/ip.h> - -#define MASK(n) ((1ULL<<(n))-1) -#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) -#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) -#define MS_WIN(addr) (addr & 0x0ffc0000) - -#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) - -#define CRB_BLK(off) ((off >> 20) & 0x3f) -#define CRB_SUBBLK(off) ((off >> 16) & 0xf) -#define CRB_WINDOW_2M (0x130060) -#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) -#define CRB_INDIRECT_2M (0x1e0000UL) - -static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, - void __iomem *addr, u32 data); -static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, - void __iomem *addr); - -#ifndef readq -static inline u64 readq(void __iomem *addr) -{ - return readl(addr) | (((u64) readl(addr + 4)) << 32LL); -} -#endif - -#ifndef writeq -static inline void writeq(u64 val, void __iomem *addr) -{ - writel(((u32) (val)), (addr)); - writel(((u32) (val >> 32)), (addr + 4)); -} -#endif - -#define PCI_OFFSET_FIRST_RANGE(adapter, off) \ - ((adapter)->ahw.pci_base0 + (off)) -#define PCI_OFFSET_SECOND_RANGE(adapter, off) \ - ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START) -#define PCI_OFFSET_THIRD_RANGE(adapter, off) \ - ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START) - -static void __iomem *pci_base_offset(struct netxen_adapter *adapter, - unsigned long off) -{ - if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END)) - return PCI_OFFSET_FIRST_RANGE(adapter, off); - - if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END)) - return PCI_OFFSET_SECOND_RANGE(adapter, off); - - if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END)) - return PCI_OFFSET_THIRD_RANGE(adapter, off); - - return NULL; -} - -static crb_128M_2M_block_map_t -crb_128M_2M_map[64] __cacheline_aligned_in_smp = { - {{{0, 0, 0, 0} } }, /* 0: PCI */ - {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ - {1, 0x0110000, 0x0120000, 0x130000}, - {1, 0x0120000, 0x0122000, 0x124000}, - {1, 0x0130000, 0x0132000, 0x126000}, - {1, 0x0140000, 0x0142000, 0x128000}, - {1, 0x0150000, 0x0152000, 0x12a000}, - {1, 0x0160000, 0x0170000, 0x110000}, - {1, 0x0170000, 0x0172000, 0x12e000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x01e0000, 0x01e0800, 0x122000}, - {0, 0x0000000, 0x0000000, 0x000000} } }, - {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ - {{{0, 0, 0, 0} } }, /* 3: */ - {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ - {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ - {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ - {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ - {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, -
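/*
 * Each sub-block row in this table is a {valid, start-in-128M-map,
 * end-in-128M-map, start-in-2M-map} tuple (field names here are
 * descriptive, not taken from the source): when a CRB offset laid out
 * for the legacy 128MB BAR falls inside the [start, end) range of a
 * valid row, the access is rebased to the corresponding offset inside
 * the 2MB BAR.  All-zero rows are sub-blocks with no 2M-map equivalent.
 */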
{0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x08f0000, 0x08f2000, 0x172000} } }, - {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x09f0000, 0x09f2000, 0x176000} } }, - {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x0af0000, 0x0af2000, 0x17a000} } }, - {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, - {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ - {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ - {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ - {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ - {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ - {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ - {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ - {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ - {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ - {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ - {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ - {{{0, 0, 0, 0} } }, /* 23: */ - {{{0, 0, 0, 0} } }, /* 24: */ - {{{0, 0, 0, 0} } }, /* 25: */ - {{{0, 0, 0, 0} } }, /* 26: */ - {{{0, 0, 0, 0} } }, /* 27: */ - {{{0, 0, 0, 0} } }, /* 28: */ - {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ - {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ - {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ - {{{0} } }, /* 32: PCI */ - {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ - {1, 0x2110000, 0x2120000, 0x130000}, - {1, 0x2120000, 0x2122000, 0x124000}, - {1, 0x2130000, 0x2132000, 0x126000}, - {1, 
0x2140000, 0x2142000, 0x128000}, - {1, 0x2150000, 0x2152000, 0x12a000}, - {1, 0x2160000, 0x2170000, 0x110000}, - {1, 0x2170000, 0x2172000, 0x12e000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000} } }, - {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ - {{{0} } }, /* 35: */ - {{{0} } }, /* 36: */ - {{{0} } }, /* 37: */ - {{{0} } }, /* 38: */ - {{{0} } }, /* 39: */ - {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ - {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ - {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ - {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ - {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ - {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ - {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ - {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ - {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ - {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ - {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ - {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ - {{{0} } }, /* 52: */ - {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ - {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ - {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ - {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ - {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ - {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ - {{{0} } }, /* 59: I2C0 */ - {{{0} } }, /* 60: I2C1 */ - {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ - {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ - {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ -}; - -/* - * top 12 bits of crb internal address (hub, agent) - */ -static unsigned crb_hub_agt[64] = -{ - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_PS, - NETXEN_HW_CRB_HUB_AGT_ADR_MN, - NETXEN_HW_CRB_HUB_AGT_ADR_MS, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_SRE, - NETXEN_HW_CRB_HUB_AGT_ADR_NIU, - NETXEN_HW_CRB_HUB_AGT_ADR_QMN, - NETXEN_HW_CRB_HUB_AGT_ADR_SQN0, - NETXEN_HW_CRB_HUB_AGT_ADR_SQN1, - NETXEN_HW_CRB_HUB_AGT_ADR_SQN2, - NETXEN_HW_CRB_HUB_AGT_ADR_SQN3, - NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, - NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, - NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, - NETXEN_HW_CRB_HUB_AGT_ADR_PGN4, - NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, - NETXEN_HW_CRB_HUB_AGT_ADR_PGN0, - NETXEN_HW_CRB_HUB_AGT_ADR_PGN1, - NETXEN_HW_CRB_HUB_AGT_ADR_PGN2, - NETXEN_HW_CRB_HUB_AGT_ADR_PGN3, - NETXEN_HW_CRB_HUB_AGT_ADR_PGND, - NETXEN_HW_CRB_HUB_AGT_ADR_PGNI, - NETXEN_HW_CRB_HUB_AGT_ADR_PGS0, - NETXEN_HW_CRB_HUB_AGT_ADR_PGS1, - NETXEN_HW_CRB_HUB_AGT_ADR_PGS2, - NETXEN_HW_CRB_HUB_AGT_ADR_PGS3, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_PGSI, - NETXEN_HW_CRB_HUB_AGT_ADR_SN, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_EG, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_PS, - NETXEN_HW_CRB_HUB_AGT_ADR_CAM, - 0, - 0, - 0, - 0, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7, - NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, - NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, - NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, - 0, - 
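/*
 * crb_hub_agt[] is indexed by CRB_BLK(off), i.e. bits 25:20 of a
 * 128M-map CRB offset, and consumed by CRB_HI(), which shifts the
 * selected (hub << 7 | agent) value up by 20 bits to form the top of
 * the internal CRB address.  Zero entries mark blocks with no agent
 * mapped behind them.
 */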
NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9, - NETXEN_HW_CRB_HUB_AGT_ADR_OCM0, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_SMB, - NETXEN_HW_CRB_HUB_AGT_ADR_I2C0, - NETXEN_HW_CRB_HUB_AGT_ADR_I2C1, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_PGNC, - 0, -}; - -/* PCI Windowing for DDR regions. */ - -#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ - -#define NETXEN_PCIE_SEM_TIMEOUT 10000 - -static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); - -int -netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg) -{ - int done = 0, timeout = 0; - - while (!done) { - done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_LOCK(sem))); - if (done == 1) - break; - if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT) - return -EIO; - msleep(1); - } - - if (id_reg) - NXWR32(adapter, id_reg, adapter->portnum); - - return 0; -} - -void -netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem) -{ - NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); -} - -static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) -{ - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447); - NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5); - } - - return 0; -} - -/* Disable an XG interface */ -static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) -{ - __u32 mac_cfg; - u32 port = adapter->physical_port; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - return 0; - - if (port > NETXEN_NIU_MAX_XG_PORTS) - return -EINVAL; - - mac_cfg = 0; - if (NXWR32(adapter, - NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg)) - return -EIO; - return 0; -} - -#define NETXEN_UNICAST_ADDR(port, index) \ - (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8)) -#define NETXEN_MCAST_ADDR(port, index) \ - (NETXEN_MULTICAST_ADDR_BASE+(port*0x80)+(index*8)) -#define MAC_HI(addr) \ - ((addr[2] << 16) | (addr[1] << 8) | (addr[0])) -#define MAC_LO(addr) \ - ((addr[5] << 16) | (addr[4] << 8) | (addr[3])) - -static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) -{ - u32 mac_cfg; - u32 cnt = 0; - __u32 reg = 0x0200; - u32 port = adapter->physical_port; - u16 board_type = adapter->ahw.board_type; - - if (port > NETXEN_NIU_MAX_XG_PORTS) - return -EINVAL; - - mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port)); - mac_cfg &= ~0x4; - NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); - - if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) || - (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ)) - reg = (0x20 << port); - - NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg); - - mdelay(10); - - while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20) - mdelay(10); - - if (cnt < 20) { - - reg = NXRD32(adapter, - NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port)); - - if (mode == NETXEN_NIU_PROMISC_MODE) - reg = (reg | 0x2000UL); - else - reg = (reg & ~0x2000UL); - - if (mode == NETXEN_NIU_ALLMULTI_MODE) - reg = (reg | 0x1000UL); - else - reg = (reg & ~0x1000UL); - - NXWR32(adapter, - NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg); - } - - mac_cfg |= 0x4; - NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); - - return 0; -} - -static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) -{ - u32 mac_hi, mac_lo; - u32 reg_hi, reg_lo; - - u8 phy = adapter->physical_port; - - if (phy >= NETXEN_NIU_MAX_XG_PORTS) - return -EINVAL; - - mac_lo = ((u32)addr[0] << 16) | 
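/*
 * Byte-order note: octets 0-1 of the MAC end up in the upper half of
 * the STATION_ADDR_0_1 register (mac_lo), while octets 2-5 fill
 * STATION_ADDR_0_HI (mac_hi), so the packing order here is dictated by
 * the NIU's split station-address layout.
 */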
((u32)addr[1] << 24); - mac_hi = addr[2] | ((u32)addr[3] << 8) | - ((u32)addr[4] << 16) | ((u32)addr[5] << 24); - - reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy); - reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy); - - /* write twice to flush */ - if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) - return -EIO; - if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) - return -EIO; - - return 0; -} - -static int -netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter) -{ - u32 val = 0; - u16 port = adapter->physical_port; - u8 *addr = adapter->mac_addr; - - if (adapter->mc_enabled) - return 0; - - val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); - val |= (1UL << (28+port)); - NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); - - /* add broadcast addr to filter */ - val = 0xffffff; - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); - - /* add station addr to filter */ - val = MAC_HI(addr); - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val); - val = MAC_LO(addr); - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val); - - adapter->mc_enabled = 1; - return 0; -} - -static int -netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter) -{ - u32 val = 0; - u16 port = adapter->physical_port; - u8 *addr = adapter->mac_addr; - - if (!adapter->mc_enabled) - return 0; - - val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); - val &= ~(1UL << (28+port)); - NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); - - val = MAC_HI(addr); - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); - val = MAC_LO(addr); - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); - - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0); - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0); - - adapter->mc_enabled = 0; - return 0; -} - -static int -netxen_nic_set_mcast_addr(struct netxen_adapter *adapter, - int index, u8 *addr) -{ - u32 hi = 0, lo = 0; - u16 port = adapter->physical_port; - - lo = MAC_LO(addr); - hi = MAC_HI(addr); - - NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi); - NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo); - - return 0; -} - -static void netxen_p2_nic_set_multi(struct net_device *netdev) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - struct netdev_hw_addr *ha; - u8 null_addr[6]; - int i; - - memset(null_addr, 0, 6); - - if (netdev->flags & IFF_PROMISC) { - - adapter->set_promisc(adapter, - NETXEN_NIU_PROMISC_MODE); - - /* Full promiscuous mode */ - netxen_nic_disable_mcast_filter(adapter); - - return; - } - - if (netdev_mc_empty(netdev)) { - adapter->set_promisc(adapter, - NETXEN_NIU_NON_PROMISC_MODE); - netxen_nic_disable_mcast_filter(adapter); - return; - } - - adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE); - if (netdev->flags & IFF_ALLMULTI || - netdev_mc_count(netdev) > adapter->max_mc_count) { - netxen_nic_disable_mcast_filter(adapter); - return; - } - - netxen_nic_enable_mcast_filter(adapter); - - i = 0; - netdev_for_each_mc_addr(ha, netdev) - netxen_nic_set_mcast_addr(adapter, i++, ha->addr); - - /* Clear out remaining addresses */ - while (i < adapter->max_mc_count) - netxen_nic_set_mcast_addr(adapter, i++, null_addr); -} - -static int -netxen_send_cmd_descs(struct netxen_adapter *adapter, - struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) -{ - u32 i, producer, consumer; - struct netxen_cmd_buffer *pbuf; - struct cmd_desc_type0 *cmd_desc; - struct nx_host_tx_ring *tx_ring; - - i = 0; - - if (adapter->is_up != 
NETXEN_ADAPTER_UP_MAGIC) - return -EIO; - - tx_ring = adapter->tx_ring; - __netif_tx_lock_bh(tx_ring->txq); - - producer = tx_ring->producer; - consumer = tx_ring->sw_consumer; - - if (nr_desc >= netxen_tx_avail(tx_ring)) { - netif_tx_stop_queue(tx_ring->txq); - smp_mb(); - if (netxen_tx_avail(tx_ring) > nr_desc) { - if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) - netif_tx_wake_queue(tx_ring->txq); - } else { - __netif_tx_unlock_bh(tx_ring->txq); - return -EBUSY; - } - } - - do { - cmd_desc = &cmd_desc_arr[i]; - - pbuf = &tx_ring->cmd_buf_arr[producer]; - pbuf->skb = NULL; - pbuf->frag_count = 0; - - memcpy(&tx_ring->desc_head[producer], - &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); - - producer = get_next_index(producer, tx_ring->num_desc); - i++; - - } while (i != nr_desc); - - tx_ring->producer = producer; - - netxen_nic_update_cmd_producer(adapter, tx_ring); - - __netif_tx_unlock_bh(tx_ring->txq); - - return 0; -} - -static int -nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op) -{ - nx_nic_req_t req; - nx_mac_req_t *mac_req; - u64 word; - - memset(&req, 0, sizeof(nx_nic_req_t)); - req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23); - - word = NX_MAC_EVENT | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - mac_req = (nx_mac_req_t *)&req.words[0]; - mac_req->op = op; - memcpy(mac_req->mac_addr, addr, 6); - - return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); -} - -static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, - const u8 *addr, struct list_head *del_list) -{ - struct list_head *head; - nx_mac_list_t *cur; - - /* look up if already exists */ - list_for_each(head, del_list) { - cur = list_entry(head, nx_mac_list_t, list); - - if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) { - list_move_tail(head, &adapter->mac_list); - return 0; - } - } - - cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC); - if (cur == NULL) { - printk(KERN_ERR "%s: failed to add mac address filter\n", - adapter->netdev->name); - return -ENOMEM; - } - memcpy(cur->mac_addr, addr, ETH_ALEN); - list_add_tail(&cur->list, &adapter->mac_list); - return nx_p3_sre_macaddr_change(adapter, - cur->mac_addr, NETXEN_MAC_ADD); -} - -static void netxen_p3_nic_set_multi(struct net_device *netdev) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - struct netdev_hw_addr *ha; - static const u8 bcast_addr[ETH_ALEN] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff - }; - u32 mode = VPORT_MISS_MODE_DROP; - LIST_HEAD(del_list); - struct list_head *head; - nx_mac_list_t *cur; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return; - - list_splice_tail_init(&adapter->mac_list, &del_list); - - nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list); - nx_p3_nic_add_mac(adapter, bcast_addr, &del_list); - - if (netdev->flags & IFF_PROMISC) { - mode = VPORT_MISS_MODE_ACCEPT_ALL; - goto send_fw_cmd; - } - - if ((netdev->flags & IFF_ALLMULTI) || - (netdev_mc_count(netdev) > adapter->max_mc_count)) { - mode = VPORT_MISS_MODE_ACCEPT_MULTI; - goto send_fw_cmd; - } - - if (!netdev_mc_empty(netdev)) { - netdev_for_each_mc_addr(ha, netdev) - nx_p3_nic_add_mac(adapter, ha->addr, &del_list); - } - -send_fw_cmd: - adapter->set_promisc(adapter, mode); - head = &del_list; - while (!list_empty(head)) { - cur = list_entry(head->next, nx_mac_list_t, list); - - nx_p3_sre_macaddr_change(adapter, - cur->mac_addr, NETXEN_MAC_DEL); - list_del(&cur->list); - kfree(cur); - } -} - -static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) -{ - nx_nic_req_t 
req; - u64 word; - - memset(&req, 0, sizeof(nx_nic_req_t)); - - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE | - ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(mode); - - return netxen_send_cmd_descs(adapter, - (struct cmd_desc_type0 *)&req, 1); -} - -void netxen_p3_free_mac_list(struct netxen_adapter *adapter) -{ - nx_mac_list_t *cur; - struct list_head *head = &adapter->mac_list; - - while (!list_empty(head)) { - cur = list_entry(head->next, nx_mac_list_t, list); - nx_p3_sre_macaddr_change(adapter, - cur->mac_addr, NETXEN_MAC_DEL); - list_del(&cur->list); - kfree(cur); - } -} - -static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) -{ - /* assuming caller has already copied new addr to netdev */ - netxen_p3_nic_set_multi(adapter->netdev); - return 0; -} - -#define NETXEN_CONFIG_INTR_COALESCE 3 - -/* - * Send the interrupt coalescing parameter set by ethtool to the card. - */ -int netxen_config_intr_coalesce(struct netxen_adapter *adapter) -{ - nx_nic_req_t req; - u64 word[6]; - int rv, i; - - memset(&req, 0, sizeof(nx_nic_req_t)); - memset(word, 0, sizeof(word)); - - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word[0]); - - memcpy(&word[0], &adapter->coal, sizeof(adapter->coal)); - for (i = 0; i < 6; i++) - req.words[i] = cpu_to_le64(word[i]); - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "ERROR. Could not send " - "interrupt coalescing parameters\n"); - } - - return rv; -} - -int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable) -{ - nx_nic_req_t req; - u64 word; - int rv = 0; - - if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) - return 0; - - memset(&req, 0, sizeof(nx_nic_req_t)); - - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(enable); - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "ERROR. Could not send " - "configure hw lro request\n"); - } - - return rv; -} - -int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable) -{ - nx_nic_req_t req; - u64 word; - int rv = 0; - - if (!!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED) == enable) - return rv; - - memset(&req, 0, sizeof(nx_nic_req_t)); - - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_CONFIG_BRIDGING | - ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(enable); - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "ERROR. 
Could not send " - "configure bridge mode request\n"); - } - - adapter->flags ^= NETXEN_NIC_BRIDGE_ENABLED; - - return rv; -} - - -#define RSS_HASHTYPE_IP_TCP 0x3 - -int netxen_config_rss(struct netxen_adapter *adapter, int enable) -{ - nx_nic_req_t req; - u64 word; - int i, rv; - - static const u64 key[] = { - 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, - 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, - 0x255b0ec26d5a56daULL - }; - - - memset(&req, 0, sizeof(nx_nic_req_t)); - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - /* - * RSS request: - * bits 3-0: hash_method - * 5-4: hash_type_ipv4 - * 7-6: hash_type_ipv6 - * 8: enable - * 9: use indirection table - * 47-10: reserved - * 63-48: indirection table mask - */ - word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | - ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | - ((u64)(enable & 0x1) << 8) | - ((0x7ULL) << 48); - req.words[0] = cpu_to_le64(word); - for (i = 0; i < ARRAY_SIZE(key); i++) - req.words[i+1] = cpu_to_le64(key[i]); - - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "%s: could not configure RSS\n", - adapter->netdev->name); - } - - return rv; -} - -int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd) -{ - nx_nic_req_t req; - u64 word; - int rv; - - memset(&req, 0, sizeof(nx_nic_req_t)); - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(cmd); - req.words[1] = cpu_to_le64(ip); - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "%s: could not notify %s IP 0x%x reuqest\n", - adapter->netdev->name, - (cmd == NX_IP_UP) ? 
"Add" : "Remove", ip); - } - return rv; -} - -int netxen_linkevent_request(struct netxen_adapter *adapter, int enable) -{ - nx_nic_req_t req; - u64 word; - int rv; - - memset(&req, 0, sizeof(nx_nic_req_t)); - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - req.words[0] = cpu_to_le64(enable | (enable << 8)); - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "%s: could not configure link notification\n", - adapter->netdev->name); - } - - return rv; -} - -int netxen_send_lro_cleanup(struct netxen_adapter *adapter) -{ - nx_nic_req_t req; - u64 word; - int rv; - - if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) - return 0; - - memset(&req, 0, sizeof(nx_nic_req_t)); - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_LRO_REQUEST | - ((u64)adapter->portnum << 16) | - ((u64)NX_NIC_LRO_REQUEST_CLEANUP << 56) ; - - req.req_hdr = cpu_to_le64(word); - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "%s: could not cleanup lro flows\n", - adapter->netdev->name); - } - return rv; -} - -/* - * netxen_nic_change_mtu - Change the Maximum Transfer Unit - * @returns 0 on success, negative on failure - */ - -#define MTU_FUDGE_FACTOR 100 - -int netxen_nic_change_mtu(struct net_device *netdev, int mtu) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - int max_mtu; - int rc = 0; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - max_mtu = P3_MAX_MTU; - else - max_mtu = P2_MAX_MTU; - - if (mtu > max_mtu) { - printk(KERN_ERR "%s: mtu > %d bytes unsupported\n", - netdev->name, max_mtu); - return -EINVAL; - } - - if (adapter->set_mtu) - rc = adapter->set_mtu(adapter, mtu); - - if (!rc) - netdev->mtu = mtu; - - return rc; -} - -static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, - int size, __le32 * buf) -{ - int i, v, addr; - __le32 *ptr32; - - addr = base; - ptr32 = buf; - for (i = 0; i < size / sizeof(u32); i++) { - if (netxen_rom_fast_read(adapter, addr, &v) == -1) - return -1; - *ptr32 = cpu_to_le32(v); - ptr32++; - addr += sizeof(u32); - } - if ((char *)buf + size > (char *)ptr32) { - __le32 local; - if (netxen_rom_fast_read(adapter, addr, &v) == -1) - return -1; - local = cpu_to_le32(v); - memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); - } - - return 0; -} - -int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac) -{ - __le32 *pmac = (__le32 *) mac; - u32 offset; - - offset = NX_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64)); - - if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) - return -1; - - if (*mac == cpu_to_le64(~0ULL)) { - - offset = NX_OLD_MAC_ADDR_OFFSET + - (adapter->portnum * sizeof(u64)); - - if (netxen_get_flash_block(adapter, - offset, sizeof(u64), pmac) == -1) - return -1; - - if (*mac == cpu_to_le64(~0ULL)) - return -1; - } - return 0; -} - -int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac) -{ - uint32_t crbaddr, mac_hi, mac_lo; - int pci_func = adapter->ahw.pci_func; - - crbaddr = CRB_MAC_BLOCK_START + - (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); - - mac_lo = NXRD32(adapter, crbaddr); - mac_hi = NXRD32(adapter, crbaddr+4); - - if (pci_func & 1) - *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16)); - else - *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32)); - - return 0; -} - -/* - * Changes the CRB window to the 
specified window. - */ -static void -netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter, - u32 window) -{ - void __iomem *offset; - int count = 10; - u8 func = adapter->ahw.pci_func; - - if (adapter->ahw.crb_win == window) - return; - - offset = PCI_OFFSET_SECOND_RANGE(adapter, - NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func))); - - writel(window, offset); - do { - if (window == readl(offset)) - break; - - if (printk_ratelimit()) - dev_warn(&adapter->pdev->dev, - "failed to set CRB window to %d\n", - (window == NETXEN_WINDOW_ONE)); - udelay(1); - - } while (--count > 0); - - if (count > 0) - adapter->ahw.crb_win = window; -} - -/* - * Returns < 0 if off is not valid, - * 1 if window access is needed. 'off' is set to offset from - * CRB space in 128M pci map - * 0 if no window access is needed. 'off' is set to 2M addr - * In: 'off' is offset from base in 128M pci map - */ -static int -netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, - ulong off, void __iomem **addr) -{ - crb_128M_2M_sub_block_map_t *m; - - - if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE)) - return -EINVAL; - - off -= NETXEN_PCI_CRBSPACE; - - /* - * Try direct map - */ - m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; - - if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { - *addr = adapter->ahw.pci_base0 + m->start_2M + - (off - m->start_128M); - return 0; - } - - /* - * Not in direct map, use crb window - */ - *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + - (off & MASK(16)); - return 1; -} - -/* - * In: 'off' is offset from CRB space in 128M pci map - * Out: 'off' is 2M pci map addr - * side effect: lock crb window - */ -static void -netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off) -{ - u32 window; - void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M; - - off -= NETXEN_PCI_CRBSPACE; - - window = CRB_HI(off); - - writel(window, addr); - if (readl(addr) != window) { - if (printk_ratelimit()) - dev_warn(&adapter->pdev->dev, - "failed to set CRB window to %d off 0x%lx\n", - window, off); - } -} - -static void __iomem * -netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter, - ulong win_off, void __iomem **mem_ptr) -{ - ulong off = win_off; - void __iomem *addr; - resource_size_t mem_base; - - if (ADDR_IN_WINDOW1(win_off)) - off = NETXEN_CRB_NORMAL(win_off); - - addr = pci_base_offset(adapter, off); - if (addr) - return addr; - - if (adapter->ahw.pci_len0 == 0) - off -= NETXEN_PCI_CRBSPACE; - - mem_base = pci_resource_start(adapter->pdev, 0); - *mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE); - if (*mem_ptr) - addr = *mem_ptr + (off & (PAGE_SIZE - 1)); - - return addr; -} - -static int -netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data) -{ - unsigned long flags; - void __iomem *addr, *mem_ptr = NULL; - - addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); - if (!addr) - return -EIO; - - if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ - netxen_nic_io_write_128M(adapter, addr, data); - } else { /* Window 0 */ - write_lock_irqsave(&adapter->ahw.crb_lock, flags); - netxen_nic_pci_set_crbwindow_128M(adapter, 0); - writel(data, addr); - netxen_nic_pci_set_crbwindow_128M(adapter, - NETXEN_WINDOW_ONE); - write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); - } - - if (mem_ptr) - iounmap(mem_ptr); - - return 0; -} - -static u32 -netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off) -{ - unsigned long flags; - void __iomem *addr, *mem_ptr = NULL; - 
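/*
 * (Illustrative sketch, not from the driver: the write-then-verify idiom
 * the two crb-window setters above rely on.  A PCI write may be posted,
 * so the window register is read back -- and rewritten, in the 128M
 * variant -- until the new value is seen to have latched.)
 *
 *	writel(window, reg);
 *	if (readl(reg) != window)
 *		retry (128M variant) or warn once (2M variant);
 */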
u32 data; - - addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); - if (!addr) - return -EIO; - - if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ - data = netxen_nic_io_read_128M(adapter, addr); - } else { /* Window 0 */ - write_lock_irqsave(&adapter->ahw.crb_lock, flags); - netxen_nic_pci_set_crbwindow_128M(adapter, 0); - data = readl(addr); - netxen_nic_pci_set_crbwindow_128M(adapter, - NETXEN_WINDOW_ONE); - write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); - } - - if (mem_ptr) - iounmap(mem_ptr); - - return data; -} - -static int -netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data) -{ - unsigned long flags; - int rv; - void __iomem *addr = NULL; - - rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); - - if (rv == 0) { - writel(data, addr); - return 0; - } - - if (rv > 0) { - /* indirect access */ - write_lock_irqsave(&adapter->ahw.crb_lock, flags); - crb_win_lock(adapter); - netxen_nic_pci_set_crbwindow_2M(adapter, off); - writel(data, addr); - crb_win_unlock(adapter); - write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); - return 0; - } - - dev_err(&adapter->pdev->dev, - "%s: invalid offset: 0x%016lx\n", __func__, off); - dump_stack(); - return -EIO; -} - -static u32 -netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off) -{ - unsigned long flags; - int rv; - u32 data; - void __iomem *addr = NULL; - - rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); - - if (rv == 0) - return readl(addr); - - if (rv > 0) { - /* indirect access */ - write_lock_irqsave(&adapter->ahw.crb_lock, flags); - crb_win_lock(adapter); - netxen_nic_pci_set_crbwindow_2M(adapter, off); - data = readl(addr); - crb_win_unlock(adapter); - write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); - return data; - } - - dev_err(&adapter->pdev->dev, - "%s: invalid offset: 0x%016lx\n", __func__, off); - dump_stack(); - return -1; -} - -/* window 1 registers only */ -static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, - void __iomem *addr, u32 data) -{ - read_lock(&adapter->ahw.crb_lock); - writel(data, addr); - read_unlock(&adapter->ahw.crb_lock); -} - -static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, - void __iomem *addr) -{ - u32 val; - - read_lock(&adapter->ahw.crb_lock); - val = readl(addr); - read_unlock(&adapter->ahw.crb_lock); - - return val; -} - -static void netxen_nic_io_write_2M(struct netxen_adapter *adapter, - void __iomem *addr, u32 data) -{ - writel(data, addr); -} - -static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter, - void __iomem *addr) -{ - return readl(addr); -} - -void __iomem * -netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset) -{ - void __iomem *addr = NULL; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - if ((offset < NETXEN_CRB_PCIX_HOST2) && - (offset > NETXEN_CRB_PCIX_HOST)) - addr = PCI_OFFSET_SECOND_RANGE(adapter, offset); - else - addr = NETXEN_CRB_NORMALIZE(adapter, offset); - } else { - WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter, - offset, &addr)); - } - - return addr; -} - -static int -netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter, - u64 addr, u32 *start) -{ - if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { - *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0); - return 0; - } else if (ADDR_IN_RANGE(addr, - NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { - *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1); - return 0; - } - - return -EIO; -} - -static int -netxen_nic_pci_set_window_2M(struct netxen_adapter 
*adapter, - u64 addr, u32 *start) -{ - u32 window; - - window = OCM_WIN(addr); - - writel(window, adapter->ahw.ocm_win_crb); - /* read back to flush */ - readl(adapter->ahw.ocm_win_crb); - - adapter->ahw.ocm_win = window; - *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); - return 0; -} - -static int -netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off, - u64 *data, int op) -{ - void __iomem *addr, *mem_ptr = NULL; - resource_size_t mem_base; - int ret; - u32 start; - - spin_lock(&adapter->ahw.mem_lock); - - ret = adapter->pci_set_window(adapter, off, &start); - if (ret != 0) - goto unlock; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - addr = adapter->ahw.pci_base0 + start; - } else { - addr = pci_base_offset(adapter, start); - if (addr) - goto noremap; - - mem_base = pci_resource_start(adapter->pdev, 0) + - (start & PAGE_MASK); - mem_ptr = ioremap(mem_base, PAGE_SIZE); - if (mem_ptr == NULL) { - ret = -EIO; - goto unlock; - } - - addr = mem_ptr + (start & (PAGE_SIZE-1)); - } -noremap: - if (op == 0) /* read */ - *data = readq(addr); - else /* write */ - writeq(*data, addr); - -unlock: - spin_unlock(&adapter->ahw.mem_lock); - - if (mem_ptr) - iounmap(mem_ptr); - return ret; -} - -void -netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data) -{ - void __iomem *addr = adapter->ahw.pci_base0 + - NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); - - spin_lock(&adapter->ahw.mem_lock); - *data = readq(addr); - spin_unlock(&adapter->ahw.mem_lock); -} - -void -netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data) -{ - void __iomem *addr = adapter->ahw.pci_base0 + - NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); - - spin_lock(&adapter->ahw.mem_lock); - writeq(data, addr); - spin_unlock(&adapter->ahw.mem_lock); -} - -#define MAX_CTL_CHECK 1000 - -static int -netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter, - u64 off, u64 data) -{ - int j, ret; - u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; - void __iomem *mem_crb; - - /* Only 64-bit aligned access */ - if (off & 7) - return -EIO; - - /* P2 has different SIU and MIU test agent base addr */ - if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, - NETXEN_ADDR_QDR_NET_MAX_P2)) { - mem_crb = pci_base_offset(adapter, - NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); - addr_hi = SIU_TEST_AGT_ADDR_HI; - data_lo = SIU_TEST_AGT_WRDATA_LO; - data_hi = SIU_TEST_AGT_WRDATA_HI; - off_lo = off & SIU_TEST_AGT_ADDR_MASK; - off_hi = SIU_TEST_AGT_UPPER_ADDR(off); - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { - mem_crb = pci_base_offset(adapter, - NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); - addr_hi = MIU_TEST_AGT_ADDR_HI; - data_lo = MIU_TEST_AGT_WRDATA_LO; - data_hi = MIU_TEST_AGT_WRDATA_HI; - off_lo = off & MIU_TEST_AGT_ADDR_MASK; - off_hi = 0; - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || - ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { - if (adapter->ahw.pci_len0 != 0) { - return netxen_nic_pci_mem_access_direct(adapter, - off, &data, 1); - } - } - - return -EIO; - -correct: - spin_lock(&adapter->ahw.mem_lock); - netxen_nic_pci_set_crbwindow_128M(adapter, 0); - - writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); - writel(off_hi, (mem_crb + addr_hi)); - writel(data & 0xffffffff, (mem_crb + data_lo)); - writel((data >> 32) & 0xffffffff, (mem_crb + data_hi)); - writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE | 
TA_CTL_WRITE), - (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl((mem_crb + TEST_AGT_CTRL)); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&adapter->pdev->dev, - "failed to write through agent\n"); - ret = -EIO; - } else - ret = 0; - - netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); - spin_unlock(&adapter->ahw.mem_lock); - return ret; -} - -static int -netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter, - u64 off, u64 *data) -{ - int j, ret; - u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; - u64 val; - void __iomem *mem_crb; - - /* Only 64-bit aligned access */ - if (off & 7) - return -EIO; - - /* P2 has different SIU and MIU test agent base addr */ - if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, - NETXEN_ADDR_QDR_NET_MAX_P2)) { - mem_crb = pci_base_offset(adapter, - NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); - addr_hi = SIU_TEST_AGT_ADDR_HI; - data_lo = SIU_TEST_AGT_RDDATA_LO; - data_hi = SIU_TEST_AGT_RDDATA_HI; - off_lo = off & SIU_TEST_AGT_ADDR_MASK; - off_hi = SIU_TEST_AGT_UPPER_ADDR(off); - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { - mem_crb = pci_base_offset(adapter, - NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); - addr_hi = MIU_TEST_AGT_ADDR_HI; - data_lo = MIU_TEST_AGT_RDDATA_LO; - data_hi = MIU_TEST_AGT_RDDATA_HI; - off_lo = off & MIU_TEST_AGT_ADDR_MASK; - off_hi = 0; - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || - ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { - if (adapter->ahw.pci_len0 != 0) { - return netxen_nic_pci_mem_access_direct(adapter, - off, data, 0); - } - } - - return -EIO; - -correct: - spin_lock(&adapter->ahw.mem_lock); - netxen_nic_pci_set_crbwindow_128M(adapter, 0); - - writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); - writel(off_hi, (mem_crb + addr_hi)); - writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&adapter->pdev->dev, - "failed to read through agent\n"); - ret = -EIO; - } else { - - temp = readl(mem_crb + data_hi); - val = ((u64)temp << 32); - val |= readl(mem_crb + data_lo); - *data = val; - ret = 0; - } - - netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); - spin_unlock(&adapter->ahw.mem_lock); - - return ret; -} - -static int -netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, - u64 off, u64 data) -{ - int j, ret; - u32 temp, off8; - void __iomem *mem_crb; - - /* Only 64-bit aligned access */ - if (off & 7) - return -EIO; - - /* P3 onward, test agent base for MIU and SIU is same */ - if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, - NETXEN_ADDR_QDR_NET_MAX_P3)) { - mem_crb = netxen_get_ioaddr(adapter, - NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { - mem_crb = netxen_get_ioaddr(adapter, - NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) - return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1); - - return -EIO; - -correct: - off8 = off & 0xfffffff8; - - spin_lock(&adapter->ahw.mem_lock); - - writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); - writel(0, (mem_crb + 
MIU_TEST_AGT_ADDR_HI)); - - writel(data & 0xffffffff, - mem_crb + MIU_TEST_AGT_WRDATA_LO); - writel((data >> 32) & 0xffffffff, - mem_crb + MIU_TEST_AGT_WRDATA_HI); - - writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), - (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&adapter->pdev->dev, - "failed to write through agent\n"); - ret = -EIO; - } else - ret = 0; - - spin_unlock(&adapter->ahw.mem_lock); - - return ret; -} - -static int -netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter, - u64 off, u64 *data) -{ - int j, ret; - u32 temp, off8; - u64 val; - void __iomem *mem_crb; - - /* Only 64-bit aligned access */ - if (off & 7) - return -EIO; - - /* P3 onward, test agent base for MIU and SIU is same */ - if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, - NETXEN_ADDR_QDR_NET_MAX_P3)) { - mem_crb = netxen_get_ioaddr(adapter, - NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { - mem_crb = netxen_get_ioaddr(adapter, - NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { - return netxen_nic_pci_mem_access_direct(adapter, - off, data, 0); - } - - return -EIO; - -correct: - off8 = off & 0xfffffff8; - - spin_lock(&adapter->ahw.mem_lock); - - writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); - writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); - writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&adapter->pdev->dev, - "failed to read through agent\n"); - ret = -EIO; - } else { - val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32; - val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO); - *data = val; - ret = 0; - } - - spin_unlock(&adapter->ahw.mem_lock); - - return ret; -} - -void -netxen_setup_hwops(struct netxen_adapter *adapter) -{ - adapter->init_port = netxen_niu_xg_init_port; - adapter->stop_port = netxen_niu_disable_xg_port; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - adapter->crb_read = netxen_nic_hw_read_wx_128M, - adapter->crb_write = netxen_nic_hw_write_wx_128M, - adapter->pci_set_window = netxen_nic_pci_set_window_128M, - adapter->pci_mem_read = netxen_nic_pci_mem_read_128M, - adapter->pci_mem_write = netxen_nic_pci_mem_write_128M, - adapter->io_read = netxen_nic_io_read_128M, - adapter->io_write = netxen_nic_io_write_128M, - - adapter->macaddr_set = netxen_p2_nic_set_mac_addr; - adapter->set_multi = netxen_p2_nic_set_multi; - adapter->set_mtu = netxen_nic_set_mtu_xgb; - adapter->set_promisc = netxen_p2_nic_set_promisc; - - } else { - adapter->crb_read = netxen_nic_hw_read_wx_2M, - adapter->crb_write = netxen_nic_hw_write_wx_2M, - adapter->pci_set_window = netxen_nic_pci_set_window_2M, - adapter->pci_mem_read = netxen_nic_pci_mem_read_2M, - adapter->pci_mem_write = netxen_nic_pci_mem_write_2M, - adapter->io_read = netxen_nic_io_read_2M, - adapter->io_write = netxen_nic_io_write_2M, - - adapter->set_mtu = nx_fw_cmd_set_mtu; - adapter->set_promisc = netxen_p3_nic_set_promisc; - adapter->macaddr_set = 
netxen_p3_nic_set_mac_addr; - adapter->set_multi = netxen_p3_nic_set_multi; - - adapter->phy_read = nx_fw_cmd_query_phy; - adapter->phy_write = nx_fw_cmd_set_phy; - } -} - -int netxen_nic_get_board_info(struct netxen_adapter *adapter) -{ - int offset, board_type, magic; - struct pci_dev *pdev = adapter->pdev; - - offset = NX_FW_MAGIC_OFFSET; - if (netxen_rom_fast_read(adapter, offset, &magic)) - return -EIO; - - if (magic != NETXEN_BDINFO_MAGIC) { - dev_err(&pdev->dev, "invalid board config, magic=%08x\n", - magic); - return -EIO; - } - - offset = NX_BRDTYPE_OFFSET; - if (netxen_rom_fast_read(adapter, offset, &board_type)) - return -EIO; - - if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) { - u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I); - if ((gpio & 0x8000) == 0) - board_type = NETXEN_BRDTYPE_P3_10G_TP; - } - - adapter->ahw.board_type = board_type; - - switch (board_type) { - case NETXEN_BRDTYPE_P2_SB35_4G: - adapter->ahw.port_type = NETXEN_NIC_GBE; - break; - case NETXEN_BRDTYPE_P2_SB31_10G: - case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: - case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: - case NETXEN_BRDTYPE_P2_SB31_10G_CX4: - case NETXEN_BRDTYPE_P3_HMEZ: - case NETXEN_BRDTYPE_P3_XG_LOM: - case NETXEN_BRDTYPE_P3_10G_CX4: - case NETXEN_BRDTYPE_P3_10G_CX4_LP: - case NETXEN_BRDTYPE_P3_IMEZ: - case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: - case NETXEN_BRDTYPE_P3_10G_SFP_CT: - case NETXEN_BRDTYPE_P3_10G_SFP_QT: - case NETXEN_BRDTYPE_P3_10G_XFP: - case NETXEN_BRDTYPE_P3_10000_BASE_T: - adapter->ahw.port_type = NETXEN_NIC_XGBE; - break; - case NETXEN_BRDTYPE_P1_BD: - case NETXEN_BRDTYPE_P1_SB: - case NETXEN_BRDTYPE_P1_SMAX: - case NETXEN_BRDTYPE_P1_SOCK: - case NETXEN_BRDTYPE_P3_REF_QG: - case NETXEN_BRDTYPE_P3_4_GB: - case NETXEN_BRDTYPE_P3_4_GB_MM: - adapter->ahw.port_type = NETXEN_NIC_GBE; - break; - case NETXEN_BRDTYPE_P3_10G_TP: - adapter->ahw.port_type = (adapter->portnum < 2) ? 
- NETXEN_NIC_XGBE : NETXEN_NIC_GBE; - break; - default: - dev_err(&pdev->dev, "unknown board type %x\n", board_type); - adapter->ahw.port_type = NETXEN_NIC_XGBE; - break; - } - - return 0; -} - -/* NIU access sections */ -static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) -{ - new_mtu += MTU_FUDGE_FACTOR; - if (adapter->physical_port == 0) - NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu); - else - NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu); - return 0; -} - -void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) -{ - __u32 status; - __u32 autoneg; - __u32 port_mode; - - if (!netif_carrier_ok(adapter->netdev)) { - adapter->link_speed = 0; - adapter->link_duplex = -1; - adapter->link_autoneg = AUTONEG_ENABLE; - return; - } - - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); - if (port_mode == NETXEN_PORT_MODE_802_3_AP) { - adapter->link_speed = SPEED_1000; - adapter->link_duplex = DUPLEX_FULL; - adapter->link_autoneg = AUTONEG_DISABLE; - return; - } - - if (adapter->phy_read && - adapter->phy_read(adapter, - NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, - &status) == 0) { - if (netxen_get_phy_link(status)) { - switch (netxen_get_phy_speed(status)) { - case 0: - adapter->link_speed = SPEED_10; - break; - case 1: - adapter->link_speed = SPEED_100; - break; - case 2: - adapter->link_speed = SPEED_1000; - break; - default: - adapter->link_speed = 0; - break; - } - switch (netxen_get_phy_duplex(status)) { - case 0: - adapter->link_duplex = DUPLEX_HALF; - break; - case 1: - adapter->link_duplex = DUPLEX_FULL; - break; - default: - adapter->link_duplex = -1; - break; - } - if (adapter->phy_read && - adapter->phy_read(adapter, - NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, - &autoneg) != 0) - adapter->link_autoneg = autoneg; - } else - goto link_down; - } else { - link_down: - adapter->link_speed = 0; - adapter->link_duplex = -1; - } - } -} - -int -netxen_nic_wol_supported(struct netxen_adapter *adapter) -{ - u32 wol_cfg; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return 0; - - wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); - if (wol_cfg & (1UL << adapter->portnum)) { - wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); - if (wol_cfg & (1 << adapter->portnum)) - return 1; - } - - return 0; -} diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h deleted file mode 100644 index e2c5b6f2df03..000000000000 --- a/drivers/net/netxen/netxen_nic_hw.h +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". 
- * - */ - -#ifndef __NETXEN_NIC_HW_H_ -#define __NETXEN_NIC_HW_H_ - -/* Hardware memory size of 128 meg */ -#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024) - -struct netxen_adapter; - -#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20) - -void netxen_nic_set_link_parameters(struct netxen_adapter *adapter); - -/* Nibble or Byte mode for phy interface (GbE mode only) */ - -#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1) - -/* - * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3) - * - * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable - * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream - * Bit 2 : enable_rx => 1:enable frame recv, 0:disable - * Bit 3 : rx_synced => R/O: recv enable synched to recv stream - * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable - * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore - * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal - * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op - * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op - * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op - * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op - * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op - */ - -#define netxen_gb_tx_flowctl(config_word) \ - ((config_word) |= 1 << 4) -#define netxen_gb_rx_flowctl(config_word) \ - ((config_word) |= 1 << 5) -#define netxen_gb_tx_reset_pb(config_word) \ - ((config_word) |= 1 << 16) -#define netxen_gb_rx_reset_pb(config_word) \ - ((config_word) |= 1 << 17) -#define netxen_gb_tx_reset_mac(config_word) \ - ((config_word) |= 1 << 18) -#define netxen_gb_rx_reset_mac(config_word) \ - ((config_word) |= 1 << 19) - -#define netxen_gb_unset_tx_flowctl(config_word) \ - ((config_word) &= ~(1 << 4)) -#define netxen_gb_unset_rx_flowctl(config_word) \ - ((config_word) &= ~(1 << 5)) - -#define netxen_gb_get_tx_synced(config_word) \ - _netxen_crb_get_bit((config_word), 1) -#define netxen_gb_get_rx_synced(config_word) \ - _netxen_crb_get_bit((config_word), 3) -#define netxen_gb_get_tx_flowctl(config_word) \ - _netxen_crb_get_bit((config_word), 4) -#define netxen_gb_get_rx_flowctl(config_word) \ - _netxen_crb_get_bit((config_word), 5) -#define netxen_gb_get_soft_reset(config_word) \ - _netxen_crb_get_bit((config_word), 31) - -#define netxen_gb_get_stationaddress_low(config_word) ((config_word) >> 16) - -#define netxen_gb_set_mii_mgmt_clockselect(config_word, val) \ - ((config_word) |= ((val) & 0x07)) -#define netxen_gb_mii_mgmt_reset(config_word) \ - ((config_word) |= 1 << 31) -#define netxen_gb_mii_mgmt_unset(config_word) \ - ((config_word) &= ~(1 << 31)) - -/* - * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3) - * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op - * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op - */ - -#define netxen_gb_mii_mgmt_set_read_cycle(config_word) \ - ((config_word) |= 1 << 0) -#define netxen_gb_mii_mgmt_reg_addr(config_word, val) \ - ((config_word) |= ((val) & 0x1F)) -#define netxen_gb_mii_mgmt_phy_addr(config_word, val) \ - ((config_word) |= (((val) & 0x1F) << 8)) - -/* - * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3) - * Read-only register. 
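 * A management cycle is normally driven by starting a read and then
 * polling the busy bit below until it clears, e.g. (illustrative sketch,
 * not from the driver; mgmt_ind_reg stands in for this register's CRB
 * offset on the port):
 *
 *	u32 ind;
 *	do {
 *		ind = NXRD32(adapter, mgmt_ind_reg);
 *	} while (netxen_get_gb_mii_mgmt_busy(ind));
 *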
- * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle - * Bit 1 : scanning => 1:scan operation in progress, 0:idle - * Bit 2 : notvalid => 1:mgmt result data not yet valid, 0:idle - */ -#define netxen_get_gb_mii_mgmt_busy(config_word) \ - _netxen_crb_get_bit(config_word, 0) -#define netxen_get_gb_mii_mgmt_scanning(config_word) \ - _netxen_crb_get_bit(config_word, 1) -#define netxen_get_gb_mii_mgmt_notvalid(config_word) \ - _netxen_crb_get_bit(config_word, 2) -/* - * NIU XG Pause Ctl Register - * - * Bit 0 : xg0_mask => 1:disable tx pause frames - * Bit 1 : xg0_request => 1:request single pause frame - * Bit 2 : xg0_on_off => 1:request is pause on, 0:off - * Bit 3 : xg1_mask => 1:disable tx pause frames - * Bit 4 : xg1_request => 1:request single pause frame - * Bit 5 : xg1_on_off => 1:request is pause on, 0:off - */ - -#define netxen_xg_set_xg0_mask(config_word) \ - ((config_word) |= 1 << 0) -#define netxen_xg_set_xg1_mask(config_word) \ - ((config_word) |= 1 << 3) - -#define netxen_xg_get_xg0_mask(config_word) \ - _netxen_crb_get_bit((config_word), 0) -#define netxen_xg_get_xg1_mask(config_word) \ - _netxen_crb_get_bit((config_word), 3) - -#define netxen_xg_unset_xg0_mask(config_word) \ - ((config_word) &= ~(1 << 0)) -#define netxen_xg_unset_xg1_mask(config_word) \ - ((config_word) &= ~(1 << 3)) - -/* - * NIU GB Pause Ctl Register - * - * Bit 0 : gb0_mask => 1:disable tx pause frames - * Bit 2 : gb1_mask => 1:disable tx pause frames - * Bit 4 : gb2_mask => 1:disable tx pause frames - * Bit 6 : gb3_mask => 1:disable tx pause frames - */ -#define netxen_gb_set_gb0_mask(config_word) \ - ((config_word) |= 1 << 0) -#define netxen_gb_set_gb1_mask(config_word) \ - ((config_word) |= 1 << 2) -#define netxen_gb_set_gb2_mask(config_word) \ - ((config_word) |= 1 << 4) -#define netxen_gb_set_gb3_mask(config_word) \ - ((config_word) |= 1 << 6) - -#define netxen_gb_get_gb0_mask(config_word) \ - _netxen_crb_get_bit((config_word), 0) -#define netxen_gb_get_gb1_mask(config_word) \ - _netxen_crb_get_bit((config_word), 2) -#define netxen_gb_get_gb2_mask(config_word) \ - _netxen_crb_get_bit((config_word), 4) -#define netxen_gb_get_gb3_mask(config_word) \ - _netxen_crb_get_bit((config_word), 6) - -#define netxen_gb_unset_gb0_mask(config_word) \ - ((config_word) &= ~(1 << 0)) -#define netxen_gb_unset_gb1_mask(config_word) \ - ((config_word) &= ~(1 << 2)) -#define netxen_gb_unset_gb2_mask(config_word) \ - ((config_word) &= ~(1 << 4)) -#define netxen_gb_unset_gb3_mask(config_word) \ - ((config_word) &= ~(1 << 6)) - - -/* - * PHY-Specific MII control/status registers.
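 * Addresses 0-15 are the standard IEEE 802.3 clause-22 MII registers;
 * 16 and up are extensions specific to this PHY, which is why the
 * PHY_STATUS decode macros below apply only to register 17 of this part.
 * Link state is typically read through the adapter's phy_read hook and
 * decoded with those macros, e.g. (illustrative sketch, not from the
 * driver):
 *
 *	u32 status;
 *	if (adapter->phy_read(adapter,
 *	    NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, &status) == 0 &&
 *	    netxen_get_phy_link(status))
 *		speed = netxen_get_phy_speed(status);  (0=10, 1=100, 2=1000 Mb/s)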
- */ -#define NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL 0 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS 1 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 2 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 3 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG 4 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART 5 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE 6 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT 7 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE 8 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL 9 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS 10 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS 15 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL 16 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE 18 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS 19 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE 20 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT 21 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL 24 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE 25 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET 26 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE 27 - -/* - * PHY-Specific Status Register (reg 17). - * - * Bit 0 : jabber => 1:jabber detected, 0:not - * Bit 1 : polarity => 1:polarity reversed, 0:normal - * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled - * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled - * Bit 4 : energydetect => 1:sleep, 0:active - * Bit 5 : downshift => 1:downshift, 0:no downshift - * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover) - * Bits 7-9 : cablelen => not valid in 10Mb/s mode - * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m - * Bit 10 : link => 1:link up, 0:link down - * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet - * Bit 12 : pagercvd => 1:page received, 0:page not received - * Bit 13 : duplex => 1:full duplex, 0:half duplex - * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd - */ - -#define netxen_get_phy_speed(config_word) (((config_word) >> 14) & 0x03) - -#define netxen_set_phy_speed(config_word, val) \ - ((config_word) |= ((val & 0x03) << 14)) -#define netxen_set_phy_duplex(config_word) \ - ((config_word) |= 1 << 13) -#define netxen_clear_phy_duplex(config_word) \ - ((config_word) &= ~(1 << 13)) - -#define netxen_get_phy_link(config_word) \ - _netxen_crb_get_bit(config_word, 10) -#define netxen_get_phy_duplex(config_word) \ - _netxen_crb_get_bit(config_word, 13) - -/* - * NIU Mode Register. 
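 * One enable bit per MAC type; netxen_get_niu_enable_ge() below tests
 * bit 1 to decide whether a port is running in GbE rather than 10G mode.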
- * Bit 0 : enable FibreChannel - * Bit 1 : enable 10/100/1000 Ethernet - * Bit 2 : enable 10Gb Ethernet - */ - -#define netxen_get_niu_enable_ge(config_word) \ - _netxen_crb_get_bit(config_word, 1) - -#define NETXEN_NIU_NON_PROMISC_MODE 0 -#define NETXEN_NIU_PROMISC_MODE 1 -#define NETXEN_NIU_ALLMULTI_MODE 2 - -/* - * NIU XG MAC Config Register - * - * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable - * Bit 2 : rx_enable => 1:enable frame recv, 0:disable - * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op - * Bit 27: xaui_framer_reset - * Bit 28: xaui_rx_reset - * Bit 29: xaui_tx_reset - * Bit 30: xg_ingress_afifo_reset - * Bit 31: xg_egress_afifo_reset - */ - -#define netxen_xg_soft_reset(config_word) \ - ((config_word) |= 1 << 4) - -typedef struct { - unsigned valid; - unsigned start_128M; - unsigned end_128M; - unsigned start_2M; -} crb_128M_2M_sub_block_map_t; - -typedef struct { - crb_128M_2M_sub_block_map_t sub_block[16]; -} crb_128M_2M_block_map_t; - -#endif /* __NETXEN_NIC_HW_H_ */ diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c deleted file mode 100644 index d6c6357de6aa..000000000000 --- a/drivers/net/netxen/netxen_nic_init.c +++ /dev/null @@ -1,1949 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". 
- * - */ - -#include -#include -#include -#include -#include "netxen_nic.h" -#include "netxen_nic_hw.h" - -struct crb_addr_pair { - u32 addr; - u32 data; -}; - -#define NETXEN_MAX_CRB_XFORM 60 -static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM]; -#define NETXEN_ADDR_ERROR (0xffffffff) - -#define crb_addr_transform(name) \ - crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \ - NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20 - -#define NETXEN_NIC_XDMA_RESET 0x8000ff - -static void -netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, - struct nx_host_rds_ring *rds_ring); -static int netxen_p3_has_mn(struct netxen_adapter *adapter); - -static void crb_addr_transform_setup(void) -{ - crb_addr_transform(XDMA); - crb_addr_transform(TIMR); - crb_addr_transform(SRE); - crb_addr_transform(SQN3); - crb_addr_transform(SQN2); - crb_addr_transform(SQN1); - crb_addr_transform(SQN0); - crb_addr_transform(SQS3); - crb_addr_transform(SQS2); - crb_addr_transform(SQS1); - crb_addr_transform(SQS0); - crb_addr_transform(RPMX7); - crb_addr_transform(RPMX6); - crb_addr_transform(RPMX5); - crb_addr_transform(RPMX4); - crb_addr_transform(RPMX3); - crb_addr_transform(RPMX2); - crb_addr_transform(RPMX1); - crb_addr_transform(RPMX0); - crb_addr_transform(ROMUSB); - crb_addr_transform(SN); - crb_addr_transform(QMN); - crb_addr_transform(QMS); - crb_addr_transform(PGNI); - crb_addr_transform(PGND); - crb_addr_transform(PGN3); - crb_addr_transform(PGN2); - crb_addr_transform(PGN1); - crb_addr_transform(PGN0); - crb_addr_transform(PGSI); - crb_addr_transform(PGSD); - crb_addr_transform(PGS3); - crb_addr_transform(PGS2); - crb_addr_transform(PGS1); - crb_addr_transform(PGS0); - crb_addr_transform(PS); - crb_addr_transform(PH); - crb_addr_transform(NIU); - crb_addr_transform(I2Q); - crb_addr_transform(EG); - crb_addr_transform(MN); - crb_addr_transform(MS); - crb_addr_transform(CAS2); - crb_addr_transform(CAS1); - crb_addr_transform(CAS0); - crb_addr_transform(CAM); - crb_addr_transform(C2C1); - crb_addr_transform(C2C0); - crb_addr_transform(SMB); - crb_addr_transform(OCM0); - crb_addr_transform(I2C0); -} - -void netxen_release_rx_buffers(struct netxen_adapter *adapter) -{ - struct netxen_recv_context *recv_ctx; - struct nx_host_rds_ring *rds_ring; - struct netxen_rx_buffer *rx_buf; - int i, ring; - - recv_ctx = &adapter->recv_ctx; - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - for (i = 0; i < rds_ring->num_desc; ++i) { - rx_buf = &(rds_ring->rx_buf_arr[i]); - if (rx_buf->state == NETXEN_BUFFER_FREE) - continue; - pci_unmap_single(adapter->pdev, - rx_buf->dma, - rds_ring->dma_size, - PCI_DMA_FROMDEVICE); - if (rx_buf->skb != NULL) - dev_kfree_skb_any(rx_buf->skb); - } - } -} - -void netxen_release_tx_buffers(struct netxen_adapter *adapter) -{ - struct netxen_cmd_buffer *cmd_buf; - struct netxen_skb_frag *buffrag; - int i, j; - struct nx_host_tx_ring *tx_ring = adapter->tx_ring; - - cmd_buf = tx_ring->cmd_buf_arr; - for (i = 0; i < tx_ring->num_desc; i++) { - buffrag = cmd_buf->frag_array; - if (buffrag->dma) { - pci_unmap_single(adapter->pdev, buffrag->dma, - buffrag->length, PCI_DMA_TODEVICE); - buffrag->dma = 0ULL; - } - for (j = 0; j < cmd_buf->frag_count; j++) { - buffrag++; - if (buffrag->dma) { - pci_unmap_page(adapter->pdev, buffrag->dma, - buffrag->length, - PCI_DMA_TODEVICE); - buffrag->dma = 0ULL; - } - } - if (cmd_buf->skb) { - dev_kfree_skb_any(cmd_buf->skb); - cmd_buf->skb = NULL; - } - cmd_buf++; - } -} - -void netxen_free_sw_resources(struct 
netxen_adapter *adapter) -{ - struct netxen_recv_context *recv_ctx; - struct nx_host_rds_ring *rds_ring; - struct nx_host_tx_ring *tx_ring; - int ring; - - recv_ctx = &adapter->recv_ctx; - - if (recv_ctx->rds_rings == NULL) - goto skip_rds; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - vfree(rds_ring->rx_buf_arr); - rds_ring->rx_buf_arr = NULL; - } - kfree(recv_ctx->rds_rings); - -skip_rds: - if (adapter->tx_ring == NULL) - return; - - tx_ring = adapter->tx_ring; - vfree(tx_ring->cmd_buf_arr); - kfree(tx_ring); - adapter->tx_ring = NULL; -} - -int netxen_alloc_sw_resources(struct netxen_adapter *adapter) -{ - struct netxen_recv_context *recv_ctx; - struct nx_host_rds_ring *rds_ring; - struct nx_host_sds_ring *sds_ring; - struct nx_host_tx_ring *tx_ring; - struct netxen_rx_buffer *rx_buf; - int ring, i, size; - - struct netxen_cmd_buffer *cmd_buf_arr; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - - size = sizeof(struct nx_host_tx_ring); - tx_ring = kzalloc(size, GFP_KERNEL); - if (tx_ring == NULL) { - dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n", - netdev->name); - return -ENOMEM; - } - adapter->tx_ring = tx_ring; - - tx_ring->num_desc = adapter->num_txd; - tx_ring->txq = netdev_get_tx_queue(netdev, 0); - - cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); - if (cmd_buf_arr == NULL) { - dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n", - netdev->name); - goto err_out; - } - tx_ring->cmd_buf_arr = cmd_buf_arr; - - recv_ctx = &adapter->recv_ctx; - - size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring); - rds_ring = kzalloc(size, GFP_KERNEL); - if (rds_ring == NULL) { - dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n", - netdev->name); - goto err_out; - } - recv_ctx->rds_rings = rds_ring; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - switch (ring) { - case RCV_RING_NORMAL: - rds_ring->num_desc = adapter->num_rxd; - if (adapter->ahw.cut_through) { - rds_ring->dma_size = - NX_CT_DEFAULT_RX_BUF_LEN; - rds_ring->skb_size = - NX_CT_DEFAULT_RX_BUF_LEN; - } else { - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - rds_ring->dma_size = - NX_P3_RX_BUF_MAX_LEN; - else - rds_ring->dma_size = - NX_P2_RX_BUF_MAX_LEN; - rds_ring->skb_size = - rds_ring->dma_size + NET_IP_ALIGN; - } - break; - - case RCV_RING_JUMBO: - rds_ring->num_desc = adapter->num_jumbo_rxd; - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - rds_ring->dma_size = - NX_P3_RX_JUMBO_BUF_MAX_LEN; - else - rds_ring->dma_size = - NX_P2_RX_JUMBO_BUF_MAX_LEN; - - if (adapter->capabilities & NX_CAP0_HW_LRO) - rds_ring->dma_size += NX_LRO_BUFFER_EXTRA; - - rds_ring->skb_size = - rds_ring->dma_size + NET_IP_ALIGN; - break; - - case RCV_RING_LRO: - rds_ring->num_desc = adapter->num_lro_rxd; - rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH; - rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; - break; - - } - rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); - if (rds_ring->rx_buf_arr == NULL) { - printk(KERN_ERR "%s: Failed to allocate " - "rx buffer ring %d\n", - netdev->name, ring); - /* free whatever was already allocated */ - goto err_out; - } - INIT_LIST_HEAD(&rds_ring->free_list); - /* - * Now go through all of them, set reference handles - * and put them in the queues. 
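 * The ref_handle set here travels with each posted descriptor and comes
 * back in the card's receive completion, so the fast path can map a
 * completion straight to its buffer, e.g. (illustrative sketch, not from
 * the driver):
 *
 *	buffer = &rds_ring->rx_buf_arr[ref_handle];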
- */ - rx_buf = rds_ring->rx_buf_arr; - for (i = 0; i < rds_ring->num_desc; i++) { - list_add_tail(&rx_buf->list, - &rds_ring->free_list); - rx_buf->ref_handle = i; - rx_buf->state = NETXEN_BUFFER_FREE; - rx_buf++; - } - spin_lock_init(&rds_ring->lock); - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - sds_ring->irq = adapter->msix_entries[ring].vector; - sds_ring->adapter = adapter; - sds_ring->num_desc = adapter->num_rxd; - - for (i = 0; i < NUM_RCV_DESC_RINGS; i++) - INIT_LIST_HEAD(&sds_ring->free_list[i]); - } - - return 0; - -err_out: - netxen_free_sw_resources(adapter); - return -ENOMEM; -} - -/* - * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB - * address to external PCI CRB address. - */ -static u32 netxen_decode_crb_addr(u32 addr) -{ - int i; - u32 base_addr, offset, pci_base; - - crb_addr_transform_setup(); - - pci_base = NETXEN_ADDR_ERROR; - base_addr = addr & 0xfff00000; - offset = addr & 0x000fffff; - - for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) { - if (crb_addr_xform[i] == base_addr) { - pci_base = i << 20; - break; - } - } - if (pci_base == NETXEN_ADDR_ERROR) - return pci_base; - else - return pci_base + offset; -} - -#define NETXEN_MAX_ROM_WAIT_USEC 100 - -static int netxen_wait_rom_done(struct netxen_adapter *adapter) -{ - long timeout = 0; - long done = 0; - - cond_resched(); - - while (done == 0) { - done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS); - done &= 2; - if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) { - dev_err(&adapter->pdev->dev, - "Timeout reached waiting for rom done"); - return -EIO; - } - udelay(1); - } - return 0; -} - -static int do_rom_fast_read(struct netxen_adapter *adapter, - int addr, int *valp) -{ - NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); - NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); - NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3); - NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb); - if (netxen_wait_rom_done(adapter)) { - printk("Error waiting for rom done\n"); - return -EIO; - } - /* reset abyte_cnt and dummy_byte_cnt */ - NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); - udelay(10); - NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); - - *valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA); - return 0; -} - -static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr, - u8 *bytes, size_t size) -{ - int addridx; - int ret = 0; - - for (addridx = addr; addridx < (addr + size); addridx += 4) { - int v; - ret = do_rom_fast_read(adapter, addridx, &v); - if (ret != 0) - break; - *(__le32 *)bytes = cpu_to_le32(v); - bytes += 4; - } - - return ret; -} - -int -netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, - u8 *bytes, size_t size) -{ - int ret; - - ret = netxen_rom_lock(adapter); - if (ret < 0) - return ret; - - ret = do_rom_fast_read_words(adapter, addr, bytes, size); - - netxen_rom_unlock(adapter); - return ret; -} - -int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp) -{ - int ret; - - if (netxen_rom_lock(adapter) != 0) - return -EIO; - - ret = do_rom_fast_read(adapter, addr, valp); - netxen_rom_unlock(adapter); - return ret; -} - -#define NETXEN_BOARDTYPE 0x4008 -#define NETXEN_BOARDNUM 0x400c -#define NETXEN_CHIPNUM 0x4010 - -int netxen_pinit_from_rom(struct netxen_adapter *adapter) -{ - int addr, val; - int i, n, init_delay = 0; - struct crb_addr_pair *buf; - unsigned offset; - u32 off; - - /* resetall */ - netxen_rom_lock(adapter); - NXWR32(adapter,
NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff); - netxen_rom_unlock(adapter); - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - if (netxen_rom_fast_read(adapter, 0, &n) != 0 || - (n != 0xcafecafe) || - netxen_rom_fast_read(adapter, 4, &n) != 0) { - printk(KERN_ERR "%s: ERROR Reading crb_init area: " - "n: %08x\n", netxen_nic_driver_name, n); - return -EIO; - } - offset = n & 0xffffU; - n = (n >> 16) & 0xffffU; - } else { - if (netxen_rom_fast_read(adapter, 0, &n) != 0 || - !(n & 0x80000000)) { - printk(KERN_ERR "%s: ERROR Reading crb_init area: " - "n: %08x\n", netxen_nic_driver_name, n); - return -EIO; - } - offset = 1; - n &= ~0x80000000; - } - - if (n >= 1024) { - printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not" - " initialized.\n", __func__, n); - return -EIO; - } - - buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); - if (buf == NULL) { - printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n", - netxen_nic_driver_name); - return -ENOMEM; - } - - for (i = 0; i < n; i++) { - if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || - netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { - kfree(buf); - return -EIO; - } - - buf[i].addr = addr; - buf[i].data = val; - - } - - for (i = 0; i < n; i++) { - - off = netxen_decode_crb_addr(buf[i].addr); - if (off == NETXEN_ADDR_ERROR) { - printk(KERN_ERR"CRB init value out of range %x\n", - buf[i].addr); - continue; - } - off += NETXEN_PCI_CRBSPACE; - - if (off & 1) - continue; - - /* skipping cold reboot MAGIC */ - if (off == NETXEN_CAM_RAM(0x1fc)) - continue; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - if (off == (NETXEN_CRB_I2C0 + 0x1c)) - continue; - /* do not reset PCI */ - if (off == (ROMUSB_GLB + 0xbc)) - continue; - if (off == (ROMUSB_GLB + 0xa8)) - continue; - if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ - continue; - if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ - continue; - if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ - continue; - if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET) - continue; - if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) && - !NX_IS_REVISION_P3P(adapter->ahw.revision_id)) - buf[i].data = 0x1020; - /* skip the function enable register */ - if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION)) - continue; - if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2)) - continue; - if ((off & 0x0ff00000) == NETXEN_CRB_SMB) - continue; - } - - init_delay = 1; - /* After writing this register, HW needs time for CRB */ - /* to quiet down (else crb_window returns 0xffffffff) */ - if (off == NETXEN_ROMUSB_GLB_SW_RESET) { - init_delay = 1000; - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - /* hold xdma in reset also */ - buf[i].data = NETXEN_NIC_XDMA_RESET; - buf[i].data = 0x8000ff; - } - } - - NXWR32(adapter, off, buf[i].data); - - msleep(init_delay); - } - kfree(buf); - - /* disable_peg_cache_all */ - - /* unreset_net_cache */ - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); - NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f)); - } - - /* p2dn replyCount */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e); - /* disable_peg_cache 0 */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8); - /* disable_peg_cache 1 */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8); - - /* peg_clr_all */ - - /* peg_clr 0 */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0); - NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0); - /* peg_clr 1 */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0); - NXWR32(adapter, NETXEN_CRB_PEG_NET_1 
+ 0xc, 0); - /* peg_clr 2 */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0); - NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0); - /* peg_clr 3 */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0); - NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0); - return 0; -} - -static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section) -{ - uint32_t i; - struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; - __le32 entries = cpu_to_le32(directory->num_entries); - - for (i = 0; i < entries; i++) { - - __le32 offs = cpu_to_le32(directory->findex) + - (i * cpu_to_le32(directory->entry_size)); - __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); - - if (tab_type == section) - return (struct uni_table_desc *) &unirom[offs]; - } - - return NULL; -} - -#define QLCNIC_FILEHEADER_SIZE (14 * 4) - -static int -netxen_nic_validate_header(struct netxen_adapter *adapter) - { - const u8 *unirom = adapter->fw->data; - struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; - u32 fw_file_size = adapter->fw->size; - u32 tab_size; - __le32 entries; - __le32 entry_size; - - if (fw_file_size < QLCNIC_FILEHEADER_SIZE) - return -EINVAL; - - entries = cpu_to_le32(directory->num_entries); - entry_size = cpu_to_le32(directory->entry_size); - tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); - - if (fw_file_size < tab_size) - return -EINVAL; - - return 0; -} - -static int -netxen_nic_validate_bootld(struct netxen_adapter *adapter) -{ - struct uni_table_desc *tab_desc; - struct uni_data_desc *descr; - const u8 *unirom = adapter->fw->data; - __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + - NX_UNI_BOOTLD_IDX_OFF)); - u32 offs; - u32 tab_size; - u32 data_size; - - tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD); - - if (!tab_desc) - return -EINVAL; - - tab_size = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); - - if (adapter->fw->size < tab_size) - return -EINVAL; - - offs = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx)); - descr = (struct uni_data_desc *)&unirom[offs]; - - data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); - - if (adapter->fw->size < data_size) - return -EINVAL; - - return 0; -} - -static int -netxen_nic_validate_fw(struct netxen_adapter *adapter) -{ - struct uni_table_desc *tab_desc; - struct uni_data_desc *descr; - const u8 *unirom = adapter->fw->data; - __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + - NX_UNI_FIRMWARE_IDX_OFF)); - u32 offs; - u32 tab_size; - u32 data_size; - - tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW); - - if (!tab_desc) - return -EINVAL; - - tab_size = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); - - if (adapter->fw->size < tab_size) - return -EINVAL; - - offs = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx)); - descr = (struct uni_data_desc *)&unirom[offs]; - data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); - - if (adapter->fw->size < data_size) - return -EINVAL; - - return 0; -} - - -static int -netxen_nic_validate_product_offs(struct netxen_adapter *adapter) -{ - struct uni_table_desc *ptab_descr; - const u8 *unirom = adapter->fw->data; - int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ? 
- 1 : netxen_p3_has_mn(adapter); - __le32 entries; - __le32 entry_size; - u32 tab_size; - u32 i; - - ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL); - if (ptab_descr == NULL) - return -EINVAL; - - entries = cpu_to_le32(ptab_descr->num_entries); - entry_size = cpu_to_le32(ptab_descr->entry_size); - tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); - - if (adapter->fw->size < tab_size) - return -EINVAL; - -nomn: - for (i = 0; i < entries; i++) { - - __le32 flags, file_chiprev, offs; - u8 chiprev = adapter->ahw.revision_id; - uint32_t flagbit; - - offs = cpu_to_le32(ptab_descr->findex) + - (i * cpu_to_le32(ptab_descr->entry_size)); - flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF)); - file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + - NX_UNI_CHIP_REV_OFF)); - - flagbit = mn_present ? 1 : 2; - - if ((chiprev == file_chiprev) && - ((1ULL << flagbit) & flags)) { - adapter->file_prd_off = offs; - return 0; - } - } - - if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - mn_present = 0; - goto nomn; - } - - return -EINVAL; -} - -static int -netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter) -{ - if (netxen_nic_validate_header(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: header validation failed\n"); - return -EINVAL; - } - - if (netxen_nic_validate_product_offs(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: product validation failed\n"); - return -EINVAL; - } - - if (netxen_nic_validate_bootld(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: bootld validation failed\n"); - return -EINVAL; - } - - if (netxen_nic_validate_fw(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: firmware validation failed\n"); - return -EINVAL; - } - - return 0; -} - -static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter, - u32 section, u32 idx_offset) -{ - const u8 *unirom = adapter->fw->data; - int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + - idx_offset)); - struct uni_table_desc *tab_desc; - __le32 offs; - - tab_desc = nx_get_table_desc(unirom, section); - - if (tab_desc == NULL) - return NULL; - - offs = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * idx); - - return (struct uni_data_desc *)&unirom[offs]; -} - -static u8 * -nx_get_bootld_offs(struct netxen_adapter *adapter) -{ - u32 offs = NETXEN_BOOTLD_START; - - if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) - offs = cpu_to_le32((nx_get_data_desc(adapter, - NX_UNI_DIR_SECT_BOOTLD, - NX_UNI_BOOTLD_IDX_OFF))->findex); - - return (u8 *)&adapter->fw->data[offs]; -} - -static u8 * -nx_get_fw_offs(struct netxen_adapter *adapter) -{ - u32 offs = NETXEN_IMAGE_START; - - if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) - offs = cpu_to_le32((nx_get_data_desc(adapter, - NX_UNI_DIR_SECT_FW, - NX_UNI_FIRMWARE_IDX_OFF))->findex); - - return (u8 *)&adapter->fw->data[offs]; -} - -static __le32 -nx_get_fw_size(struct netxen_adapter *adapter) -{ - if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) - return cpu_to_le32((nx_get_data_desc(adapter, - NX_UNI_DIR_SECT_FW, - NX_UNI_FIRMWARE_IDX_OFF))->size); - else - return cpu_to_le32( - *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]); -} - -static __le32 -nx_get_fw_version(struct netxen_adapter *adapter) -{ - struct uni_data_desc *fw_data_desc; - const struct firmware *fw = adapter->fw; - __le32 major, minor, sub; - const u8 *ver_str; - int i, ret = 0; - - if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { - - fw_data_desc = nx_get_data_desc(adapter, 
NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
-		ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
-			cpu_to_le32(fw_data_desc->size) - 17;
-
-		for (i = 0; i < 12; i++) {
-			if (!strncmp(&ver_str[i], "REV=", 4)) {
-				ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
-					&major, &minor, &sub);
-				break;
-			}
-		}
-
-		if (ret != 3)
-			return 0;
-
-		return major + (minor << 8) + (sub << 16);
-
-	} else
-		return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
-}
-
-static __le32
-nx_get_bios_version(struct netxen_adapter *adapter)
-{
-	const struct firmware *fw = adapter->fw;
-	__le32 bios_ver, prd_off = adapter->file_prd_off;
-
-	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
-		bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
-						+ NX_UNI_BIOS_VERSION_OFF));
-		return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
-							(bios_ver >> 24);
-	} else
-		return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
-
-}
-
-int
-netxen_need_fw_reset(struct netxen_adapter *adapter)
-{
-	u32 count, old_count;
-	u32 val, version, major, minor, build;
-	int i, timeout;
-	u8 fw_type;
-
-	/* NX2031 firmware doesn't support heartbeat */
-	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
-		return 1;
-
-	if (adapter->need_fw_reset)
-		return 1;
-
-	/* last attempt had failed */
-	if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
-		return 1;
-
-	old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
-
-	for (i = 0; i < 10; i++) {
-
-		timeout = msleep_interruptible(200);
-		if (timeout) {
-			NXWR32(adapter, CRB_CMDPEG_STATE,
-					PHAN_INITIALIZE_FAILED);
-			return -EINTR;
-		}
-
-		count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
-		if (count != old_count)
-			break;
-	}
-
-	/* firmware is dead */
-	if (count == old_count)
-		return 1;
-
-	/* check if we have got newer or different file firmware */
-	if (adapter->fw) {
-
-		val = nx_get_fw_version(adapter);
-
-		version = NETXEN_DECODE_VERSION(val);
-
-		major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
-		minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
-		build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
-
-		if (version > NETXEN_VERSION_CODE(major, minor, build))
-			return 1;
-
-		if (version == NETXEN_VERSION_CODE(major, minor, build) &&
-			adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
-
-			val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
-			fw_type = (val & 0x4) ?
-				NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
-
-			if (adapter->fw_type != fw_type)
-				return 1;
-		}
-	}
-
-	return 0;
-}
-
-#define NETXEN_MIN_P3_FW_SUPP NETXEN_VERSION_CODE(4, 0, 505)
-
-int
-netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter)
-{
-	u32 flash_fw_ver, min_fw_ver;
-
-	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
-		return 0;
-
-	if (netxen_rom_fast_read(adapter,
-			NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
-		dev_err(&adapter->pdev->dev, "Unable to read flash fw "
-			"version\n");
-		return -EIO;
-	}
-
-	flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
-	min_fw_ver = NETXEN_MIN_P3_FW_SUPP;
-	if (flash_fw_ver >= min_fw_ver)
-		return 0;
-
-	dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported "
-		"[4.0.505]. 
Please update firmware on flash\n", - _major(flash_fw_ver), _minor(flash_fw_ver), - _build(flash_fw_ver)); - return -EINVAL; -} - -static char *fw_name[] = { - NX_P2_MN_ROMIMAGE_NAME, - NX_P3_CT_ROMIMAGE_NAME, - NX_P3_MN_ROMIMAGE_NAME, - NX_UNIFIED_ROMIMAGE_NAME, - NX_FLASH_ROMIMAGE_NAME, -}; - -int -netxen_load_firmware(struct netxen_adapter *adapter) -{ - u64 *ptr64; - u32 i, flashaddr, size; - const struct firmware *fw = adapter->fw; - struct pci_dev *pdev = adapter->pdev; - - dev_info(&pdev->dev, "loading firmware from %s\n", - fw_name[adapter->fw_type]); - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1); - - if (fw) { - __le64 data; - - size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; - - ptr64 = (u64 *)nx_get_bootld_offs(adapter); - flashaddr = NETXEN_BOOTLD_START; - - for (i = 0; i < size; i++) { - data = cpu_to_le64(ptr64[i]); - - if (adapter->pci_mem_write(adapter, flashaddr, data)) - return -EIO; - - flashaddr += 8; - } - - size = (__force u32)nx_get_fw_size(adapter) / 8; - - ptr64 = (u64 *)nx_get_fw_offs(adapter); - flashaddr = NETXEN_IMAGE_START; - - for (i = 0; i < size; i++) { - data = cpu_to_le64(ptr64[i]); - - if (adapter->pci_mem_write(adapter, - flashaddr, data)) - return -EIO; - - flashaddr += 8; - } - - size = (__force u32)nx_get_fw_size(adapter) % 8; - if (size) { - data = cpu_to_le64(ptr64[i]); - - if (adapter->pci_mem_write(adapter, - flashaddr, data)) - return -EIO; - } - - } else { - u64 data; - u32 hi, lo; - - size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; - flashaddr = NETXEN_BOOTLD_START; - - for (i = 0; i < size; i++) { - if (netxen_rom_fast_read(adapter, - flashaddr, (int *)&lo) != 0) - return -EIO; - if (netxen_rom_fast_read(adapter, - flashaddr + 4, (int *)&hi) != 0) - return -EIO; - - /* hi, lo are already in host endian byteorder */ - data = (((u64)hi << 32) | lo); - - if (adapter->pci_mem_write(adapter, - flashaddr, data)) - return -EIO; - - flashaddr += 8; - } - } - msleep(1); - - if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { - NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020); - NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e); - } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d); - else { - NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff); - NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0); - } - - return 0; -} - -static int -netxen_validate_firmware(struct netxen_adapter *adapter) -{ - __le32 val; - __le32 flash_fw_ver; - u32 file_fw_ver, min_ver, bios; - struct pci_dev *pdev = adapter->pdev; - const struct firmware *fw = adapter->fw; - u8 fw_type = adapter->fw_type; - u32 crbinit_fix_fw; - - if (fw_type == NX_UNIFIED_ROMIMAGE) { - if (netxen_nic_validate_unified_romimage(adapter)) - return -EINVAL; - } else { - val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); - if ((__force u32)val != NETXEN_BDINFO_MAGIC) - return -EINVAL; - - if (fw->size < NX_FW_MIN_SIZE) - return -EINVAL; - } - - val = nx_get_fw_version(adapter); - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - min_ver = NETXEN_MIN_P3_FW_SUPP; - else - min_ver = NETXEN_VERSION_CODE(3, 4, 216); - - file_fw_ver = NETXEN_DECODE_VERSION(val); - - if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) || - (file_fw_ver < min_ver)) { - dev_err(&pdev->dev, - "%s: firmware version %d.%d.%d unsupported\n", - fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver), - _build(file_fw_ver)); - return -EINVAL; - } - - val = nx_get_bios_version(adapter); 
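/*
 * For reference: every firmware version check in this file compares
 * versions as one packed integer built by NETXEN_VERSION_CODE() and taken
 * apart by _major()/_minor()/_build().  A minimal sketch of such a packing,
 * assuming major lives in the most significant byte (the *_EXAMPLE names
 * are hypothetical, for illustration only):
 *
 *	#define NX_VER_CODE_EXAMPLE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
 *	#define NX_VER_MAJOR_EXAMPLE(v)	(((v) >> 24) & 0xff)
 *	#define NX_VER_MINOR_EXAMPLE(v)	(((v) >> 16) & 0xff)
 *	#define NX_VER_BUILD_EXAMPLE(v)	((v) & 0xffff)
 *
 * With that layout NX_VER_CODE_EXAMPLE(4, 0, 505) orders correctly against
 * any other packed version under plain integer <, ==, >, which is exactly
 * how netxen_need_fw_reset() and the flash compatibility check use it.
 */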
- netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); - if ((__force u32)val != bios) { - dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", - fw_name[fw_type]); - return -EINVAL; - } - - if (netxen_rom_fast_read(adapter, - NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) { - dev_err(&pdev->dev, "Unable to read flash fw version\n"); - return -EIO; - } - flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver); - - /* New fw from file is not allowed, if fw on flash is < 4.0.554 */ - crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554); - if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw && - NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - dev_err(&pdev->dev, "Incompatibility detected between driver " - "and firmware version on flash. This configuration " - "is not recommended. Please update the firmware on " - "flash immediately\n"); - return -EINVAL; - } - - /* check if flashed firmware is newer only for no-mn and P2 case*/ - if (!netxen_p3_has_mn(adapter) || - NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - if (flash_fw_ver > file_fw_ver) { - dev_info(&pdev->dev, "%s: firmware is older than flash\n", - fw_name[fw_type]); - return -EINVAL; - } - } - - NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); - return 0; -} - -static void -nx_get_next_fwtype(struct netxen_adapter *adapter) -{ - u8 fw_type; - - switch (adapter->fw_type) { - case NX_UNKNOWN_ROMIMAGE: - fw_type = NX_UNIFIED_ROMIMAGE; - break; - - case NX_UNIFIED_ROMIMAGE: - if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) - fw_type = NX_FLASH_ROMIMAGE; - else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - fw_type = NX_P2_MN_ROMIMAGE; - else if (netxen_p3_has_mn(adapter)) - fw_type = NX_P3_MN_ROMIMAGE; - else - fw_type = NX_P3_CT_ROMIMAGE; - break; - - case NX_P3_MN_ROMIMAGE: - fw_type = NX_P3_CT_ROMIMAGE; - break; - - case NX_P2_MN_ROMIMAGE: - case NX_P3_CT_ROMIMAGE: - default: - fw_type = NX_FLASH_ROMIMAGE; - break; - } - - adapter->fw_type = fw_type; -} - -static int -netxen_p3_has_mn(struct netxen_adapter *adapter) -{ - u32 capability, flashed_ver; - capability = 0; - - /* NX2031 always had MN */ - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return 1; - - netxen_rom_fast_read(adapter, - NX_FW_VERSION_OFFSET, (int *)&flashed_ver); - flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); - - if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) { - - capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY); - if (capability & NX_PEG_TUNE_MN_PRESENT) - return 1; - } - return 0; -} - -void netxen_request_firmware(struct netxen_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - int rc = 0; - - adapter->fw_type = NX_UNKNOWN_ROMIMAGE; - -next: - nx_get_next_fwtype(adapter); - - if (adapter->fw_type == NX_FLASH_ROMIMAGE) { - adapter->fw = NULL; - } else { - rc = request_firmware(&adapter->fw, - fw_name[adapter->fw_type], &pdev->dev); - if (rc != 0) - goto next; - - rc = netxen_validate_firmware(adapter); - if (rc != 0) { - release_firmware(adapter->fw); - msleep(1); - goto next; - } - } -} - - -void -netxen_release_firmware(struct netxen_adapter *adapter) -{ - if (adapter->fw) - release_firmware(adapter->fw); - adapter->fw = NULL; -} - -int netxen_init_dummy_dma(struct netxen_adapter *adapter) -{ - u64 addr; - u32 hi, lo; - - if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return 0; - - adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev, - NETXEN_HOST_DUMMY_DMA_SIZE, - &adapter->dummy_dma.phys_addr); - if (adapter->dummy_dma.addr == NULL) { - dev_err(&adapter->pdev->dev, - 
"ERROR: Could not allocate dummy DMA memory\n"); - return -ENOMEM; - } - - addr = (uint64_t) adapter->dummy_dma.phys_addr; - hi = (addr >> 32) & 0xffffffff; - lo = addr & 0xffffffff; - - NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi); - NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo); - - return 0; -} - -/* - * NetXen DMA watchdog control: - * - * Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive - * Bit 1 : disable_request => 1 req disable dma watchdog - * Bit 2 : enable_request => 1 req enable dma watchdog - * Bit 3-31 : unused - */ -void netxen_free_dummy_dma(struct netxen_adapter *adapter) -{ - int i = 100; - u32 ctrl; - - if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return; - - if (!adapter->dummy_dma.addr) - return; - - ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); - if ((ctrl & 0x1) != 0) { - NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2)); - - while ((ctrl & 0x1) != 0) { - - msleep(50); - - ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); - - if (--i == 0) - break; - } - } - - if (i) { - pci_free_consistent(adapter->pdev, - NETXEN_HOST_DUMMY_DMA_SIZE, - adapter->dummy_dma.addr, - adapter->dummy_dma.phys_addr); - adapter->dummy_dma.addr = NULL; - } else - dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n"); -} - -int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) -{ - u32 val = 0; - int retries = 60; - - if (pegtune_val) - return 0; - - do { - val = NXRD32(adapter, CRB_CMDPEG_STATE); - - switch (val) { - case PHAN_INITIALIZE_COMPLETE: - case PHAN_INITIALIZE_ACK: - return 0; - case PHAN_INITIALIZE_FAILED: - goto out_err; - default: - break; - } - - msleep(500); - - } while (--retries); - - NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); - -out_err: - dev_warn(&adapter->pdev->dev, "firmware init failed\n"); - return -EIO; -} - -static int -netxen_receive_peg_ready(struct netxen_adapter *adapter) -{ - u32 val = 0; - int retries = 2000; - - do { - val = NXRD32(adapter, CRB_RCVPEG_STATE); - - if (val == PHAN_PEG_RCV_INITIALIZED) - return 0; - - msleep(10); - - } while (--retries); - - if (!retries) { - printk(KERN_ERR "Receive Peg initialization not " - "complete, state: 0x%x.\n", val); - return -EIO; - } - - return 0; -} - -int netxen_init_firmware(struct netxen_adapter *adapter) -{ - int err; - - err = netxen_receive_peg_ready(adapter); - if (err) - return err; - - NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT); - NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE); - NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC); - - return err; -} - -static void -netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg) -{ - u32 cable_OUI; - u16 cable_len; - u16 link_speed; - u8 link_status, module, duplex, autoneg; - struct net_device *netdev = adapter->netdev; - - adapter->has_link_events = 1; - - cable_OUI = msg->body[1] & 0xffffffff; - cable_len = (msg->body[1] >> 32) & 0xffff; - link_speed = (msg->body[1] >> 48) & 0xffff; - - link_status = msg->body[2] & 0xff; - duplex = (msg->body[2] >> 16) & 0xff; - autoneg = (msg->body[2] >> 24) & 0xff; - - module = (msg->body[2] >> 8) & 0xff; - if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) { - printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n", - netdev->name, cable_OUI, cable_len); - } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) { - printk(KERN_INFO "%s: unsupported cable length 
%d\n", - netdev->name, cable_len); - } - - netxen_advert_link_change(adapter, link_status); - - /* update link parameters */ - if (duplex == LINKEVENT_FULL_DUPLEX) - adapter->link_duplex = DUPLEX_FULL; - else - adapter->link_duplex = DUPLEX_HALF; - adapter->module_type = module; - adapter->link_autoneg = autoneg; - adapter->link_speed = link_speed; -} - -static void -netxen_handle_fw_message(int desc_cnt, int index, - struct nx_host_sds_ring *sds_ring) -{ - nx_fw_msg_t msg; - struct status_desc *desc; - int i = 0, opcode; - - while (desc_cnt > 0 && i < 8) { - desc = &sds_ring->desc_head[index]; - msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); - msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); - - index = get_next_index(index, sds_ring->num_desc); - desc_cnt--; - } - - opcode = netxen_get_nic_msg_opcode(msg.body[0]); - switch (opcode) { - case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: - netxen_handle_linkevent(sds_ring->adapter, &msg); - break; - default: - break; - } -} - -static int -netxen_alloc_rx_skb(struct netxen_adapter *adapter, - struct nx_host_rds_ring *rds_ring, - struct netxen_rx_buffer *buffer) -{ - struct sk_buff *skb; - dma_addr_t dma; - struct pci_dev *pdev = adapter->pdev; - - buffer->skb = dev_alloc_skb(rds_ring->skb_size); - if (!buffer->skb) - return 1; - - skb = buffer->skb; - - if (!adapter->ahw.cut_through) - skb_reserve(skb, 2); - - dma = pci_map_single(pdev, skb->data, - rds_ring->dma_size, PCI_DMA_FROMDEVICE); - - if (pci_dma_mapping_error(pdev, dma)) { - dev_kfree_skb_any(skb); - buffer->skb = NULL; - return 1; - } - - buffer->skb = skb; - buffer->dma = dma; - buffer->state = NETXEN_BUFFER_BUSY; - - return 0; -} - -static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter, - struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum) -{ - struct netxen_rx_buffer *buffer; - struct sk_buff *skb; - - buffer = &rds_ring->rx_buf_arr[index]; - - pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, - PCI_DMA_FROMDEVICE); - - skb = buffer->skb; - if (!skb) - goto no_skb; - - if (likely((adapter->netdev->features & NETIF_F_RXCSUM) - && cksum == STATUS_CKSUM_OK)) { - adapter->stats.csummed++; - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else - skb->ip_summed = CHECKSUM_NONE; - - skb->dev = adapter->netdev; - - buffer->skb = NULL; -no_skb: - buffer->state = NETXEN_BUFFER_FREE; - return skb; -} - -static struct netxen_rx_buffer * -netxen_process_rcv(struct netxen_adapter *adapter, - struct nx_host_sds_ring *sds_ring, - int ring, u64 sts_data0) -{ - struct net_device *netdev = adapter->netdev; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - struct netxen_rx_buffer *buffer; - struct sk_buff *skb; - struct nx_host_rds_ring *rds_ring; - int index, length, cksum, pkt_offset; - - if (unlikely(ring >= adapter->max_rds_rings)) - return NULL; - - rds_ring = &recv_ctx->rds_rings[ring]; - - index = netxen_get_sts_refhandle(sts_data0); - if (unlikely(index >= rds_ring->num_desc)) - return NULL; - - buffer = &rds_ring->rx_buf_arr[index]; - - length = netxen_get_sts_totallength(sts_data0); - cksum = netxen_get_sts_status(sts_data0); - pkt_offset = netxen_get_sts_pkt_offset(sts_data0); - - skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum); - if (!skb) - return buffer; - - if (length > rds_ring->skb_size) - skb_put(skb, rds_ring->skb_size); - else - skb_put(skb, length); - - - if (pkt_offset) - skb_pull(skb, pkt_offset); - - skb->protocol = eth_type_trans(skb, netdev); - - napi_gro_receive(&sds_ring->napi, skb); - - 
adapter->stats.rx_pkts++;
-	adapter->stats.rxbytes += length;
-
-	return buffer;
-}
-
-#define TCP_HDR_SIZE 20
-#define TCP_TS_OPTION_SIZE 12
-#define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)
-
-static struct netxen_rx_buffer *
-netxen_process_lro(struct netxen_adapter *adapter,
-		struct nx_host_sds_ring *sds_ring,
-		int ring, u64 sts_data0, u64 sts_data1)
-{
-	struct net_device *netdev = adapter->netdev;
-	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-	struct netxen_rx_buffer *buffer;
-	struct sk_buff *skb;
-	struct nx_host_rds_ring *rds_ring;
-	struct iphdr *iph;
-	struct tcphdr *th;
-	bool push, timestamp;
-	int l2_hdr_offset, l4_hdr_offset;
-	int index;
-	u16 lro_length, length, data_offset;
-	u32 seq_number;
-	u8 vhdr_len = 0;
-
-	if (unlikely(ring >= adapter->max_rds_rings))
-		return NULL;
-
-	rds_ring = &recv_ctx->rds_rings[ring];
-
-	index = netxen_get_lro_sts_refhandle(sts_data0);
-	if (unlikely(index >= rds_ring->num_desc))
-		return NULL;
-
-	buffer = &rds_ring->rx_buf_arr[index];
-
-	timestamp = netxen_get_lro_sts_timestamp(sts_data0);
-	lro_length = netxen_get_lro_sts_length(sts_data0);
-	l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
-	l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
-	push = netxen_get_lro_sts_push_flag(sts_data0);
-	seq_number = netxen_get_lro_sts_seq_number(sts_data1);
-
-	skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
-	if (!skb)
-		return buffer;
-
-	if (timestamp)
-		data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
-	else
-		data_offset = l4_hdr_offset + TCP_HDR_SIZE;
-
-	skb_put(skb, lro_length + data_offset);
-
-	skb_pull(skb, l2_hdr_offset);
-	skb->protocol = eth_type_trans(skb, netdev);
-
-	if (skb->protocol == htons(ETH_P_8021Q))
-		vhdr_len = VLAN_HLEN;
-	iph = (struct iphdr *)(skb->data + vhdr_len);
-	th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
-
-	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
-	iph->tot_len = htons(length);
-	iph->check = 0;
-	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-	th->psh = push;
-	th->seq = htonl(seq_number);
-
-	length = skb->len;
-
-	netif_receive_skb(skb);
-
-	adapter->stats.lro_pkts++;
-	adapter->stats.rxbytes += length;
-
-	return buffer;
-}
-
-#define netxen_merge_rx_buffers(list, head) \
-	do { list_splice_tail_init(list, head); } while (0)
-
-int
-netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
-{
-	struct netxen_adapter *adapter = sds_ring->adapter;
-
-	struct list_head *cur;
-
-	struct status_desc *desc;
-	struct netxen_rx_buffer *rxbuf;
-
-	u32 consumer = sds_ring->consumer;
-
-	int count = 0;
-	u64 sts_data0, sts_data1;
-	int opcode, ring = 0, desc_cnt;
-
-	while (count < max) {
-		desc = &sds_ring->desc_head[consumer];
-		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
-
-		if (!(sts_data0 & STATUS_OWNER_HOST))
-			break;
-
-		desc_cnt = netxen_get_sts_desc_cnt(sts_data0);
-
-		opcode = netxen_get_sts_opcode(sts_data0);
-
-		switch (opcode) {
-		case NETXEN_NIC_RXPKT_DESC:
-		case NETXEN_OLD_RXPKT_DESC:
-		case NETXEN_NIC_SYN_OFFLOAD:
-			ring = netxen_get_sts_type(sts_data0);
-			rxbuf = netxen_process_rcv(adapter, sds_ring,
-					ring, sts_data0);
-			break;
-		case NETXEN_NIC_LRO_DESC:
-			ring = netxen_get_lro_sts_type(sts_data0);
-			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
-			rxbuf = netxen_process_lro(adapter, sds_ring,
-					ring, sts_data0, sts_data1);
-			break;
-		case NETXEN_NIC_RESPONSE_DESC:
-			netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
-			/* fall through */
-		default:
-			goto skip;
-		}
-
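/*
 * For reference: the status ring loop above is a single-producer,
 * single-consumer handshake over an owner flag carried in each 64-bit
 * status descriptor.  The host only consumes entries whose owner bit is
 * STATUS_OWNER_HOST, and it hands every consumed entry back to the
 * firmware by stamping STATUS_OWNER_PHANTOM in the skip: loop below.
 * Sketch of the consume-side test, as used above:
 *
 *	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
 *	if (!(sts_data0 & STATUS_OWNER_HOST))
 *		break;		-- firmware still owns this entry
 */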
WARN_ON(desc_cnt > 1); - - if (rxbuf) - list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); - -skip: - for (; desc_cnt > 0; desc_cnt--) { - desc = &sds_ring->desc_head[consumer]; - desc->status_desc_data[0] = - cpu_to_le64(STATUS_OWNER_PHANTOM); - consumer = get_next_index(consumer, sds_ring->num_desc); - } - count++; - } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - struct nx_host_rds_ring *rds_ring = - &adapter->recv_ctx.rds_rings[ring]; - - if (!list_empty(&sds_ring->free_list[ring])) { - list_for_each(cur, &sds_ring->free_list[ring]) { - rxbuf = list_entry(cur, - struct netxen_rx_buffer, list); - netxen_alloc_rx_skb(adapter, rds_ring, rxbuf); - } - spin_lock(&rds_ring->lock); - netxen_merge_rx_buffers(&sds_ring->free_list[ring], - &rds_ring->free_list); - spin_unlock(&rds_ring->lock); - } - - netxen_post_rx_buffers_nodb(adapter, rds_ring); - } - - if (count) { - sds_ring->consumer = consumer; - NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer); - } - - return count; -} - -/* Process Command status ring */ -int netxen_process_cmd_ring(struct netxen_adapter *adapter) -{ - u32 sw_consumer, hw_consumer; - int count = 0, i; - struct netxen_cmd_buffer *buffer; - struct pci_dev *pdev = adapter->pdev; - struct net_device *netdev = adapter->netdev; - struct netxen_skb_frag *frag; - int done = 0; - struct nx_host_tx_ring *tx_ring = adapter->tx_ring; - - if (!spin_trylock(&adapter->tx_clean_lock)) - return 1; - - sw_consumer = tx_ring->sw_consumer; - hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); - - while (sw_consumer != hw_consumer) { - buffer = &tx_ring->cmd_buf_arr[sw_consumer]; - if (buffer->skb) { - frag = &buffer->frag_array[0]; - pci_unmap_single(pdev, frag->dma, frag->length, - PCI_DMA_TODEVICE); - frag->dma = 0ULL; - for (i = 1; i < buffer->frag_count; i++) { - frag++; /* Get the next frag */ - pci_unmap_page(pdev, frag->dma, frag->length, - PCI_DMA_TODEVICE); - frag->dma = 0ULL; - } - - adapter->stats.xmitfinished++; - dev_kfree_skb_any(buffer->skb); - buffer->skb = NULL; - } - - sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); - if (++count >= MAX_STATUS_HANDLE) - break; - } - - if (count && netif_running(netdev)) { - tx_ring->sw_consumer = sw_consumer; - - smp_mb(); - - if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) - if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) - netif_wake_queue(netdev); - adapter->tx_timeo_cnt = 0; - } - /* - * If everything is freed up to consumer then check if the ring is full - * If the ring is full then check if more needs to be freed and - * schedule the call back again. - * - * This happens when there are 2 CPUs. One could be freeing and the - * other filling it. If the ring is full when we get out of here and - * the card has already interrupted the host then the host can miss the - * interrupt. - * - * There is still a possible race condition and the host could miss an - * interrupt. The card has to take care of this. 
- */ - hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); - done = (sw_consumer == hw_consumer); - spin_unlock(&adapter->tx_clean_lock); - - return done; -} - -void -netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, - struct nx_host_rds_ring *rds_ring) -{ - struct rcv_desc *pdesc; - struct netxen_rx_buffer *buffer; - int producer, count = 0; - netxen_ctx_msg msg = 0; - struct list_head *head; - - producer = rds_ring->producer; - - head = &rds_ring->free_list; - while (!list_empty(head)) { - - buffer = list_entry(head->next, struct netxen_rx_buffer, list); - - if (!buffer->skb) { - if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) - break; - } - - count++; - list_del(&buffer->list); - - /* make a rcv descriptor */ - pdesc = &rds_ring->desc_head[producer]; - pdesc->addr_buffer = cpu_to_le64(buffer->dma); - pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); - pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); - - producer = get_next_index(producer, rds_ring->num_desc); - } - - if (count) { - rds_ring->producer = producer; - NXWRIO(adapter, rds_ring->crb_rcv_producer, - (producer-1) & (rds_ring->num_desc-1)); - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - /* - * Write a doorbell msg to tell phanmon of change in - * receive ring producer - * Only for firmware version < 4.0.0 - */ - netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID); - netxen_set_msg_privid(msg); - netxen_set_msg_count(msg, - ((producer - 1) & - (rds_ring->num_desc - 1))); - netxen_set_msg_ctxid(msg, adapter->portnum); - netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); - NXWRIO(adapter, DB_NORMALIZE(adapter, - NETXEN_RCV_PRODUCER_OFFSET), msg); - } - } -} - -static void -netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, - struct nx_host_rds_ring *rds_ring) -{ - struct rcv_desc *pdesc; - struct netxen_rx_buffer *buffer; - int producer, count = 0; - struct list_head *head; - - if (!spin_trylock(&rds_ring->lock)) - return; - - producer = rds_ring->producer; - - head = &rds_ring->free_list; - while (!list_empty(head)) { - - buffer = list_entry(head->next, struct netxen_rx_buffer, list); - - if (!buffer->skb) { - if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) - break; - } - - count++; - list_del(&buffer->list); - - /* make a rcv descriptor */ - pdesc = &rds_ring->desc_head[producer]; - pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); - pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); - pdesc->addr_buffer = cpu_to_le64(buffer->dma); - - producer = get_next_index(producer, rds_ring->num_desc); - } - - if (count) { - rds_ring->producer = producer; - NXWRIO(adapter, rds_ring->crb_rcv_producer, - (producer - 1) & (rds_ring->num_desc - 1)); - } - spin_unlock(&rds_ring->lock); -} - -void netxen_nic_clear_stats(struct netxen_adapter *adapter) -{ - memset(&adapter->stats, 0, sizeof(adapter->stats)); -} - diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c deleted file mode 100644 index 8c7fc32d781f..000000000000 --- a/drivers/net/netxen/netxen_nic_main.c +++ /dev/null @@ -1,3100 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
- *
- */
-
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include "netxen_nic_hw.h"
-
-#include "netxen_nic.h"
-
-#include <linux/dma-mapping.h>
-#include <linux/if_vlan.h>
-#include <net/ip.h>
-#include <linux/ipv6.h>
-#include <linux/inetdevice.h>
-#include <linux/sysfs.h>
-#include <linux/aer.h>
-
-MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
-
-char netxen_nic_driver_name[] = "netxen_nic";
-static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
-    NETXEN_NIC_LINUX_VERSIONID;
-
-static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
-
-/* Default to restricted 1G auto-neg mode */
-static int wol_port_mode = 5;
-
-static int use_msi = 1;
-
-static int use_msi_x = 1;
-
-static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
-module_param(auto_fw_reset, int, 0644);
-MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
-
-static int __devinit netxen_nic_probe(struct pci_dev *pdev,
-		const struct pci_device_id *ent);
-static void __devexit netxen_nic_remove(struct pci_dev *pdev);
-static int netxen_nic_open(struct net_device *netdev);
-static int netxen_nic_close(struct net_device *netdev);
-static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
-		struct net_device *);
-static void netxen_tx_timeout(struct net_device *netdev);
-static void netxen_tx_timeout_task(struct work_struct *work);
-static void netxen_fw_poll_work(struct work_struct *work);
-static void netxen_schedule_work(struct netxen_adapter *adapter,
-		work_func_t func, int delay);
-static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
-static int netxen_nic_poll(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev);
-#endif
-
-static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
-static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
-static void netxen_create_diag_entries(struct netxen_adapter *adapter);
-static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
-
-static int nx_dev_request_aer(struct netxen_adapter *adapter);
-static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
-static int netxen_can_start_firmware(struct netxen_adapter *adapter);
-
-static irqreturn_t netxen_intr(int irq, void *data);
-static irqreturn_t netxen_msi_intr(int irq, void *data);
-static irqreturn_t netxen_msix_intr(int irq, void *data);
-
-static void netxen_free_vlan_ip_list(struct netxen_adapter *);
-static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
-static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
-		struct rtnl_link_stats64 *stats);
-static int netxen_nic_set_mac(struct net_device *netdev, void *p);
-
-/* PCI Device ID Table */
-#define ENTRY(device) \
-	{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
-	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
-
-static
DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = { - ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), - ENTRY(PCI_DEVICE_ID_NX2031_10GCX4), - ENTRY(PCI_DEVICE_ID_NX2031_4GCU), - ENTRY(PCI_DEVICE_ID_NX2031_IMEZ), - ENTRY(PCI_DEVICE_ID_NX2031_HMEZ), - ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT), - ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2), - ENTRY(PCI_DEVICE_ID_NX3031), - {0,} -}; - -MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); - -static uint32_t crb_cmd_producer[4] = { - CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1, - CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3 -}; - -void -netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, - struct nx_host_tx_ring *tx_ring) -{ - NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer); -} - -static uint32_t crb_cmd_consumer[4] = { - CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1, - CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3 -}; - -static inline void -netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter, - struct nx_host_tx_ring *tx_ring) -{ - NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer); -} - -static uint32_t msi_tgt_status[8] = { - ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, - ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, - ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, - ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 -}; - -static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; - -static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring) -{ - struct netxen_adapter *adapter = sds_ring->adapter; - - NXWRIO(adapter, sds_ring->crb_intr_mask, 0); -} - -static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring) -{ - struct netxen_adapter *adapter = sds_ring->adapter; - - NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1); - - if (!NETXEN_IS_MSI_FAMILY(adapter)) - NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff); -} - -static int -netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count) -{ - int size = sizeof(struct nx_host_sds_ring) * count; - - recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); - - return recv_ctx->sds_rings == NULL; -} - -static void -netxen_free_sds_rings(struct netxen_recv_context *recv_ctx) -{ - if (recv_ctx->sds_rings != NULL) - kfree(recv_ctx->sds_rings); - - recv_ctx->sds_rings = NULL; -} - -static int -netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) - return -ENOMEM; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - netif_napi_add(netdev, &sds_ring->napi, - netxen_nic_poll, NETXEN_NETDEV_WEIGHT); - } - - return 0; -} - -static void -netxen_napi_del(struct netxen_adapter *adapter) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - netif_napi_del(&sds_ring->napi); - } - - netxen_free_sds_rings(&adapter->recv_ctx); -} - -static void -netxen_napi_enable(struct netxen_adapter *adapter) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - napi_enable(&sds_ring->napi); - netxen_nic_enable_int(sds_ring); - } -} - -static void 
-netxen_napi_disable(struct netxen_adapter *adapter) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - netxen_nic_disable_int(sds_ring); - napi_synchronize(&sds_ring->napi); - napi_disable(&sds_ring->napi); - } -} - -static int nx_set_dma_mask(struct netxen_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - uint64_t mask, cmask; - - adapter->pci_using_dac = 0; - - mask = DMA_BIT_MASK(32); - cmask = DMA_BIT_MASK(32); - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { -#ifndef CONFIG_IA64 - mask = DMA_BIT_MASK(35); -#endif - } else { - mask = DMA_BIT_MASK(39); - cmask = mask; - } - - if (pci_set_dma_mask(pdev, mask) == 0 && - pci_set_consistent_dma_mask(pdev, cmask) == 0) { - adapter->pci_using_dac = 1; - return 0; - } - - return -EIO; -} - -/* Update addressable range if firmware supports it */ -static int -nx_update_dma_mask(struct netxen_adapter *adapter) -{ - int change, shift, err; - uint64_t mask, old_mask, old_cmask; - struct pci_dev *pdev = adapter->pdev; - - change = 0; - - shift = NXRD32(adapter, CRB_DMA_SHIFT); - if (shift > 32) - return 0; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9)) - change = 1; - else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4)) - change = 1; - - if (change) { - old_mask = pdev->dma_mask; - old_cmask = pdev->dev.coherent_dma_mask; - - mask = DMA_BIT_MASK(32+shift); - - err = pci_set_dma_mask(pdev, mask); - if (err) - goto err_out; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - - err = pci_set_consistent_dma_mask(pdev, mask); - if (err) - goto err_out; - } - dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift); - } - - return 0; - -err_out: - pci_set_dma_mask(pdev, old_mask); - pci_set_consistent_dma_mask(pdev, old_cmask); - return err; -} - -static int -netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot) -{ - u32 val, timeout; - - if (first_boot == 0x55555555) { - /* This is the first boot after power up */ - NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); - - if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return 0; - - /* PCI bus master workaround */ - first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4)); - if (!(first_boot & 0x4)) { - first_boot |= 0x4; - NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot); - NXRD32(adapter, NETXEN_PCIE_REG(0x4)); - } - - /* This is the first boot after power up */ - first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); - if (first_boot != 0x80000f) { - /* clear the register for future unloads/loads */ - NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0); - return -EIO; - } - - /* Start P2 boot loader */ - val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE); - NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1); - timeout = 0; - do { - msleep(1); - val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); - - if (++timeout > 5000) - return -EIO; - - } while (val == NETXEN_BDINFO_MAGIC); - } - return 0; -} - -static void netxen_set_port_mode(struct netxen_adapter *adapter) -{ - u32 val, data; - - val = adapter->ahw.board_type; - if ((val == NETXEN_BRDTYPE_P3_HMEZ) || - (val == NETXEN_BRDTYPE_P3_XG_LOM)) { - if (port_mode == NETXEN_PORT_MODE_802_3_AP) { - data = NETXEN_PORT_MODE_802_3_AP; - NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); - } else if (port_mode == NETXEN_PORT_MODE_XG) { - data = NETXEN_PORT_MODE_XG; - NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); - } else if 
(port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) { - data = NETXEN_PORT_MODE_AUTO_NEG_1G; - NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); - } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) { - data = NETXEN_PORT_MODE_AUTO_NEG_XG; - NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); - } else { - data = NETXEN_PORT_MODE_AUTO_NEG; - NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); - } - - if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) && - (wol_port_mode != NETXEN_PORT_MODE_XG) && - (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) && - (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) { - wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG; - } - NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode); - } -} - -static void netxen_set_msix_bit(struct pci_dev *pdev, int enable) -{ - u32 control; - int pos; - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - if (pos) { - pci_read_config_dword(pdev, pos, &control); - if (enable) - control |= PCI_MSIX_FLAGS_ENABLE; - else - control = 0; - pci_write_config_dword(pdev, pos, control); - } -} - -static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count) -{ - int i; - - for (i = 0; i < count; i++) - adapter->msix_entries[i].entry = i; -} - -static int -netxen_read_mac_addr(struct netxen_adapter *adapter) -{ - int i; - unsigned char *p; - u64 mac_addr; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) - return -EIO; - } else { - if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0) - return -EIO; - } - - p = (unsigned char *)&mac_addr; - for (i = 0; i < 6; i++) - netdev->dev_addr[i] = *(p + 5 - i); - - memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); - memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); - - /* set station address */ - - if (!is_valid_ether_addr(netdev->perm_addr)) - dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); - - return 0; -} - -static int netxen_nic_set_mac(struct net_device *netdev, void *p) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; - - if (netif_running(netdev)) { - netif_device_detach(netdev); - netxen_napi_disable(adapter); - } - - memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - adapter->macaddr_set(adapter, addr->sa_data); - - if (netif_running(netdev)) { - netif_device_attach(netdev); - netxen_napi_enable(adapter); - } - return 0; -} - -static void netxen_set_multicast_list(struct net_device *dev) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - - adapter->set_multi(dev); -} - -static u32 netxen_fix_features(struct net_device *dev, u32 features) -{ - if (!(features & NETIF_F_RXCSUM)) { - netdev_info(dev, "disabling LRO as RXCSUM is off\n"); - - features &= ~NETIF_F_LRO; - } - - return features; -} - -static int netxen_set_features(struct net_device *dev, u32 features) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - int hw_lro; - - if (!((dev->features ^ features) & NETIF_F_LRO)) - return 0; - - hw_lro = (features & NETIF_F_LRO) ? 
NETXEN_NIC_LRO_ENABLED - : NETXEN_NIC_LRO_DISABLED; - - if (netxen_config_hw_lro(adapter, hw_lro)) - return -EIO; - - if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter)) - return -EIO; - - return 0; -} - -static const struct net_device_ops netxen_netdev_ops = { - .ndo_open = netxen_nic_open, - .ndo_stop = netxen_nic_close, - .ndo_start_xmit = netxen_nic_xmit_frame, - .ndo_get_stats64 = netxen_nic_get_stats, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = netxen_set_multicast_list, - .ndo_set_mac_address = netxen_nic_set_mac, - .ndo_change_mtu = netxen_nic_change_mtu, - .ndo_tx_timeout = netxen_tx_timeout, - .ndo_fix_features = netxen_fix_features, - .ndo_set_features = netxen_set_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = netxen_nic_poll_controller, -#endif -}; - -static void -netxen_setup_intr(struct netxen_adapter *adapter) -{ - struct netxen_legacy_intr_set *legacy_intrp; - struct pci_dev *pdev = adapter->pdev; - int err, num_msix; - - if (adapter->rss_supported) { - num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? - MSIX_ENTRIES_PER_ADAPTER : 2; - } else - num_msix = 1; - - adapter->max_sds_rings = 1; - - adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); - - if (adapter->ahw.revision_id >= NX_P3_B0) - legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; - else - legacy_intrp = &legacy_intr[0]; - - adapter->int_vec_bit = legacy_intrp->int_vec_bit; - adapter->tgt_status_reg = netxen_get_ioaddr(adapter, - legacy_intrp->tgt_status_reg); - adapter->tgt_mask_reg = netxen_get_ioaddr(adapter, - legacy_intrp->tgt_mask_reg); - adapter->pci_int_reg = netxen_get_ioaddr(adapter, - legacy_intrp->pci_int_reg); - adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR); - - if (adapter->ahw.revision_id >= NX_P3_B1) - adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, - ISR_INT_STATE_REG); - else - adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, - CRB_INT_VECTOR); - - netxen_set_msix_bit(pdev, 0); - - if (adapter->msix_supported) { - - netxen_init_msix_entries(adapter, num_msix); - err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); - if (err == 0) { - adapter->flags |= NETXEN_NIC_MSIX_ENABLED; - netxen_set_msix_bit(pdev, 1); - - if (adapter->rss_supported) - adapter->max_sds_rings = num_msix; - - dev_info(&pdev->dev, "using msi-x interrupts\n"); - return; - } - - if (err > 0) - pci_disable_msix(pdev); - - /* fall through for msi */ - } - - if (use_msi && !pci_enable_msi(pdev)) { - adapter->flags |= NETXEN_NIC_MSI_ENABLED; - adapter->tgt_status_reg = netxen_get_ioaddr(adapter, - msi_tgt_status[adapter->ahw.pci_func]); - dev_info(&pdev->dev, "using msi interrupts\n"); - adapter->msix_entries[0].vector = pdev->irq; - return; - } - - dev_info(&pdev->dev, "using legacy interrupts\n"); - adapter->msix_entries[0].vector = pdev->irq; -} - -static void -netxen_teardown_intr(struct netxen_adapter *adapter) -{ - if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) - pci_disable_msix(adapter->pdev); - if (adapter->flags & NETXEN_NIC_MSI_ENABLED) - pci_disable_msi(adapter->pdev); -} - -static void -netxen_cleanup_pci_map(struct netxen_adapter *adapter) -{ - if (adapter->ahw.db_base != NULL) - iounmap(adapter->ahw.db_base); - if (adapter->ahw.pci_base0 != NULL) - iounmap(adapter->ahw.pci_base0); - if (adapter->ahw.pci_base1 != NULL) - iounmap(adapter->ahw.pci_base1); - if (adapter->ahw.pci_base2 != NULL) - iounmap(adapter->ahw.pci_base2); -} - -static int -netxen_setup_pci_map(struct 
netxen_adapter *adapter) -{ - void __iomem *db_ptr = NULL; - - resource_size_t mem_base, db_base; - unsigned long mem_len, db_len = 0; - - struct pci_dev *pdev = adapter->pdev; - int pci_func = adapter->ahw.pci_func; - struct netxen_hardware_context *ahw = &adapter->ahw; - - int err = 0; - - /* - * Set the CRB window to invalid. If any register in window 0 is - * accessed it should set the window to 0 and then reset it to 1. - */ - adapter->ahw.crb_win = -1; - adapter->ahw.ocm_win = -1; - - /* remap phys address */ - mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ - mem_len = pci_resource_len(pdev, 0); - - /* 128 Meg of memory */ - if (mem_len == NETXEN_PCI_128MB_SIZE) { - - ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); - ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, - SECOND_PAGE_GROUP_SIZE); - ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, - THIRD_PAGE_GROUP_SIZE); - if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL || - ahw->pci_base2 == NULL) { - dev_err(&pdev->dev, "failed to map PCI bar 0\n"); - err = -EIO; - goto err_out; - } - - ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE; - - } else if (mem_len == NETXEN_PCI_32MB_SIZE) { - - ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); - ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - - SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); - if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { - dev_err(&pdev->dev, "failed to map PCI bar 0\n"); - err = -EIO; - goto err_out; - } - - } else if (mem_len == NETXEN_PCI_2MB_SIZE) { - - ahw->pci_base0 = pci_ioremap_bar(pdev, 0); - if (ahw->pci_base0 == NULL) { - dev_err(&pdev->dev, "failed to map PCI bar 0\n"); - return -EIO; - } - ahw->pci_len0 = mem_len; - } else { - return -EIO; - } - - netxen_setup_hwops(adapter); - - dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); - - if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { - adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, - NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); - - } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, - NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func))); - } - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - goto skip_doorbell; - - db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ - db_len = pci_resource_len(pdev, 4); - - if (db_len == 0) { - printk(KERN_ERR "%s: doorbell is disabled\n", - netxen_nic_driver_name); - err = -EIO; - goto err_out; - } - - db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES); - if (!db_ptr) { - printk(KERN_ERR "%s: Failed to allocate doorbell map.", - netxen_nic_driver_name); - err = -EIO; - goto err_out; - } - -skip_doorbell: - adapter->ahw.db_base = db_ptr; - adapter->ahw.db_len = db_len; - return 0; - -err_out: - netxen_cleanup_pci_map(adapter); - return err; -} - -static void -netxen_check_options(struct netxen_adapter *adapter) -{ - u32 fw_major, fw_minor, fw_build; - char brd_name[NETXEN_MAX_SHORT_NAME]; - char serial_num[32]; - int i, offset, val; - int *ptr32; - struct pci_dev *pdev = adapter->pdev; - - adapter->driver_mismatch = 0; - - ptr32 = (int *)&serial_num; - offset = NX_FW_SERIAL_NUM_OFFSET; - for (i = 0; i < 8; i++) { - if (netxen_rom_fast_read(adapter, offset, &val) == -1) { - dev_err(&pdev->dev, "error reading board info\n"); - adapter->driver_mismatch = 1; - return; - } - ptr32[i] = cpu_to_le32(val); - offset += sizeof(u32); - } - - fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); - fw_minor = 
NXRD32(adapter, NETXEN_FW_VERSION_MINOR); - fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); - - adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build); - - if (adapter->portnum == 0) { - get_brd_name_by_type(adapter->ahw.board_type, brd_name); - - pr_info("%s: %s Board S/N %s Chip rev 0x%x\n", - module_name(THIS_MODULE), - brd_name, serial_num, adapter->ahw.revision_id); - } - - if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) { - adapter->driver_mismatch = 1; - dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n", - fw_major, fw_minor, fw_build); - return; - } - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - i = NXRD32(adapter, NETXEN_SRE_MISC); - adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0; - } - - dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n", - fw_major, fw_minor, fw_build, - adapter->ahw.cut_through ? "cut-through" : "legacy"); - - if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222)) - adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1); - - if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; - adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; - } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; - adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; - } - - adapter->msix_supported = 0; - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - adapter->msix_supported = !!use_msi_x; - adapter->rss_supported = !!use_msi_x; - } else { - u32 flashed_ver = 0; - netxen_rom_fast_read(adapter, - NX_FW_VERSION_OFFSET, (int *)&flashed_ver); - flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); - - if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) { - switch (adapter->ahw.board_type) { - case NETXEN_BRDTYPE_P2_SB31_10G: - case NETXEN_BRDTYPE_P2_SB31_10G_CX4: - adapter->msix_supported = !!use_msi_x; - adapter->rss_supported = !!use_msi_x; - break; - default: - break; - } - } - } - - adapter->num_txd = MAX_CMD_DESCRIPTORS; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS; - adapter->max_rds_rings = 3; - } else { - adapter->num_lro_rxd = 0; - adapter->max_rds_rings = 2; - } -} - -static int -netxen_start_firmware(struct netxen_adapter *adapter) -{ - int val, err, first_boot; - struct pci_dev *pdev = adapter->pdev; - - /* required for NX2031 dummy dma */ - err = nx_set_dma_mask(adapter); - if (err) - return err; - - if (!netxen_can_start_firmware(adapter)) - goto wait_init; - - first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); - - err = netxen_check_hw_init(adapter, first_boot); - if (err) { - dev_err(&pdev->dev, "error in init HW init sequence\n"); - return err; - } - - netxen_request_firmware(adapter); - - err = netxen_need_fw_reset(adapter); - if (err < 0) - goto err_out; - if (err == 0) - goto wait_init; - - if (first_boot != 0x55555555) { - NXWR32(adapter, CRB_CMDPEG_STATE, 0); - netxen_pinit_from_rom(adapter); - msleep(1); - } - - NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555); - NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0); - NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0); - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - netxen_set_port_mode(adapter); - - err = netxen_load_firmware(adapter); - if (err) - goto err_out; - - netxen_release_firmware(adapter); - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - - /* Initialize multicast addr pool owners */ - val = 0x7654; - if (adapter->ahw.port_type == NETXEN_NIC_XGBE) - val |= 0x0f000000; - NXWR32(adapter, 
NETXEN_MAC_ADDR_CNTL_REG, val); - - } - - err = netxen_init_dummy_dma(adapter); - if (err) - goto err_out; - - /* - * Tell the hardware our version number. - */ - val = (_NETXEN_NIC_LINUX_MAJOR << 16) - | ((_NETXEN_NIC_LINUX_MINOR << 8)) - | (_NETXEN_NIC_LINUX_SUBVERSION); - NXWR32(adapter, CRB_DRIVER_VERSION, val); - -wait_init: - /* Handshake with the card before we register the devices. */ - err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); - if (err) { - netxen_free_dummy_dma(adapter); - goto err_out; - } - - NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY); - - nx_update_dma_mask(adapter); - - netxen_check_options(adapter); - - adapter->need_fw_reset = 0; - - /* fall through and release firmware */ - -err_out: - netxen_release_firmware(adapter); - return err; -} - -static int -netxen_nic_request_irq(struct netxen_adapter *adapter) -{ - irq_handler_t handler; - struct nx_host_sds_ring *sds_ring; - int err, ring; - - unsigned long flags = 0; - struct net_device *netdev = adapter->netdev; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) - handler = netxen_msix_intr; - else if (adapter->flags & NETXEN_NIC_MSI_ENABLED) - handler = netxen_msi_intr; - else { - flags |= IRQF_SHARED; - handler = netxen_intr; - } - adapter->irq = netdev->irq; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - sprintf(sds_ring->name, "%s[%d]", netdev->name, ring); - err = request_irq(sds_ring->irq, handler, - flags, sds_ring->name, sds_ring); - if (err) - return err; - } - - return 0; -} - -static void -netxen_nic_free_irq(struct netxen_adapter *adapter) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - free_irq(sds_ring->irq, sds_ring); - } -} - -static void -netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter) -{ - adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; - adapter->coal.normal.data.rx_time_us = - NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; - adapter->coal.normal.data.rx_packets = - NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; - adapter->coal.normal.data.tx_time_us = - NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US; - adapter->coal.normal.data.tx_packets = - NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS; -} - -/* with rtnl_lock */ -static int -__netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) -{ - int err; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return -EIO; - - err = adapter->init_port(adapter, adapter->physical_port); - if (err) { - printk(KERN_ERR "%s: Failed to initialize port %d\n", - netxen_nic_driver_name, adapter->portnum); - return err; - } - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - adapter->macaddr_set(adapter, adapter->mac_addr); - - adapter->set_multi(netdev); - adapter->set_mtu(adapter, netdev->mtu); - - adapter->ahw.linkup = 0; - - if (adapter->max_sds_rings > 1) - netxen_config_rss(adapter, 1); - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - netxen_config_intr_coalesce(adapter); - - if (netdev->features & NETIF_F_LRO) - netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED); - - netxen_napi_enable(adapter); - - if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) - netxen_linkevent_request(adapter, 1); - else - netxen_nic_set_link_parameters(adapter); - - set_bit(__NX_DEV_UP, &adapter->state); - return 0; -} - -/* Usage: During resume and firmware 
recovery module.*/ - -static inline int -netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) -{ - int err = 0; - - rtnl_lock(); - if (netif_running(netdev)) - err = __netxen_nic_up(adapter, netdev); - rtnl_unlock(); - - return err; -} - -/* with rtnl_lock */ -static void -__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) -{ - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return; - - if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state)) - return; - - smp_mb(); - spin_lock(&adapter->tx_clean_lock); - netif_carrier_off(netdev); - netif_tx_disable(netdev); - - if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) - netxen_linkevent_request(adapter, 0); - - if (adapter->stop_port) - adapter->stop_port(adapter); - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - netxen_p3_free_mac_list(adapter); - - adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE); - - netxen_napi_disable(adapter); - - netxen_release_tx_buffers(adapter); - spin_unlock(&adapter->tx_clean_lock); -} - -/* Usage: During suspend and firmware recovery module */ - -static inline void -netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) -{ - rtnl_lock(); - if (netif_running(netdev)) - __netxen_nic_down(adapter, netdev); - rtnl_unlock(); - -} - -static int -netxen_nic_attach(struct netxen_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - int err, ring; - struct nx_host_rds_ring *rds_ring; - struct nx_host_tx_ring *tx_ring; - - if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) - return 0; - - err = netxen_init_firmware(adapter); - if (err) - return err; - - err = netxen_napi_add(adapter, netdev); - if (err) - return err; - - err = netxen_alloc_sw_resources(adapter); - if (err) { - printk(KERN_ERR "%s: Error in setting sw resources\n", - netdev->name); - return err; - } - - err = netxen_alloc_hw_resources(adapter); - if (err) { - printk(KERN_ERR "%s: Error in setting hw resources\n", - netdev->name); - goto err_out_free_sw; - } - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - tx_ring = adapter->tx_ring; - tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, - crb_cmd_producer[adapter->portnum]); - tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter, - crb_cmd_consumer[adapter->portnum]); - - tx_ring->producer = 0; - tx_ring->sw_consumer = 0; - - netxen_nic_update_cmd_producer(adapter, tx_ring); - netxen_nic_update_cmd_consumer(adapter, tx_ring); - } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &adapter->recv_ctx.rds_rings[ring]; - netxen_post_rx_buffers(adapter, ring, rds_ring); - } - - err = netxen_nic_request_irq(adapter); - if (err) { - dev_err(&pdev->dev, "%s: failed to setup interrupt\n", - netdev->name); - goto err_out_free_rxbuf; - } - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - netxen_nic_init_coalesce_defaults(adapter); - - netxen_create_sysfs_entries(adapter); - - adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; - return 0; - -err_out_free_rxbuf: - netxen_release_rx_buffers(adapter); - netxen_free_hw_resources(adapter); -err_out_free_sw: - netxen_free_sw_resources(adapter); - return err; -} - -static void -netxen_nic_detach(struct netxen_adapter *adapter) -{ - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return; - - netxen_remove_sysfs_entries(adapter); - - netxen_free_hw_resources(adapter); - netxen_release_rx_buffers(adapter); - netxen_nic_free_irq(adapter); - netxen_napi_del(adapter); - netxen_free_sw_resources(adapter); - - 
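A note on the attach/detach pair here: both are made idempotent by the NETXEN_ADAPTER_UP_MAGIC sentinel, which attach sets only after every resource has been acquired and detach clears as its final step. A condensed sketch of the idiom, with simplified names and a placeholder magic value rather than the driver's exact code:

        struct adapter { u32 is_up; /* ... */ };
        #define UP_MAGIC 0xdeadbeef     /* placeholder; any improbable value */

        static int attach(struct adapter *a)
        {
                if (a->is_up == UP_MAGIC)       /* already attached: no-op */
                        return 0;
                /* ... allocate sw/hw resources, request IRQs ... */
                a->is_up = UP_MAGIC;            /* mark only after full success */
                return 0;
        }

        static void detach(struct adapter *a)
        {
                if (a->is_up != UP_MAGIC)       /* never attached, or already down */
                        return;
                /* ... release everything in reverse order of attach ... */
                a->is_up = 0;
        }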
adapter->is_up = 0; -} - -int -netxen_nic_reset_context(struct netxen_adapter *adapter) -{ - int err = 0; - struct net_device *netdev = adapter->netdev; - - if (test_and_set_bit(__NX_RESETTING, &adapter->state)) - return -EBUSY; - - if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { - - netif_device_detach(netdev); - - if (netif_running(netdev)) - __netxen_nic_down(adapter, netdev); - - netxen_nic_detach(adapter); - - if (netif_running(netdev)) { - err = netxen_nic_attach(adapter); - if (!err) - err = __netxen_nic_up(adapter, netdev); - - if (err) - goto done; - } - - netif_device_attach(netdev); - } - -done: - clear_bit(__NX_RESETTING, &adapter->state); - return err; -} - -static int -netxen_setup_netdev(struct netxen_adapter *adapter, - struct net_device *netdev) -{ - int err = 0; - struct pci_dev *pdev = adapter->pdev; - - adapter->mc_enabled = 0; - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - adapter->max_mc_count = 38; - else - adapter->max_mc_count = 16; - - netdev->netdev_ops = &netxen_netdev_ops; - netdev->watchdog_timeo = 5*HZ; - - netxen_nic_change_mtu(netdev, netdev->mtu); - - SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); - - netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_RXCSUM; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; - - netdev->vlan_features |= netdev->hw_features; - - if (adapter->pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } - - if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) - netdev->hw_features |= NETIF_F_HW_VLAN_TX; - - if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) - netdev->hw_features |= NETIF_F_LRO; - - netdev->features |= netdev->hw_features; - - netdev->irq = adapter->msix_entries[0].vector; - - INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); - - if (netxen_read_mac_addr(adapter)) - dev_warn(&pdev->dev, "failed to read mac addr\n"); - - netif_carrier_off(netdev); - - err = register_netdev(netdev); - if (err) { - dev_err(&pdev->dev, "failed to register net device\n"); - return err; - } - - return 0; -} - -#ifdef CONFIG_PCIEAER -static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct pci_dev *root = pdev->bus->self; - u32 aer_pos; - - if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM && - adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) - return; - - if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT) - return; - - aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR); - if (!aer_pos) - return; - - pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff); -} -#endif - -static int __devinit -netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct net_device *netdev = NULL; - struct netxen_adapter *adapter = NULL; - int i = 0, err; - int pci_func_id = PCI_FUNC(pdev->devfn); - uint8_t revision_id; - u32 val; - - if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { - pr_warning("%s: chip revisions between 0x%x-0x%x " - "will not be enabled.\n", - module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); - return -ENODEV; - } - - if ((err = pci_enable_device(pdev))) - return err; - - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - err = -ENODEV; - goto err_out_disable_pdev; - } - - if ((err = pci_request_regions(pdev, netxen_nic_driver_name))) - goto err_out_disable_pdev; - - if (NX_IS_REVISION_P3(pdev->revision)) - pci_enable_pcie_error_reporting(pdev); - - 
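The probe path continuing below acquires resources strictly in order and unwinds them in reverse through the chain of err_out_* labels at the bottom of the function (six stages in this driver). A two-stage sketch of the idiom, assuming only <linux/pci.h>; the function and region names are illustrative, not the driver's:

        #include <linux/pci.h>

        static int probe_sketch(struct pci_dev *pdev)
        {
                int err;

                err = pci_enable_device(pdev);
                if (err)
                        return err;             /* nothing to undo yet */

                err = pci_request_regions(pdev, "sketch");
                if (err)
                        goto err_out_disable;   /* undo stage 1 only */

                /* ... later stages jump to deeper err_out_* labels ... */
                return 0;

        err_out_disable:
                pci_disable_device(pdev);
                return err;
        }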
pci_set_master(pdev); - - netdev = alloc_etherdev(sizeof(struct netxen_adapter)); - if(!netdev) { - dev_err(&pdev->dev, "failed to allocate net_device\n"); - err = -ENOMEM; - goto err_out_free_res; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - adapter->ahw.pci_func = pci_func_id; - - revision_id = pdev->revision; - adapter->ahw.revision_id = revision_id; - - rwlock_init(&adapter->ahw.crb_lock); - spin_lock_init(&adapter->ahw.mem_lock); - - spin_lock_init(&adapter->tx_clean_lock); - INIT_LIST_HEAD(&adapter->mac_list); - INIT_LIST_HEAD(&adapter->vlan_ip_list); - - err = netxen_setup_pci_map(adapter); - if (err) - goto err_out_free_netdev; - - /* This will be reset for mezz cards */ - adapter->portnum = pci_func_id; - - err = netxen_nic_get_board_info(adapter); - if (err) { - dev_err(&pdev->dev, "Error getting board config info.\n"); - goto err_out_iounmap; - } - -#ifdef CONFIG_PCIEAER - netxen_mask_aer_correctable(adapter); -#endif - - /* Mezz cards have PCI function 0,2,3 enabled */ - switch (adapter->ahw.board_type) { - case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: - case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: - if (pci_func_id >= 2) - adapter->portnum = pci_func_id - 2; - break; - default: - break; - } - - err = netxen_check_flash_fw_compatibility(adapter); - if (err) - goto err_out_iounmap; - - if (adapter->portnum == 0) { - val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); - if (val != 0xffffffff && val != 0) { - NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); - adapter->need_fw_reset = 1; - } - } - - err = netxen_start_firmware(adapter); - if (err) - goto err_out_decr_ref; - - /* - * See if the firmware gave us a virtual-physical port mapping. - */ - adapter->physical_port = adapter->portnum; - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - i = NXRD32(adapter, CRB_V2P(adapter->portnum)); - if (i != 0x55555555) - adapter->physical_port = i; - } - - netxen_nic_clear_stats(adapter); - - netxen_setup_intr(adapter); - - err = netxen_setup_netdev(adapter, netdev); - if (err) - goto err_out_disable_msi; - - pci_set_drvdata(pdev, adapter); - - netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); - - switch (adapter->ahw.port_type) { - case NETXEN_NIC_GBE: - dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", - adapter->netdev->name); - break; - case NETXEN_NIC_XGBE: - dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", - adapter->netdev->name); - break; - } - - netxen_create_diag_entries(adapter); - - return 0; - -err_out_disable_msi: - netxen_teardown_intr(adapter); - - netxen_free_dummy_dma(adapter); - -err_out_decr_ref: - nx_decr_dev_ref_cnt(adapter); - -err_out_iounmap: - netxen_cleanup_pci_map(adapter); - -err_out_free_netdev: - free_netdev(netdev); - -err_out_free_res: - pci_release_regions(pdev); - -err_out_disable_pdev: - pci_set_drvdata(pdev, NULL); - pci_disable_device(pdev); - return err; -} - -static void __devexit netxen_nic_remove(struct pci_dev *pdev) -{ - struct netxen_adapter *adapter; - struct net_device *netdev; - - adapter = pci_get_drvdata(pdev); - if (adapter == NULL) - return; - - netdev = adapter->netdev; - - netxen_cancel_fw_work(adapter); - - unregister_netdev(netdev); - - cancel_work_sync(&adapter->tx_timeout_task); - - netxen_free_vlan_ip_list(adapter); - netxen_nic_detach(adapter); - - nx_decr_dev_ref_cnt(adapter); - - if (adapter->portnum == 0) - netxen_free_dummy_dma(adapter); - - clear_bit(__NX_RESETTING, &adapter->state); - - netxen_teardown_intr(adapter); - - 
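Restating the CRB_V2P lookup from the probe path above, with comments added, since the 0x55555555 sentinel is easy to misread: the firmware may publish a virtual-to-physical port mapping, and the sentinel value means none was written, in which case the identity mapping stands.

        adapter->physical_port = adapter->portnum;      /* default: identity */
        if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                i = NXRD32(adapter, CRB_V2P(adapter->portnum));
                if (i != 0x55555555)                    /* sentinel: no mapping */
                        adapter->physical_port = i;
        }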
netxen_remove_diag_entries(adapter); - - netxen_cleanup_pci_map(adapter); - - netxen_release_firmware(adapter); - - if (NX_IS_REVISION_P3(pdev->revision)) - pci_disable_pcie_error_reporting(pdev); - - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - - free_netdev(netdev); -} - -static void netxen_nic_detach_func(struct netxen_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - netif_device_detach(netdev); - - netxen_cancel_fw_work(adapter); - - if (netif_running(netdev)) - netxen_nic_down(adapter, netdev); - - cancel_work_sync(&adapter->tx_timeout_task); - - netxen_nic_detach(adapter); - - if (adapter->portnum == 0) - netxen_free_dummy_dma(adapter); - - nx_decr_dev_ref_cnt(adapter); - - clear_bit(__NX_RESETTING, &adapter->state); -} - -static int netxen_nic_attach_func(struct pci_dev *pdev) -{ - struct netxen_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - int err; - - err = pci_enable_device(pdev); - if (err) - return err; - - pci_set_power_state(pdev, PCI_D0); - pci_set_master(pdev); - pci_restore_state(pdev); - - adapter->ahw.crb_win = -1; - adapter->ahw.ocm_win = -1; - - err = netxen_start_firmware(adapter); - if (err) { - dev_err(&pdev->dev, "failed to start firmware\n"); - return err; - } - - if (netif_running(netdev)) { - err = netxen_nic_attach(adapter); - if (err) - goto err_out; - - err = netxen_nic_up(adapter, netdev); - if (err) - goto err_out_detach; - - netxen_restore_indev_addr(netdev, NETDEV_UP); - } - - netif_device_attach(netdev); - netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); - return 0; - -err_out_detach: - netxen_nic_detach(adapter); -err_out: - nx_decr_dev_ref_cnt(adapter); - return err; -} - -static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct netxen_adapter *adapter = pci_get_drvdata(pdev); - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - if (nx_dev_request_aer(adapter)) - return PCI_ERS_RESULT_RECOVERED; - - netxen_nic_detach_func(adapter); - - pci_disable_device(pdev); - - return PCI_ERS_RESULT_NEED_RESET; -} - -static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev) -{ - int err = 0; - - err = netxen_nic_attach_func(pdev); - - return err ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; -} - -static void netxen_io_resume(struct pci_dev *pdev) -{ - pci_cleanup_aer_uncorrect_error_status(pdev); -} - -static void netxen_nic_shutdown(struct pci_dev *pdev) -{ - struct netxen_adapter *adapter = pci_get_drvdata(pdev); - - netxen_nic_detach_func(adapter); - - if (pci_save_state(pdev)) - return; - - if (netxen_nic_wol_supported(adapter)) { - pci_enable_wake(pdev, PCI_D3cold, 1); - pci_enable_wake(pdev, PCI_D3hot, 1); - } - - pci_disable_device(pdev); -} - -#ifdef CONFIG_PM -static int -netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct netxen_adapter *adapter = pci_get_drvdata(pdev); - int retval; - - netxen_nic_detach_func(adapter); - - retval = pci_save_state(pdev); - if (retval) - return retval; - - if (netxen_nic_wol_supported(adapter)) { - pci_enable_wake(pdev, PCI_D3cold, 1); - pci_enable_wake(pdev, PCI_D3hot, 1); - } - - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -static int -netxen_nic_resume(struct pci_dev *pdev) -{ - return netxen_nic_attach_func(pdev); -} -#endif - -static int netxen_nic_open(struct net_device *netdev) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - int err = 0; - - if (adapter->driver_mismatch) - return -EIO; - - err = netxen_nic_attach(adapter); - if (err) - return err; - - err = __netxen_nic_up(adapter, netdev); - if (err) - goto err_out; - - netif_start_queue(netdev); - - return 0; - -err_out: - netxen_nic_detach(adapter); - return err; -} - -/* - * netxen_nic_close - Disables a network interface entry point - */ -static int netxen_nic_close(struct net_device *netdev) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - - __netxen_nic_down(adapter, netdev); - return 0; -} - -static void -netxen_tso_check(struct net_device *netdev, - struct nx_host_tx_ring *tx_ring, - struct cmd_desc_type0 *first_desc, - struct sk_buff *skb) -{ - u8 opcode = TX_ETHER_PKT; - __be16 protocol = skb->protocol; - u16 flags = 0, vid = 0; - u32 producer; - int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; - struct cmd_desc_type0 *hwdesc; - struct vlan_ethhdr *vh; - - if (protocol == cpu_to_be16(ETH_P_8021Q)) { - - vh = (struct vlan_ethhdr *)skb->data; - protocol = vh->h_vlan_encapsulated_proto; - flags = FLAGS_VLAN_TAGGED; - - } else if (vlan_tx_tag_present(skb)) { - - flags = FLAGS_VLAN_OOB; - vid = vlan_tx_tag_get(skb); - netxen_set_tx_vlan_tci(first_desc, vid); - vlan_oob = 1; - } - - if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && - skb_shinfo(skb)->gso_size > 0) { - - hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - - first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); - first_desc->total_hdr_length = hdr_len; - if (vlan_oob) { - first_desc->total_hdr_length += VLAN_HLEN; - first_desc->tcp_hdr_offset = VLAN_HLEN; - first_desc->ip_hdr_offset = VLAN_HLEN; - /* Only in case of TSO on vlan device */ - flags |= FLAGS_VLAN_TAGGED; - } - - opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ? 
- TX_TCP_LSO6 : TX_TCP_LSO; - tso = 1; - - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - u8 l4proto; - - if (protocol == cpu_to_be16(ETH_P_IP)) { - l4proto = ip_hdr(skb)->protocol; - - if (l4proto == IPPROTO_TCP) - opcode = TX_TCP_PKT; - else if(l4proto == IPPROTO_UDP) - opcode = TX_UDP_PKT; - } else if (protocol == cpu_to_be16(ETH_P_IPV6)) { - l4proto = ipv6_hdr(skb)->nexthdr; - - if (l4proto == IPPROTO_TCP) - opcode = TX_TCPV6_PKT; - else if(l4proto == IPPROTO_UDP) - opcode = TX_UDPV6_PKT; - } - } - - first_desc->tcp_hdr_offset += skb_transport_offset(skb); - first_desc->ip_hdr_offset += skb_network_offset(skb); - netxen_set_tx_flags_opcode(first_desc, flags, opcode); - - if (!tso) - return; - - /* For LSO, we need to copy the MAC/IP/TCP headers into - * the descriptor ring - */ - producer = tx_ring->producer; - copied = 0; - offset = 2; - - if (vlan_oob) { - /* Create a TSO vlan header template for firmware */ - - hwdesc = &tx_ring->desc_head[producer]; - tx_ring->cmd_buf_arr[producer].skb = NULL; - - copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, - hdr_len + VLAN_HLEN); - - vh = (struct vlan_ethhdr *)((char *)hwdesc + 2); - skb_copy_from_linear_data(skb, vh, 12); - vh->h_vlan_proto = htons(ETH_P_8021Q); - vh->h_vlan_TCI = htons(vid); - skb_copy_from_linear_data_offset(skb, 12, - (char *)vh + 16, copy_len - 16); - - copied = copy_len - VLAN_HLEN; - offset = 0; - - producer = get_next_index(producer, tx_ring->num_desc); - } - - while (copied < hdr_len) { - - copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, - (hdr_len - copied)); - - hwdesc = &tx_ring->desc_head[producer]; - tx_ring->cmd_buf_arr[producer].skb = NULL; - - skb_copy_from_linear_data_offset(skb, copied, - (char *)hwdesc + offset, copy_len); - - copied += copy_len; - offset = 0; - - producer = get_next_index(producer, tx_ring->num_desc); - } - - tx_ring->producer = producer; - barrier(); -} - -static int -netxen_map_tx_skb(struct pci_dev *pdev, - struct sk_buff *skb, struct netxen_cmd_buffer *pbuf) -{ - struct netxen_skb_frag *nf; - struct skb_frag_struct *frag; - int i, nr_frags; - dma_addr_t map; - - nr_frags = skb_shinfo(skb)->nr_frags; - nf = &pbuf->frag_array[0]; - - map = pci_map_single(pdev, skb->data, - skb_headlen(skb), PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, map)) - goto out_err; - - nf->dma = map; - nf->length = skb_headlen(skb); - - for (i = 0; i < nr_frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; - nf = &pbuf->frag_array[i+1]; - - map = pci_map_page(pdev, frag->page, frag->page_offset, - frag->size, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, map)) - goto unwind; - - nf->dma = map; - nf->length = frag->size; - } - - return 0; - -unwind: - while (--i >= 0) { - nf = &pbuf->frag_array[i+1]; - pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); - } - - nf = &pbuf->frag_array[0]; - pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); - -out_err: - return -ENOMEM; -} - -static inline void -netxen_clear_cmddesc(u64 *desc) -{ - desc[0] = 0ULL; - desc[2] = 0ULL; -} - -static netdev_tx_t -netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - struct nx_host_tx_ring *tx_ring = adapter->tx_ring; - struct netxen_cmd_buffer *pbuf; - struct netxen_skb_frag *buffrag; - struct cmd_desc_type0 *hwdesc, *first_desc; - struct pci_dev *pdev; - int i, k; - int delta = 0; - struct skb_frag_struct *frag; - - u32 producer; - int frag_count, no_of_desc; - u32 num_txd = tx_ring->num_desc; 
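Before the fragment loop below, the descriptor arithmetic deserves a gloss: each cmd_desc_type0 carries up to four buffer addresses (addr_buffer1..4), so the ring cost is a ceiling divide by four, and a non-TSO skb with more than the 14 hardware-supported fragments is partially linearized first. Annotated:

        frag_count = skb_shinfo(skb)->nr_frags + 1;     /* +1 for the linear head */
        no_of_desc = (frag_count + 3) >> 2;             /* ceil(frag_count / 4) */

        /* e.g. frag_count 1..4 -> 1 descriptor, 5..8 -> 2, 9..12 -> 3 */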
- - frag_count = skb_shinfo(skb)->nr_frags + 1; - - /* 14 frags supported for normal packet and - * 32 frags supported for TSO packet - */ - if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) { - - for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) { - frag = &skb_shinfo(skb)->frags[i]; - delta += frag->size; - } - - if (!__pskb_pull_tail(skb, delta)) - goto drop_packet; - - frag_count = 1 + skb_shinfo(skb)->nr_frags; - } - /* 4 fragments per cmd des */ - no_of_desc = (frag_count + 3) >> 2; - - if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) { - netif_stop_queue(netdev); - smp_mb(); - if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) - netif_start_queue(netdev); - else - return NETDEV_TX_BUSY; - } - - producer = tx_ring->producer; - pbuf = &tx_ring->cmd_buf_arr[producer]; - - pdev = adapter->pdev; - - if (netxen_map_tx_skb(pdev, skb, pbuf)) - goto drop_packet; - - pbuf->skb = skb; - pbuf->frag_count = frag_count; - - first_desc = hwdesc = &tx_ring->desc_head[producer]; - netxen_clear_cmddesc((u64 *)hwdesc); - - netxen_set_tx_frags_len(first_desc, frag_count, skb->len); - netxen_set_tx_port(first_desc, adapter->portnum); - - for (i = 0; i < frag_count; i++) { - - k = i % 4; - - if ((k == 0) && (i > 0)) { - /* move to next desc.*/ - producer = get_next_index(producer, num_txd); - hwdesc = &tx_ring->desc_head[producer]; - netxen_clear_cmddesc((u64 *)hwdesc); - tx_ring->cmd_buf_arr[producer].skb = NULL; - } - - buffrag = &pbuf->frag_array[i]; - - hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); - switch (k) { - case 0: - hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); - break; - case 1: - hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); - break; - case 2: - hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma); - break; - case 3: - hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); - break; - } - } - - tx_ring->producer = get_next_index(producer, num_txd); - - netxen_tso_check(netdev, tx_ring, first_desc, skb); - - adapter->stats.txbytes += skb->len; - adapter->stats.xmitcalled++; - - netxen_nic_update_cmd_producer(adapter, tx_ring); - - return NETDEV_TX_OK; - -drop_packet: - adapter->stats.txdropped++; - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; -} - -static int netxen_nic_check_temp(struct netxen_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - uint32_t temp, temp_state, temp_val; - int rv = 0; - - temp = NXRD32(adapter, CRB_TEMP_STATE); - - temp_state = nx_get_temp_state(temp); - temp_val = nx_get_temp_val(temp); - - if (temp_state == NX_TEMP_PANIC) { - printk(KERN_ALERT - "%s: Device temperature %d degrees C exceeds" - " maximum allowed. Hardware has been shut down.\n", - netdev->name, temp_val); - rv = 1; - } else if (temp_state == NX_TEMP_WARN) { - if (adapter->temp == NX_TEMP_NORMAL) { - printk(KERN_ALERT - "%s: Device temperature %d degrees C " - "exceeds operating range." 
- " Immediate action needed.\n", - netdev->name, temp_val); - } - } else { - if (adapter->temp == NX_TEMP_WARN) { - printk(KERN_INFO - "%s: Device temperature is now %d degrees C" - " in normal range.\n", netdev->name, - temp_val); - } - } - adapter->temp = temp_state; - return rv; -} - -void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup) -{ - struct net_device *netdev = adapter->netdev; - - if (adapter->ahw.linkup && !linkup) { - printk(KERN_INFO "%s: %s NIC Link is down\n", - netxen_nic_driver_name, netdev->name); - adapter->ahw.linkup = 0; - if (netif_running(netdev)) { - netif_carrier_off(netdev); - netif_stop_queue(netdev); - } - adapter->link_changed = !adapter->has_link_events; - } else if (!adapter->ahw.linkup && linkup) { - printk(KERN_INFO "%s: %s NIC Link is up\n", - netxen_nic_driver_name, netdev->name); - adapter->ahw.linkup = 1; - if (netif_running(netdev)) { - netif_carrier_on(netdev); - netif_wake_queue(netdev); - } - adapter->link_changed = !adapter->has_link_events; - } -} - -static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) -{ - u32 val, port, linkup; - - port = adapter->physical_port; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - val = NXRD32(adapter, CRB_XG_STATE_P3); - val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); - linkup = (val == XG_LINK_UP_P3); - } else { - val = NXRD32(adapter, CRB_XG_STATE); - val = (val >> port*8) & 0xff; - linkup = (val == XG_LINK_UP); - } - - netxen_advert_link_change(adapter, linkup); -} - -static void netxen_tx_timeout(struct net_device *netdev) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - - if (test_bit(__NX_RESETTING, &adapter->state)) - return; - - dev_err(&netdev->dev, "transmit timeout, resetting.\n"); - schedule_work(&adapter->tx_timeout_task); -} - -static void netxen_tx_timeout_task(struct work_struct *work) -{ - struct netxen_adapter *adapter = - container_of(work, struct netxen_adapter, tx_timeout_task); - - if (!netif_running(adapter->netdev)) - return; - - if (test_and_set_bit(__NX_RESETTING, &adapter->state)) - return; - - if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS) - goto request_reset; - - rtnl_lock(); - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - /* try to scrub interrupt */ - netxen_napi_disable(adapter); - - netxen_napi_enable(adapter); - - netif_wake_queue(adapter->netdev); - - clear_bit(__NX_RESETTING, &adapter->state); - } else { - clear_bit(__NX_RESETTING, &adapter->state); - if (netxen_nic_reset_context(adapter)) { - rtnl_unlock(); - goto request_reset; - } - } - adapter->netdev->trans_start = jiffies; - rtnl_unlock(); - return; - -request_reset: - adapter->need_fw_reset = 1; - clear_bit(__NX_RESETTING, &adapter->state); -} - -static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - - stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; - stats->tx_packets = adapter->stats.xmitfinished; - stats->rx_bytes = adapter->stats.rxbytes; - stats->tx_bytes = adapter->stats.txbytes; - stats->rx_dropped = adapter->stats.rxdropped; - stats->tx_dropped = adapter->stats.txdropped; - - return stats; -} - -static irqreturn_t netxen_intr(int irq, void *data) -{ - struct nx_host_sds_ring *sds_ring = data; - struct netxen_adapter *adapter = sds_ring->adapter; - u32 status = 0; - - status = readl(adapter->isr_int_vec); - - if (!(status & adapter->int_vec_bit)) - return IRQ_NONE; - - if 
(NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - /* check interrupt state machine, to be sure */ - status = readl(adapter->crb_int_state_reg); - if (!ISR_LEGACY_INT_TRIGGERED(status)) - return IRQ_NONE; - - } else { - unsigned long our_int = 0; - - our_int = readl(adapter->crb_int_state_reg); - - /* not our interrupt */ - if (!test_and_clear_bit((7 + adapter->portnum), &our_int)) - return IRQ_NONE; - - /* claim interrupt */ - writel((our_int & 0xffffffff), adapter->crb_int_state_reg); - - /* clear interrupt */ - netxen_nic_disable_int(sds_ring); - } - - writel(0xffffffff, adapter->tgt_status_reg); - /* read twice to ensure write is flushed */ - readl(adapter->isr_int_vec); - readl(adapter->isr_int_vec); - - napi_schedule(&sds_ring->napi); - - return IRQ_HANDLED; -} - -static irqreturn_t netxen_msi_intr(int irq, void *data) -{ - struct nx_host_sds_ring *sds_ring = data; - struct netxen_adapter *adapter = sds_ring->adapter; - - /* clear interrupt */ - writel(0xffffffff, adapter->tgt_status_reg); - - napi_schedule(&sds_ring->napi); - return IRQ_HANDLED; -} - -static irqreturn_t netxen_msix_intr(int irq, void *data) -{ - struct nx_host_sds_ring *sds_ring = data; - - napi_schedule(&sds_ring->napi); - return IRQ_HANDLED; -} - -static int netxen_nic_poll(struct napi_struct *napi, int budget) -{ - struct nx_host_sds_ring *sds_ring = - container_of(napi, struct nx_host_sds_ring, napi); - - struct netxen_adapter *adapter = sds_ring->adapter; - - int tx_complete; - int work_done; - - tx_complete = netxen_process_cmd_ring(adapter); - - work_done = netxen_process_rcv_ring(sds_ring, budget); - - if ((work_done < budget) && tx_complete) { - napi_complete(&sds_ring->napi); - if (test_bit(__NX_DEV_UP, &adapter->state)) - netxen_nic_enable_int(sds_ring); - } - - return work_done; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void netxen_nic_poll_controller(struct net_device *netdev) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - struct netxen_adapter *adapter = netdev_priv(netdev); - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - disable_irq(adapter->irq); - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - netxen_intr(adapter->irq, sds_ring); - } - enable_irq(adapter->irq); -} -#endif - -static int -nx_incr_dev_ref_cnt(struct netxen_adapter *adapter) -{ - int count; - if (netxen_api_lock(adapter)) - return -EIO; - - count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); - - NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); - - netxen_api_unlock(adapter); - return count; -} - -static int -nx_decr_dev_ref_cnt(struct netxen_adapter *adapter) -{ - int count; - if (netxen_api_lock(adapter)) - return -EIO; - - count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); - WARN_ON(count == 0); - - NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count); - - if (count == 0) - NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD); - - netxen_api_unlock(adapter); - return count; -} - -static int -nx_dev_request_aer(struct netxen_adapter *adapter) -{ - u32 state; - int ret = -EINVAL; - - if (netxen_api_lock(adapter)) - return ret; - - state = NXRD32(adapter, NX_CRB_DEV_STATE); - - if (state == NX_DEV_NEED_AER) - ret = 0; - else if (state == NX_DEV_READY) { - NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER); - ret = 0; - } - - netxen_api_unlock(adapter); - return ret; -} - -static int -nx_dev_request_reset(struct netxen_adapter *adapter) -{ - u32 state; - int ret = -EINVAL; - - if (netxen_api_lock(adapter)) - return ret; - - state = NXRD32(adapter, NX_CRB_DEV_STATE); - - if (state 
== NX_DEV_NEED_RESET) - ret = 0; - else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) { - NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET); - ret = 0; - } - - netxen_api_unlock(adapter); - - return ret; -} - -static int -netxen_can_start_firmware(struct netxen_adapter *adapter) -{ - int count; - int can_start = 0; - - if (netxen_api_lock(adapter)) - return 0; - - count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); - - if ((count < 0) || (count >= NX_MAX_PCI_FUNC)) - count = 0; - - if (count == 0) { - can_start = 1; - NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING); - } - - NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); - - netxen_api_unlock(adapter); - - return can_start; -} - -static void -netxen_schedule_work(struct netxen_adapter *adapter, - work_func_t func, int delay) -{ - INIT_DELAYED_WORK(&adapter->fw_work, func); - schedule_delayed_work(&adapter->fw_work, delay); -} - -static void -netxen_cancel_fw_work(struct netxen_adapter *adapter) -{ - while (test_and_set_bit(__NX_RESETTING, &adapter->state)) - msleep(10); - - cancel_delayed_work_sync(&adapter->fw_work); -} - -static void -netxen_attach_work(struct work_struct *work) -{ - struct netxen_adapter *adapter = container_of(work, - struct netxen_adapter, fw_work.work); - struct net_device *netdev = adapter->netdev; - int err = 0; - - if (netif_running(netdev)) { - err = netxen_nic_attach(adapter); - if (err) - goto done; - - err = netxen_nic_up(adapter, netdev); - if (err) { - netxen_nic_detach(adapter); - goto done; - } - - netxen_restore_indev_addr(netdev, NETDEV_UP); - } - - netif_device_attach(netdev); - -done: - adapter->fw_fail_cnt = 0; - clear_bit(__NX_RESETTING, &adapter->state); - netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); -} - -static void -netxen_fwinit_work(struct work_struct *work) -{ - struct netxen_adapter *adapter = container_of(work, - struct netxen_adapter, fw_work.work); - int dev_state; - - dev_state = NXRD32(adapter, NX_CRB_DEV_STATE); - - switch (dev_state) { - case NX_DEV_COLD: - case NX_DEV_READY: - if (!netxen_start_firmware(adapter)) { - netxen_schedule_work(adapter, netxen_attach_work, 0); - return; - } - break; - - case NX_DEV_NEED_RESET: - case NX_DEV_INITALIZING: - if (++adapter->fw_wait_cnt < FW_POLL_THRESH) { - netxen_schedule_work(adapter, - netxen_fwinit_work, 2 * FW_POLL_DELAY); - return; - } - - case NX_DEV_FAILED: - default: - nx_incr_dev_ref_cnt(adapter); - break; - } - - clear_bit(__NX_RESETTING, &adapter->state); -} - -static void -netxen_detach_work(struct work_struct *work) -{ - struct netxen_adapter *adapter = container_of(work, - struct netxen_adapter, fw_work.work); - struct net_device *netdev = adapter->netdev; - int ref_cnt, delay; - u32 status; - - netif_device_detach(netdev); - - netxen_nic_down(adapter, netdev); - - rtnl_lock(); - netxen_nic_detach(adapter); - rtnl_unlock(); - - status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); - - if (status & NX_RCODE_FATAL_ERROR) - goto err_ret; - - if (adapter->temp == NX_TEMP_PANIC) - goto err_ret; - - ref_cnt = nx_decr_dev_ref_cnt(adapter); - - if (ref_cnt == -EIO) - goto err_ret; - - delay = (ref_cnt == 0) ? 
0 : (2 * FW_POLL_DELAY); - - adapter->fw_wait_cnt = 0; - netxen_schedule_work(adapter, netxen_fwinit_work, delay); - - return; - -err_ret: - clear_bit(__NX_RESETTING, &adapter->state); -} - -static int -netxen_check_health(struct netxen_adapter *adapter) -{ - u32 state, heartbit; - struct net_device *netdev = adapter->netdev; - - state = NXRD32(adapter, NX_CRB_DEV_STATE); - if (state == NX_DEV_NEED_AER) - return 0; - - if (netxen_nic_check_temp(adapter)) - goto detach; - - if (adapter->need_fw_reset) { - if (nx_dev_request_reset(adapter)) - return 0; - goto detach; - } - - /* NX_DEV_NEED_RESET, this state can be marked in two cases - * 1. Tx timeout 2. Fw hang - * Send request to destroy context in case of tx timeout only - * and doesn't required in case of Fw hang - */ - if (state == NX_DEV_NEED_RESET) { - adapter->need_fw_reset = 1; - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - goto detach; - } - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return 0; - - heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); - if (heartbit != adapter->heartbit) { - adapter->heartbit = heartbit; - adapter->fw_fail_cnt = 0; - if (adapter->need_fw_reset) - goto detach; - return 0; - } - - if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) - return 0; - - if (nx_dev_request_reset(adapter)) - return 0; - - clear_bit(__NX_FW_ATTACHED, &adapter->state); - - dev_info(&netdev->dev, "firmware hang detected\n"); - -detach: - if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && - !test_and_set_bit(__NX_RESETTING, &adapter->state)) - netxen_schedule_work(adapter, netxen_detach_work, 0); - return 1; -} - -static void -netxen_fw_poll_work(struct work_struct *work) -{ - struct netxen_adapter *adapter = container_of(work, - struct netxen_adapter, fw_work.work); - - if (test_bit(__NX_RESETTING, &adapter->state)) - goto reschedule; - - if (test_bit(__NX_DEV_UP, &adapter->state)) { - if (!adapter->has_link_events) { - - netxen_nic_handle_phy_intr(adapter); - - if (adapter->link_changed) - netxen_nic_set_link_parameters(adapter); - } - } - - if (netxen_check_health(adapter)) - return; - -reschedule: - netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); -} - -static ssize_t -netxen_store_bridged_mode(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - struct net_device *net = to_net_dev(dev); - struct netxen_adapter *adapter = netdev_priv(net); - unsigned long new; - int ret = -EINVAL; - - if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG)) - goto err_out; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - goto err_out; - - if (strict_strtoul(buf, 2, &new)) - goto err_out; - - if (!netxen_config_bridged_mode(adapter, !!new)) - ret = len; - -err_out: - return ret; -} - -static ssize_t -netxen_show_bridged_mode(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct net_device *net = to_net_dev(dev); - struct netxen_adapter *adapter; - int bridged_mode = 0; - - adapter = netdev_priv(net); - - if (adapter->capabilities & NX_FW_CAPABILITY_BDG) - bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED); - - return sprintf(buf, "%d\n", bridged_mode); -} - -static struct device_attribute dev_attr_bridged_mode = { - .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, - .show = netxen_show_bridged_mode, - .store = netxen_store_bridged_mode, -}; - -static ssize_t -netxen_store_diag_mode(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - struct netxen_adapter *adapter = dev_get_drvdata(dev); - unsigned long 
new; - - if (strict_strtoul(buf, 2, &new)) - return -EINVAL; - - if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) - adapter->flags ^= NETXEN_NIC_DIAG_ENABLED; - - return len; -} - -static ssize_t -netxen_show_diag_mode(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct netxen_adapter *adapter = dev_get_drvdata(dev); - - return sprintf(buf, "%d\n", - !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)); -} - -static struct device_attribute dev_attr_diag_mode = { - .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, - .show = netxen_show_diag_mode, - .store = netxen_store_diag_mode, -}; - -static int -netxen_sysfs_validate_crb(struct netxen_adapter *adapter, - loff_t offset, size_t size) -{ - size_t crb_size = 4; - - if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) - return -EIO; - - if (offset < NETXEN_PCI_CRBSPACE) { - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return -EINVAL; - - if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, - NETXEN_PCI_CAMQM_2M_END)) - crb_size = 8; - else - return -EINVAL; - } - - if ((size != crb_size) || (offset & (crb_size-1))) - return -EINVAL; - - return 0; -} - -static ssize_t -netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct netxen_adapter *adapter = dev_get_drvdata(dev); - u32 data; - u64 qmdata; - int ret; - - ret = netxen_sysfs_validate_crb(adapter, offset, size); - if (ret != 0) - return ret; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && - ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, - NETXEN_PCI_CAMQM_2M_END)) { - netxen_pci_camqm_read_2M(adapter, offset, &qmdata); - memcpy(buf, &qmdata, size); - } else { - data = NXRD32(adapter, offset); - memcpy(buf, &data, size); - } - - return size; -} - -static ssize_t -netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct netxen_adapter *adapter = dev_get_drvdata(dev); - u32 data; - u64 qmdata; - int ret; - - ret = netxen_sysfs_validate_crb(adapter, offset, size); - if (ret != 0) - return ret; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && - ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, - NETXEN_PCI_CAMQM_2M_END)) { - memcpy(&qmdata, buf, size); - netxen_pci_camqm_write_2M(adapter, offset, qmdata); - } else { - memcpy(&data, buf, size); - NXWR32(adapter, offset, data); - } - - return size; -} - -static int -netxen_sysfs_validate_mem(struct netxen_adapter *adapter, - loff_t offset, size_t size) -{ - if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) - return -EIO; - - if ((size != 8) || (offset & 0x7)) - return -EIO; - - return 0; -} - -static ssize_t -netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct netxen_adapter *adapter = dev_get_drvdata(dev); - u64 data; - int ret; - - ret = netxen_sysfs_validate_mem(adapter, offset, size); - if (ret != 0) - return ret; - - if (adapter->pci_mem_read(adapter, offset, &data)) - return -EIO; - - memcpy(buf, &data, size); - - return size; -} - -static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct 
netxen_adapter *adapter = dev_get_drvdata(dev); - u64 data; - int ret; - - ret = netxen_sysfs_validate_mem(adapter, offset, size); - if (ret != 0) - return ret; - - memcpy(&data, buf, size); - - if (adapter->pci_mem_write(adapter, offset, data)) - return -EIO; - - return size; -} - - -static struct bin_attribute bin_attr_crb = { - .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = netxen_sysfs_read_crb, - .write = netxen_sysfs_write_crb, -}; - -static struct bin_attribute bin_attr_mem = { - .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = netxen_sysfs_read_mem, - .write = netxen_sysfs_write_mem, -}; - - -static void -netxen_create_sysfs_entries(struct netxen_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct device *dev = &netdev->dev; - - if (adapter->capabilities & NX_FW_CAPABILITY_BDG) { - /* bridged_mode control */ - if (device_create_file(dev, &dev_attr_bridged_mode)) { - dev_warn(&netdev->dev, - "failed to create bridged_mode sysfs entry\n"); - } - } -} - -static void -netxen_remove_sysfs_entries(struct netxen_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct device *dev = &netdev->dev; - - if (adapter->capabilities & NX_FW_CAPABILITY_BDG) - device_remove_file(dev, &dev_attr_bridged_mode); -} - -static void -netxen_create_diag_entries(struct netxen_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct device *dev; - - dev = &pdev->dev; - if (device_create_file(dev, &dev_attr_diag_mode)) - dev_info(dev, "failed to create diag_mode sysfs entry\n"); - if (device_create_bin_file(dev, &bin_attr_crb)) - dev_info(dev, "failed to create crb sysfs entry\n"); - if (device_create_bin_file(dev, &bin_attr_mem)) - dev_info(dev, "failed to create mem sysfs entry\n"); -} - - -static void -netxen_remove_diag_entries(struct netxen_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct device *dev = &pdev->dev; - - device_remove_file(dev, &dev_attr_diag_mode); - device_remove_bin_file(dev, &bin_attr_crb); - device_remove_bin_file(dev, &bin_attr_mem); -} - -#ifdef CONFIG_INET - -#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops) - -static int -netxen_destip_supported(struct netxen_adapter *adapter) -{ - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return 0; - - if (adapter->ahw.cut_through) - return 0; - - return 1; -} - -static void -netxen_free_vlan_ip_list(struct netxen_adapter *adapter) -{ - struct nx_vlan_ip_list *cur; - struct list_head *head = &adapter->vlan_ip_list; - - while (!list_empty(head)) { - cur = list_entry(head->next, struct nx_vlan_ip_list, list); - netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); - list_del(&cur->list); - kfree(cur); - } - -} -static void -netxen_list_config_vlan_ip(struct netxen_adapter *adapter, - struct in_ifaddr *ifa, unsigned long event) -{ - struct net_device *dev; - struct nx_vlan_ip_list *cur, *tmp_cur; - struct list_head *head; - - dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; - - if (dev == NULL) - return; - - if (!is_vlan_dev(dev)) - return; - - switch (event) { - case NX_IP_UP: - list_for_each(head, &adapter->vlan_ip_list) { - cur = list_entry(head, struct nx_vlan_ip_list, list); - - if (cur->ip_addr == ifa->ifa_address) - return; - } - - cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC); - if (cur == NULL) { - printk(KERN_ERR "%s: failed to add vlan ip to list\n", - adapter->netdev->name); - return; - } - - cur->ip_addr = ifa->ifa_address; - list_add_tail(&cur->list, &adapter->vlan_ip_list); - break; - case NX_IP_DOWN: - list_for_each_entry_safe(cur, tmp_cur, - &adapter->vlan_ip_list, list) { - if (cur->ip_addr == ifa->ifa_address) { - list_del(&cur->list); - kfree(cur); - break; - } - } - } -} -static void -netxen_config_indev_addr(struct netxen_adapter *adapter, - struct net_device *dev, unsigned long event) -{ - struct in_device *indev; - - if (!netxen_destip_supported(adapter)) - return; - - indev = in_dev_get(dev); - if (!indev) - return; - - for_ifa(indev) { - switch (event) { - case NETDEV_UP: - netxen_config_ipaddr(adapter, - ifa->ifa_address, NX_IP_UP); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); - break; - case NETDEV_DOWN: - netxen_config_ipaddr(adapter, - ifa->ifa_address, NX_IP_DOWN); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN); - break; - default: - break; - } - } endfor_ifa(indev); - - in_dev_put(indev); -} - -static void -netxen_restore_indev_addr(struct net_device *netdev, unsigned long event) - -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - struct nx_vlan_ip_list *pos, *tmp_pos; - unsigned long ip_event; - - ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; - netxen_config_indev_addr(adapter, netdev, event); - - list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) { - netxen_config_ipaddr(adapter, pos->ip_addr, ip_event); - } -} - -static int netxen_netdev_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct netxen_adapter *adapter; - struct net_device *dev = (struct net_device *)ptr; - struct net_device *orig_dev = dev; - -recheck: - if (dev == NULL) - goto done; - - if (dev->priv_flags & IFF_802_1Q_VLAN) { - dev = vlan_dev_real_dev(dev); - goto recheck; - } - - if (!is_netxen_netdev(dev)) - goto done; - - adapter = netdev_priv(dev); - - if (!adapter) - goto done; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - goto done; - - netxen_config_indev_addr(adapter, orig_dev, event); -done: - return NOTIFY_DONE; -} - -static int -netxen_inetaddr_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct netxen_adapter *adapter; - struct net_device *dev; - - struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; - - dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; - -recheck: - if (dev == NULL) - goto done; - - if (dev->priv_flags & IFF_802_1Q_VLAN) { - dev = vlan_dev_real_dev(dev); - goto recheck; - } - - if (!is_netxen_netdev(dev)) - goto done; - - adapter = netdev_priv(dev); - - if (!adapter || !netxen_destip_supported(adapter)) - goto done; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - goto done; - - switch (event) { - case NETDEV_UP: - netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); - break; - case NETDEV_DOWN: - netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN); - break; - default: - break; - } - -done: - return NOTIFY_DONE; -} - -static struct notifier_block netxen_netdev_cb = { - .notifier_call = netxen_netdev_event, -}; - -static struct notifier_block netxen_inetaddr_cb = { - .notifier_call = netxen_inetaddr_event, -}; -#else -static void -netxen_restore_indev_addr(struct net_device *dev, unsigned long event) -{ } -static void -netxen_free_vlan_ip_list(struct netxen_adapter *adapter) -{ } -#endif - -static struct pci_error_handlers netxen_err_handler = { - .error_detected = netxen_io_error_detected, - .slot_reset = netxen_io_slot_reset, - .resume = netxen_io_resume, -}; - -static struct pci_driver netxen_driver = { - .name = netxen_nic_driver_name, - .id_table = netxen_pci_tbl, - .probe = netxen_nic_probe, - .remove = __devexit_p(netxen_nic_remove), -#ifdef CONFIG_PM - .suspend = netxen_nic_suspend, - .resume = netxen_nic_resume, -#endif - .shutdown = netxen_nic_shutdown, - .err_handler = &netxen_err_handler -}; - -static int __init netxen_init_module(void) -{ - printk(KERN_INFO "%s\n", netxen_nic_driver_string); - -#ifdef CONFIG_INET - register_netdevice_notifier(&netxen_netdev_cb); - register_inetaddr_notifier(&netxen_inetaddr_cb); -#endif - return pci_register_driver(&netxen_driver); -} - -module_init(netxen_init_module); - -static void __exit netxen_exit_module(void) -{ - pci_unregister_driver(&netxen_driver); - -#ifdef CONFIG_INET - unregister_inetaddr_notifier(&netxen_inetaddr_cb); - unregister_netdevice_notifier(&netxen_netdev_cb); -#endif -} - -module_exit(netxen_exit_module); diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c deleted file mode 100644 index ccde8061afa8..000000000000 --- a/drivers/net/qla3xxx.c +++ /dev/null @@ -1,3970 +0,0 @@ -/* - * QLogic QLA3xxx NIC HBA Driver - * Copyright (c) 2003-2006 QLogic Corporation - * - * See LICENSE.qla3xxx for copyright and licensing details. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "qla3xxx.h" - -#define DRV_NAME "qla3xxx" -#define DRV_STRING "QLogic ISP3XXX Network Driver" -#define DRV_VERSION "v2.03.00-k5" - -static const char ql3xxx_driver_name[] = DRV_NAME; -static const char ql3xxx_driver_version[] = DRV_VERSION; - -#define TIMED_OUT_MSG \ -"Timed out waiting for management port to get free before issuing command\n" - -MODULE_AUTHOR("QLogic Corporation"); -MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -static const u32 default_msg - = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK - | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; - -static int debug = -1; /* defaults above */ -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); - -static int msi; -module_param(msi, int, 0); -MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); - -static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = { - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, - /* required last entry */ - {0,} -}; - -MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); - -/* - * These are the known PHY's which are used - */ -enum PHY_DEVICE_TYPE { - PHY_TYPE_UNKNOWN = 0, - PHY_VITESSE_VSC8211, - PHY_AGERE_ET1011C, - MAX_PHY_DEV_TYPES -}; - -struct PHY_DEVICE_INFO { - const enum PHY_DEVICE_TYPE phyDevice; - const u32 phyIdOUI; - const u16 phyIdModel; - const char *name; -}; - -static const struct PHY_DEVICE_INFO PHY_DEVICES[] = { - {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, - {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, - {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, -}; - - -/* - * Caller must take hw_lock. - */ -static int ql_sem_spinlock(struct ql3_adapter *qdev, - u32 sem_mask, u32 sem_bits) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - unsigned int seconds = 3; - - do { - writel((sem_mask | sem_bits), - &port_regs->CommonRegs.semaphoreReg); - value = readl(&port_regs->CommonRegs.semaphoreReg); - if ((value & (sem_mask >> 16)) == sem_bits) - return 0; - ssleep(1); - } while (--seconds); - return -1; -} - -static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); - readl(&port_regs->CommonRegs.semaphoreReg); -} - -static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); - value = readl(&port_regs->CommonRegs.semaphoreReg); - return ((value & (sem_mask >> 16)) == sem_bits); -} - -/* - * Caller holds hw_lock. 
- */ -static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) -{ - int i = 0; - - while (i < 10) { - if (i) - ssleep(1); - - if (ql_sem_lock(qdev, - QL_DRVR_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) - * 2) << 1)) { - netdev_printk(KERN_DEBUG, qdev->ndev, - "driver lock acquired\n"); - return 1; - } - } - - netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); - return 0; -} - -static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - writel(((ISP_CONTROL_NP_MASK << 16) | page), - &port_regs->CommonRegs.ispControlStatus); - readl(&port_regs->CommonRegs.ispControlStatus); - qdev->current_page = page; -} - -static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) -{ - u32 value; - unsigned long hw_flags; - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - value = readl(reg); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - return value; -} - -static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) -{ - return readl(reg); -} - -static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) -{ - u32 value; - unsigned long hw_flags; - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - - if (qdev->current_page != 0) - ql_set_register_page(qdev, 0); - value = readl(reg); - - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return value; -} - -static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) -{ - if (qdev->current_page != 0) - ql_set_register_page(qdev, 0); - return readl(reg); -} - -static void ql_write_common_reg_l(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) -{ - unsigned long hw_flags; - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - writel(value, reg); - readl(reg); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); -} - -static void ql_write_common_reg(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) -{ - writel(value, reg); - readl(reg); -} - -static void ql_write_nvram_reg(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) -{ - writel(value, reg); - readl(reg); - udelay(1); -} - -static void ql_write_page0_reg(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) -{ - if (qdev->current_page != 0) - ql_set_register_page(qdev, 0); - writel(value, reg); - readl(reg); -} - -/* - * Caller holds hw_lock. Only called during init. - */ -static void ql_write_page1_reg(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) -{ - if (qdev->current_page != 1) - ql_set_register_page(qdev, 1); - writel(value, reg); - readl(reg); -} - -/* - * Caller holds hw_lock. Only called during init. 
- */ -static void ql_write_page2_reg(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) -{ - if (qdev->current_page != 2) - ql_set_register_page(qdev, 2); - writel(value, reg); - readl(reg); -} - -static void ql_disable_interrupts(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, - (ISP_IMR_ENABLE_INT << 16)); - -} - -static void ql_enable_interrupts(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, - ((0xff << 16) | ISP_IMR_ENABLE_INT)); - -} - -static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, - struct ql_rcv_buf_cb *lrg_buf_cb) -{ - dma_addr_t map; - int err; - lrg_buf_cb->next = NULL; - - if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ - qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb; - } else { - qdev->lrg_buf_free_tail->next = lrg_buf_cb; - qdev->lrg_buf_free_tail = lrg_buf_cb; - } - - if (!lrg_buf_cb->skb) { - lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, - qdev->lrg_buffer_len); - if (unlikely(!lrg_buf_cb->skb)) { - netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n"); - qdev->lrg_buf_skb_check++; - } else { - /* - * We save some space to copy the ethhdr from first - * buffer - */ - skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); - map = pci_map_single(qdev->pdev, - lrg_buf_cb->skb->data, - qdev->lrg_buffer_len - - QL_HEADER_SPACE, - PCI_DMA_FROMDEVICE); - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netdev_err(qdev->ndev, - "PCI mapping failed with error: %d\n", - err); - dev_kfree_skb(lrg_buf_cb->skb); - lrg_buf_cb->skb = NULL; - - qdev->lrg_buf_skb_check++; - return; - } - - lrg_buf_cb->buf_phy_addr_low = - cpu_to_le32(LS_64BITS(map)); - lrg_buf_cb->buf_phy_addr_high = - cpu_to_le32(MS_64BITS(map)); - dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); - dma_unmap_len_set(lrg_buf_cb, maplen, - qdev->lrg_buffer_len - - QL_HEADER_SPACE); - } - } - - qdev->lrg_buf_free_count++; -} - -static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter - *qdev) -{ - struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; - - if (lrg_buf_cb != NULL) { - qdev->lrg_buf_free_head = lrg_buf_cb->next; - if (qdev->lrg_buf_free_head == NULL) - qdev->lrg_buf_free_tail = NULL; - qdev->lrg_buf_free_count--; - } - - return lrg_buf_cb; -} - -static u32 addrBits = EEPROM_NO_ADDR_BITS; -static u32 dataBits = EEPROM_NO_DATA_BITS; - -static void fm93c56a_deselect(struct ql3_adapter *qdev); -static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, - unsigned short *value); - -/* - * Caller holds hw_lock. - */ -static void fm93c56a_select(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; - - qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; - ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); - ql_write_nvram_reg(qdev, spir, - ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); -} - -/* - * Caller holds hw_lock. 
- */ -static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) -{ - int i; - u32 mask; - u32 dataBit; - u32 previousBit; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; - - /* Clock in a zero, then do the start bit */ - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_DO_1)); - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); - - mask = 1 << (FM93C56A_CMD_BITS - 1); - /* Force the previous data bit to be different */ - previousBit = 0xffff; - for (i = 0; i < FM93C56A_CMD_BITS; i++) { - dataBit = (cmd & mask) - ? AUBURN_EEPROM_DO_1 - : AUBURN_EEPROM_DO_0; - if (previousBit != dataBit) { - /* If the bit changed, change the DO state to match */ - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | - qdev->eeprom_cmd_data | dataBit)); - previousBit = dataBit; - } - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - dataBit | AUBURN_EEPROM_CLK_RISE)); - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - dataBit | AUBURN_EEPROM_CLK_FALL)); - cmd = cmd << 1; - } - - mask = 1 << (addrBits - 1); - /* Force the previous data bit to be different */ - previousBit = 0xffff; - for (i = 0; i < addrBits; i++) { - dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 - : AUBURN_EEPROM_DO_0; - if (previousBit != dataBit) { - /* - * If the bit changed, then change the DO state to - * match - */ - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | - qdev->eeprom_cmd_data | dataBit)); - previousBit = dataBit; - } - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - dataBit | AUBURN_EEPROM_CLK_RISE)); - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - dataBit | AUBURN_EEPROM_CLK_FALL)); - eepromAddr = eepromAddr << 1; - } -} - -/* - * Caller holds hw_lock. - */ -static void fm93c56a_deselect(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; - - qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; - ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); -} - -/* - * Caller holds hw_lock. - */ -static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) -{ - int i; - u32 data = 0; - u32 dataBit; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; - - /* Read the data bits */ - /* The first bit is a dummy. Clock right over it. */ - for (i = 0; i < dataBits; i++) { - ql_write_nvram_reg(qdev, spir, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_CLK_RISE); - ql_write_nvram_reg(qdev, spir, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_CLK_FALL); - dataBit = (ql_read_common_reg(qdev, spir) & - AUBURN_EEPROM_DI_1) ? 1 : 0; - data = (data << 1) | dataBit; - } - *value = (u16)data; -} - -/* - * Caller holds hw_lock. 
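fm93c56a_cmd() above bit-bangs the EEPROM's serial protocol: each bit is presented on the data-out line and latched by a rising clock edge, and the driver only rewrites the data line when the bit value actually changes. The core MSB-first clocking loop, sketched with a hypothetical write_pins() callback for the two lines:

/* hypothetical callback: drives the EEPROM data-out and clock lines */
typedef void (*write_pins_t)(void *ctx, int data_bit, int clk_high);

static void clock_out_msb_first(void *ctx, write_pins_t write_pins,
				u32 value, int nbits)
{
	u32 mask = 1U << (nbits - 1);
	int i;

	for (i = 0; i < nbits; i++) {
		int bit = !!(value & mask);

		write_pins(ctx, bit, 0);	/* present the bit, clock low */
		write_pins(ctx, bit, 1);	/* rising edge: EEPROM samples */
		write_pins(ctx, bit, 0);	/* falling edge: ready for next */
		value <<= 1;			/* shift next bit under mask */
	}
}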
- */ -static void eeprom_readword(struct ql3_adapter *qdev, - u32 eepromAddr, unsigned short *value) -{ - fm93c56a_select(qdev); - fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); - fm93c56a_datain(qdev, value); - fm93c56a_deselect(qdev); -} - -static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) -{ - __le16 *p = (__le16 *)ndev->dev_addr; - p[0] = cpu_to_le16(addr[0]); - p[1] = cpu_to_le16(addr[1]); - p[2] = cpu_to_le16(addr[2]); -} - -static int ql_get_nvram_params(struct ql3_adapter *qdev) -{ - u16 *pEEPROMData; - u16 checksum = 0; - u32 index; - unsigned long hw_flags; - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - - pEEPROMData = (u16 *)&qdev->nvram_data; - qdev->eeprom_cmd_data = 0; - if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 10)) { - pr_err("%s: Failed ql_sem_spinlock()\n", __func__); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return -1; - } - - for (index = 0; index < EEPROM_SIZE; index++) { - eeprom_readword(qdev, index, pEEPROMData); - checksum += *pEEPROMData; - pEEPROMData++; - } - ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); - - if (checksum != 0) { - netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", - checksum); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return -1; - } - - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return checksum; -} - -static const u32 PHYAddr[2] = { - PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS -}; - -static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 temp; - int count = 1000; - - while (count) { - temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); - if (!(temp & MAC_MII_STATUS_BSY)) - return 0; - udelay(10); - count--; - } - return -1; -} - -static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 scanControl; - - if (qdev->numPorts > 1) { - /* Auto scan will cycle through multiple ports */ - scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; - } else { - scanControl = MAC_MII_CONTROL_SC; - } - - /* - * Scan register 1 of PHY/PETBI, - * Set up to scan both devices - * The autoscan starts from the first register, completes - * the last one before rolling over to the first - */ - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - PHYAddr[0] | MII_SCAN_REGISTER); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - (scanControl) | - ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); -} - -static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) -{ - u8 ret; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - /* See if scan mode is enabled before we turn it off */ - if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & - (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { - /* Scan is enabled */ - ret = 1; - } else { - /* Scan is disabled */ - ret = 0; - } - - /* - * When disabling scan mode you must first change the MII register - * address - */ - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - PHYAddr[0] | MII_SCAN_REGISTER); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | - MAC_MII_CONTROL_RC) << 16)); - - return ret; -} - -static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, - u16 regAddr, u16 value, u32 phyAddr) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u8 scanWasEnabled; - - 
scanWasEnabled = ql_mii_disable_scan_mode(qdev); - - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - phyAddr | regAddr); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); - - /* Wait for write to complete 9/10/04 SJP */ - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - if (scanWasEnabled) - ql_mii_enable_scan_mode(qdev); - - return 0; -} - -static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, - u16 *value, u32 phyAddr) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u8 scanWasEnabled; - u32 temp; - - scanWasEnabled = ql_mii_disable_scan_mode(qdev); - - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - phyAddr | regAddr); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - (MAC_MII_CONTROL_RC << 16)); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); - - /* Wait for the read to complete */ - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); - *value = (u16) temp; - - if (scanWasEnabled) - ql_mii_enable_scan_mode(qdev); - - return 0; -} - -static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - ql_mii_disable_scan_mode(qdev); - - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - qdev->PHYAddr | regAddr); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); - - /* Wait for write to complete. 
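For the NVRAM validation in ql_get_nvram_params() above: every 16-bit word of the EEPROM, including the stored checksum word, is summed, and the image is valid only if the total wraps to zero. The check in isolation:

static bool nvram_image_valid(const u16 *words, size_t nwords)
{
	u16 sum = 0;
	size_t i;

	/* u16 addition wraps mod 2^16; the stored checksum word is
	 * chosen so that it cancels the sum of all the other words */
	for (i = 0; i < nwords; i++)
		sum += words[i];

	return sum == 0;
}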
*/
-	if (ql_wait_for_mii_ready(qdev)) {
-		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
-		return -1;
-	}
-
-	ql_mii_enable_scan_mode(qdev);
-
-	return 0;
-}
-
-static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
-{
-	u32 temp;
-	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
-
-	ql_mii_disable_scan_mode(qdev);
-
-	if (ql_wait_for_mii_ready(qdev)) {
-		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
-		return -1;
-	}
-
-	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
-			   qdev->PHYAddr | regAddr);
-
-	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
-			   (MAC_MII_CONTROL_RC << 16));
-
-	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
-			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
-
-	/* Wait for the read to complete */
-	if (ql_wait_for_mii_ready(qdev)) {
-		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
-		return -1;
-	}
-
-	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
-	*value = (u16) temp;
-
-	ql_mii_enable_scan_mode(qdev);
-
-	return 0;
-}
-
-static void ql_petbi_reset(struct ql3_adapter *qdev)
-{
-	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
-}
-
-static void ql_petbi_start_neg(struct ql3_adapter *qdev)
-{
-	u16 reg;
-
-	/* Enable Auto-negotiation sense */
-	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
-	reg |= PETBI_TBI_AUTO_SENSE;
-	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
-
-	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
-			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
-
-	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
-			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
-			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
-
-}
-
-static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
-{
-	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
-			    PHYAddr[qdev->mac_index]);
-}
-
-static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
-{
-	u16 reg;
-
-	/* Enable Auto-negotiation sense */
-	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
-			   PHYAddr[qdev->mac_index]);
-	reg |= PETBI_TBI_AUTO_SENSE;
-	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
-			    PHYAddr[qdev->mac_index]);
-
-	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
-			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
-			    PHYAddr[qdev->mac_index]);
-
-	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
-			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
-			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
-			    PHYAddr[qdev->mac_index]);
-}
-
-static void ql_petbi_init(struct ql3_adapter *qdev)
-{
-	ql_petbi_reset(qdev);
-	ql_petbi_start_neg(qdev);
-}
-
-static void ql_petbi_init_ex(struct ql3_adapter *qdev)
-{
-	ql_petbi_reset_ex(qdev);
-	ql_petbi_start_neg_ex(qdev);
-}
-
-static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
-{
-	u16 reg;
-
-	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
-		return 0;
-
-	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
-}
-
-static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
-{
-	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
-	/* power down device bit 11 = 1 */
-	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
-	/* enable diagnostic mode bit 2 = 1 */
-	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
-	/* 1000MB amplitude adjust (see Agere errata) */
-	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
-	/* 1000MB amplitude adjust (see Agere errata) */
-	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
-	/* 100MB amplitude adjust (see Agere errata) */
-	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
-	/* 100MB amplitude adjust (see Agere errata) */
-	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
-	/* 10MB amplitude adjust (see Agere errata) */
-	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
-	/* 10MB amplitude adjust (see Agere errata) */
-	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
-	/* point to hidden reg 0x2806 */
-	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
-	/* Write new PHYAD w/bit 5 set */
-	ql_mii_write_reg_ex(qdev, 0x11,
-			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
-	/*
-	 * Disable diagnostic mode bit 2 = 0
-	 * Power up device bit 11 = 0
-	 * Link up (on) and activity (blink)
-	 */
-	ql_mii_write_reg(qdev, 0x12, 0x840a);
-	ql_mii_write_reg(qdev, 0x00, 0x1140);
-	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
-}
-
-static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
-				       u16 phyIdReg0, u16 phyIdReg1)
-{
-	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
-	u32 oui;
-	u16 model;
-	int i;
-
-	if (phyIdReg0 == 0xffff)
-		return result;
-
-	if (phyIdReg1 == 0xffff)
-		return result;
-
-	/* oui is split between two registers */
-	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
-
-	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
-
-	/* Scan table for this PHY */
-	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
-		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
-		    (model == PHY_DEVICES[i].phyIdModel)) {
-			netdev_info(qdev->ndev, "Phy: %s\n",
-				    PHY_DEVICES[i].name);
-			result = PHY_DEVICES[i].phyDevice;
-			break;
-		}
-	}
-
-	return result;
-}
-
-static int ql_phy_get_speed(struct ql3_adapter *qdev)
-{
-	u16 reg;
-
-	switch (qdev->phyType) {
-	case PHY_AGERE_ET1011C: {
-		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
-			return 0;
-
-		reg = (reg >> 8) & 3;
-		break;
-	}
-	default:
-		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
-			return 0;
-
-		reg = (((reg & 0x18) >> 3) & 3);
-	}
-
-	switch (reg) {
-	case 2:
-		return SPEED_1000;
-	case 1:
-		return SPEED_100;
-	case 0:
-		return SPEED_10;
-	default:
-		return -1;
-	}
-}
-
-static int ql_is_full_dup(struct ql3_adapter *qdev)
-{
-	u16 reg;
-
-	switch (qdev->phyType) {
-	case PHY_AGERE_ET1011C: {
-		if (ql_mii_read_reg(qdev, 0x1A, &reg))
-			return 0;
-
-		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
-	}
-	case PHY_VITESSE_VSC8211:
-	default: {
-		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
-			return 0;
-		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
-	}
-	}
-}
-
-static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
-{
-	u16 reg;
-
-	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
-		return 0;
-
-	return (reg & PHY_NEG_PAUSE) != 0;
-}
-
-static int PHY_Setup(struct ql3_adapter *qdev)
-{
-	u16 reg1;
-	u16 reg2;
-	bool agereAddrChangeNeeded = false;
-	u32 miiAddr = 0;
-	int err;
-
-	/* Determine the PHY we are using by reading the ID's */
-	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
-	if (err != 0) {
-		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
-		return err;
-	}
-
-	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
-	if (err != 0) {
-		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
-		return err;
-	}
-
-	/* Check if we have a Agere PHY */
-	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
-
-		/* Determine which MII address we should be using
-		   determined by the index of the card */
-		if (qdev->mac_index == 0)
-			miiAddr = MII_AGERE_ADDR_1;
-		else
-			miiAddr = MII_AGERE_ADDR_2;
-
-		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
-		if (err != 0) {
-			netdev_err(qdev->ndev,
-				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
-			return err;
-		}
-
-		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
-		if (err != 
0) { - netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); - return err; - } - - /* We need to remember to initialize the Agere PHY */ - agereAddrChangeNeeded = true; - } - - /* Determine the particular PHY we have on board to apply - PHY specific initializations */ - qdev->phyType = getPhyType(qdev, reg1, reg2); - - if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { - /* need this here so address gets changed */ - phyAgereSpecificInit(qdev, miiAddr); - } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { - netdev_err(qdev->ndev, "PHY is unknown\n"); - return -EIO; - } - - return 0; -} - -/* - * Caller holds hw_lock. - */ -static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - if (enable) - value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); - else - value = (MAC_CONFIG_REG_PE << 16); - - if (qdev->mac_index) - ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); - else - ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); -} - -/* - * Caller holds hw_lock. - */ -static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - if (enable) - value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); - else - value = (MAC_CONFIG_REG_SR << 16); - - if (qdev->mac_index) - ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); - else - ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); -} - -/* - * Caller holds hw_lock. - */ -static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - if (enable) - value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); - else - value = (MAC_CONFIG_REG_GM << 16); - - if (qdev->mac_index) - ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); - else - ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); -} - -/* - * Caller holds hw_lock. - */ -static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - if (enable) - value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); - else - value = (MAC_CONFIG_REG_FD << 16); - - if (qdev->mac_index) - ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); - else - ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); -} - -/* - * Caller holds hw_lock. - */ -static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - if (enable) - value = - ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) | - ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16)); - else - value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16); - - if (qdev->mac_index) - ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); - else - ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); -} - -/* - * Caller holds hw_lock. 
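getPhyType() above reassembles the identifier that clause-22 MII splits across the two PHY ID registers: 16 OUI bits in the first register, then 6 more OUI bits, a 6-bit model and a 4-bit revision in the second. A sketch of the same bit-slicing, assuming the masks carry their conventional values (0xfc00 and 0x03f0, which PHY_OUI_1_MASK and PHY_MODEL_MASK are expected to match):

struct phy_id {
	u32 oui;	/* 22-bit organizationally unique identifier */
	u16 model;	/* 6-bit model number */
	u16 rev;	/* 4-bit revision */
};

static struct phy_id decode_phy_id(u16 id0, u16 id1)
{
	struct phy_id p;

	/* id0 holds OUI bits 21:6; id1[15:10] holds OUI bits 5:0 */
	p.oui = ((u32)id0 << 6) | ((id1 & 0xfc00) >> 10);
	p.model = (id1 & 0x03f0) >> 4;
	p.rev = id1 & 0x000f;
	return p;
}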
- */
-static int ql_is_fiber(struct ql3_adapter *qdev)
-{
-	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
-	u32 bitToCheck = 0;
-	u32 temp;
-
-	switch (qdev->mac_index) {
-	case 0:
-		bitToCheck = PORT_STATUS_SM0;
-		break;
-	case 1:
-		bitToCheck = PORT_STATUS_SM1;
-		break;
-	}
-
-	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
-	return (temp & bitToCheck) != 0;
-}
-
-static int ql_is_auto_cfg(struct ql3_adapter *qdev)
-{
-	u16 reg;
-	ql_mii_read_reg(qdev, 0x00, &reg);
-	return (reg & 0x1000) != 0;
-}
-
-/*
- * Caller holds hw_lock.
- */
-static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
-{
-	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
-	u32 bitToCheck = 0;
-	u32 temp;
-
-	switch (qdev->mac_index) {
-	case 0:
-		bitToCheck = PORT_STATUS_AC0;
-		break;
-	case 1:
-		bitToCheck = PORT_STATUS_AC1;
-		break;
-	}
-
-	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
-	if (temp & bitToCheck) {
-		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
-		return 1;
-	}
-	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
-	return 0;
-}
-
-/*
- * ql_is_neg_pause() returns 1 if pause was negotiated to be on
- */
-static int ql_is_neg_pause(struct ql3_adapter *qdev)
-{
-	if (ql_is_fiber(qdev))
-		return ql_is_petbi_neg_pause(qdev);
-	else
-		return ql_is_phy_neg_pause(qdev);
-}
-
-static int ql_auto_neg_error(struct ql3_adapter *qdev)
-{
-	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
-	u32 bitToCheck = 0;
-	u32 temp;
-
-	switch (qdev->mac_index) {
-	case 0:
-		bitToCheck = PORT_STATUS_AE0;
-		break;
-	case 1:
-		bitToCheck = PORT_STATUS_AE1;
-		break;
-	}
-	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
-	return (temp & bitToCheck) != 0;
-}
-
-static u32 ql_get_link_speed(struct ql3_adapter *qdev)
-{
-	if (ql_is_fiber(qdev))
-		return SPEED_1000;
-	else
-		return ql_phy_get_speed(qdev);
-}
-
-static int ql_is_link_full_dup(struct ql3_adapter *qdev)
-{
-	if (ql_is_fiber(qdev))
-		return 1;
-	else
-		return ql_is_full_dup(qdev);
-}
-
-/*
- * Caller holds hw_lock.
- */
-static int ql_link_down_detect(struct ql3_adapter *qdev)
-{
-	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
-	u32 bitToCheck = 0;
-	u32 temp;
-
-	switch (qdev->mac_index) {
-	case 0:
-		bitToCheck = ISP_CONTROL_LINK_DN_0;
-		break;
-	case 1:
-		bitToCheck = ISP_CONTROL_LINK_DN_1;
-		break;
-	}
-
-	temp =
-	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
-	return (temp & bitToCheck) != 0;
-}
-
-/*
- * Caller holds hw_lock.
- */
-static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
-{
-	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
-
-	switch (qdev->mac_index) {
-	case 0:
-		ql_write_common_reg(qdev,
-				    &port_regs->CommonRegs.ispControlStatus,
-				    (ISP_CONTROL_LINK_DN_0) |
-				    (ISP_CONTROL_LINK_DN_0 << 16));
-		break;
-
-	case 1:
-		ql_write_common_reg(qdev,
-				    &port_regs->CommonRegs.ispControlStatus,
-				    (ISP_CONTROL_LINK_DN_1) |
-				    (ISP_CONTROL_LINK_DN_1 << 16));
-		break;
-
-	default:
-		return 1;
-	}
-
-	return 0;
-}
-
-/*
- * Caller holds hw_lock.
- */
-static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
-{
-	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
-	u32 bitToCheck = 0;
-	u32 temp;
-
-	switch (qdev->mac_index) {
-	case 0:
-		bitToCheck = PORT_STATUS_F1_ENABLED;
-		break;
-	case 1:
-		bitToCheck = PORT_STATUS_F3_ENABLED;
-		break;
-	default:
-		break;
-	}
-
-	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
-	if (temp & bitToCheck) {
-		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
-			     "not link master\n");
-		return 0;
-	}
-
-	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
-	return 1;
-}
-
-static void ql_phy_reset_ex(struct ql3_adapter *qdev)
-{
-	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
-			    PHYAddr[qdev->mac_index]);
-}
-
-static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
-{
-	u16 reg;
-	u16 portConfiguration;
-
-	if (qdev->phyType == PHY_AGERE_ET1011C)
-		ql_mii_write_reg(qdev, 0x13, 0x0000);
-		/* turn off external loopback */
-
-	if (qdev->mac_index == 0)
-		portConfiguration =
-			qdev->nvram_data.macCfg_port0.portConfiguration;
-	else
-		portConfiguration =
-			qdev->nvram_data.macCfg_port1.portConfiguration;
-
-	/* Some HBA's in the field are set to 0 and they need to
-	   be reinterpreted with a default value */
-	if (portConfiguration == 0)
-		portConfiguration = PORT_CONFIG_DEFAULT;
-
-	/* Set the 1000 advertisements */
-	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
-			   PHYAddr[qdev->mac_index]);
-	reg &= ~PHY_GIG_ALL_PARAMS;
-
-	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
-		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
-			reg |= PHY_GIG_ADV_1000F;
-		else
-			reg |= PHY_GIG_ADV_1000H;
-	}
-
-	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
-			    PHYAddr[qdev->mac_index]);
-
-	/* Set the 10/100 & pause negotiation advertisements */
-	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
-			   PHYAddr[qdev->mac_index]);
-	reg &= ~PHY_NEG_ALL_PARAMS;
-
-	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
-		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
-
-	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
-		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
-			reg |= PHY_NEG_ADV_100F;
-
-		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
-			reg |= PHY_NEG_ADV_10F;
-	}
-
-	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
-		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
-			reg |= PHY_NEG_ADV_100H;
-
-		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
-			reg |= PHY_NEG_ADV_10H;
-	}
-
-	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
-		reg |= 1;
-
-	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
-			    PHYAddr[qdev->mac_index]);
-
-	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
-
-	ql_mii_write_reg_ex(qdev, CONTROL_REG,
-			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
-			    PHYAddr[qdev->mac_index]);
-}
-
-static void ql_phy_init_ex(struct ql3_adapter *qdev)
-{
-	ql_phy_reset_ex(qdev);
-	PHY_Setup(qdev);
-	ql_phy_start_neg_ex(qdev);
-}
-
-/*
- * Caller holds hw_lock.
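ql_phy_start_neg_ex() above is a chain of read-modify-write updates to the advertisement registers, capped by setting the restart and enable bits in the PHY control register. The final step, sketched with hypothetical mdio helpers and the standard MII control-bit values (0x1000 autoneg enable, 0x0200 restart):

/* standard MII control register bits; stand-ins for the driver's
 * PHY_CTRL_AUTO_NEG and PHY_CTRL_RESTART_NEG */
#define MII_CTRL_AUTONEG_EN	0x1000
#define MII_CTRL_RESTART_NEG	0x0200

/* hypothetical helpers standing in for ql_mii_read/write_reg_ex() */
static void restart_autoneg(void *bus, u32 phy_addr,
			    u16 (*mdio_read)(void *, u32, u16),
			    void (*mdio_write)(void *, u32, u16, u16))
{
	u16 ctrl = mdio_read(bus, phy_addr, 0);	/* register 0 = control */

	/* read-modify-write preserves the remaining control bits */
	mdio_write(bus, phy_addr, 0,
		   ctrl | MII_CTRL_AUTONEG_EN | MII_CTRL_RESTART_NEG);
}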
- */ -static u32 ql_get_link_state(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 bitToCheck = 0; - u32 temp, linkState; - - switch (qdev->mac_index) { - case 0: - bitToCheck = PORT_STATUS_UP0; - break; - case 1: - bitToCheck = PORT_STATUS_UP1; - break; - } - - temp = ql_read_page0_reg(qdev, &port_regs->portStatus); - if (temp & bitToCheck) - linkState = LS_UP; - else - linkState = LS_DOWN; - - return linkState; -} - -static int ql_port_start(struct ql3_adapter *qdev) -{ - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) { - netdev_err(qdev->ndev, "Could not get hw lock for GIO\n"); - return -1; - } - - if (ql_is_fiber(qdev)) { - ql_petbi_init(qdev); - } else { - /* Copper port */ - ql_phy_init_ex(qdev); - } - - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - return 0; -} - -static int ql_finish_auto_neg(struct ql3_adapter *qdev) -{ - - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) - return -1; - - if (!ql_auto_neg_error(qdev)) { - if (test_bit(QL_LINK_MASTER, &qdev->flags)) { - /* configure the MAC */ - netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, - "Configuring link\n"); - ql_mac_cfg_soft_reset(qdev, 1); - ql_mac_cfg_gig(qdev, - (ql_get_link_speed - (qdev) == - SPEED_1000)); - ql_mac_cfg_full_dup(qdev, - ql_is_link_full_dup - (qdev)); - ql_mac_cfg_pause(qdev, - ql_is_neg_pause - (qdev)); - ql_mac_cfg_soft_reset(qdev, 0); - - /* enable the MAC */ - netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, - "Enabling mac\n"); - ql_mac_enable(qdev, 1); - } - - qdev->port_link_state = LS_UP; - netif_start_queue(qdev->ndev); - netif_carrier_on(qdev->ndev); - netif_info(qdev, link, qdev->ndev, - "Link is up at %d Mbps, %s duplex\n", - ql_get_link_speed(qdev), - ql_is_link_full_dup(qdev) ? "full" : "half"); - - } else { /* Remote error detected */ - - if (test_bit(QL_LINK_MASTER, &qdev->flags)) { - netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, - "Remote error detected. Calling ql_port_start()\n"); - /* - * ql_port_start() is shared code and needs - * to lock the PHY on it's own. - */ - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - if (ql_port_start(qdev)) /* Restart port */ - return -1; - return 0; - } - } - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - return 0; -} - -static void ql_link_state_machine_work(struct work_struct *work) -{ - struct ql3_adapter *qdev = - container_of(work, struct ql3_adapter, link_state_work.work); - - u32 curr_link_state; - unsigned long hw_flags; - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - - curr_link_state = ql_get_link_state(qdev); - - if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { - netif_info(qdev, link, qdev->ndev, - "Reset in progress, skip processing link state\n"); - - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - /* Restart timer on 2 second interval. 
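ql_finish_auto_neg() above programs the MAC strictly from the negotiated results, and the ordering is the point: hold the MAC in soft reset while changing speed, duplex and pause, release the reset, and only then enable it. The same sequence, restated as a sketch built from the driver's own helpers (caller holds hw_lock and the PHY semaphore, as in ql_finish_auto_neg()):

static void mac_apply_link_sketch(struct ql3_adapter *qdev)
{
	ql_mac_cfg_soft_reset(qdev, 1);		/* park the MAC first */
	ql_mac_cfg_gig(qdev, ql_get_link_speed(qdev) == SPEED_1000);
	ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
	ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
	ql_mac_cfg_soft_reset(qdev, 0);		/* release the reset... */
	ql_mac_enable(qdev, 1);			/* ...and only then enable */
}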
*/ - mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); - - return; - } - - switch (qdev->port_link_state) { - default: - if (test_bit(QL_LINK_MASTER, &qdev->flags)) - ql_port_start(qdev); - qdev->port_link_state = LS_DOWN; - /* Fall Through */ - - case LS_DOWN: - if (curr_link_state == LS_UP) { - netif_info(qdev, link, qdev->ndev, "Link is up\n"); - if (ql_is_auto_neg_complete(qdev)) - ql_finish_auto_neg(qdev); - - if (qdev->port_link_state == LS_UP) - ql_link_down_detect_clear(qdev); - - qdev->port_link_state = LS_UP; - } - break; - - case LS_UP: - /* - * See if the link is currently down or went down and came - * back up - */ - if (curr_link_state == LS_DOWN) { - netif_info(qdev, link, qdev->ndev, "Link is down\n"); - qdev->port_link_state = LS_DOWN; - } - if (ql_link_down_detect(qdev)) - qdev->port_link_state = LS_DOWN; - break; - } - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - /* Restart timer on 2 second interval. */ - mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); -} - -/* - * Caller must take hw_lock and QL_PHY_GIO_SEM. - */ -static void ql_get_phy_owner(struct ql3_adapter *qdev) -{ - if (ql_this_adapter_controls_port(qdev)) - set_bit(QL_LINK_MASTER, &qdev->flags); - else - clear_bit(QL_LINK_MASTER, &qdev->flags); -} - -/* - * Caller must take hw_lock and QL_PHY_GIO_SEM. - */ -static void ql_init_scan_mode(struct ql3_adapter *qdev) -{ - ql_mii_enable_scan_mode(qdev); - - if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { - if (ql_this_adapter_controls_port(qdev)) - ql_petbi_init_ex(qdev); - } else { - if (ql_this_adapter_controls_port(qdev)) - ql_phy_init_ex(qdev); - } -} - -/* - * MII_Setup needs to be called before taking the PHY out of reset - * so that the management interface clock speed can be set properly. - * It would be better if we had a way to disable MDC until after the - * PHY is out of reset, but we don't have that capability. 
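The work function above reduces to a two-state machine, re-armed from a 2-second timer. Its transitions in isolation (a sketch that ignores the reset-active and link-master special cases):

enum sm_link_state { SM_UNKNOWN, SM_DOWN, SM_UP };

static enum sm_link_state link_sm_step(enum sm_link_state cur,
				       int phy_up, int autoneg_done)
{
	switch (cur) {
	default:		/* first pass: treat the link as down */
		cur = SM_DOWN;
		/* Fall Through */
	case SM_DOWN:
		if (phy_up && autoneg_done)
			cur = SM_UP;	/* MAC gets configured here */
		break;
	case SM_UP:
		if (!phy_up)
			cur = SM_DOWN;	/* carrier went away */
		break;
	}
	return cur;
}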
- */ -static int ql_mii_setup(struct ql3_adapter *qdev) -{ - u32 reg; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) - return -1; - - if (qdev->device_id == QL3032_DEVICE_ID) - ql_write_page0_reg(qdev, - &port_regs->macMIIMgmtControlReg, 0x0f00000); - - /* Divide 125MHz clock by 28 to meet PHY timing requirements */ - reg = MAC_MII_CONTROL_CLK_SEL_DIV28; - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); - - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - return 0; -} - -#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ - SUPPORTED_FIBRE | \ - SUPPORTED_Autoneg) -#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ - SUPPORTED_10baseT_Full | \ - SUPPORTED_100baseT_Half | \ - SUPPORTED_100baseT_Full | \ - SUPPORTED_1000baseT_Half | \ - SUPPORTED_1000baseT_Full | \ - SUPPORTED_Autoneg | \ - SUPPORTED_TP) \ - -static u32 ql_supported_modes(struct ql3_adapter *qdev) -{ - if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) - return SUPPORTED_OPTICAL_MODES; - - return SUPPORTED_TP_MODES; -} - -static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) -{ - int status; - unsigned long hw_flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | - (qdev->mac_index) * 2) << 7)) { - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return 0; - } - status = ql_is_auto_cfg(qdev); - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return status; -} - -static u32 ql_get_speed(struct ql3_adapter *qdev) -{ - u32 status; - unsigned long hw_flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | - (qdev->mac_index) * 2) << 7)) { - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return 0; - } - status = ql_get_link_speed(qdev); - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return status; -} - -static int ql_get_full_dup(struct ql3_adapter *qdev) -{ - int status; - unsigned long hw_flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | - (qdev->mac_index) * 2) << 7)) { - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return 0; - } - status = ql_is_link_full_dup(qdev); - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return status; -} - -static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = ql_supported_modes(qdev); - - if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { - ecmd->port = PORT_FIBRE; - } else { - ecmd->port = PORT_TP; - ecmd->phy_address = qdev->PHYAddr; - } - ecmd->advertising = ql_supported_modes(qdev); - ecmd->autoneg = ql_get_auto_cfg_status(qdev); - ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev)); - ecmd->duplex = ql_get_full_dup(qdev); - return 0; -} - -static void ql_get_drvinfo(struct net_device *ndev, - struct ethtool_drvinfo *drvinfo) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - strncpy(drvinfo->driver, ql3xxx_driver_name, 32); - strncpy(drvinfo->version, ql3xxx_driver_version, 32); - strncpy(drvinfo->fw_version, "N/A", 32); - strncpy(drvinfo->bus_info, 
pci_name(qdev->pdev), 32); - drvinfo->regdump_len = 0; - drvinfo->eedump_len = 0; -} - -static u32 ql_get_msglevel(struct net_device *ndev) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - return qdev->msg_enable; -} - -static void ql_set_msglevel(struct net_device *ndev, u32 value) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - qdev->msg_enable = value; -} - -static void ql_get_pauseparam(struct net_device *ndev, - struct ethtool_pauseparam *pause) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - u32 reg; - if (qdev->mac_index == 0) - reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); - else - reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); - - pause->autoneg = ql_get_auto_cfg_status(qdev); - pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; - pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; -} - -static const struct ethtool_ops ql3xxx_ethtool_ops = { - .get_settings = ql_get_settings, - .get_drvinfo = ql_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_msglevel = ql_get_msglevel, - .set_msglevel = ql_set_msglevel, - .get_pauseparam = ql_get_pauseparam, -}; - -static int ql_populate_free_queue(struct ql3_adapter *qdev) -{ - struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; - dma_addr_t map; - int err; - - while (lrg_buf_cb) { - if (!lrg_buf_cb->skb) { - lrg_buf_cb->skb = - netdev_alloc_skb(qdev->ndev, - qdev->lrg_buffer_len); - if (unlikely(!lrg_buf_cb->skb)) { - netdev_printk(KERN_DEBUG, qdev->ndev, - "Failed netdev_alloc_skb()\n"); - break; - } else { - /* - * We save some space to copy the ethhdr from - * first buffer - */ - skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); - map = pci_map_single(qdev->pdev, - lrg_buf_cb->skb->data, - qdev->lrg_buffer_len - - QL_HEADER_SPACE, - PCI_DMA_FROMDEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netdev_err(qdev->ndev, - "PCI mapping failed with error: %d\n", - err); - dev_kfree_skb(lrg_buf_cb->skb); - lrg_buf_cb->skb = NULL; - break; - } - - - lrg_buf_cb->buf_phy_addr_low = - cpu_to_le32(LS_64BITS(map)); - lrg_buf_cb->buf_phy_addr_high = - cpu_to_le32(MS_64BITS(map)); - dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); - dma_unmap_len_set(lrg_buf_cb, maplen, - qdev->lrg_buffer_len - - QL_HEADER_SPACE); - --qdev->lrg_buf_skb_check; - if (!qdev->lrg_buf_skb_check) - return 1; - } - } - lrg_buf_cb = lrg_buf_cb->next; - } - return 0; -} - -/* - * Caller holds hw_lock. - */ -static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - if (qdev->small_buf_release_cnt >= 16) { - while (qdev->small_buf_release_cnt >= 16) { - qdev->small_buf_q_producer_index++; - - if (qdev->small_buf_q_producer_index == - NUM_SBUFQ_ENTRIES) - qdev->small_buf_q_producer_index = 0; - qdev->small_buf_release_cnt -= 8; - } - wmb(); - writel(qdev->small_buf_q_producer_index, - &port_regs->CommonRegs.rxSmallQProducerIndex); - } -} - -/* - * Caller holds hw_lock. 
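ql_update_small_bufq_prod_index() above hands buffers back to the chip in batches: advance a software producer index, wrap it at the ring size, then publish it with a single doorbell write ordered by wmb(). The pattern in general form (hypothetical ring type):

struct rx_ring {			/* hypothetical */
	u32 prod;			/* software producer index */
	u32 entries;			/* ring size */
	u32 __iomem *doorbell;		/* hardware producer register */
};

static void ring_publish(struct rx_ring *r, u32 batch)
{
	r->prod = (r->prod + batch) % r->entries;	/* advance and wrap */
	wmb();		/* descriptor writes must land before the doorbell */
	writel(r->prod, r->doorbell);
}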
- */ -static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) -{ - struct bufq_addr_element *lrg_buf_q_ele; - int i; - struct ql_rcv_buf_cb *lrg_buf_cb; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - if ((qdev->lrg_buf_free_count >= 8) && - (qdev->lrg_buf_release_cnt >= 16)) { - - if (qdev->lrg_buf_skb_check) - if (!ql_populate_free_queue(qdev)) - return; - - lrg_buf_q_ele = qdev->lrg_buf_next_free; - - while ((qdev->lrg_buf_release_cnt >= 16) && - (qdev->lrg_buf_free_count >= 8)) { - - for (i = 0; i < 8; i++) { - lrg_buf_cb = - ql_get_from_lrg_buf_free_list(qdev); - lrg_buf_q_ele->addr_high = - lrg_buf_cb->buf_phy_addr_high; - lrg_buf_q_ele->addr_low = - lrg_buf_cb->buf_phy_addr_low; - lrg_buf_q_ele++; - - qdev->lrg_buf_release_cnt--; - } - - qdev->lrg_buf_q_producer_index++; - - if (qdev->lrg_buf_q_producer_index == - qdev->num_lbufq_entries) - qdev->lrg_buf_q_producer_index = 0; - - if (qdev->lrg_buf_q_producer_index == - (qdev->num_lbufq_entries - 1)) { - lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; - } - } - wmb(); - qdev->lrg_buf_next_free = lrg_buf_q_ele; - writel(qdev->lrg_buf_q_producer_index, - &port_regs->CommonRegs.rxLargeQProducerIndex); - } -} - -static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, - struct ob_mac_iocb_rsp *mac_rsp) -{ - struct ql_tx_buf_cb *tx_cb; - int i; - int retval = 0; - - if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { - netdev_warn(qdev->ndev, - "Frame too short but it was padded and sent\n"); - } - - tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; - - /* Check the transmit response flags for any errors */ - if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { - netdev_err(qdev->ndev, - "Frame too short to be legal, frame not sent\n"); - - qdev->ndev->stats.tx_errors++; - retval = -EIO; - goto frame_not_sent; - } - - if (tx_cb->seg_count == 0) { - netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", - mac_rsp->transaction_id); - - qdev->ndev->stats.tx_errors++; - retval = -EIO; - goto invalid_seg_count; - } - - pci_unmap_single(qdev->pdev, - dma_unmap_addr(&tx_cb->map[0], mapaddr), - dma_unmap_len(&tx_cb->map[0], maplen), - PCI_DMA_TODEVICE); - tx_cb->seg_count--; - if (tx_cb->seg_count) { - for (i = 1; i < tx_cb->seg_count; i++) { - pci_unmap_page(qdev->pdev, - dma_unmap_addr(&tx_cb->map[i], - mapaddr), - dma_unmap_len(&tx_cb->map[i], maplen), - PCI_DMA_TODEVICE); - } - } - qdev->ndev->stats.tx_packets++; - qdev->ndev->stats.tx_bytes += tx_cb->skb->len; - -frame_not_sent: - dev_kfree_skb_irq(tx_cb->skb); - tx_cb->skb = NULL; - -invalid_seg_count: - atomic_inc(&qdev->tx_count); -} - -static void ql_get_sbuf(struct ql3_adapter *qdev) -{ - if (++qdev->small_buf_index == NUM_SMALL_BUFFERS) - qdev->small_buf_index = 0; - qdev->small_buf_release_cnt++; -} - -static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) -{ - struct ql_rcv_buf_cb *lrg_buf_cb = NULL; - lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index]; - qdev->lrg_buf_release_cnt++; - if (++qdev->lrg_buf_index == qdev->num_large_buffers) - qdev->lrg_buf_index = 0; - return lrg_buf_cb; -} - -/* - * The difference between 3022 and 3032 for inbound completions: - * 3022 uses two buffers per completion. The first buffer contains - * (some) header info, the second the remainder of the headers plus - * the data. For this chip we reserve some space at the top of the - * receive buffer so that the header info in buffer one can be - * prepended to the buffer two. Buffer two is the sent up while - * buffer one is returned to the hardware to be reused. 
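ql_process_mac_tx_intr() above tears down one completed transmit, and the unmap calls must mirror how the send path (ql_send_map(), further below) created the mappings: pci_map_single() for the linear data, pci_map_page() for every fragment. The unwind on its own, using the same map[] bookkeeping as the driver:

static void tx_unmap_sketch(struct ql3_adapter *qdev,
			    struct ql_tx_buf_cb *tx_cb)
{
	int i;

	/* segment 0 is the skb's linear data: mapped with pci_map_single() */
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);

	/* the rest are page fragments: mapped with pci_map_page() */
	for (i = 1; i < tx_cb->seg_count; i++)
		pci_unmap_page(qdev->pdev,
			       dma_unmap_addr(&tx_cb->map[i], mapaddr),
			       dma_unmap_len(&tx_cb->map[i], maplen),
			       PCI_DMA_TODEVICE);
}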
- * 3032 receives all of it's data and headers in one buffer for a - * simpler process. 3032 also supports checksum verification as - * can be seen in ql_process_macip_rx_intr(). - */ -static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, - struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) -{ - struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; - struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; - struct sk_buff *skb; - u16 length = le16_to_cpu(ib_mac_rsp_ptr->length); - - /* - * Get the inbound address list (small buffer). - */ - ql_get_sbuf(qdev); - - if (qdev->device_id == QL3022_DEVICE_ID) - lrg_buf_cb1 = ql_get_lbuf(qdev); - - /* start of second buffer */ - lrg_buf_cb2 = ql_get_lbuf(qdev); - skb = lrg_buf_cb2->skb; - - qdev->ndev->stats.rx_packets++; - qdev->ndev->stats.rx_bytes += length; - - skb_put(skb, length); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(lrg_buf_cb2, mapaddr), - dma_unmap_len(lrg_buf_cb2, maplen), - PCI_DMA_FROMDEVICE); - prefetch(skb->data); - skb_checksum_none_assert(skb); - skb->protocol = eth_type_trans(skb, qdev->ndev); - - netif_receive_skb(skb); - lrg_buf_cb2->skb = NULL; - - if (qdev->device_id == QL3022_DEVICE_ID) - ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); - ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); -} - -static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, - struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) -{ - struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; - struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; - struct sk_buff *skb1 = NULL, *skb2; - struct net_device *ndev = qdev->ndev; - u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); - u16 size = 0; - - /* - * Get the inbound address list (small buffer). - */ - - ql_get_sbuf(qdev); - - if (qdev->device_id == QL3022_DEVICE_ID) { - /* start of first buffer on 3022 */ - lrg_buf_cb1 = ql_get_lbuf(qdev); - skb1 = lrg_buf_cb1->skb; - size = ETH_HLEN; - if (*((u16 *) skb1->data) != 0xFFFF) - size += VLAN_ETH_HLEN - ETH_HLEN; - } - - /* start of second buffer */ - lrg_buf_cb2 = ql_get_lbuf(qdev); - skb2 = lrg_buf_cb2->skb; - - skb_put(skb2, length); /* Just the second buffer length here. */ - pci_unmap_single(qdev->pdev, - dma_unmap_addr(lrg_buf_cb2, mapaddr), - dma_unmap_len(lrg_buf_cb2, maplen), - PCI_DMA_FROMDEVICE); - prefetch(skb2->data); - - skb_checksum_none_assert(skb2); - if (qdev->device_id == QL3022_DEVICE_ID) { - /* - * Copy the ethhdr from first buffer to second. This - * is necessary for 3022 IP completions. - */ - skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, - skb_push(skb2, size), size); - } else { - u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); - if (checksum & - (IB_IP_IOCB_RSP_3032_ICE | - IB_IP_IOCB_RSP_3032_CE)) { - netdev_err(ndev, - "%s: Bad checksum for this %s packet, checksum = %x\n", - __func__, - ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 
- "TCP" : "UDP"), checksum); - } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || - (checksum & IB_IP_IOCB_RSP_3032_UDP && - !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { - skb2->ip_summed = CHECKSUM_UNNECESSARY; - } - } - skb2->protocol = eth_type_trans(skb2, qdev->ndev); - - netif_receive_skb(skb2); - ndev->stats.rx_packets++; - ndev->stats.rx_bytes += length; - lrg_buf_cb2->skb = NULL; - - if (qdev->device_id == QL3022_DEVICE_ID) - ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); - ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); -} - -static int ql_tx_rx_clean(struct ql3_adapter *qdev, - int *tx_cleaned, int *rx_cleaned, int work_to_do) -{ - struct net_rsp_iocb *net_rsp; - struct net_device *ndev = qdev->ndev; - int work_done = 0; - - /* While there are entries in the completion queue. */ - while ((le32_to_cpu(*(qdev->prsp_producer_index)) != - qdev->rsp_consumer_index) && (work_done < work_to_do)) { - - net_rsp = qdev->rsp_current; - rmb(); - /* - * Fix 4032 chip's undocumented "feature" where bit-8 is set - * if the inbound completion is for a VLAN. - */ - if (qdev->device_id == QL3032_DEVICE_ID) - net_rsp->opcode &= 0x7f; - switch (net_rsp->opcode) { - - case OPCODE_OB_MAC_IOCB_FN0: - case OPCODE_OB_MAC_IOCB_FN2: - ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) - net_rsp); - (*tx_cleaned)++; - break; - - case OPCODE_IB_MAC_IOCB: - case OPCODE_IB_3032_MAC_IOCB: - ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) - net_rsp); - (*rx_cleaned)++; - break; - - case OPCODE_IB_IP_IOCB: - case OPCODE_IB_3032_IP_IOCB: - ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) - net_rsp); - (*rx_cleaned)++; - break; - default: { - u32 *tmp = (u32 *)net_rsp; - netdev_err(ndev, - "Hit default case, not handled!\n" - " dropping the packet, opcode = %x\n" - "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", - net_rsp->opcode, - (unsigned long int)tmp[0], - (unsigned long int)tmp[1], - (unsigned long int)tmp[2], - (unsigned long int)tmp[3]); - } - } - - qdev->rsp_consumer_index++; - - if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { - qdev->rsp_consumer_index = 0; - qdev->rsp_current = qdev->rsp_q_virt_addr; - } else { - qdev->rsp_current++; - } - - work_done = *tx_cleaned + *rx_cleaned; - } - - return work_done; -} - -static int ql_poll(struct napi_struct *napi, int budget) -{ - struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); - int rx_cleaned = 0, tx_cleaned = 0; - unsigned long hw_flags; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); - - if (tx_cleaned + rx_cleaned != budget) { - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - __napi_complete(napi); - ql_update_small_bufq_prod_index(qdev); - ql_update_lrg_bufq_prod_index(qdev); - writel(qdev->rsp_consumer_index, - &port_regs->CommonRegs.rspQConsumerIndex); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - ql_enable_interrupts(qdev); - } - return tx_cleaned + rx_cleaned; -} - -static irqreturn_t ql3xxx_isr(int irq, void *dev_id) -{ - - struct net_device *ndev = dev_id; - struct ql3_adapter *qdev = netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - int handled = 1; - u32 var; - - value = ql_read_common_reg_l(qdev, - &port_regs->CommonRegs.ispControlStatus); - - if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { - spin_lock(&qdev->adapter_lock); - netif_stop_queue(qdev->ndev); - netif_carrier_off(qdev->ndev); - ql_disable_interrupts(qdev); - qdev->port_link_state 
= LS_DOWN; - set_bit(QL_RESET_ACTIVE, &qdev->flags) ; - - if (value & ISP_CONTROL_FE) { - /* - * Chip Fatal Error. - */ - var = - ql_read_page0_reg_l(qdev, - &port_regs->PortFatalErrStatus); - netdev_warn(ndev, - "Resetting chip. PortFatalErrStatus register = 0x%x\n", - var); - set_bit(QL_RESET_START, &qdev->flags) ; - } else { - /* - * Soft Reset Requested. - */ - set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; - netdev_err(ndev, - "Another function issued a reset to the chip. ISR value = %x\n", - value); - } - queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); - spin_unlock(&qdev->adapter_lock); - } else if (value & ISP_IMR_DISABLE_CMPL_INT) { - ql_disable_interrupts(qdev); - if (likely(napi_schedule_prep(&qdev->napi))) - __napi_schedule(&qdev->napi); - } else - return IRQ_NONE; - - return IRQ_RETVAL(handled); -} - -/* - * Get the total number of segments needed for the given number of fragments. - * This is necessary because outbound address lists (OAL) will be used when - * more than two frags are given. Each address list has 5 addr/len pairs. - * The 5th pair in each OAL is used to point to the next OAL if more frags - * are coming. That is why the frags:segment count ratio is not linear. - */ -static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) -{ - if (qdev->device_id == QL3022_DEVICE_ID) - return 1; - - if (frags <= 2) - return frags + 1; - else if (frags <= 6) - return frags + 2; - else if (frags <= 10) - return frags + 3; - else if (frags <= 14) - return frags + 4; - else if (frags <= 18) - return frags + 5; - return -1; -} - -static void ql_hw_csum_setup(const struct sk_buff *skb, - struct ob_mac_iocb_req *mac_iocb_ptr) -{ - const struct iphdr *ip = ip_hdr(skb); - - mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); - mac_iocb_ptr->ip_hdr_len = ip->ihl; - - if (ip->protocol == IPPROTO_TCP) { - mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | - OB_3032MAC_IOCB_REQ_IC; - } else { - mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | - OB_3032MAC_IOCB_REQ_IC; - } - -} - -/* - * Map the buffers for this transmit. - * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. - */ -static int ql_send_map(struct ql3_adapter *qdev, - struct ob_mac_iocb_req *mac_iocb_ptr, - struct ql_tx_buf_cb *tx_cb, - struct sk_buff *skb) -{ - struct oal *oal; - struct oal_entry *oal_entry; - int len = skb_headlen(skb); - dma_addr_t map; - int err; - int completed_segs, i; - int seg_cnt, seg = 0; - int frag_cnt = (int)skb_shinfo(skb)->nr_frags; - - seg_cnt = tx_cb->seg_count; - /* - * Map the skb buffer first. - */ - map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", - err); - - return NETDEV_TX_BUSY; - } - - oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; - oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); - oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); - oal_entry->len = cpu_to_le32(len); - dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); - dma_unmap_len_set(&tx_cb->map[seg], maplen, len); - seg++; - - if (seg_cnt == 1) { - /* Terminate the last segment. */ - oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); - return NETDEV_TX_OK; - } - oal = tx_cb->oal; - for (completed_segs = 0; - completed_segs < frag_cnt; - completed_segs++, seg++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; - oal_entry++; - /* - * Check for continuation requirements. - * It's strange but necessary. 
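A worked reading of ql_get_seg_count() above: the IOCB carries 3 addr/len pairs (the third is repurposed as a chain pointer when more are needed), and each chained OAL carries 4 data pairs plus a chain pointer, so every additional OAL costs one bookkeeping segment. Segments = data buffers (frags + linear part) + number of OALs:

/*
 *   nr_frags   buffers (frags + linear)   OALs   segments returned
 *       2                 3                 0          3
 *       6                 7                 1          8
 *      10                11                 2         13
 *      14                15                 3         18
 *      18                19                 4         23
 */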
- * Continuation entry points to outbound address list. - */ - if ((seg == 2 && seg_cnt > 3) || - (seg == 7 && seg_cnt > 8) || - (seg == 12 && seg_cnt > 13) || - (seg == 17 && seg_cnt > 18)) { - map = pci_map_single(qdev->pdev, oal, - sizeof(struct oal), - PCI_DMA_TODEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netdev_err(qdev->ndev, - "PCI mapping outbound address list with error: %d\n", - err); - goto map_error; - } - - oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); - oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); - oal_entry->len = cpu_to_le32(sizeof(struct oal) | - OAL_CONT_ENTRY); - dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); - dma_unmap_len_set(&tx_cb->map[seg], maplen, - sizeof(struct oal)); - oal_entry = (struct oal_entry *)oal; - oal++; - seg++; - } - - map = pci_map_page(qdev->pdev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netdev_err(qdev->ndev, - "PCI mapping frags failed with error: %d\n", - err); - goto map_error; - } - - oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); - oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); - oal_entry->len = cpu_to_le32(frag->size); - dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); - dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size); - } - /* Terminate the last segment. */ - oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); - return NETDEV_TX_OK; - -map_error: - /* A PCI mapping failed and now we will need to back out - * We need to traverse through the oal's and associated pages which - * have been mapped and now we must unmap them to clean up properly - */ - - seg = 1; - oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; - oal = tx_cb->oal; - for (i = 0; i < completed_segs; i++, seg++) { - oal_entry++; - - /* - * Check for continuation requirements. - * It's strange but necessary. - */ - - if ((seg == 2 && seg_cnt > 3) || - (seg == 7 && seg_cnt > 8) || - (seg == 12 && seg_cnt > 13) || - (seg == 17 && seg_cnt > 18)) { - pci_unmap_single(qdev->pdev, - dma_unmap_addr(&tx_cb->map[seg], mapaddr), - dma_unmap_len(&tx_cb->map[seg], maplen), - PCI_DMA_TODEVICE); - oal++; - seg++; - } - - pci_unmap_page(qdev->pdev, - dma_unmap_addr(&tx_cb->map[seg], mapaddr), - dma_unmap_len(&tx_cb->map[seg], maplen), - PCI_DMA_TODEVICE); - } - - pci_unmap_single(qdev->pdev, - dma_unmap_addr(&tx_cb->map[0], mapaddr), - dma_unmap_addr(&tx_cb->map[0], maplen), - PCI_DMA_TODEVICE); - - return NETDEV_TX_BUSY; - -} - -/* - * The difference between 3022 and 3032 sends: - * 3022 only supports a simple single segment transmission. - * 3032 supports checksumming and scatter/gather lists (fragments). - * The 3032 supports sglists by using the 3 addr/len pairs (ALP) - * in the IOCB plus a chain of outbound address lists (OAL) that - * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) - * will be used to point to an OAL when more ALP entries are required. - * The IOCB is always the top of the chain followed by one or more - * OALs (when necessary). 
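The map_error path above is the canonical partial-failure unwind: walk only the mappings that succeeded and undo each with the unmap call that matches how it was created. The shape of the idiom, sketched with a hypothetical bookkeeping struct:

struct mapping {			/* hypothetical */
	DEFINE_DMA_UNMAP_ADDR(mapaddr);
	DEFINE_DMA_UNMAP_LEN(maplen);
};

/* undo the first n successful pci_map_page() calls */
static void unwind_page_mappings(struct pci_dev *pdev,
				 struct mapping *maps, int n)
{
	int i;

	for (i = n - 1; i >= 0; i--)	/* reverse order of creation */
		pci_unmap_page(pdev,
			       dma_unmap_addr(&maps[i], mapaddr),
			       dma_unmap_len(&maps[i], maplen),
			       PCI_DMA_TODEVICE);
}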
- */ -static netdev_tx_t ql3xxx_send(struct sk_buff *skb, - struct net_device *ndev) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - struct ql_tx_buf_cb *tx_cb; - u32 tot_len = skb->len; - struct ob_mac_iocb_req *mac_iocb_ptr; - - if (unlikely(atomic_read(&qdev->tx_count) < 2)) - return NETDEV_TX_BUSY; - - tx_cb = &qdev->tx_buf[qdev->req_producer_index]; - tx_cb->seg_count = ql_get_seg_count(qdev, - skb_shinfo(skb)->nr_frags); - if (tx_cb->seg_count == -1) { - netdev_err(ndev, "%s: invalid segment count!\n", __func__); - return NETDEV_TX_OK; - } - - mac_iocb_ptr = tx_cb->queue_entry; - memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); - mac_iocb_ptr->opcode = qdev->mac_ob_opcode; - mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; - mac_iocb_ptr->flags |= qdev->mb_bit_mask; - mac_iocb_ptr->transaction_id = qdev->req_producer_index; - mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); - tx_cb->skb = skb; - if (qdev->device_id == QL3032_DEVICE_ID && - skb->ip_summed == CHECKSUM_PARTIAL) - ql_hw_csum_setup(skb, mac_iocb_ptr); - - if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { - netdev_err(ndev, "%s: Could not map the segments!\n", __func__); - return NETDEV_TX_BUSY; - } - - wmb(); - qdev->req_producer_index++; - if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) - qdev->req_producer_index = 0; - wmb(); - ql_write_common_reg_l(qdev, - &port_regs->CommonRegs.reqQProducerIndex, - qdev->req_producer_index); - - netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, - "tx queued, slot %d, len %d\n", - qdev->req_producer_index, skb->len); - - atomic_dec(&qdev->tx_count); - return NETDEV_TX_OK; -} - -static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) -{ - qdev->req_q_size = - (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); - - qdev->req_q_virt_addr = - pci_alloc_consistent(qdev->pdev, - (size_t) qdev->req_q_size, - &qdev->req_q_phy_addr); - - if ((qdev->req_q_virt_addr == NULL) || - LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { - netdev_err(qdev->ndev, "reqQ failed\n"); - return -ENOMEM; - } - - qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); - - qdev->rsp_q_virt_addr = - pci_alloc_consistent(qdev->pdev, - (size_t) qdev->rsp_q_size, - &qdev->rsp_q_phy_addr); - - if ((qdev->rsp_q_virt_addr == NULL) || - LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { - netdev_err(qdev->ndev, "rspQ allocation failed\n"); - pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, - qdev->req_q_virt_addr, - qdev->req_q_phy_addr); - return -ENOMEM; - } - - set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); - - return 0; -} - -static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) -{ - if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { - netdev_info(qdev->ndev, "Already done\n"); - return; - } - - pci_free_consistent(qdev->pdev, - qdev->req_q_size, - qdev->req_q_virt_addr, qdev->req_q_phy_addr); - - qdev->req_q_virt_addr = NULL; - - pci_free_consistent(qdev->pdev, - qdev->rsp_q_size, - qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); - - qdev->rsp_q_virt_addr = NULL; - - clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); -} - -static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) -{ - /* Create Large Buffer Queue */ - qdev->lrg_buf_q_size = - qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); - if (qdev->lrg_buf_q_size < PAGE_SIZE) - qdev->lrg_buf_q_alloc_size = PAGE_SIZE; - else - qdev->lrg_buf_q_alloc_size = 
qdev->lrg_buf_q_size * 2; - - qdev->lrg_buf = - kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), - GFP_KERNEL); - if (qdev->lrg_buf == NULL) { - netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n"); - return -ENOMEM; - } - - qdev->lrg_buf_q_alloc_virt_addr = - pci_alloc_consistent(qdev->pdev, - qdev->lrg_buf_q_alloc_size, - &qdev->lrg_buf_q_alloc_phy_addr); - - if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { - netdev_err(qdev->ndev, "lBufQ failed\n"); - return -ENOMEM; - } - qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; - qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; - - /* Create Small Buffer Queue */ - qdev->small_buf_q_size = - NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); - if (qdev->small_buf_q_size < PAGE_SIZE) - qdev->small_buf_q_alloc_size = PAGE_SIZE; - else - qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; - - qdev->small_buf_q_alloc_virt_addr = - pci_alloc_consistent(qdev->pdev, - qdev->small_buf_q_alloc_size, - &qdev->small_buf_q_alloc_phy_addr); - - if (qdev->small_buf_q_alloc_virt_addr == NULL) { - netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); - pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, - qdev->lrg_buf_q_alloc_virt_addr, - qdev->lrg_buf_q_alloc_phy_addr); - return -ENOMEM; - } - - qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; - qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; - set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); - return 0; -} - -static void ql_free_buffer_queues(struct ql3_adapter *qdev) -{ - if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { - netdev_info(qdev->ndev, "Already done\n"); - return; - } - kfree(qdev->lrg_buf); - pci_free_consistent(qdev->pdev, - qdev->lrg_buf_q_alloc_size, - qdev->lrg_buf_q_alloc_virt_addr, - qdev->lrg_buf_q_alloc_phy_addr); - - qdev->lrg_buf_q_virt_addr = NULL; - - pci_free_consistent(qdev->pdev, - qdev->small_buf_q_alloc_size, - qdev->small_buf_q_alloc_virt_addr, - qdev->small_buf_q_alloc_phy_addr); - - qdev->small_buf_q_virt_addr = NULL; - - clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); -} - -static int ql_alloc_small_buffers(struct ql3_adapter *qdev) -{ - int i; - struct bufq_addr_element *small_buf_q_entry; - - /* Currently we allocate on one of memory and use it for smallbuffers */ - qdev->small_buf_total_size = - (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * - QL_SMALL_BUFFER_SIZE); - - qdev->small_buf_virt_addr = - pci_alloc_consistent(qdev->pdev, - qdev->small_buf_total_size, - &qdev->small_buf_phy_addr); - - if (qdev->small_buf_virt_addr == NULL) { - netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); - return -ENOMEM; - } - - qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); - qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); - - small_buf_q_entry = qdev->small_buf_q_virt_addr; - - /* Initialize the small buffer queue. 
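ql_alloc_small_buffers() above makes a single coherent allocation; the loop just below carves it into QL_SMALL_BUFFER_SIZE slices and points each queue element at its slice. The address arithmetic in isolation, with a hypothetical element type, under the same implicit assumption the driver makes (the per-entry offsets stay within the low 32 bits of the base address):

struct bufq_elem {			/* hypothetical */
	__le32 addr_high;
	__le32 addr_low;
};

static void fill_small_bufq(struct bufq_elem *q, dma_addr_t base,
			    unsigned int nbufs, unsigned int buf_size)
{
	unsigned int i;

	for (i = 0; i < nbufs; i++) {
		/* one shared high word; per-entry offset in the low word */
		q[i].addr_high = cpu_to_le32(upper_32_bits(base));
		q[i].addr_low = cpu_to_le32(lower_32_bits(base) +
					    i * buf_size);
	}
}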
*/ - for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { - small_buf_q_entry->addr_high = - cpu_to_le32(qdev->small_buf_phy_addr_high); - small_buf_q_entry->addr_low = - cpu_to_le32(qdev->small_buf_phy_addr_low + - (i * QL_SMALL_BUFFER_SIZE)); - small_buf_q_entry++; - } - qdev->small_buf_index = 0; - set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); - return 0; -} - -static void ql_free_small_buffers(struct ql3_adapter *qdev) -{ - if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { - netdev_info(qdev->ndev, "Already done\n"); - return; - } - if (qdev->small_buf_virt_addr != NULL) { - pci_free_consistent(qdev->pdev, - qdev->small_buf_total_size, - qdev->small_buf_virt_addr, - qdev->small_buf_phy_addr); - - qdev->small_buf_virt_addr = NULL; - } -} - -static void ql_free_large_buffers(struct ql3_adapter *qdev) -{ - int i = 0; - struct ql_rcv_buf_cb *lrg_buf_cb; - - for (i = 0; i < qdev->num_large_buffers; i++) { - lrg_buf_cb = &qdev->lrg_buf[i]; - if (lrg_buf_cb->skb) { - dev_kfree_skb(lrg_buf_cb->skb); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(lrg_buf_cb, mapaddr), - dma_unmap_len(lrg_buf_cb, maplen), - PCI_DMA_FROMDEVICE); - memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); - } else { - break; - } - } -} - -static void ql_init_large_buffers(struct ql3_adapter *qdev) -{ - int i; - struct ql_rcv_buf_cb *lrg_buf_cb; - struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; - - for (i = 0; i < qdev->num_large_buffers; i++) { - lrg_buf_cb = &qdev->lrg_buf[i]; - buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; - buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; - buf_addr_ele++; - } - qdev->lrg_buf_index = 0; - qdev->lrg_buf_skb_check = 0; -} - -static int ql_alloc_large_buffers(struct ql3_adapter *qdev) -{ - int i; - struct ql_rcv_buf_cb *lrg_buf_cb; - struct sk_buff *skb; - dma_addr_t map; - int err; - - for (i = 0; i < qdev->num_large_buffers; i++) { - skb = netdev_alloc_skb(qdev->ndev, - qdev->lrg_buffer_len); - if (unlikely(!skb)) { - /* Better luck next round */ - netdev_err(qdev->ndev, - "large buff alloc failed for %d bytes at index %d\n", - qdev->lrg_buffer_len * 2, i); - ql_free_large_buffers(qdev); - return -ENOMEM; - } else { - - lrg_buf_cb = &qdev->lrg_buf[i]; - memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); - lrg_buf_cb->index = i; - lrg_buf_cb->skb = skb; - /* - * We save some space to copy the ethhdr from first - * buffer - */ - skb_reserve(skb, QL_HEADER_SPACE); - map = pci_map_single(qdev->pdev, - skb->data, - qdev->lrg_buffer_len - - QL_HEADER_SPACE, - PCI_DMA_FROMDEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netdev_err(qdev->ndev, - "PCI mapping failed with error: %d\n", - err); - ql_free_large_buffers(qdev); - return -ENOMEM; - } - - dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); - dma_unmap_len_set(lrg_buf_cb, maplen, - qdev->lrg_buffer_len - - QL_HEADER_SPACE); - lrg_buf_cb->buf_phy_addr_low = - cpu_to_le32(LS_64BITS(map)); - lrg_buf_cb->buf_phy_addr_high = - cpu_to_le32(MS_64BITS(map)); - } - } - return 0; -} - -static void ql_free_send_free_list(struct ql3_adapter *qdev) -{ - struct ql_tx_buf_cb *tx_cb; - int i; - - tx_cb = &qdev->tx_buf[0]; - for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { - kfree(tx_cb->oal); - tx_cb->oal = NULL; - tx_cb++; - } -} - -static int ql_create_send_free_list(struct ql3_adapter *qdev) -{ - struct ql_tx_buf_cb *tx_cb; - int i; - struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; - - /* Create free list of transmit buffers */ - for (i = 0; i < 
NUM_REQ_Q_ENTRIES; i++) { - - tx_cb = &qdev->tx_buf[i]; - tx_cb->skb = NULL; - tx_cb->queue_entry = req_q_curr; - req_q_curr++; - tx_cb->oal = kmalloc(512, GFP_KERNEL); - if (tx_cb->oal == NULL) - return -1; - } - return 0; -} - -static int ql_alloc_mem_resources(struct ql3_adapter *qdev) -{ - if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { - qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; - qdev->lrg_buffer_len = NORMAL_MTU_SIZE; - } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { - /* - * Bigger buffers, so less of them. - */ - qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; - qdev->lrg_buffer_len = JUMBO_MTU_SIZE; - } else { - netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", - qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); - return -ENOMEM; - } - qdev->num_large_buffers = - qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; - qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; - qdev->max_frame_size = - (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; - - /* - * First allocate a page of shared memory and use it for shadow - * locations of Network Request Queue Consumer Address Register and - * Network Completion Queue Producer Index Register - */ - qdev->shadow_reg_virt_addr = - pci_alloc_consistent(qdev->pdev, - PAGE_SIZE, &qdev->shadow_reg_phy_addr); - - if (qdev->shadow_reg_virt_addr != NULL) { - qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; - qdev->req_consumer_index_phy_addr_high = - MS_64BITS(qdev->shadow_reg_phy_addr); - qdev->req_consumer_index_phy_addr_low = - LS_64BITS(qdev->shadow_reg_phy_addr); - - qdev->prsp_producer_index = - (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); - qdev->rsp_producer_index_phy_addr_high = - qdev->req_consumer_index_phy_addr_high; - qdev->rsp_producer_index_phy_addr_low = - qdev->req_consumer_index_phy_addr_low + 8; - } else { - netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); - return -ENOMEM; - } - - if (ql_alloc_net_req_rsp_queues(qdev) != 0) { - netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); - goto err_req_rsp; - } - - if (ql_alloc_buffer_queues(qdev) != 0) { - netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); - goto err_buffer_queues; - } - - if (ql_alloc_small_buffers(qdev) != 0) { - netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); - goto err_small_buffers; - } - - if (ql_alloc_large_buffers(qdev) != 0) { - netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); - goto err_small_buffers; - } - - /* Initialize the large buffer queue. 
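[ql_alloc_mem_resources() above keeps both shadow locations in a single DMA page: the request-queue consumer index lives at offset 0 and the response-queue producer index at offset 8, so one allocation covers both and the chip publishes them by DMA instead of the driver issuing MMIO reads. A sketch of that layout, with hypothetical field names mirroring the +8 arithmetic above:

    /* Sketch: one coherent page holding both shadow indices that the
     * chip writes via DMA (assumed layout; offsets match the code). */
    #include <stdint.h>

    struct shadow_page {
            volatile uint32_t req_consumer;   /* offset 0: chip-updated */
            uint32_t          pad;
            volatile uint32_t rsp_producer;   /* offset 8: chip-updated */
    };

    static inline uint32_t rsp_producer(const struct shadow_page *sp)
    {
            return sp->rsp_producer;          /* cheap memory read, no MMIO */
    }
]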
*/ - ql_init_large_buffers(qdev); - if (ql_create_send_free_list(qdev)) - goto err_free_list; - - qdev->rsp_current = qdev->rsp_q_virt_addr; - - return 0; -err_free_list: - ql_free_send_free_list(qdev); -err_small_buffers: - ql_free_buffer_queues(qdev); -err_buffer_queues: - ql_free_net_req_rsp_queues(qdev); -err_req_rsp: - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->shadow_reg_virt_addr, - qdev->shadow_reg_phy_addr); - - return -ENOMEM; -} - -static void ql_free_mem_resources(struct ql3_adapter *qdev) -{ - ql_free_send_free_list(qdev); - ql_free_large_buffers(qdev); - ql_free_small_buffers(qdev); - ql_free_buffer_queues(qdev); - ql_free_net_req_rsp_queues(qdev); - if (qdev->shadow_reg_virt_addr != NULL) { - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->shadow_reg_virt_addr, - qdev->shadow_reg_phy_addr); - qdev->shadow_reg_virt_addr = NULL; - } -} - -static int ql_init_misc_registers(struct ql3_adapter *qdev) -{ - struct ql3xxx_local_ram_registers __iomem *local_ram = - (void __iomem *)qdev->mem_map_registers; - - if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 4)) - return -1; - - ql_write_page2_reg(qdev, - &local_ram->bufletSize, qdev->nvram_data.bufletSize); - - ql_write_page2_reg(qdev, - &local_ram->maxBufletCount, - qdev->nvram_data.bufletCount); - - ql_write_page2_reg(qdev, - &local_ram->freeBufletThresholdLow, - (qdev->nvram_data.tcpWindowThreshold25 << 16) | - (qdev->nvram_data.tcpWindowThreshold0)); - - ql_write_page2_reg(qdev, - &local_ram->freeBufletThresholdHigh, - qdev->nvram_data.tcpWindowThreshold50); - - ql_write_page2_reg(qdev, - &local_ram->ipHashTableBase, - (qdev->nvram_data.ipHashTableBaseHi << 16) | - qdev->nvram_data.ipHashTableBaseLo); - ql_write_page2_reg(qdev, - &local_ram->ipHashTableCount, - qdev->nvram_data.ipHashTableSize); - ql_write_page2_reg(qdev, - &local_ram->tcpHashTableBase, - (qdev->nvram_data.tcpHashTableBaseHi << 16) | - qdev->nvram_data.tcpHashTableBaseLo); - ql_write_page2_reg(qdev, - &local_ram->tcpHashTableCount, - qdev->nvram_data.tcpHashTableSize); - ql_write_page2_reg(qdev, - &local_ram->ncbBase, - (qdev->nvram_data.ncbTableBaseHi << 16) | - qdev->nvram_data.ncbTableBaseLo); - ql_write_page2_reg(qdev, - &local_ram->maxNcbCount, - qdev->nvram_data.ncbTableSize); - ql_write_page2_reg(qdev, - &local_ram->drbBase, - (qdev->nvram_data.drbTableBaseHi << 16) | - qdev->nvram_data.drbTableBaseLo); - ql_write_page2_reg(qdev, - &local_ram->maxDrbCount, - qdev->nvram_data.drbTableSize); - ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); - return 0; -} - -static int ql_adapter_initialize(struct ql3_adapter *qdev) -{ - u32 value; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; - struct ql3xxx_host_memory_registers __iomem *hmem_regs = - (void __iomem *)port_regs; - u32 delay = 10; - int status = 0; - unsigned long hw_flags = 0; - - if (ql_mii_setup(qdev)) - return -1; - - /* Bring out PHY out of reset */ - ql_write_common_reg(qdev, spir, - (ISP_SERIAL_PORT_IF_WE | - (ISP_SERIAL_PORT_IF_WE << 16))); - /* Give the PHY time to come out of reset. */ - mdelay(100); - qdev->port_link_state = LS_DOWN; - netif_carrier_off(qdev->ndev); - - /* V2 chip fix for ARS-39168. 
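[ql_init_misc_registers() above brackets its local-RAM writes with ql_sem_spinlock()/ql_sem_unlock(): the two PCI functions on one ASIC share these resources, so a hardware semaphore register arbitrates between driver instances. The shape of the pattern, with sem_acquire()/sem_release() as hypothetical stand-ins for the driver's helpers:

    /* Sketch: a resource shared by both PCI functions, guarded by a
     * hardware semaphore register rather than a host lock. */
    static int program_shared_block(void)
    {
            if (sem_acquire(QL_DDR_RAM_SEM_MASK))   /* spin until owned */
                    return -1;                      /* other function holds it */

            /* ... shared registers may be written safely here ... */

            sem_release(QL_DDR_RAM_SEM_MASK);
            return 0;
    }

A host spinlock cannot help here because the contending owner may be a completely separate driver instance bound to the other PCI function.]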
*/ - ql_write_common_reg(qdev, spir, - (ISP_SERIAL_PORT_IF_SDE | - (ISP_SERIAL_PORT_IF_SDE << 16))); - - /* Request Queue Registers */ - *((u32 *)(qdev->preq_consumer_index)) = 0; - atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); - qdev->req_producer_index = 0; - - ql_write_page1_reg(qdev, - &hmem_regs->reqConsumerIndexAddrHigh, - qdev->req_consumer_index_phy_addr_high); - ql_write_page1_reg(qdev, - &hmem_regs->reqConsumerIndexAddrLow, - qdev->req_consumer_index_phy_addr_low); - - ql_write_page1_reg(qdev, - &hmem_regs->reqBaseAddrHigh, - MS_64BITS(qdev->req_q_phy_addr)); - ql_write_page1_reg(qdev, - &hmem_regs->reqBaseAddrLow, - LS_64BITS(qdev->req_q_phy_addr)); - ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); - - /* Response Queue Registers */ - *((__le16 *) (qdev->prsp_producer_index)) = 0; - qdev->rsp_consumer_index = 0; - qdev->rsp_current = qdev->rsp_q_virt_addr; - - ql_write_page1_reg(qdev, - &hmem_regs->rspProducerIndexAddrHigh, - qdev->rsp_producer_index_phy_addr_high); - - ql_write_page1_reg(qdev, - &hmem_regs->rspProducerIndexAddrLow, - qdev->rsp_producer_index_phy_addr_low); - - ql_write_page1_reg(qdev, - &hmem_regs->rspBaseAddrHigh, - MS_64BITS(qdev->rsp_q_phy_addr)); - - ql_write_page1_reg(qdev, - &hmem_regs->rspBaseAddrLow, - LS_64BITS(qdev->rsp_q_phy_addr)); - - ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); - - /* Large Buffer Queue */ - ql_write_page1_reg(qdev, - &hmem_regs->rxLargeQBaseAddrHigh, - MS_64BITS(qdev->lrg_buf_q_phy_addr)); - - ql_write_page1_reg(qdev, - &hmem_regs->rxLargeQBaseAddrLow, - LS_64BITS(qdev->lrg_buf_q_phy_addr)); - - ql_write_page1_reg(qdev, - &hmem_regs->rxLargeQLength, - qdev->num_lbufq_entries); - - ql_write_page1_reg(qdev, - &hmem_regs->rxLargeBufferLength, - qdev->lrg_buffer_len); - - /* Small Buffer Queue */ - ql_write_page1_reg(qdev, - &hmem_regs->rxSmallQBaseAddrHigh, - MS_64BITS(qdev->small_buf_q_phy_addr)); - - ql_write_page1_reg(qdev, - &hmem_regs->rxSmallQBaseAddrLow, - LS_64BITS(qdev->small_buf_q_phy_addr)); - - ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); - ql_write_page1_reg(qdev, - &hmem_regs->rxSmallBufferLength, - QL_SMALL_BUFFER_SIZE); - - qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; - qdev->small_buf_release_cnt = 8; - qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; - qdev->lrg_buf_release_cnt = 8; - qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; - qdev->small_buf_index = 0; - qdev->lrg_buf_index = 0; - qdev->lrg_buf_free_count = 0; - qdev->lrg_buf_free_head = NULL; - qdev->lrg_buf_free_tail = NULL; - - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - rxSmallQProducerIndex, - qdev->small_buf_q_producer_index); - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - rxLargeQProducerIndex, - qdev->lrg_buf_q_producer_index); - - /* - * Find out if the chip has already been initialized. If it has, then - * we skip some of the initialization. - */ - clear_bit(QL_LINK_MASTER, &qdev->flags); - value = ql_read_page0_reg(qdev, &port_regs->portStatus); - if ((value & PORT_STATUS_IC) == 0) { - - /* Chip has not been configured yet, so let it rip. 
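[Every queue base address in the setup above is handed to the chip as a high/low register pair, split with the MS_64BITS()/LS_64BITS() macros from qla3xxx.h; the header's >>16>>16 form presumably sidesteps shift-by-32 pitfalls when dma_addr_t is only 32 bits wide. The split as a standalone sketch:

    /* Sketch: split a DMA address into the two 32-bit register writes
     * used for reqBaseAddrHigh/reqBaseAddrLow and friends above. */
    #include <stdint.h>

    static void write_base_addr(uint64_t phys,
                                volatile uint32_t *hi_reg,
                                volatile uint32_t *lo_reg)
    {
            *hi_reg = (uint32_t)(phys >> 16 >> 16);  /* MS_64BITS() */
            *lo_reg = (uint32_t)(phys & 0xffffffff); /* LS_64BITS() */
    }
]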
*/ - if (ql_init_misc_registers(qdev)) { - status = -1; - goto out; - } - - value = qdev->nvram_data.tcpMaxWindowSize; - ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); - - value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; - - if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) - * 2) << 13)) { - status = -1; - goto out; - } - ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); - ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, - (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << - 16) | (INTERNAL_CHIP_SD | - INTERNAL_CHIP_WE))); - ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); - } - - if (qdev->mac_index) - ql_write_page0_reg(qdev, - &port_regs->mac1MaxFrameLengthReg, - qdev->max_frame_size); - else - ql_write_page0_reg(qdev, - &port_regs->mac0MaxFrameLengthReg, - qdev->max_frame_size); - - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) { - status = -1; - goto out; - } - - PHY_Setup(qdev); - ql_init_scan_mode(qdev); - ql_get_phy_owner(qdev); - - /* Load the MAC Configuration */ - - /* Program lower 32 bits of the MAC address */ - ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); - ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, - ((qdev->ndev->dev_addr[2] << 24) - | (qdev->ndev->dev_addr[3] << 16) - | (qdev->ndev->dev_addr[4] << 8) - | qdev->ndev->dev_addr[5])); - - /* Program top 16 bits of the MAC address */ - ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); - ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, - ((qdev->ndev->dev_addr[0] << 8) - | qdev->ndev->dev_addr[1])); - - /* Enable Primary MAC */ - ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | - MAC_ADDR_INDIRECT_PTR_REG_PE)); - - /* Clear Primary and Secondary IP addresses */ - ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, - ((IP_ADDR_INDEX_REG_MASK << 16) | - (qdev->mac_index << 2))); - ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); - - ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, - ((IP_ADDR_INDEX_REG_MASK << 16) | - ((qdev->mac_index << 2) + 1))); - ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); - - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - - /* Indicate Configuration Complete */ - ql_write_page0_reg(qdev, - &port_regs->portControl, - ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); - - do { - value = ql_read_page0_reg(qdev, &port_regs->portStatus); - if (value & PORT_STATUS_IC) - break; - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - msleep(500); - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - } while (--delay); - - if (delay == 0) { - netdev_err(qdev->ndev, "Hw Initialization timeout\n"); - status = -1; - goto out; - } - - /* Enable Ethernet Function */ - if (qdev->device_id == QL3032_DEVICE_ID) { - value = - (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | - QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 | - QL3032_PORT_CONTROL_ET); - ql_write_page0_reg(qdev, &port_regs->functionControl, - ((value << 16) | value)); - } else { - value = - (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | - PORT_CONTROL_HH); - ql_write_page0_reg(qdev, &port_regs->portControl, - ((value << 16) | value)); - } - - -out: - return status; -} - -/* - * Caller holds hw_lock. 
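[The MAC programming above packs the six address bytes big-endian into two indirect data-register writes: bytes 2-5 form the lower 32 bits, bytes 0-1 the upper 16. The packing, extracted as a sketch:

    /* Sketch: pack a 6-byte MAC address exactly as the code above loads
     * it through macAddrIndirectPtrReg/macAddrDataReg. */
    #include <stdint.h>

    static void pack_mac(const uint8_t a[6], uint32_t *low32, uint32_t *top16)
    {
            *low32 = ((uint32_t)a[2] << 24) | ((uint32_t)a[3] << 16) |
                     ((uint32_t)a[4] << 8)  |  a[5];
            *top16 = ((uint32_t)a[0] << 8)  |  a[1];
    }
]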
- */ -static int ql_adapter_reset(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - int status = 0; - u16 value; - int max_wait_time; - - set_bit(QL_RESET_ACTIVE, &qdev->flags); - clear_bit(QL_RESET_DONE, &qdev->flags); - - /* - * Issue soft reset to chip. - */ - netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); - ql_write_common_reg(qdev, - &port_regs->CommonRegs.ispControlStatus, - ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); - - /* Wait 3 seconds for reset to complete. */ - netdev_printk(KERN_DEBUG, qdev->ndev, - "Wait 10 milliseconds for reset to complete\n"); - - /* Wait until the firmware tells us the Soft Reset is done */ - max_wait_time = 5; - do { - value = - ql_read_common_reg(qdev, - &port_regs->CommonRegs.ispControlStatus); - if ((value & ISP_CONTROL_SR) == 0) - break; - - ssleep(1); - } while ((--max_wait_time)); - - /* - * Also, make sure that the Network Reset Interrupt bit has been - * cleared after the soft reset has taken place. - */ - value = - ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); - if (value & ISP_CONTROL_RI) { - netdev_printk(KERN_DEBUG, qdev->ndev, - "clearing RI after reset\n"); - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - ispControlStatus, - ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); - } - - if (max_wait_time == 0) { - /* Issue Force Soft Reset */ - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - ispControlStatus, - ((ISP_CONTROL_FSR << 16) | - ISP_CONTROL_FSR)); - /* - * Wait until the firmware tells us the Force Soft Reset is - * done - */ - max_wait_time = 5; - do { - value = ql_read_common_reg(qdev, - &port_regs->CommonRegs. - ispControlStatus); - if ((value & ISP_CONTROL_FSR) == 0) - break; - ssleep(1); - } while ((--max_wait_time)); - } - if (max_wait_time == 0) - status = 1; - - clear_bit(QL_RESET_ACTIVE, &qdev->flags); - set_bit(QL_RESET_DONE, &qdev->flags); - return status; -} - -static void ql_set_mac_info(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value, port_status; - u8 func_number; - - /* Get the function number */ - value = - ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); - func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); - port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); - switch (value & ISP_CONTROL_FN_MASK) { - case ISP_CONTROL_FN0_NET: - qdev->mac_index = 0; - qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; - qdev->mb_bit_mask = FN0_MA_BITS_MASK; - qdev->PHYAddr = PORT0_PHY_ADDRESS; - if (port_status & PORT_STATUS_SM0) - set_bit(QL_LINK_OPTICAL, &qdev->flags); - else - clear_bit(QL_LINK_OPTICAL, &qdev->flags); - break; - - case ISP_CONTROL_FN1_NET: - qdev->mac_index = 1; - qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; - qdev->mb_bit_mask = FN1_MA_BITS_MASK; - qdev->PHYAddr = PORT1_PHY_ADDRESS; - if (port_status & PORT_STATUS_SM1) - set_bit(QL_LINK_OPTICAL, &qdev->flags); - else - clear_bit(QL_LINK_OPTICAL, &qdev->flags); - break; - - case ISP_CONTROL_FN0_SCSI: - case ISP_CONTROL_FN1_SCSI: - default: - netdev_printk(KERN_DEBUG, qdev->ndev, - "Invalid function number, ispControlStatus = 0x%x\n", - value); - break; - } - qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; -} - -static void ql_display_dev_info(struct net_device *ndev) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - struct pci_dev *pdev = qdev->pdev; - - netdev_info(ndev, - "%s Adapter %d RevisionID %d found %s on PCI 
slot %d\n", - DRV_NAME, qdev->index, qdev->chip_rev_id, - qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", - qdev->pci_slot); - netdev_info(ndev, "%s Interface\n", - test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); - - /* - * Print PCI bus width/type. - */ - netdev_info(ndev, "Bus interface is %s %s\n", - ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), - ((qdev->pci_x) ? "PCI-X" : "PCI")); - - netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", - qdev->mem_map_registers); - netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); - - netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); -} - -static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) -{ - struct net_device *ndev = qdev->ndev; - int retval = 0; - - netif_stop_queue(ndev); - netif_carrier_off(ndev); - - clear_bit(QL_ADAPTER_UP, &qdev->flags); - clear_bit(QL_LINK_MASTER, &qdev->flags); - - ql_disable_interrupts(qdev); - - free_irq(qdev->pdev->irq, ndev); - - if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { - netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); - clear_bit(QL_MSI_ENABLED, &qdev->flags); - pci_disable_msi(qdev->pdev); - } - - del_timer_sync(&qdev->adapter_timer); - - napi_disable(&qdev->napi); - - if (do_reset) { - int soft_reset; - unsigned long hw_flags; - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (ql_wait_for_drvr_lock(qdev)) { - soft_reset = ql_adapter_reset(qdev); - if (soft_reset) { - netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", - qdev->index); - } - netdev_err(ndev, - "Releasing driver lock via chip reset\n"); - } else { - netdev_err(ndev, - "Could not acquire driver lock to do reset!\n"); - retval = -1; - } - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - } - ql_free_mem_resources(qdev); - return retval; -} - -static int ql_adapter_up(struct ql3_adapter *qdev) -{ - struct net_device *ndev = qdev->ndev; - int err; - unsigned long irq_flags = IRQF_SHARED; - unsigned long hw_flags; - - if (ql_alloc_mem_resources(qdev)) { - netdev_err(ndev, "Unable to allocate buffers\n"); - return -ENOMEM; - } - - if (qdev->msi) { - if (pci_enable_msi(qdev->pdev)) { - netdev_err(ndev, - "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); - qdev->msi = 0; - } else { - netdev_info(ndev, "MSI Enabled...\n"); - set_bit(QL_MSI_ENABLED, &qdev->flags); - irq_flags &= ~IRQF_SHARED; - } - } - - err = request_irq(qdev->pdev->irq, ql3xxx_isr, - irq_flags, ndev->name, ndev); - if (err) { - netdev_err(ndev, - "Failed to reserve interrupt %d - already in use\n", - qdev->pdev->irq); - goto err_irq; - } - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - - err = ql_wait_for_drvr_lock(qdev); - if (err) { - err = ql_adapter_initialize(qdev); - if (err) { - netdev_err(ndev, "Unable to initialize adapter\n"); - goto err_init; - } - netdev_err(ndev, "Releasing driver lock\n"); - ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); - } else { - netdev_err(ndev, "Could not acquire driver lock\n"); - goto err_lock; - } - - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - set_bit(QL_ADAPTER_UP, &qdev->flags); - - mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); - - napi_enable(&qdev->napi); - ql_enable_interrupts(qdev); - return 0; - -err_init: - ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); -err_lock: - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - free_irq(qdev->pdev->irq, ndev); -err_irq: - if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { - netdev_info(ndev, "calling pci_disable_msi()\n"); - clear_bit(QL_MSI_ENABLED, &qdev->flags); - pci_disable_msi(qdev->pdev); - } - return err; -} - -static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) -{ - if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { - netdev_err(qdev->ndev, - "Driver up/down cycle failed, closing device\n"); - rtnl_lock(); - dev_close(qdev->ndev); - rtnl_unlock(); - return -1; - } - return 0; -} - -static int ql3xxx_close(struct net_device *ndev) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - - /* - * Wait for device to recover from a reset. - * (Rarely happens, but possible.) - */ - while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) - msleep(50); - - ql_adapter_down(qdev, QL_DO_RESET); - return 0; -} - -static int ql3xxx_open(struct net_device *ndev) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - return ql_adapter_up(qdev); -} - -static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - struct sockaddr *addr = p; - unsigned long hw_flags; - - if (netif_running(ndev)) - return -EBUSY; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - /* Program lower 32 bits of the MAC address */ - ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); - ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, - ((ndev->dev_addr[2] << 24) | (ndev-> - dev_addr[3] << 16) | - (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); - - /* Program top 16 bits of the MAC address */ - ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); - ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, - ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - return 0; -} - -static void ql3xxx_tx_timeout(struct net_device *ndev) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - - netdev_err(ndev, "Resetting...\n"); - /* - * Stop the queues, we've got a problem. - */ - netif_stop_queue(ndev); - - /* - * Wake up the worker to process this event. 
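[ql_adapter_up() above tries MSI first and falls back to legacy INTx, clearing IRQF_SHARED only when MSI succeeds, since an MSI vector is never shared. The fallback shape, with request_my_irq() as a hypothetical wrapper around request_irq():

    /* Sketch of the MSI-or-shared-INTx fallback used above (kernel
     * context assumed; pci_enable_msi() is the real API of this era). */
    static int setup_irq(struct pci_dev *pdev, int want_msi)
    {
            unsigned long flags = IRQF_SHARED;   /* INTx lines may be shared */

            if (want_msi && pci_enable_msi(pdev) == 0)
                    flags &= ~IRQF_SHARED;       /* MSI vectors are exclusive */

            return request_my_irq(pdev->irq, flags);
    }
]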
- */ - queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); -} - -static void ql_reset_work(struct work_struct *work) -{ - struct ql3_adapter *qdev = - container_of(work, struct ql3_adapter, reset_work.work); - struct net_device *ndev = qdev->ndev; - u32 value; - struct ql_tx_buf_cb *tx_cb; - int max_wait_time, i; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - unsigned long hw_flags; - - if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { - clear_bit(QL_LINK_MASTER, &qdev->flags); - - /* - * Loop through the active list and return the skb. - */ - for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { - int j; - tx_cb = &qdev->tx_buf[i]; - if (tx_cb->skb) { - netdev_printk(KERN_DEBUG, ndev, - "Freeing lost SKB\n"); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(&tx_cb->map[0], - mapaddr), - dma_unmap_len(&tx_cb->map[0], maplen), - PCI_DMA_TODEVICE); - for (j = 1; j < tx_cb->seg_count; j++) { - pci_unmap_page(qdev->pdev, - dma_unmap_addr(&tx_cb->map[j], - mapaddr), - dma_unmap_len(&tx_cb->map[j], - maplen), - PCI_DMA_TODEVICE); - } - dev_kfree_skb(tx_cb->skb); - tx_cb->skb = NULL; - } - } - - netdev_err(ndev, "Clearing NRI after reset\n"); - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - ispControlStatus, - ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); - /* - * Wait the for Soft Reset to Complete. - */ - max_wait_time = 10; - do { - value = ql_read_common_reg(qdev, - &port_regs->CommonRegs. - - ispControlStatus); - if ((value & ISP_CONTROL_SR) == 0) { - netdev_printk(KERN_DEBUG, ndev, - "reset completed\n"); - break; - } - - if (value & ISP_CONTROL_RI) { - netdev_printk(KERN_DEBUG, ndev, - "clearing NRI after reset\n"); - ql_write_common_reg(qdev, - &port_regs-> - CommonRegs. - ispControlStatus, - ((ISP_CONTROL_RI << - 16) | ISP_CONTROL_RI)); - } - - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - ssleep(1); - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - } while (--max_wait_time); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - if (value & ISP_CONTROL_SR) { - - /* - * Set the reset flags and clear the board again. - * Nothing else to do... 
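[The reset poll in ql_reset_work() above drops hw_lock around each one-second sleep and retakes it before re-reading ispControlStatus; sleeping while holding a spinlock would deadlock or stall the interrupt path. A sketch of that drop-sleep-retake loop, with read_status() standing in for ql_read_common_reg() (the real code uses the irqsave/irqrestore lock variants):

    /* Sketch: poll for soft-reset completion without sleeping under
     * the lock.  ~10 s total at one second per try, as above. */
    static int wait_reset_done(struct ql3_adapter *qdev)
    {
            int tries = 10;

            do {
                    if (!(read_status(qdev) & ISP_CONTROL_SR))
                            return 0;               /* reset finished */
                    spin_unlock(&qdev->hw_lock);    /* let the ISR run */
                    ssleep(1);                      /* may sleep: lock dropped */
                    spin_lock(&qdev->hw_lock);
            } while (--tries);

            return -1;                              /* chip never came back */
    }
]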
- */ - netdev_err(ndev, - "Timed out waiting for reset to complete\n"); - netdev_err(ndev, "Do a reset\n"); - clear_bit(QL_RESET_PER_SCSI, &qdev->flags); - clear_bit(QL_RESET_START, &qdev->flags); - ql_cycle_adapter(qdev, QL_DO_RESET); - return; - } - - clear_bit(QL_RESET_ACTIVE, &qdev->flags); - clear_bit(QL_RESET_PER_SCSI, &qdev->flags); - clear_bit(QL_RESET_START, &qdev->flags); - ql_cycle_adapter(qdev, QL_NO_RESET); - } -} - -static void ql_tx_timeout_work(struct work_struct *work) -{ - struct ql3_adapter *qdev = - container_of(work, struct ql3_adapter, tx_timeout_work.work); - - ql_cycle_adapter(qdev, QL_DO_RESET); -} - -static void ql_get_board_info(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); - - qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); - if (value & PORT_STATUS_64) - qdev->pci_width = 64; - else - qdev->pci_width = 32; - if (value & PORT_STATUS_X) - qdev->pci_x = 1; - else - qdev->pci_x = 0; - qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); -} - -static void ql3xxx_timer(unsigned long ptr) -{ - struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; - queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); -} - -static const struct net_device_ops ql3xxx_netdev_ops = { - .ndo_open = ql3xxx_open, - .ndo_start_xmit = ql3xxx_send, - .ndo_stop = ql3xxx_close, - .ndo_set_multicast_list = NULL, /* not allowed on NIC side */ - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = ql3xxx_set_mac_address, - .ndo_tx_timeout = ql3xxx_tx_timeout, -}; - -static int __devinit ql3xxx_probe(struct pci_dev *pdev, - const struct pci_device_id *pci_entry) -{ - struct net_device *ndev = NULL; - struct ql3_adapter *qdev = NULL; - static int cards_found; - int uninitialized_var(pci_using_dac), err; - - err = pci_enable_device(pdev); - if (err) { - pr_err("%s cannot enable PCI device\n", pci_name(pdev)); - goto err_out; - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) { - pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); - goto err_out_disable_pdev; - } - - pci_set_master(pdev); - - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - pci_using_dac = 1; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { - pci_using_dac = 0; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - } - - if (err) { - pr_err("%s no usable DMA configuration\n", pci_name(pdev)); - goto err_out_free_regions; - } - - ndev = alloc_etherdev(sizeof(struct ql3_adapter)); - if (!ndev) { - pr_err("%s could not alloc etherdev\n", pci_name(pdev)); - err = -ENOMEM; - goto err_out_free_regions; - } - - SET_NETDEV_DEV(ndev, &pdev->dev); - - pci_set_drvdata(pdev, ndev); - - qdev = netdev_priv(ndev); - qdev->index = cards_found; - qdev->ndev = ndev; - qdev->pdev = pdev; - qdev->device_id = pci_entry->device; - qdev->port_link_state = LS_DOWN; - if (msi) - qdev->msi = 1; - - qdev->msg_enable = netif_msg_init(debug, default_msg); - - if (pci_using_dac) - ndev->features |= NETIF_F_HIGHDMA; - if (qdev->device_id == QL3032_DEVICE_ID) - ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; - - qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); - if (!qdev->mem_map_registers) { - pr_err("%s: cannot map device registers\n", pci_name(pdev)); - err = -EIO; - goto err_out_free_ndev; - } - - spin_lock_init(&qdev->adapter_lock); - 
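[The probe above negotiates the DMA mask in the standard two-step way: prefer 64-bit addressing (and later advertise NETIF_F_HIGHDMA), otherwise settle for 32-bit, and fail only if neither works. Isolated as a sketch:

    /* Sketch of the probe's DMA-mask negotiation, using the same
     * pci_set_dma_mask()/pci_set_consistent_dma_mask() calls as above. */
    static int set_dma_masks(struct pci_dev *pdev, int *using_dac)
    {
            if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                    *using_dac = 1;
                    return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
            }
            if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
                    *using_dac = 0;
                    return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
            }
            return -EIO;    /* no usable DMA configuration */
    }
]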
spin_lock_init(&qdev->hw_lock); - - /* Set driver entry points */ - ndev->netdev_ops = &ql3xxx_netdev_ops; - SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); - ndev->watchdog_timeo = 5 * HZ; - - netif_napi_add(ndev, &qdev->napi, ql_poll, 64); - - ndev->irq = pdev->irq; - - /* make sure the EEPROM is good */ - if (ql_get_nvram_params(qdev)) { - pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", - __func__, qdev->index); - err = -EIO; - goto err_out_iounmap; - } - - ql_set_mac_info(qdev); - - /* Validate and set parameters */ - if (qdev->mac_index) { - ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; - ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); - } else { - ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; - ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); - } - memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); - - ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; - - /* Record PCI bus information. */ - ql_get_board_info(qdev); - - /* - * Set the Maximum Memory Read Byte Count value. We do this to handle - * jumbo frames. - */ - if (qdev->pci_x) - pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); - - err = register_netdev(ndev); - if (err) { - pr_err("%s: cannot register net device\n", pci_name(pdev)); - goto err_out_iounmap; - } - - /* we're going to reset, so assume we have no link for now */ - - netif_carrier_off(ndev); - netif_stop_queue(ndev); - - qdev->workqueue = create_singlethread_workqueue(ndev->name); - INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); - INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); - INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); - - init_timer(&qdev->adapter_timer); - qdev->adapter_timer.function = ql3xxx_timer; - qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ - qdev->adapter_timer.data = (unsigned long)qdev; - - if (!cards_found) { - pr_alert("%s\n", DRV_STRING); - pr_alert("Driver name: %s, Version: %s\n", - DRV_NAME, DRV_VERSION); - } - ql_display_dev_info(ndev); - - cards_found++; - return 0; - -err_out_iounmap: - iounmap(qdev->mem_map_registers); -err_out_free_ndev: - free_netdev(ndev); -err_out_free_regions: - pci_release_regions(pdev); -err_out_disable_pdev: - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); -err_out: - return err; -} - -static void __devexit ql3xxx_remove(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql3_adapter *qdev = netdev_priv(ndev); - - unregister_netdev(ndev); - - ql_disable_interrupts(qdev); - - if (qdev->workqueue) { - cancel_delayed_work(&qdev->reset_work); - cancel_delayed_work(&qdev->tx_timeout_work); - destroy_workqueue(qdev->workqueue); - qdev->workqueue = NULL; - } - - iounmap(qdev->mem_map_registers); - pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); - free_netdev(ndev); -} - -static struct pci_driver ql3xxx_driver = { - - .name = DRV_NAME, - .id_table = ql3xxx_pci_tbl, - .probe = ql3xxx_probe, - .remove = __devexit_p(ql3xxx_remove), -}; - -static int __init ql3xxx_init_module(void) -{ - return pci_register_driver(&ql3xxx_driver); -} - -static void __exit ql3xxx_exit(void) -{ - pci_unregister_driver(&ql3xxx_driver); -} - -module_init(ql3xxx_init_module); -module_exit(ql3xxx_exit); diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h deleted file mode 100644 index 73e234366a82..000000000000 --- a/drivers/net/qla3xxx.h +++ /dev/null @@ -1,1189 +0,0 @@ -/* - * QLogic QLA3xxx NIC HBA Driver - * Copyright (c) 2003-2006 QLogic Corporation - * - * 
See LICENSE.qla3xxx for copyright and licensing details. - */ -#ifndef _QLA3XXX_H_ -#define _QLA3XXX_H_ - -/* - * IOCB Definitions... - */ -#pragma pack(1) - -#define OPCODE_OB_MAC_IOCB_FN0 0x01 -#define OPCODE_OB_MAC_IOCB_FN2 0x21 - -#define OPCODE_IB_MAC_IOCB 0xF9 -#define OPCODE_IB_3032_MAC_IOCB 0x09 -#define OPCODE_IB_IP_IOCB 0xFA -#define OPCODE_IB_3032_IP_IOCB 0x0A - -#define OPCODE_FUNC_ID_MASK 0x30 -#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ - -#define FN0_MA_BITS_MASK 0x00 -#define FN1_MA_BITS_MASK 0x80 - -struct ob_mac_iocb_req { - u8 opcode; - u8 flags; -#define OB_MAC_IOCB_REQ_MA 0xe0 -#define OB_MAC_IOCB_REQ_F 0x10 -#define OB_MAC_IOCB_REQ_X 0x08 -#define OB_MAC_IOCB_REQ_D 0x02 -#define OB_MAC_IOCB_REQ_I 0x01 - u8 flags1; -#define OB_3032MAC_IOCB_REQ_IC 0x04 -#define OB_3032MAC_IOCB_REQ_TC 0x02 -#define OB_3032MAC_IOCB_REQ_UC 0x01 - u8 reserved0; - - u32 transaction_id; /* opaque for hardware */ - __le16 data_len; - u8 ip_hdr_off; - u8 ip_hdr_len; - __le32 reserved1; - __le32 reserved2; - __le32 buf_addr0_low; - __le32 buf_addr0_high; - __le32 buf_0_len; - __le32 buf_addr1_low; - __le32 buf_addr1_high; - __le32 buf_1_len; - __le32 buf_addr2_low; - __le32 buf_addr2_high; - __le32 buf_2_len; - __le32 reserved3; - __le32 reserved4; -}; -/* - * The following constants define control bits for buffer - * length fields for all IOCB's. - */ -#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */ -#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */ -#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */ -#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. */ - -struct ob_mac_iocb_rsp { - u8 opcode; - u8 flags; -#define OB_MAC_IOCB_RSP_P 0x08 -#define OB_MAC_IOCB_RSP_L 0x04 -#define OB_MAC_IOCB_RSP_S 0x02 -#define OB_MAC_IOCB_RSP_I 0x01 - - __le16 reserved0; - u32 transaction_id; /* opaque for hardware */ - __le32 reserved1; - __le32 reserved2; -}; - -struct ib_mac_iocb_rsp { - u8 opcode; -#define IB_MAC_IOCB_RSP_V 0x80 - u8 flags; -#define IB_MAC_IOCB_RSP_S 0x80 -#define IB_MAC_IOCB_RSP_H1 0x40 -#define IB_MAC_IOCB_RSP_H0 0x20 -#define IB_MAC_IOCB_RSP_B 0x10 -#define IB_MAC_IOCB_RSP_M 0x08 -#define IB_MAC_IOCB_RSP_MA 0x07 - - __le16 length; - __le32 reserved; - __le32 ial_low; - __le32 ial_high; - -}; - -struct ob_ip_iocb_req { - u8 opcode; - __le16 flags; -#define OB_IP_IOCB_REQ_O 0x100 -#define OB_IP_IOCB_REQ_H 0x008 -#define OB_IP_IOCB_REQ_U 0x004 -#define OB_IP_IOCB_REQ_D 0x002 -#define OB_IP_IOCB_REQ_I 0x001 - - u8 reserved0; - - __le32 transaction_id; - __le16 data_len; - __le16 reserved1; - __le32 hncb_ptr_low; - __le32 hncb_ptr_high; - __le32 buf_addr0_low; - __le32 buf_addr0_high; - __le32 buf_0_len; - __le32 buf_addr1_low; - __le32 buf_addr1_high; - __le32 buf_1_len; - __le32 buf_addr2_low; - __le32 buf_addr2_high; - __le32 buf_2_len; - __le32 reserved2; - __le32 reserved3; -}; - -/* defines for BufferLength fields above */ -#define OB_IP_IOCB_REQ_E 0x80000000 -#define OB_IP_IOCB_REQ_C 0x40000000 -#define OB_IP_IOCB_REQ_L 0x20000000 -#define OB_IP_IOCB_REQ_R 0x10000000 - -struct ob_ip_iocb_rsp { - u8 opcode; - u8 flags; -#define OB_MAC_IOCB_RSP_H 0x10 -#define OB_MAC_IOCB_RSP_E 0x08 -#define OB_MAC_IOCB_RSP_L 0x04 -#define OB_MAC_IOCB_RSP_S 0x02 -#define OB_MAC_IOCB_RSP_I 0x01 - - __le16 reserved0; - __le32 transaction_id; - __le32 reserved1; - __le32 reserved2; -}; - -struct ib_ip_iocb_rsp { - u8 opcode; -#define IB_IP_IOCB_RSP_3032_V 0x80 -#define IB_IP_IOCB_RSP_3032_O 0x40 -#define 
IB_IP_IOCB_RSP_3032_I 0x20 -#define IB_IP_IOCB_RSP_3032_R 0x10 - u8 flags; -#define IB_IP_IOCB_RSP_S 0x80 -#define IB_IP_IOCB_RSP_H1 0x40 -#define IB_IP_IOCB_RSP_H0 0x20 -#define IB_IP_IOCB_RSP_B 0x10 -#define IB_IP_IOCB_RSP_M 0x08 -#define IB_IP_IOCB_RSP_MA 0x07 - - __le16 length; - __le16 checksum; -#define IB_IP_IOCB_RSP_3032_ICE 0x01 -#define IB_IP_IOCB_RSP_3032_CE 0x02 -#define IB_IP_IOCB_RSP_3032_NUC 0x04 -#define IB_IP_IOCB_RSP_3032_UDP 0x08 -#define IB_IP_IOCB_RSP_3032_TCP 0x10 -#define IB_IP_IOCB_RSP_3032_IPE 0x20 - __le16 reserved; -#define IB_IP_IOCB_RSP_R 0x01 - __le32 ial_low; - __le32 ial_high; -}; - -struct net_rsp_iocb { - u8 opcode; - u8 flags; - __le16 reserved0; - __le32 reserved[3]; -}; -#pragma pack() - -/* - * Register Definitions... - */ -#define PORT0_PHY_ADDRESS 0x1e00 -#define PORT1_PHY_ADDRESS 0x1f00 - -#define ETHERNET_CRC_SIZE 4 - -#define MII_SCAN_REGISTER 0x00000001 - -#define PHY_ID_0_REG 2 -#define PHY_ID_1_REG 3 - -#define PHY_OUI_1_MASK 0xfc00 -#define PHY_MODEL_MASK 0x03f0 - -/* Address for the Agere Phy */ -#define MII_AGERE_ADDR_1 0x00001000 -#define MII_AGERE_ADDR_2 0x00001100 - -/* 32-bit ispControlStatus */ -enum { - ISP_CONTROL_NP_MASK = 0x0003, - ISP_CONTROL_NP_PCSR = 0x0000, - ISP_CONTROL_NP_HMCR = 0x0001, - ISP_CONTROL_NP_LRAMCR = 0x0002, - ISP_CONTROL_NP_PSR = 0x0003, - ISP_CONTROL_RI = 0x0008, - ISP_CONTROL_CI = 0x0010, - ISP_CONTROL_PI = 0x0020, - ISP_CONTROL_IN = 0x0040, - ISP_CONTROL_BE = 0x0080, - ISP_CONTROL_FN_MASK = 0x0700, - ISP_CONTROL_FN0_NET = 0x0400, - ISP_CONTROL_FN0_SCSI = 0x0500, - ISP_CONTROL_FN1_NET = 0x0600, - ISP_CONTROL_FN1_SCSI = 0x0700, - ISP_CONTROL_LINK_DN_0 = 0x0800, - ISP_CONTROL_LINK_DN_1 = 0x1000, - ISP_CONTROL_FSR = 0x2000, - ISP_CONTROL_FE = 0x4000, - ISP_CONTROL_SR = 0x8000, -}; - -/* 32-bit ispInterruptMaskReg */ -enum { - ISP_IMR_ENABLE_INT = 0x0004, - ISP_IMR_DISABLE_RESET_INT = 0x0008, - ISP_IMR_DISABLE_CMPL_INT = 0x0010, - ISP_IMR_DISABLE_PROC_INT = 0x0020, -}; - -/* 32-bit serialPortInterfaceReg */ -enum { - ISP_SERIAL_PORT_IF_CLK = 0x0001, - ISP_SERIAL_PORT_IF_CS = 0x0002, - ISP_SERIAL_PORT_IF_D0 = 0x0004, - ISP_SERIAL_PORT_IF_DI = 0x0008, - ISP_NVRAM_MASK = (0x000F << 16), - ISP_SERIAL_PORT_IF_WE = 0x0010, - ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F, - ISP_SERIAL_PORT_IF_SCI = 0x0400, - ISP_SERIAL_PORT_IF_SC0 = 0x0800, - ISP_SERIAL_PORT_IF_SCE = 0x1000, - ISP_SERIAL_PORT_IF_SDI = 0x2000, - ISP_SERIAL_PORT_IF_SDO = 0x4000, - ISP_SERIAL_PORT_IF_SDE = 0x8000, - ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00, -}; - -/* semaphoreReg */ -enum { - QL_RESOURCE_MASK_BASE_CODE = 0x7, - QL_RESOURCE_BITS_BASE_CODE = 0x4, - QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1), - QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4), - QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7), - QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10), - QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13), - QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)), - QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)), - QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)), - QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)), - QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)), -}; - - /* - * QL3XXX memory-mapped registers - * QL3XXX has 4 "pages" of registers, each page occupying - * 256 bytes. Each page has a "common" area at the start and then - * page-specific registers after that. 
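[Because the four register pages described above share one small BAR window, a page must be selected before any page-specific register is touched; the driver caches the selection in qdev->current_page and funnels accesses through its ql_write_pageN_reg() helpers. A sketch of the select-then-write pattern, assuming (as the ISP_CONTROL_NP_* values above suggest) that the NP bits of ispControlStatus pick the page; select_page() is hypothetical:

    /* Sketch: page-selected MMIO write with a cached page number. */
    static u32 current_page;

    static void write_paged_reg(u32 page, void __iomem *reg, u32 val)
    {
            if (current_page != page) {
                    select_page(page);   /* program the NP bits, then cache */
                    current_page = page;
            }
            writel(val, reg);
    }

The common area at the top of every page is visible regardless of the selection, which is why the doorbell registers need no page switch in the hot path.]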
- */ -struct ql3xxx_common_registers { - u32 MB0; /* Offset 0x00 */ - u32 MB1; /* Offset 0x04 */ - u32 MB2; /* Offset 0x08 */ - u32 MB3; /* Offset 0x0c */ - u32 MB4; /* Offset 0x10 */ - u32 MB5; /* Offset 0x14 */ - u32 MB6; /* Offset 0x18 */ - u32 MB7; /* Offset 0x1c */ - u32 flashBiosAddr; - u32 flashBiosData; - u32 ispControlStatus; - u32 ispInterruptMaskReg; - u32 serialPortInterfaceReg; - u32 semaphoreReg; - u32 reqQProducerIndex; - u32 rspQConsumerIndex; - - u32 rxLargeQProducerIndex; - u32 rxSmallQProducerIndex; - u32 arcMadiCommand; - u32 arcMadiData; -}; - -enum { - EXT_HW_CONFIG_SP_MASK = 0x0006, - EXT_HW_CONFIG_SP_NONE = 0x0000, - EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002, - EXT_HW_CONFIG_SP_ECC = 0x0004, - EXT_HW_CONFIG_SP_ECCx = 0x0006, - EXT_HW_CONFIG_SIZE_MASK = 0x0060, - EXT_HW_CONFIG_SIZE_128M = 0x0000, - EXT_HW_CONFIG_SIZE_256M = 0x0020, - EXT_HW_CONFIG_SIZE_512M = 0x0040, - EXT_HW_CONFIG_SIZE_INVALID = 0x0060, - EXT_HW_CONFIG_PD = 0x0080, - EXT_HW_CONFIG_FW = 0x0200, - EXT_HW_CONFIG_US = 0x0400, - EXT_HW_CONFIG_DCS_MASK = 0x1800, - EXT_HW_CONFIG_DCS_9MA = 0x0000, - EXT_HW_CONFIG_DCS_15MA = 0x0800, - EXT_HW_CONFIG_DCS_18MA = 0x1000, - EXT_HW_CONFIG_DCS_24MA = 0x1800, - EXT_HW_CONFIG_DDS_MASK = 0x6000, - EXT_HW_CONFIG_DDS_9MA = 0x0000, - EXT_HW_CONFIG_DDS_15MA = 0x2000, - EXT_HW_CONFIG_DDS_18MA = 0x4000, - EXT_HW_CONFIG_DDS_24MA = 0x6000, -}; - -/* InternalChipConfig */ -enum { - INTERNAL_CHIP_DM = 0x0001, - INTERNAL_CHIP_SD = 0x0002, - INTERNAL_CHIP_RAP_MASK = 0x000C, - INTERNAL_CHIP_RAP_RR = 0x0000, - INTERNAL_CHIP_RAP_NRM = 0x0004, - INTERNAL_CHIP_RAP_ERM = 0x0008, - INTERNAL_CHIP_RAP_ERMx = 0x000C, - INTERNAL_CHIP_WE = 0x0010, - INTERNAL_CHIP_EF = 0x0020, - INTERNAL_CHIP_FR = 0x0040, - INTERNAL_CHIP_FW = 0x0080, - INTERNAL_CHIP_FI = 0x0100, - INTERNAL_CHIP_FT = 0x0200, -}; - -/* portControl */ -enum { - PORT_CONTROL_DS = 0x0001, - PORT_CONTROL_HH = 0x0002, - PORT_CONTROL_EI = 0x0004, - PORT_CONTROL_ET = 0x0008, - PORT_CONTROL_EF = 0x0010, - PORT_CONTROL_DRM = 0x0020, - PORT_CONTROL_RLB = 0x0040, - PORT_CONTROL_RCB = 0x0080, - PORT_CONTROL_MAC = 0x0100, - PORT_CONTROL_IPV = 0x0200, - PORT_CONTROL_IFP = 0x0400, - PORT_CONTROL_ITP = 0x0800, - PORT_CONTROL_FI = 0x1000, - PORT_CONTROL_DFP = 0x2000, - PORT_CONTROL_OI = 0x4000, - PORT_CONTROL_CC = 0x8000, -}; - -/* portStatus */ -enum { - PORT_STATUS_SM0 = 0x0001, - PORT_STATUS_SM1 = 0x0002, - PORT_STATUS_X = 0x0008, - PORT_STATUS_DL = 0x0080, - PORT_STATUS_IC = 0x0200, - PORT_STATUS_MRC = 0x0400, - PORT_STATUS_NL = 0x0800, - PORT_STATUS_REV_ID_MASK = 0x7000, - PORT_STATUS_REV_ID_1 = 0x1000, - PORT_STATUS_REV_ID_2 = 0x2000, - PORT_STATUS_REV_ID_3 = 0x3000, - PORT_STATUS_64 = 0x8000, - PORT_STATUS_UP0 = 0x10000, - PORT_STATUS_AC0 = 0x20000, - PORT_STATUS_AE0 = 0x40000, - PORT_STATUS_UP1 = 0x100000, - PORT_STATUS_AC1 = 0x200000, - PORT_STATUS_AE1 = 0x400000, - PORT_STATUS_F0_ENABLED = 0x1000000, - PORT_STATUS_F1_ENABLED = 0x2000000, - PORT_STATUS_F2_ENABLED = 0x4000000, - PORT_STATUS_F3_ENABLED = 0x8000000, -}; - -/* macMIIMgmtControlReg */ -enum { - MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003, - MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000, - MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001, - MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002, - MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003, - MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008, - MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010, - MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020, - MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040, - MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080, -}; - -/* macMIIMgmtControlReg */ -enum { - 
MAC_MII_CONTROL_RC = 0x0001, - MAC_MII_CONTROL_SC = 0x0002, - MAC_MII_CONTROL_AS = 0x0004, - MAC_MII_CONTROL_NP = 0x0008, - MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070, - MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000, - MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010, - MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020, - MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030, - MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040, - MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050, - MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060, - MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070, - MAC_MII_CONTROL_RM = 0x8000, -}; - -/* macMIIStatusReg */ -enum { - MAC_MII_STATUS_BSY = 0x0001, - MAC_MII_STATUS_SC = 0x0002, - MAC_MII_STATUS_NV = 0x0004, -}; - -enum { - MAC_CONFIG_REG_PE = 0x0001, - MAC_CONFIG_REG_TF = 0x0002, - MAC_CONFIG_REG_RF = 0x0004, - MAC_CONFIG_REG_FD = 0x0008, - MAC_CONFIG_REG_GM = 0x0010, - MAC_CONFIG_REG_LB = 0x0020, - MAC_CONFIG_REG_SR = 0x8000, -}; - -enum { - MAC_HALF_DUPLEX_REG_ED = 0x10000, - MAC_HALF_DUPLEX_REG_NB = 0x20000, - MAC_HALF_DUPLEX_REG_BNB = 0x40000, - MAC_HALF_DUPLEX_REG_ALT = 0x80000, -}; - -enum { - IP_ADDR_INDEX_REG_MASK = 0x000f, - IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000, - IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001, - IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002, - IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003, - IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004, - IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005, - IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006, - IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007, - IP_ADDR_INDEX_REG_6 = 0x0008, - IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030, - IP_ADDR_INDEX_REG_E = 0x0040, -}; -enum { - QL3032_PORT_CONTROL_DS = 0x0001, - QL3032_PORT_CONTROL_HH = 0x0002, - QL3032_PORT_CONTROL_EIv6 = 0x0004, - QL3032_PORT_CONTROL_EIv4 = 0x0008, - QL3032_PORT_CONTROL_ET = 0x0010, - QL3032_PORT_CONTROL_EF = 0x0020, - QL3032_PORT_CONTROL_DRM = 0x0040, - QL3032_PORT_CONTROL_RLB = 0x0080, - QL3032_PORT_CONTROL_RCB = 0x0100, - QL3032_PORT_CONTROL_KIE = 0x0200, -}; - -enum { - PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f, - PROBE_MUX_ADDR_REG_SYSCLK = 0x0000, - PROBE_MUX_ADDR_REG_PCICLK = 0x0040, - PROBE_MUX_ADDR_REG_NRXCLK = 0x0080, - PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0, - PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00, - PROBE_MUX_ADDR_REG_UP = 0x4000, - PROBE_MUX_ADDR_REG_RE = 0x8000, -}; - -enum { - STATISTICS_INDEX_REG_MASK = 0x01ff, - STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000, - STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001, - STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002, - STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003, - STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004, - STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005, - STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006, - STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007, - STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008, - STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009, - STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a, - STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b, - STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c, - STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d, - STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e, - STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f, - STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010, - STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011, - STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012, - STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013, - STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014, - STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015, - STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016, - STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017, - STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018, - STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019, - STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a, - 
STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b, - STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c, - STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d, - STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e, - STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f, - STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020, - STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021, - STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022, - STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023, - STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024, - STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025, - STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026, - STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027, - STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028, - STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029, - STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030, - STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031, - STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032, - STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033, - STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034, - STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035, - STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036, - STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037, - STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038, - STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f, -}; - -enum { - PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001, - PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002, - PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004, - PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008, - PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010, - PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020, - PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040, - PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080, - PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100, - PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200, - PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400, - PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800, - PORT_FATAL_ERROR_STATUS_BLE = 0x00001000, - PORT_FATAL_ERROR_STATUS_SPE = 0x00002000, - PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000, - PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000, - PORT_FATAL_ERROR_STATUS_ICE = 0x00010000, - PORT_FATAL_ERROR_STATUS_ILE = 0x00020000, - PORT_FATAL_ERROR_STATUS_OPE = 0x00040000, - PORT_FATAL_ERROR_STATUS_TA = 0x00080000, - PORT_FATAL_ERROR_STATUS_MA = 0x00100000, - PORT_FATAL_ERROR_STATUS_SCE = 0x00200000, - PORT_FATAL_ERROR_STATUS_RPE = 0x00400000, - PORT_FATAL_ERROR_STATUS_MPE = 0x00800000, - PORT_FATAL_ERROR_STATUS_OCE = 0x01000000, -}; - -/* - * port control and status page - page 0 - */ - -struct ql3xxx_port_registers { - struct ql3xxx_common_registers CommonRegs; - - u32 ExternalHWConfig; - u32 InternalChipConfig; - u32 portControl; - u32 portStatus; - u32 macAddrIndirectPtrReg; - u32 macAddrDataReg; - u32 macMIIMgmtControlReg; - u32 macMIIMgmtAddrReg; - u32 macMIIMgmtDataReg; - u32 macMIIStatusReg; - u32 mac0ConfigReg; - u32 mac0IpgIfgReg; - u32 mac0HalfDuplexReg; - u32 mac0MaxFrameLengthReg; - u32 mac0PauseThresholdReg; - u32 mac1ConfigReg; - u32 mac1IpgIfgReg; - u32 mac1HalfDuplexReg; - u32 mac1MaxFrameLengthReg; - u32 mac1PauseThresholdReg; - u32 ipAddrIndexReg; - u32 ipAddrDataReg; - u32 ipReassemblyTimeout; - u32 tcpMaxWindow; - u32 currentTcpTimestamp[2]; - u32 internalRamRWAddrReg; - u32 internalRamWDataReg; - u32 reclaimedBufferAddrRegLow; - u32 reclaimedBufferAddrRegHigh; - u32 tcpConfiguration; - u32 functionControl; - u32 fpgaRevID; - u32 localRamAddr; - u32 localRamDataAutoIncr; - u32 localRamDataNonIncr; - u32 gpOutput; - u32 gpInput; - u32 probeMuxAddr; - u32 probeMuxData; - u32 statisticsIndexReg; - u32 statisticsReadDataRegAutoIncr; - u32 statisticsReadDataRegNoIncr; - u32 PortFatalErrStatus; -}; - -/* - * port host 
memory config page - page 1 - */ -struct ql3xxx_host_memory_registers { - struct ql3xxx_common_registers CommonRegs; - - u32 reserved[12]; - - /* Network Request Queue */ - u32 reqConsumerIndex; - u32 reqConsumerIndexAddrLow; - u32 reqConsumerIndexAddrHigh; - u32 reqBaseAddrLow; - u32 reqBaseAddrHigh; - u32 reqLength; - - /* Network Completion Queue */ - u32 rspProducerIndex; - u32 rspProducerIndexAddrLow; - u32 rspProducerIndexAddrHigh; - u32 rspBaseAddrLow; - u32 rspBaseAddrHigh; - u32 rspLength; - - /* RX Large Buffer Queue */ - u32 rxLargeQConsumerIndex; - u32 rxLargeQBaseAddrLow; - u32 rxLargeQBaseAddrHigh; - u32 rxLargeQLength; - u32 rxLargeBufferLength; - - /* RX Small Buffer Queue */ - u32 rxSmallQConsumerIndex; - u32 rxSmallQBaseAddrLow; - u32 rxSmallQBaseAddrHigh; - u32 rxSmallQLength; - u32 rxSmallBufferLength; - -}; - -/* - * port local RAM page - page 2 - */ -struct ql3xxx_local_ram_registers { - struct ql3xxx_common_registers CommonRegs; - u32 bufletSize; - u32 maxBufletCount; - u32 currentBufletCount; - u32 reserved; - u32 freeBufletThresholdLow; - u32 freeBufletThresholdHigh; - u32 ipHashTableBase; - u32 ipHashTableCount; - u32 tcpHashTableBase; - u32 tcpHashTableCount; - u32 ncbBase; - u32 maxNcbCount; - u32 currentNcbCount; - u32 drbBase; - u32 maxDrbCount; - u32 currentDrbCount; -}; - -/* - * definitions for Semaphore bits in Semaphore/Serial NVRAM interface register - */ - -#define LS_64BITS(x) (u32)(0xffffffff & ((u64)x)) -#define MS_64BITS(x) (u32)(0xffffffff & (((u64)x)>>16>>16) ) - -/* - * I/O register - */ - -enum { - CONTROL_REG = 0, - STATUS_REG = 1, - PHY_STAT_LINK_UP = 0x0004, - PHY_CTRL_LOOPBACK = 0x4000, - - PETBI_CONTROL_REG = 0x00, - PETBI_CTRL_ALL_PARAMS = 0x7140, - PETBI_CTRL_SOFT_RESET = 0x8000, - PETBI_CTRL_AUTO_NEG = 0x1000, - PETBI_CTRL_RESTART_NEG = 0x0200, - PETBI_CTRL_FULL_DUPLEX = 0x0100, - PETBI_CTRL_SPEED_1000 = 0x0040, - - PETBI_STATUS_REG = 0x01, - PETBI_STAT_NEG_DONE = 0x0020, - PETBI_STAT_LINK_UP = 0x0004, - - PETBI_NEG_ADVER = 0x04, - PETBI_NEG_PAUSE = 0x0080, - PETBI_NEG_PAUSE_MASK = 0x0180, - PETBI_NEG_DUPLEX = 0x0020, - PETBI_NEG_DUPLEX_MASK = 0x0060, - - PETBI_NEG_PARTNER = 0x05, - PETBI_NEG_ERROR_MASK = 0x3000, - - PETBI_EXPANSION_REG = 0x06, - PETBI_EXP_PAGE_RX = 0x0002, - - PHY_GIG_CONTROL = 9, - PHY_GIG_ENABLE_MAN = 0x1000, /* Enable Master/Slave Manual Config*/ - PHY_GIG_SET_MASTER = 0x0800, /* Set Master (slave if clear)*/ - PHY_GIG_ALL_PARAMS = 0x0300, - PHY_GIG_ADV_1000F = 0x0200, - PHY_GIG_ADV_1000H = 0x0100, - - PHY_NEG_ADVER = 4, - PHY_NEG_ALL_PARAMS = 0x0fe0, - PHY_NEG_ASY_PAUSE = 0x0800, - PHY_NEG_SYM_PAUSE = 0x0400, - PHY_NEG_ADV_SPEED = 0x01e0, - PHY_NEG_ADV_100F = 0x0100, - PHY_NEG_ADV_100H = 0x0080, - PHY_NEG_ADV_10F = 0x0040, - PHY_NEG_ADV_10H = 0x0020, - - PETBI_TBI_CTRL = 0x11, - PETBI_TBI_RESET = 0x8000, - PETBI_TBI_AUTO_SENSE = 0x0100, - PETBI_TBI_SERDES_MODE = 0x0010, - PETBI_TBI_SERDES_WRAP = 0x0002, - - AUX_CONTROL_STATUS = 0x1c, - PHY_AUX_NEG_DONE = 0x8000, - PHY_NEG_PARTNER = 5, - PHY_AUX_DUPLEX_STAT = 0x0020, - PHY_AUX_SPEED_STAT = 0x0018, - PHY_AUX_NO_HW_STRAP = 0x0004, - PHY_AUX_RESET_STICK = 0x0002, - PHY_NEG_PAUSE = 0x0400, - PHY_CTRL_SOFT_RESET = 0x8000, - PHY_CTRL_AUTO_NEG = 0x1000, - PHY_CTRL_RESTART_NEG = 0x0200, -}; -enum { -/* AM29LV Flash definitions */ - FM93C56A_START = 0x1, -/* Commands */ - FM93C56A_READ = 0x2, - FM93C56A_WEN = 0x0, - FM93C56A_WRITE = 0x1, - FM93C56A_WRITE_ALL = 0x0, - FM93C56A_WDS = 0x0, - FM93C56A_ERASE = 0x3, - FM93C56A_ERASE_ALL = 0x0, -/* Command Extensions */ - 
FM93C56A_WEN_EXT = 0x3, - FM93C56A_WRITE_ALL_EXT = 0x1, - FM93C56A_WDS_EXT = 0x0, - FM93C56A_ERASE_ALL_EXT = 0x2, -/* Special Bits */ - FM93C56A_READ_DUMMY_BITS = 1, - FM93C56A_READY = 0, - FM93C56A_BUSY = 1, - FM93C56A_CMD_BITS = 2, -/* AM29LV Flash definitions */ - FM93C56A_SIZE_8 = 0x100, - FM93C56A_SIZE_16 = 0x80, - FM93C66A_SIZE_8 = 0x200, - FM93C66A_SIZE_16 = 0x100, - FM93C86A_SIZE_16 = 0x400, -/* Address Bits */ - FM93C56A_NO_ADDR_BITS_16 = 8, - FM93C56A_NO_ADDR_BITS_8 = 9, - FM93C86A_NO_ADDR_BITS_16 = 10, -/* Data Bits */ - FM93C56A_DATA_BITS_16 = 16, - FM93C56A_DATA_BITS_8 = 8, -}; -enum { -/* Auburn Bits */ - AUBURN_EEPROM_DI = 0x8, - AUBURN_EEPROM_DI_0 = 0x0, - AUBURN_EEPROM_DI_1 = 0x8, - AUBURN_EEPROM_DO = 0x4, - AUBURN_EEPROM_DO_0 = 0x0, - AUBURN_EEPROM_DO_1 = 0x4, - AUBURN_EEPROM_CS = 0x2, - AUBURN_EEPROM_CS_0 = 0x0, - AUBURN_EEPROM_CS_1 = 0x2, - AUBURN_EEPROM_CLK_RISE = 0x1, - AUBURN_EEPROM_CLK_FALL = 0x0, -}; -enum {EEPROM_SIZE = FM93C86A_SIZE_16, - EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16, - EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16, -}; - -/* - * MAC Config data structure - */ - struct eeprom_port_cfg { - u16 etherMtu_mac; - u16 pauseThreshold_mac; - u16 resumeThreshold_mac; - u16 portConfiguration; -#define PORT_CONFIG_DEFAULT 0xf700 -#define PORT_CONFIG_AUTO_NEG_ENABLED 0x8000 -#define PORT_CONFIG_SYM_PAUSE_ENABLED 0x4000 -#define PORT_CONFIG_FULL_DUPLEX_ENABLED 0x2000 -#define PORT_CONFIG_HALF_DUPLEX_ENABLED 0x1000 -#define PORT_CONFIG_1000MB_SPEED 0x0400 -#define PORT_CONFIG_100MB_SPEED 0x0200 -#define PORT_CONFIG_10MB_SPEED 0x0100 -#define PORT_CONFIG_LINK_SPEED_MASK 0x0F00 - u16 reserved[12]; - -}; - -/* - * BIOS data structure - */ -struct eeprom_bios_cfg { - u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12; - - u8 bootID0:7, boodID0Valid:1; - u8 bootLun0[8]; - - u8 bootID1:7, boodID1Valid:1; - u8 bootLun1[8]; - - u16 MaxLunsTrgt; - u8 reserved[10]; -}; - -/* - * Function Specific Data structure - */ -struct eeprom_function_cfg { - u8 reserved[30]; - u16 macAddress[3]; - u16 macAddressSecondary[3]; - - u16 subsysVendorId; - u16 subsysDeviceId; -}; - -/* - * EEPROM format - */ -struct eeprom_data { - u8 asicId[4]; - u16 version_and_numPorts; /* together to avoid endianness crap */ - u16 boardId; - -#define EEPROM_BOARDID_STR_SIZE 16 -#define EEPROM_SERIAL_NUM_SIZE 16 - - u8 boardIdStr[16]; - u8 serialNumber[16]; - u16 extHwConfig; - struct eeprom_port_cfg macCfg_port0; - struct eeprom_port_cfg macCfg_port1; - u16 bufletSize; - u16 bufletCount; - u16 tcpWindowThreshold50; - u16 tcpWindowThreshold25; - u16 tcpWindowThreshold0; - u16 ipHashTableBaseHi; - u16 ipHashTableBaseLo; - u16 ipHashTableSize; - u16 tcpHashTableBaseHi; - u16 tcpHashTableBaseLo; - u16 tcpHashTableSize; - u16 ncbTableBaseHi; - u16 ncbTableBaseLo; - u16 ncbTableSize; - u16 drbTableBaseHi; - u16 drbTableBaseLo; - u16 drbTableSize; - u16 reserved_142[4]; - u16 ipReassemblyTimeout; - u16 tcpMaxWindowSize; - u16 ipSecurity; -#define IPSEC_CONFIG_PRESENT 0x0001 - u8 reserved_156[294]; - u16 qDebug[8]; - struct eeprom_function_cfg funcCfg_fn0; - u16 reserved_510; - u8 oemSpace[432]; - struct eeprom_bios_cfg biosCfg_fn1; - struct eeprom_function_cfg funcCfg_fn1; - u16 reserved_1022; - u8 reserved_1024[464]; - struct eeprom_function_cfg funcCfg_fn2; - u16 reserved_1534; - u8 reserved_1536[432]; - struct eeprom_bios_cfg biosCfg_fn3; - struct eeprom_function_cfg funcCfg_fn3; - u16 checksum; -}; - -/* - * General definitions... 
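[The queue constants just below follow from simple size arithmetic, all derivable from this header: a lrg_buf_q_entry is sixteen little-endian words (eight low/high address pairs), a bufq_addr_element is one pair, so QL_ADDR_ELE_PER_BUFQ_ENTRY works out to 8 and NUM_SMALL_BUFFERS to 64 * 8 = 512 buffers of 32 bytes (a 16 KB coherent pool). Spelled out:

    /* Sketch: the sizing arithmetic behind the constants below. */
    enum {
            LRG_ENTRY_BYTES = 16 * 4,        /* struct lrg_buf_q_entry   */
            ADDR_ELE_BYTES  = 2 * 4,         /* one low/high DMA pair    */
            ELE_PER_ENTRY   = LRG_ENTRY_BYTES / ADDR_ELE_BYTES,  /* 8    */
            N_SMALL_BUFS    = 64 * ELE_PER_ENTRY,   /* NUM_SBUFQ * 8=512 */
            SMALL_POOL_SZ   = N_SMALL_BUFS * 32,    /* 16384 bytes       */
    };
]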
- */ - -/* - * Below are a number compiler switches for controlling driver behavior. - * Some are not supported under certain conditions and are notated as such. - */ - -#define QL3XXX_VENDOR_ID 0x1077 -#define QL3022_DEVICE_ID 0x3022 -#define QL3032_DEVICE_ID 0x3032 - -/* MTU & Frame Size stuff */ -#define NORMAL_MTU_SIZE ETH_DATA_LEN -#define JUMBO_MTU_SIZE 9000 -#define VLAN_ID_LEN 2 - -/* Request Queue Related Definitions */ -#define NUM_REQ_Q_ENTRIES 256 /* so that 64 * 64 = 4096 (1 page) */ - -/* Response Queue Related Definitions */ -#define NUM_RSP_Q_ENTRIES 256 /* so that 256 * 16 = 4096 (1 page) */ - -/* Transmit and Receive Buffers */ -#define NUM_LBUFQ_ENTRIES 128 -#define JUMBO_NUM_LBUFQ_ENTRIES 32 -#define NUM_SBUFQ_ENTRIES 64 -#define QL_SMALL_BUFFER_SIZE 32 -#define QL_ADDR_ELE_PER_BUFQ_ENTRY \ -(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element)) - /* Each send has at least control block. This is how many we keep. */ -#define NUM_SMALL_BUFFERS NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY - -#define QL_HEADER_SPACE 32 /* make header space at top of skb. */ -/* - * Large & Small Buffers for Receives - */ -struct lrg_buf_q_entry { - - __le32 addr0_lower; -#define IAL_LAST_ENTRY 0x00000001 -#define IAL_CONT_ENTRY 0x00000002 -#define IAL_FLAG_MASK 0x00000003 - __le32 addr0_upper; - __le32 addr1_lower; - __le32 addr1_upper; - __le32 addr2_lower; - __le32 addr2_upper; - __le32 addr3_lower; - __le32 addr3_upper; - __le32 addr4_lower; - __le32 addr4_upper; - __le32 addr5_lower; - __le32 addr5_upper; - __le32 addr6_lower; - __le32 addr6_upper; - __le32 addr7_lower; - __le32 addr7_upper; - -}; - -struct bufq_addr_element { - __le32 addr_low; - __le32 addr_high; -}; - -#define QL_NO_RESET 0 -#define QL_DO_RESET 1 - -enum link_state_t { - LS_UNKNOWN = 0, - LS_DOWN, - LS_DEGRADE, - LS_RECOVER, - LS_UP, -}; - -struct ql_rcv_buf_cb { - struct ql_rcv_buf_cb *next; - struct sk_buff *skb; - DEFINE_DMA_UNMAP_ADDR(mapaddr); - DEFINE_DMA_UNMAP_LEN(maplen); - __le32 buf_phy_addr_low; - __le32 buf_phy_addr_high; - int index; -}; - -/* - * Original IOCB has 3 sg entries: - * first points to skb-data area - * second points to first frag - * third points to next oal. - * OAL has 5 entries: - * 1 thru 4 point to frags - * fifth points to next oal. - */ -#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1) - -struct oal_entry { - __le32 dma_lo; - __le32 dma_hi; - __le32 len; -#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */ -#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */ -}; - -struct oal { - struct oal_entry oal_entry[5]; -}; - -struct map_list { - DEFINE_DMA_UNMAP_ADDR(mapaddr); - DEFINE_DMA_UNMAP_LEN(maplen); -}; - -struct ql_tx_buf_cb { - struct sk_buff *skb; - struct ob_mac_iocb_req *queue_entry ; - int seg_count; - struct oal *oal; - struct map_list map[MAX_SKB_FRAGS+1]; -}; - -/* definitions for type field */ -#define QL_BUF_TYPE_MACIOCB 0x01 -#define QL_BUF_TYPE_IPIOCB 0x02 -#define QL_BUF_TYPE_TCPIOCB 0x03 - -/* qdev->flags definitions. */ -enum { QL_RESET_DONE = 1, /* Reset finished. */ - QL_RESET_ACTIVE = 2, /* Waiting for reset to finish. */ - QL_RESET_START = 3, /* Please reset the chip. */ - QL_RESET_PER_SCSI = 4, /* SCSI driver requests reset. */ - QL_TX_TIMEOUT = 5, /* Timeout in progress. */ - QL_LINK_MASTER = 6, /* This driver controls the link. */ - QL_ADAPTER_UP = 7, /* Adapter has been brought up. */ - QL_THREAD_UP = 8, /* This flag is available. */ - QL_LINK_UP = 9, /* Link Status. 
*/ - QL_ALLOC_REQ_RSP_Q_DONE = 10, - QL_ALLOC_BUFQS_DONE = 11, - QL_ALLOC_SMALL_BUF_DONE = 12, - QL_LINK_OPTICAL = 13, - QL_MSI_ENABLED = 14, -}; - -/* - * ql3_adapter - The main Adapter structure definition. - * This structure has all fields relevant to the hardware. - */ - -struct ql3_adapter { - u32 reserved_00; - unsigned long flags; - - /* PCI Configuration information for this device */ - struct pci_dev *pdev; - struct net_device *ndev; /* Parent NET device */ - - struct napi_struct napi; - - /* Hardware information */ - u8 chip_rev_id; - u8 pci_slot; - u8 pci_width; - u8 pci_x; - u32 msi; - int index; - struct timer_list adapter_timer; /* timer used for various functions */ - - spinlock_t adapter_lock; - spinlock_t hw_lock; - - /* PCI Bus Relative Register Addresses */ - u8 __iomem *mmap_virt_base; /* stores return value from ioremap() */ - struct ql3xxx_port_registers __iomem *mem_map_registers; - u32 current_page; /* tracks current register page */ - - u32 msg_enable; - u8 reserved_01[2]; - u8 reserved_02[2]; - - /* Page for Shadow Registers */ - void *shadow_reg_virt_addr; - dma_addr_t shadow_reg_phy_addr; - - /* Net Request Queue */ - u32 req_q_size; - u32 reserved_03; - struct ob_mac_iocb_req *req_q_virt_addr; - dma_addr_t req_q_phy_addr; - u16 req_producer_index; - u16 reserved_04; - u16 *preq_consumer_index; - u32 req_consumer_index_phy_addr_high; - u32 req_consumer_index_phy_addr_low; - atomic_t tx_count; - struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES]; - - /* Net Response Queue */ - u32 rsp_q_size; - u32 eeprom_cmd_data; - struct net_rsp_iocb *rsp_q_virt_addr; - dma_addr_t rsp_q_phy_addr; - struct net_rsp_iocb *rsp_current; - u16 rsp_consumer_index; - u16 reserved_06; - volatile __le32 *prsp_producer_index; - u32 rsp_producer_index_phy_addr_high; - u32 rsp_producer_index_phy_addr_low; - - /* Large Buffer Queue */ - u32 lrg_buf_q_alloc_size; - u32 lrg_buf_q_size; - void *lrg_buf_q_alloc_virt_addr; - void *lrg_buf_q_virt_addr; - dma_addr_t lrg_buf_q_alloc_phy_addr; - dma_addr_t lrg_buf_q_phy_addr; - u32 lrg_buf_q_producer_index; - u32 lrg_buf_release_cnt; - struct bufq_addr_element *lrg_buf_next_free; - u32 num_large_buffers; - u32 num_lbufq_entries; - - /* Large (Receive) Buffers */ - struct ql_rcv_buf_cb *lrg_buf; - struct ql_rcv_buf_cb *lrg_buf_free_head; - struct ql_rcv_buf_cb *lrg_buf_free_tail; - u32 lrg_buf_free_count; - u32 lrg_buffer_len; - u32 lrg_buf_index; - u32 lrg_buf_skb_check; - - /* Small Buffer Queue */ - u32 small_buf_q_alloc_size; - u32 small_buf_q_size; - u32 small_buf_q_producer_index; - void *small_buf_q_alloc_virt_addr; - void *small_buf_q_virt_addr; - dma_addr_t small_buf_q_alloc_phy_addr; - dma_addr_t small_buf_q_phy_addr; - u32 small_buf_index; - - /* Small (Receive) Buffers */ - void *small_buf_virt_addr; - dma_addr_t small_buf_phy_addr; - u32 small_buf_phy_addr_low; - u32 small_buf_phy_addr_high; - u32 small_buf_release_cnt; - u32 small_buf_total_size; - - struct eeprom_data nvram_data; - u32 port_link_state; - - /* 4022 specific */ - u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ - u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ - u32 mac_ob_opcode; /* Opcode to use on mac transmission */ - u32 mb_bit_mask; /* MA Bits mask to use on transmission */ - u32 numPorts; - struct workqueue_struct *workqueue; - struct delayed_work reset_work; - struct delayed_work tx_timeout_work; - struct delayed_work link_state_work; - u32 max_frame_size; - u32 device_id; - u16 phyType; 
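One pattern worth illustrating before the structure closes: the ...Low/...High register pairs declared earlier in this header are programmed by splitting a DMA address with the LS_64BITS()/MS_64BITS() helpers; note that MS_64BITS() extracts the upper word with two 16-bit shifts rather than a single 32-bit shift. A hedged sketch of the pattern, with hypothetical example_* naming (the real writes in qla3xxx.c go through the driver's paged register accessors and locking):

static void example_set_req_base(struct ql3_adapter *qdev, dma_addr_t base)
{
	/* The memory-registers page overlays the mapped BAR; the real
	 * driver selects the register page before touching these. */
	struct ql3xxx_host_memory_registers __iomem *regs =
		(struct ql3xxx_host_memory_registers __iomem *)
			qdev->mem_map_registers;

	writel(LS_64BITS(base), &regs->reqBaseAddrLow);   /* bits 31:0  */
	writel(MS_64BITS(base), &regs->reqBaseAddrHigh);  /* bits 63:32 */
}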
-}; - -#endif /* _QLA3XXX_H_ */ diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/qlcnic/Makefile deleted file mode 100644 index ddba83ef3f44..000000000000 --- a/drivers/net/qlcnic/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -# -# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices -# - -obj-$(CONFIG_QLCNIC) := qlcnic.o - -qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \ - qlcnic_ethtool.o qlcnic_ctx.o diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h deleted file mode 100644 index 53c6e5dcf26c..000000000000 --- a/drivers/net/qlcnic/qlcnic.h +++ /dev/null @@ -1,1555 +0,0 @@ -/* - * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation - * - * See LICENSE.qlcnic for copyright and licensing details. - */ - -#ifndef _QLCNIC_H_ -#define _QLCNIC_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include -#include -#include -#include - -#include "qlcnic_hdr.h" - -#define _QLCNIC_LINUX_MAJOR 5 -#define _QLCNIC_LINUX_MINOR 0 -#define _QLCNIC_LINUX_SUBVERSION 22 -#define QLCNIC_LINUX_VERSIONID "5.0.22" -#define QLCNIC_DRV_IDC_VER 0x01 -#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ - (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) - -#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) -#define _major(v) (((v) >> 24) & 0xff) -#define _minor(v) (((v) >> 16) & 0xff) -#define _build(v) ((v) & 0xffff) - -/* version in image has weird encoding: - * 7:0 - major - * 15:8 - minor - * 31:16 - build (little endian) - */ -#define QLCNIC_DECODE_VERSION(v) \ - QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) - -#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2) -#define QLCNIC_NUM_FLASH_SECTORS (64) -#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024) -#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \ - * QLCNIC_FLASH_SECTOR_SIZE) - -#define RCV_DESC_RINGSIZE(rds_ring) \ - (sizeof(struct rcv_desc) * (rds_ring)->num_desc) -#define RCV_BUFF_RINGSIZE(rds_ring) \ - (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc) -#define STATUS_DESC_RINGSIZE(sds_ring) \ - (sizeof(struct status_desc) * (sds_ring)->num_desc) -#define TX_BUFF_RINGSIZE(tx_ring) \ - (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc) -#define TX_DESC_RINGSIZE(tx_ring) \ - (sizeof(struct cmd_desc_type0) * tx_ring->num_desc) - -#define QLCNIC_P3P_A0 0x50 - -#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0) - -#define FIRST_PAGE_GROUP_START 0 -#define FIRST_PAGE_GROUP_END 0x100000 - -#define P3P_MAX_MTU (9600) -#define P3P_MIN_MTU (68) -#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */ - -#define QLCNIC_P3P_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN) -#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU) -#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048 -#define QLCNIC_LRO_BUFFER_EXTRA 2048 - -/* Opcodes to be used with the commands */ -#define TX_ETHER_PKT 0x01 -#define TX_TCP_PKT 0x02 -#define TX_UDP_PKT 0x03 -#define TX_IP_PKT 0x04 -#define TX_TCP_LSO 0x05 -#define TX_TCP_LSO6 0x06 -#define TX_TCPV6_PKT 0x0b -#define TX_UDPV6_PKT 0x0c - -/* Tx defines */ -#define QLCNIC_MAX_FRAGS_PER_TX 14 -#define MAX_TSO_HEADER_DESC 2 -#define MGMT_CMD_DESC_RESV 4 -#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ - + MGMT_CMD_DESC_RESV) -#define QLCNIC_MAX_TX_TIMEOUTS 2 - -/* - * Following are the states of the Phantom. 
Phantom will set them and - * Host will read to check if the fields are correct. - */ -#define PHAN_INITIALIZE_FAILED 0xffff -#define PHAN_INITIALIZE_COMPLETE 0xff01 - -/* Host writes the following to notify that it has done the init-handshake */ -#define PHAN_INITIALIZE_ACK 0xf00f -#define PHAN_PEG_RCV_INITIALIZED 0xff01 - -#define NUM_RCV_DESC_RINGS 3 - -#define RCV_RING_NORMAL 0 -#define RCV_RING_JUMBO 1 - -#define MIN_CMD_DESCRIPTORS 64 -#define MIN_RCV_DESCRIPTORS 64 -#define MIN_JUMBO_DESCRIPTORS 32 - -#define MAX_CMD_DESCRIPTORS 1024 -#define MAX_RCV_DESCRIPTORS_1G 4096 -#define MAX_RCV_DESCRIPTORS_10G 8192 -#define MAX_RCV_DESCRIPTORS_VF 2048 -#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512 -#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024 - -#define DEFAULT_RCV_DESCRIPTORS_1G 2048 -#define DEFAULT_RCV_DESCRIPTORS_10G 4096 -#define DEFAULT_RCV_DESCRIPTORS_VF 1024 -#define MAX_RDS_RINGS 2 - -#define get_next_index(index, length) \ - (((index) + 1) & ((length) - 1)) - -/* - * Following data structures describe the descriptors that will be used. - * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when - * we are doing LSO (above the 1500 size packet) only. - */ - -#define FLAGS_VLAN_TAGGED 0x10 -#define FLAGS_VLAN_OOB 0x40 - -#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \ - (cmd_desc)->vlan_TCI = cpu_to_le16(v); -#define qlcnic_set_cmd_desc_port(cmd_desc, var) \ - ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) -#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \ - ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0)) - -#define qlcnic_set_tx_port(_desc, _port) \ - ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)) - -#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \ - ((_desc)->flags_opcode |= \ - cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))) - -#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \ - ((_desc)->nfrags__length = \ - cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8))) - -struct cmd_desc_type0 { - u8 tcp_hdr_offset; /* For LSO only */ - u8 ip_hdr_offset; /* For LSO only */ - __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */ - __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */ - - __le64 addr_buffer2; - - __le16 reference_handle; - __le16 mss; - u8 port_ctxid; /* 7:4 ctxid 3:0 port */ - u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ - __le16 conn_id; /* IPSec offoad only */ - - __le64 addr_buffer3; - __le64 addr_buffer1; - - __le16 buffer_length[4]; - - __le64 addr_buffer4; - - u8 eth_addr[ETH_ALEN]; - __le16 vlan_TCI; - -} __attribute__ ((aligned(64))); - -/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ -struct rcv_desc { - __le16 reference_handle; - __le16 reserved; - __le32 buffer_length; /* allocated buffer length (usually 2K) */ - __le64 addr_buffer; -} __packed; - -/* opcode field in status_desc */ -#define QLCNIC_SYN_OFFLOAD 0x03 -#define QLCNIC_RXPKT_DESC 0x04 -#define QLCNIC_OLD_RXPKT_DESC 0x3f -#define QLCNIC_RESPONSE_DESC 0x05 -#define QLCNIC_LRO_DESC 0x12 - -/* for status field in status_desc */ -#define STATUS_CKSUM_LOOP 0 -#define STATUS_CKSUM_OK 2 - -/* owner bits of status_desc */ -#define STATUS_OWNER_HOST (0x1ULL << 56) -#define STATUS_OWNER_PHANTOM (0x2ULL << 56) - -/* Status descriptor: - 0-3 port, 4-7 status, 8-11 type, 12-27 total_length - 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset - 53-55 desc_cnt, 56-57 owner, 58-63 opcode - */ -#define qlcnic_get_sts_port(sts_data) \ - ((sts_data) & 0x0F) -#define qlcnic_get_sts_status(sts_data) \ - 
(((sts_data) >> 4) & 0x0F) -#define qlcnic_get_sts_type(sts_data) \ - (((sts_data) >> 8) & 0x0F) -#define qlcnic_get_sts_totallength(sts_data) \ - (((sts_data) >> 12) & 0xFFFF) -#define qlcnic_get_sts_refhandle(sts_data) \ - (((sts_data) >> 28) & 0xFFFF) -#define qlcnic_get_sts_prot(sts_data) \ - (((sts_data) >> 44) & 0x0F) -#define qlcnic_get_sts_pkt_offset(sts_data) \ - (((sts_data) >> 48) & 0x1F) -#define qlcnic_get_sts_desc_cnt(sts_data) \ - (((sts_data) >> 53) & 0x7) -#define qlcnic_get_sts_opcode(sts_data) \ - (((sts_data) >> 58) & 0x03F) - -#define qlcnic_get_lro_sts_refhandle(sts_data) \ - ((sts_data) & 0x0FFFF) -#define qlcnic_get_lro_sts_length(sts_data) \ - (((sts_data) >> 16) & 0x0FFFF) -#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \ - (((sts_data) >> 32) & 0x0FF) -#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \ - (((sts_data) >> 40) & 0x0FF) -#define qlcnic_get_lro_sts_timestamp(sts_data) \ - (((sts_data) >> 48) & 0x1) -#define qlcnic_get_lro_sts_type(sts_data) \ - (((sts_data) >> 49) & 0x7) -#define qlcnic_get_lro_sts_push_flag(sts_data) \ - (((sts_data) >> 52) & 0x1) -#define qlcnic_get_lro_sts_seq_number(sts_data) \ - ((sts_data) & 0x0FFFFFFFF) - - -struct status_desc { - __le64 status_desc_data[2]; -} __attribute__ ((aligned(16))); - -/* UNIFIED ROMIMAGE */ -#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000 -#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0 -#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6 -#define QLCNIC_UNI_DIR_SECT_FW 0x7 - -/*Offsets */ -#define QLCNIC_UNI_CHIP_REV_OFF 10 -#define QLCNIC_UNI_FLAGS_OFF 11 -#define QLCNIC_UNI_BIOS_VERSION_OFF 12 -#define QLCNIC_UNI_BOOTLD_IDX_OFF 27 -#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29 - -struct uni_table_desc{ - u32 findex; - u32 num_entries; - u32 entry_size; - u32 reserved[5]; -}; - -struct uni_data_desc{ - u32 findex; - u32 size; - u32 reserved[5]; -}; - -/* Flash Defines and Structures */ -#define QLCNIC_FLT_LOCATION 0x3F1000 -#define QLCNIC_FW_IMAGE_REGION 0x74 -#define QLCNIC_BOOTLD_REGION 0X72 -struct qlcnic_flt_header { - u16 version; - u16 len; - u16 checksum; - u16 reserved; -}; - -struct qlcnic_flt_entry { - u8 region; - u8 reserved0; - u8 attrib; - u8 reserved1; - u32 size; - u32 start_addr; - u32 end_addr; -}; - -/* Magic number to let user know flash is programmed */ -#define QLCNIC_BDINFO_MAGIC 0x12345678 - -#define QLCNIC_BRDTYPE_P3P_REF_QG 0x0021 -#define QLCNIC_BRDTYPE_P3P_HMEZ 0x0022 -#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP 0x0023 -#define QLCNIC_BRDTYPE_P3P_4_GB 0x0024 -#define QLCNIC_BRDTYPE_P3P_IMEZ 0x0025 -#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS 0x0026 -#define QLCNIC_BRDTYPE_P3P_10000_BASE_T 0x0027 -#define QLCNIC_BRDTYPE_P3P_XG_LOM 0x0028 -#define QLCNIC_BRDTYPE_P3P_4_GB_MM 0x0029 -#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT 0x002a -#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT 0x002b -#define QLCNIC_BRDTYPE_P3P_10G_CX4 0x0031 -#define QLCNIC_BRDTYPE_P3P_10G_XFP 0x0032 -#define QLCNIC_BRDTYPE_P3P_10G_TP 0x0080 - -#define QLCNIC_MSIX_TABLE_OFFSET 0x44 - -/* Flash memory map */ -#define QLCNIC_BRDCFG_START 0x4000 /* board config */ -#define QLCNIC_BOOTLD_START 0x10000 /* bootld */ -#define QLCNIC_IMAGE_START 0x43000 /* compressed image */ -#define QLCNIC_USER_START 0x3E8000 /* Firmare info */ - -#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) -#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) -#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c) -#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c) - -#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8) -#define QLCNIC_FW_MAGIC_OFFSET 
(QLCNIC_BRDCFG_START+0x128) - -#define QLCNIC_FW_MIN_SIZE (0x3fffff) -#define QLCNIC_UNIFIED_ROMIMAGE 0 -#define QLCNIC_FLASH_ROMIMAGE 1 -#define QLCNIC_UNKNOWN_ROMIMAGE 0xff - -#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin" -#define QLCNIC_FLASH_ROMIMAGE_NAME "flash" - -extern char qlcnic_driver_name[]; - -/* Number of status descriptors to handle per interrupt */ -#define MAX_STATUS_HANDLE (64) - -/* - * qlcnic_skb_frag{} is to contain mapping info for each SG list. This - * has to be freed when DMA is complete. This is part of qlcnic_tx_buffer{}. - */ -struct qlcnic_skb_frag { - u64 dma; - u64 length; -}; - -/* Following defines are for the state of the buffers */ -#define QLCNIC_BUFFER_FREE 0 -#define QLCNIC_BUFFER_BUSY 1 - -/* - * There will be one qlcnic_buffer per skb packet. These will be - * used to save the dma info for pci_unmap_page() - */ -struct qlcnic_cmd_buffer { - struct sk_buff *skb; - struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1]; - u32 frag_count; -}; - -/* In rx_buffer, we do not need multiple fragments as is a single buffer */ -struct qlcnic_rx_buffer { - u16 ref_handle; - struct sk_buff *skb; - struct list_head list; - u64 dma; -}; - -/* Board types */ -#define QLCNIC_GBE 0x01 -#define QLCNIC_XGBE 0x02 - -/* - * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is - * adjusted based on configured MTU. - */ -#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3 -#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256 - -#define QLCNIC_INTR_DEFAULT 0x04 -#define QLCNIC_CONFIG_INTR_COALESCE 3 - -struct qlcnic_nic_intr_coalesce { - u8 type; - u8 sts_ring_mask; - u16 rx_packets; - u16 rx_time_us; - u16 flag; - u32 timer_out; -}; - -struct qlcnic_dump_template_hdr { - __le32 type; - __le32 offset; - __le32 size; - __le32 cap_mask; - __le32 num_entries; - __le32 version; - __le32 timestamp; - __le32 checksum; - __le32 drv_cap_mask; - __le32 sys_info[3]; - __le32 saved_state[16]; - __le32 cap_sizes[8]; - __le32 rsvd[0]; -}; - -struct qlcnic_fw_dump { - u8 clr; /* flag to indicate if dump is cleared */ - u8 enable; /* enable/disable dump */ - u32 size; /* total size of the dump */ - void *data; /* dump data area */ - struct qlcnic_dump_template_hdr *tmpl_hdr; -}; - -/* - * One hardware_context{} per adapter - * contains interrupt info as well shared hardware info. - */ -struct qlcnic_hardware_context { - void __iomem *pci_base0; - void __iomem *ocm_win_crb; - - unsigned long pci_len0; - - rwlock_t crb_lock; - struct mutex mem_lock; - - u8 revision_id; - u8 pci_func; - u8 linkup; - u8 loopback_state; - u16 port_type; - u16 board_type; - - struct qlcnic_nic_intr_coalesce coal; - struct qlcnic_fw_dump fw_dump; -}; - -struct qlcnic_adapter_stats { - u64 xmitcalled; - u64 xmitfinished; - u64 rxdropped; - u64 txdropped; - u64 csummed; - u64 rx_pkts; - u64 lro_pkts; - u64 rxbytes; - u64 txbytes; - u64 lrobytes; - u64 lso_frames; - u64 xmit_on; - u64 xmit_off; - u64 skb_alloc_failure; - u64 null_rxbuf; - u64 rx_dma_map_error; - u64 tx_dma_map_error; -}; - -/* - * Rcv Descriptor Context. One such per Rcv Descriptor. There may - * be one Rcv Descriptor for normal packets, one for jumbo and may be others. 
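Before the ring structures, a short illustration of how the qlcnic_skb_frag bookkeeping above is consumed: once the hardware has finished with a TX buffer, each recorded mapping is released with pci_unmap_single()/pci_unmap_page(), which is exactly the purpose the comment on qlcnic_cmd_buffer describes. A hedged sketch, not the driver's actual cleanup routine, assuming entry 0 is the linear skb area and that frag_count counts all entries:

static void example_unmap_tx_buf(struct pci_dev *pdev,
				 struct qlcnic_cmd_buffer *buf)
{
	struct qlcnic_skb_frag *frag = &buf->frag_array[0];
	int i;

	/* Entry 0: the linear skb->data mapping. */
	pci_unmap_single(pdev, frag->dma, frag->length, PCI_DMA_TODEVICE);

	/* Remaining entries: page fragments of the same skb. */
	for (i = 1; i < buf->frag_count; i++) {
		frag = &buf->frag_array[i];
		pci_unmap_page(pdev, frag->dma, frag->length,
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(buf->skb);
	buf->skb = NULL;
}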
- */ -struct qlcnic_host_rds_ring { - void __iomem *crb_rcv_producer; - struct rcv_desc *desc_head; - struct qlcnic_rx_buffer *rx_buf_arr; - u32 num_desc; - u32 producer; - u32 dma_size; - u32 skb_size; - u32 flags; - struct list_head free_list; - spinlock_t lock; - dma_addr_t phys_addr; -} ____cacheline_internodealigned_in_smp; - -struct qlcnic_host_sds_ring { - u32 consumer; - u32 num_desc; - void __iomem *crb_sts_consumer; - - struct status_desc *desc_head; - struct qlcnic_adapter *adapter; - struct napi_struct napi; - struct list_head free_list[NUM_RCV_DESC_RINGS]; - - void __iomem *crb_intr_mask; - int irq; - - dma_addr_t phys_addr; - char name[IFNAMSIZ+4]; -} ____cacheline_internodealigned_in_smp; - -struct qlcnic_host_tx_ring { - u32 producer; - u32 sw_consumer; - u32 num_desc; - void __iomem *crb_cmd_producer; - struct cmd_desc_type0 *desc_head; - struct qlcnic_cmd_buffer *cmd_buf_arr; - __le32 *hw_consumer; - - dma_addr_t phys_addr; - dma_addr_t hw_cons_phys_addr; - struct netdev_queue *txq; -} ____cacheline_internodealigned_in_smp; - -/* - * Receive context. There is one such structure per instance of the - * receive processing. Any state information that is relevant to - * the receive, and is must be in this structure. The global data may be - * present elsewhere. - */ -struct qlcnic_recv_context { - struct qlcnic_host_rds_ring *rds_rings; - struct qlcnic_host_sds_ring *sds_rings; - u32 state; - u16 context_id; - u16 virt_port; - -}; - -/* HW context creation */ - -#define QLCNIC_OS_CRB_RETRY_COUNT 4000 -#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \ - (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16)) - -#define QLCNIC_CDRP_CMD_BIT 0x80000000 - -/* - * All responses must have the QLCNIC_CDRP_CMD_BIT cleared - * in the crb QLCNIC_CDRP_CRB_OFFSET. - */ -#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp) -#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0) - -#define QLCNIC_CDRP_RSP_OK 0x00000001 -#define QLCNIC_CDRP_RSP_FAIL 0x00000002 -#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003 - -/* - * All commands must have the QLCNIC_CDRP_CMD_BIT set in - * the crb QLCNIC_CDRP_CRB_OFFSET. 
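Stepping back to the receive rings defined just above: the producer side follows the classic descriptor-ring idiom. The host stamps a rcv_desc, advances producer with get_next_index() (ring sizes are powers of two, so the wrap is a mask rather than a modulo), and publishes the index through the CRB register mapped at crb_rcv_producer. A hedged sketch under a hypothetical name; the real refill path batches many buffers per CRB write and has its own convention for the published index:

static void example_post_rx_desc(struct qlcnic_host_rds_ring *rds_ring,
				 struct qlcnic_rx_buffer *buffer)
{
	struct rcv_desc *pdesc = &rds_ring->desc_head[rds_ring->producer];

	/* Hand the buffer to the adapter: reference handle + DMA address. */
	pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
	pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
	pdesc->addr_buffer = cpu_to_le64(buffer->dma);

	rds_ring->producer = get_next_index(rds_ring->producer,
					    rds_ring->num_desc);

	/* Tell the hardware where the new producer index is. */
	writel(rds_ring->producer, rds_ring->crb_rcv_producer);
}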
- */ -#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd)) -#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0) - -#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001 -#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002 -#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003 -#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004 -#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005 -#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006 -#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007 -#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008 -#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009 -#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a -#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012 -#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013 -#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014 -#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015 -#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016 -#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017 -#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018 -#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019 -#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f - -#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020 -#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021 -#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022 -#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024 -#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025 -#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026 -#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027 -#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028 -#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029 -#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a -#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E -#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f -#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030 - -#define QLCNIC_RCODE_SUCCESS 0 -#define QLCNIC_RCODE_NOT_SUPPORTED 9 -#define QLCNIC_RCODE_TIMEOUT 17 -#define QLCNIC_DESTROY_CTX_RESET 0 - -/* - * Capabilities Announced - */ -#define QLCNIC_CAP0_LEGACY_CONTEXT (1) -#define QLCNIC_CAP0_LEGACY_MN (1 << 2) -#define QLCNIC_CAP0_LSO (1 << 6) -#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7) -#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) -#define QLCNIC_CAP0_VALIDOFF (1 << 11) - -/* - * Context state - */ -#define QLCNIC_HOST_CTX_STATE_FREED 0 -#define QLCNIC_HOST_CTX_STATE_ACTIVE 2 - -/* - * Rx context - */ - -struct qlcnic_hostrq_sds_ring { - __le64 host_phys_addr; /* Ring base addr */ - __le32 ring_size; /* Ring entries */ - __le16 msi_index; - __le16 rsvd; /* Padding */ -} __packed; - -struct qlcnic_hostrq_rds_ring { - __le64 host_phys_addr; /* Ring base addr */ - __le64 buff_size; /* Packet buffer size */ - __le32 ring_size; /* Ring entries */ - __le32 ring_kind; /* Class of ring */ -} __packed; - -struct qlcnic_hostrq_rx_ctx { - __le64 host_rsp_dma_addr; /* Response dma'd here */ - __le32 capabilities[4]; /* Flag bit vector */ - __le32 host_int_crb_mode; /* Interrupt crb usage */ - __le32 host_rds_crb_mode; /* RDS crb usage */ - /* These ring offsets are relative to data[0] below */ - __le32 rds_ring_offset; /* Offset to RDS config */ - __le32 sds_ring_offset; /* Offset to SDS config */ - __le16 num_rds_rings; /* Count of RDS rings */ - __le16 num_sds_rings; /* Count of SDS rings */ - __le16 valid_field_offset; - u8 txrx_sds_binding; - u8 msix_handler; - u8 reserved[128]; /* reserve space for future expansion*/ - /* MUST BE 64-bit aligned. 
- The following is packed: - - N hostrq_rds_rings - - N hostrq_sds_rings */ - char data[0]; -} __packed; - -struct qlcnic_cardrsp_rds_ring{ - __le32 host_producer_crb; /* Crb to use */ - __le32 rsvd1; /* Padding */ -} __packed; - -struct qlcnic_cardrsp_sds_ring { - __le32 host_consumer_crb; /* Crb to use */ - __le32 interrupt_crb; /* Crb to use */ -} __packed; - -struct qlcnic_cardrsp_rx_ctx { - /* These ring offsets are relative to data[0] below */ - __le32 rds_ring_offset; /* Offset to RDS config */ - __le32 sds_ring_offset; /* Offset to SDS config */ - __le32 host_ctx_state; /* Starting State */ - __le32 num_fn_per_port; /* How many PCI fn share the port */ - __le16 num_rds_rings; /* Count of RDS rings */ - __le16 num_sds_rings; /* Count of SDS rings */ - __le16 context_id; /* Handle for context */ - u8 phys_port; /* Physical id of port */ - u8 virt_port; /* Virtual/Logical id of port */ - u8 reserved[128]; /* save space for future expansion */ - /* MUST BE 64-bit aligned. - The following is packed: - - N cardrsp_rds_rings - - N cardrs_sds_rings */ - char data[0]; -} __packed; - -#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \ - (sizeof(HOSTRQ_RX) + \ - (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \ - (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring))) - -#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \ - (sizeof(CARDRSP_RX) + \ - (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \ - (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring))) - -/* - * Tx context - */ - -struct qlcnic_hostrq_cds_ring { - __le64 host_phys_addr; /* Ring base addr */ - __le32 ring_size; /* Ring entries */ - __le32 rsvd; /* Padding */ -} __packed; - -struct qlcnic_hostrq_tx_ctx { - __le64 host_rsp_dma_addr; /* Response dma'd here */ - __le64 cmd_cons_dma_addr; /* */ - __le64 dummy_dma_addr; /* */ - __le32 capabilities[4]; /* Flag bit vector */ - __le32 host_int_crb_mode; /* Interrupt crb usage */ - __le32 rsvd1; /* Padding */ - __le16 rsvd2; /* Padding */ - __le16 interrupt_ctl; - __le16 msi_index; - __le16 rsvd3; /* Padding */ - struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */ - u8 reserved[128]; /* future expansion */ -} __packed; - -struct qlcnic_cardrsp_cds_ring { - __le32 host_producer_crb; /* Crb to use */ - __le32 interrupt_crb; /* Crb to use */ -} __packed; - -struct qlcnic_cardrsp_tx_ctx { - __le32 host_ctx_state; /* Starting state */ - __le16 context_id; /* Handle for context */ - u8 phys_port; /* Physical id of port */ - u8 virt_port; /* Virtual/Logical id of port */ - struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */ - u8 reserved[128]; /* future expansion */ -} __packed; - -#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX)) -#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX)) - -/* CRB */ - -#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0 -#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1 -#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2 -#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3 - -#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0 -#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1 -#define QLCNIC_HOST_INT_CRB_MODE_NORX 2 -#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3 -#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4 - - -/* MAC */ - -#define MC_COUNT_P3P 38 - -#define QLCNIC_MAC_NOOP 0 -#define QLCNIC_MAC_ADD 1 -#define QLCNIC_MAC_DEL 2 -#define QLCNIC_MAC_VLAN_ADD 3 -#define QLCNIC_MAC_VLAN_DEL 4 - -struct qlcnic_mac_list_s { - struct list_head list; - uint8_t mac_addr[ETH_ALEN+2]; -}; - -#define QLCNIC_HOST_REQUEST 0x13 -#define QLCNIC_REQUEST 0x14 - -#define 
QLCNIC_MAC_EVENT 0x1 - -#define QLCNIC_IP_UP 2 -#define QLCNIC_IP_DOWN 3 - -#define QLCNIC_ILB_MODE 0x1 -#define QLCNIC_ELB_MODE 0x2 - -#define QLCNIC_LINKEVENT 0x1 -#define QLCNIC_LB_RESPONSE 0x2 -#define QLCNIC_IS_LB_CONFIGURED(VAL) \ - (VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE)) - -/* - * Driver --> Firmware - */ -#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1 -#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3 -#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4 -#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7 -#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc -#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12 - -#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15 -#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17 -#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18 -#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 0x13 - -/* - * Firmware --> Driver - */ - -#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f -#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141 - -#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ -#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ -#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ - -#define QLCNIC_LRO_REQUEST_CLEANUP 4 - -/* Capabilites received */ -#define QLCNIC_FW_CAPABILITY_TSO BIT_1 -#define QLCNIC_FW_CAPABILITY_BDG BIT_8 -#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9 -#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10 -#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27 - -/* module types */ -#define LINKEVENT_MODULE_NOT_PRESENT 1 -#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2 -#define LINKEVENT_MODULE_OPTICAL_SRLR 3 -#define LINKEVENT_MODULE_OPTICAL_LRM 4 -#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5 -#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6 -#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7 -#define LINKEVENT_MODULE_TWINAX 8 - -#define LINKSPEED_10GBPS 10000 -#define LINKSPEED_1GBPS 1000 -#define LINKSPEED_100MBPS 100 -#define LINKSPEED_10MBPS 10 - -#define LINKSPEED_ENCODED_10MBPS 0 -#define LINKSPEED_ENCODED_100MBPS 1 -#define LINKSPEED_ENCODED_1GBPS 2 - -#define LINKEVENT_AUTONEG_DISABLED 0 -#define LINKEVENT_AUTONEG_ENABLED 1 - -#define LINKEVENT_HALF_DUPLEX 0 -#define LINKEVENT_FULL_DUPLEX 1 - -#define LINKEVENT_LINKSPEED_MBPS 0 -#define LINKEVENT_LINKSPEED_ENCODED 1 - -/* firmware response header: - * 63:58 - message type - * 57:56 - owner - * 55:53 - desc count - * 52:48 - reserved - * 47:40 - completion id - * 39:32 - opcode - * 31:16 - error code - * 15:00 - reserved - */ -#define qlcnic_get_nic_msg_opcode(msg_hdr) \ - ((msg_hdr >> 32) & 0xFF) - -struct qlcnic_fw_msg { - union { - struct { - u64 hdr; - u64 body[7]; - }; - u64 words[8]; - }; -}; - -struct qlcnic_nic_req { - __le64 qhdr; - __le64 req_hdr; - __le64 words[6]; -} __packed; - -struct qlcnic_mac_req { - u8 op; - u8 tag; - u8 mac_addr[6]; -}; - -struct qlcnic_vlan_req { - __le16 vlan_id; - __le16 rsvd[3]; -} __packed; - -struct qlcnic_ipaddr { - __be32 ipv4; - __be32 ipv6[4]; -}; - -#define QLCNIC_MSI_ENABLED 0x02 -#define QLCNIC_MSIX_ENABLED 0x04 -#define QLCNIC_LRO_ENABLED 0x08 -#define QLCNIC_LRO_DISABLED 0x00 -#define QLCNIC_BRIDGE_ENABLED 0X10 -#define QLCNIC_DIAG_ENABLED 0x20 -#define QLCNIC_ESWITCH_ENABLED 0x40 -#define QLCNIC_ADAPTER_INITIALIZED 0x80 -#define QLCNIC_TAGGING_ENABLED 0x100 -#define QLCNIC_MACSPOOF 0x200 -#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400 -#define QLCNIC_PROMISC_DISABLED 0x800 -#define QLCNIC_NEED_FLR 0x1000 -#define QLCNIC_FW_RESET_OWNER 0x2000 -#define QLCNIC_FW_HANG 0x4000 -#define QLCNIC_IS_MSI_FAMILY(adapter) \ - ((adapter)->flags & (QLCNIC_MSI_ENABLED | 
QLCNIC_MSIX_ENABLED)) - -#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 -#define QLCNIC_MSIX_TBL_SPACE 8192 -#define QLCNIC_PCI_REG_MSIX_TBL 0x44 -#define QLCNIC_MSIX_TBL_PGSIZE 4096 - -#define QLCNIC_NETDEV_WEIGHT 128 -#define QLCNIC_ADAPTER_UP_MAGIC 777 - -#define __QLCNIC_FW_ATTACHED 0 -#define __QLCNIC_DEV_UP 1 -#define __QLCNIC_RESETTING 2 -#define __QLCNIC_START_FW 4 -#define __QLCNIC_AER 5 -#define __QLCNIC_DIAG_RES_ALLOC 6 - -#define QLCNIC_INTERRUPT_TEST 1 -#define QLCNIC_LOOPBACK_TEST 2 -#define QLCNIC_LED_TEST 3 - -#define QLCNIC_FILTER_AGE 80 -#define QLCNIC_READD_AGE 20 -#define QLCNIC_LB_MAX_FILTERS 64 - -/* QLCNIC Driver Error Code */ -#define QLCNIC_FW_NOT_RESPOND 51 -#define QLCNIC_TEST_IN_PROGRESS 52 -#define QLCNIC_UNDEFINED_ERROR 53 -#define QLCNIC_LB_CABLE_NOT_CONN 54 - -struct qlcnic_filter { - struct hlist_node fnode; - u8 faddr[ETH_ALEN]; - __le16 vlan_id; - unsigned long ftime; -}; - -struct qlcnic_filter_hash { - struct hlist_head *fhead; - u8 fnum; - u8 fmax; -}; - -struct qlcnic_adapter { - struct qlcnic_hardware_context *ahw; - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_tx_ring *tx_ring; - struct net_device *netdev; - struct pci_dev *pdev; - - unsigned long state; - u32 flags; - - u16 num_txd; - u16 num_rxd; - u16 num_jumbo_rxd; - u16 max_rxd; - u16 max_jumbo_rxd; - - u8 max_rds_rings; - u8 max_sds_rings; - u8 msix_supported; - u8 portnum; - u8 physical_port; - u8 reset_context; - - u8 mc_enabled; - u8 max_mc_count; - u8 fw_wait_cnt; - u8 fw_fail_cnt; - u8 tx_timeo_cnt; - u8 need_fw_reset; - - u8 has_link_events; - u8 fw_type; - u16 tx_context_id; - u16 is_up; - - u16 link_speed; - u16 link_duplex; - u16 link_autoneg; - u16 module_type; - - u16 op_mode; - u16 switch_mode; - u16 max_tx_ques; - u16 max_rx_ques; - u16 max_mtu; - u16 pvid; - - u32 fw_hal_version; - u32 capabilities; - u32 irq; - u32 temp; - - u32 int_vec_bit; - u32 heartbeat; - - u8 max_mac_filters; - u8 dev_state; - u8 diag_test; - char diag_cnt; - u8 reset_ack_timeo; - u8 dev_init_timeo; - u16 msg_enable; - - u8 mac_addr[ETH_ALEN]; - - u64 dev_rst_time; - u8 mac_learn; - unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)]; - - struct qlcnic_npar_info *npars; - struct qlcnic_eswitch *eswitch; - struct qlcnic_nic_template *nic_ops; - - struct qlcnic_adapter_stats stats; - struct list_head mac_list; - - void __iomem *tgt_mask_reg; - void __iomem *tgt_status_reg; - void __iomem *crb_int_state_reg; - void __iomem *isr_int_vec; - - struct msix_entry *msix_entries; - - struct delayed_work fw_work; - - - struct qlcnic_filter_hash fhash; - - spinlock_t tx_clean_lock; - spinlock_t mac_learn_lock; - __le32 file_prd_off; /*File fw product offset*/ - u32 fw_version; - const struct firmware *fw; -}; - -struct qlcnic_info { - __le16 pci_func; - __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */ - __le16 phys_port; - __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */ - - __le32 capabilities; - u8 max_mac_filters; - u8 reserved1; - __le16 max_mtu; - - __le16 max_tx_ques; - __le16 max_rx_ques; - __le16 min_tx_bw; - __le16 max_tx_bw; - u8 reserved2[104]; -} __packed; - -struct qlcnic_pci_info { - __le16 id; /* pci function id */ - __le16 active; /* 1 = Enabled */ - __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */ - __le16 default_port; /* default port number */ - - __le16 tx_min_bw; /* Multiple of 100mbpc */ - __le16 tx_max_bw; - __le16 reserved1[2]; - - u8 mac[ETH_ALEN]; - u8 reserved2[106]; -} __packed; - -struct qlcnic_npar_info { - u16 pvid; - u16 min_bw; - u16 max_bw; - u8 phy_port; - u8 type; - 
u8 active; - u8 enable_pm; - u8 dest_npar; - u8 discard_tagged; - u8 mac_override; - u8 mac_anti_spoof; - u8 promisc_mode; - u8 offload_flags; -}; - -struct qlcnic_eswitch { - u8 port; - u8 active_vports; - u8 active_vlans; - u8 active_ucast_filters; - u8 max_ucast_filters; - u8 max_active_vlans; - - u32 flags; -#define QLCNIC_SWITCH_ENABLE BIT_1 -#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2 -#define QLCNIC_SWITCH_PROMISC_MODE BIT_3 -#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4 -}; - - -/* Return codes for Error handling */ -#define QL_STATUS_INVALID_PARAM -1 - -#define MAX_BW 100 /* % of link speed */ -#define MAX_VLAN_ID 4095 -#define MIN_VLAN_ID 2 -#define DEFAULT_MAC_LEARN 1 - -#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID) -#define IS_VALID_BW(bw) (bw <= MAX_BW) - -struct qlcnic_pci_func_cfg { - u16 func_type; - u16 min_bw; - u16 max_bw; - u16 port_num; - u8 pci_func; - u8 func_state; - u8 def_mac_addr[6]; -}; - -struct qlcnic_npar_func_cfg { - u32 fw_capab; - u16 port_num; - u16 min_bw; - u16 max_bw; - u16 max_tx_queues; - u16 max_rx_queues; - u8 pci_func; - u8 op_mode; -}; - -struct qlcnic_pm_func_cfg { - u8 pci_func; - u8 action; - u8 dest_npar; - u8 reserved[5]; -}; - -struct qlcnic_esw_func_cfg { - u16 vlan_id; - u8 op_mode; - u8 op_type; - u8 pci_func; - u8 host_vlan_tag; - u8 promisc_mode; - u8 discard_tagged; - u8 mac_override; - u8 mac_anti_spoof; - u8 offload_flags; - u8 reserved[5]; -}; - -#define QLCNIC_STATS_VERSION 1 -#define QLCNIC_STATS_PORT 1 -#define QLCNIC_STATS_ESWITCH 2 -#define QLCNIC_QUERY_RX_COUNTER 0 -#define QLCNIC_QUERY_TX_COUNTER 1 -#define QLCNIC_ESW_STATS_NOT_AVAIL 0xffffffffffffffffULL - -#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\ -do { \ - if (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) && \ - ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ - (VAL1) = (VAL2); \ - else if (((VAL1) != QLCNIC_ESW_STATS_NOT_AVAIL) && \ - ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ - (VAL1) += (VAL2); \ -} while (0) - -struct __qlcnic_esw_statistics { - __le16 context_id; - __le16 version; - __le16 size; - __le16 unused; - __le64 unicast_frames; - __le64 multicast_frames; - __le64 broadcast_frames; - __le64 dropped_frames; - __le64 errors; - __le64 local_frames; - __le64 numbytes; - __le64 rsvd[3]; -} __packed; - -struct qlcnic_esw_statistics { - struct __qlcnic_esw_statistics rx; - struct __qlcnic_esw_statistics tx; -}; - -struct qlcnic_common_entry_hdr { - __le32 type; - __le32 offset; - __le32 cap_size; - u8 mask; - u8 rsvd[2]; - u8 flags; -} __packed; - -struct __crb { - __le32 addr; - u8 stride; - u8 rsvd1[3]; - __le32 data_size; - __le32 no_ops; - __le32 rsvd2[4]; -} __packed; - -struct __ctrl { - __le32 addr; - u8 stride; - u8 index_a; - __le16 timeout; - __le32 data_size; - __le32 no_ops; - u8 opcode; - u8 index_v; - u8 shl_val; - u8 shr_val; - __le32 val1; - __le32 val2; - __le32 val3; -} __packed; - -struct __cache { - __le32 addr; - __le16 stride; - __le16 init_tag_val; - __le32 size; - __le32 no_ops; - __le32 ctrl_addr; - __le32 ctrl_val; - __le32 read_addr; - u8 read_addr_stride; - u8 read_addr_num; - u8 rsvd1[2]; -} __packed; - -struct __ocm { - u8 rsvd[8]; - __le32 size; - __le32 no_ops; - u8 rsvd1[8]; - __le32 read_addr; - __le32 read_addr_stride; -} __packed; - -struct __mem { - u8 rsvd[24]; - __le32 addr; - __le32 size; -} __packed; - -struct __mux { - __le32 addr; - u8 rsvd[4]; - __le32 size; - __le32 no_ops; - __le32 val; - __le32 val_stride; - __le32 read_addr; - u8 rsvd2[4]; -} __packed; - -struct __queue { - __le32 sel_addr; - __le16 stride; - 
u8 rsvd[2]; - __le32 size; - __le32 no_ops; - u8 rsvd2[8]; - __le32 read_addr; - u8 read_addr_stride; - u8 read_addr_cnt; - u8 rsvd3[2]; -} __packed; - -struct qlcnic_dump_entry { - struct qlcnic_common_entry_hdr hdr; - union { - struct __crb crb; - struct __cache cache; - struct __ocm ocm; - struct __mem mem; - struct __mux mux; - struct __queue que; - struct __ctrl ctrl; - } region; -} __packed; - -enum op_codes { - QLCNIC_DUMP_NOP = 0, - QLCNIC_DUMP_READ_CRB = 1, - QLCNIC_DUMP_READ_MUX = 2, - QLCNIC_DUMP_QUEUE = 3, - QLCNIC_DUMP_BRD_CONFIG = 4, - QLCNIC_DUMP_READ_OCM = 6, - QLCNIC_DUMP_PEG_REG = 7, - QLCNIC_DUMP_L1_DTAG = 8, - QLCNIC_DUMP_L1_ITAG = 9, - QLCNIC_DUMP_L1_DATA = 11, - QLCNIC_DUMP_L1_INST = 12, - QLCNIC_DUMP_L2_DTAG = 21, - QLCNIC_DUMP_L2_ITAG = 22, - QLCNIC_DUMP_L2_DATA = 23, - QLCNIC_DUMP_L2_INST = 24, - QLCNIC_DUMP_READ_ROM = 71, - QLCNIC_DUMP_READ_MEM = 72, - QLCNIC_DUMP_READ_CTRL = 98, - QLCNIC_DUMP_TLHDR = 99, - QLCNIC_DUMP_RDEND = 255 -}; - -#define QLCNIC_DUMP_WCRB BIT_0 -#define QLCNIC_DUMP_RWCRB BIT_1 -#define QLCNIC_DUMP_ANDCRB BIT_2 -#define QLCNIC_DUMP_ORCRB BIT_3 -#define QLCNIC_DUMP_POLLCRB BIT_4 -#define QLCNIC_DUMP_RD_SAVE BIT_5 -#define QLCNIC_DUMP_WRT_SAVED BIT_6 -#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7 -#define QLCNIC_DUMP_SKIP BIT_7 - -#define QLCNIC_DUMP_MASK_MIN 3 -#define QLCNIC_DUMP_MASK_DEF 0x1f -#define QLCNIC_DUMP_MASK_MAX 0xff -#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed -#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed -#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed -#define QLCNIC_FORCE_FW_RESET 0xdeaddead - -struct qlcnic_dump_operations { - enum op_codes opcode; - u32 (*handler)(struct qlcnic_adapter *, - struct qlcnic_dump_entry *, u32 *); -}; - -int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); -int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config); - -u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off); -int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data); -int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data); -int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data); -void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *); -void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64); - -#define ADDR_IN_RANGE(addr, low, high) \ - (((addr) < (high)) && ((addr) >= (low))) - -#define QLCRD32(adapter, off) \ - (qlcnic_hw_read_wx_2M(adapter, off)) -#define QLCWR32(adapter, off, val) \ - (qlcnic_hw_write_wx_2M(adapter, off, val)) - -int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32); -void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int); - -#define qlcnic_rom_lock(a) \ - qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID) -#define qlcnic_rom_unlock(a) \ - qlcnic_pcie_sem_unlock((a), 2) -#define qlcnic_phy_lock(a) \ - qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID) -#define qlcnic_phy_unlock(a) \ - qlcnic_pcie_sem_unlock((a), 3) -#define qlcnic_api_lock(a) \ - qlcnic_pcie_sem_lock((a), 5, 0) -#define qlcnic_api_unlock(a) \ - qlcnic_pcie_sem_unlock((a), 5) -#define qlcnic_sw_lock(a) \ - qlcnic_pcie_sem_lock((a), 6, 0) -#define qlcnic_sw_unlock(a) \ - qlcnic_pcie_sem_unlock((a), 6) -#define crb_win_lock(a) \ - qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID) -#define crb_win_unlock(a) \ - qlcnic_pcie_sem_unlock((a), 7) - -int qlcnic_get_board_info(struct qlcnic_adapter *adapter); -int qlcnic_wol_supported(struct qlcnic_adapter *adapter); -int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate); -void 
qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter); -void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter); -int qlcnic_dump_fw(struct qlcnic_adapter *); - -/* Functions from qlcnic_init.c */ -int qlcnic_load_firmware(struct qlcnic_adapter *adapter); -int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter); -void qlcnic_request_firmware(struct qlcnic_adapter *adapter); -void qlcnic_release_firmware(struct qlcnic_adapter *adapter); -int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter); -int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter); -int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter); - -int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp); -int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, - u8 *bytes, size_t size); -int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter); -void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter); - -void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32); - -int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter); -void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter); - -int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter); -void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter); - -void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter); -void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter); -void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter); - -int qlcnic_check_fw_status(struct qlcnic_adapter *adapter); -void qlcnic_watchdog_task(struct work_struct *work); -void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring); -int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max); -void qlcnic_set_multi(struct net_device *netdev); -void qlcnic_free_mac_list(struct qlcnic_adapter *adapter); -int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32); -int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter); -int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable); -int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd); -int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable); -void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup); - -int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); -int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); -u32 qlcnic_fix_features(struct net_device *netdev, u32 features); -int qlcnic_set_features(struct net_device *netdev, u32 features); -int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); -int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable); -int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); -void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, - struct qlcnic_host_tx_ring *tx_ring); -void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *); -void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); -void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter); -int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode); - -/* Functions from qlcnic_ethtool.c */ -int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]); - -/* Functions from qlcnic_main.c */ -int qlcnic_reset_context(struct qlcnic_adapter *); -u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter, - u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd); -void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); -int 
qlcnic_diag_alloc_res(struct net_device *netdev, int test); -netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val); -int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data); -void qlcnic_dev_request_reset(struct qlcnic_adapter *); -void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); - -/* Management functions */ -int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*); -int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); -int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); -int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*); - -/* eSwitch management functions */ -int qlcnic_config_switch_port(struct qlcnic_adapter *, - struct qlcnic_esw_func_cfg *); -int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *, - struct qlcnic_esw_func_cfg *); -int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8); -int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8, - struct __qlcnic_esw_statistics *); -int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8, - struct __qlcnic_esw_statistics *); -int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8); -extern int qlcnic_config_tso; - -/* - * QLOGIC Board information - */ - -#define QLCNIC_MAX_BOARD_NAME_LEN 100 -struct qlcnic_brdinfo { - unsigned short vendor; - unsigned short device; - unsigned short sub_vendor; - unsigned short sub_device; - char short_name[QLCNIC_MAX_BOARD_NAME_LEN]; -}; - -static const struct qlcnic_brdinfo qlcnic_boards[] = { - {0x1077, 0x8020, 0x1077, 0x203, - "8200 Series Single Port 10GbE Converged Network Adapter " - "(TCP/IP Networking)"}, - {0x1077, 0x8020, 0x1077, 0x207, - "8200 Series Dual Port 10GbE Converged Network Adapter " - "(TCP/IP Networking)"}, - {0x1077, 0x8020, 0x1077, 0x20b, - "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"}, - {0x1077, 0x8020, 0x1077, 0x20c, - "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"}, - {0x1077, 0x8020, 0x1077, 0x20f, - "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"}, - {0x1077, 0x8020, 0x103c, 0x3733, - "NC523SFP 10Gb 2-port Server Adapter"}, - {0x1077, 0x8020, 0x103c, 0x3346, - "CN1000Q Dual Port Converged Network Adapter"}, - {0x1077, 0x8020, 0x1077, 0x210, - "QME8242-k 10GbE Dual Port Mezzanine Card"}, - {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"}, -}; - -#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards) - -static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) -{ - if (likely(tx_ring->producer < tx_ring->sw_consumer)) - return tx_ring->sw_consumer - tx_ring->producer; - else - return tx_ring->sw_consumer + tx_ring->num_desc - - tx_ring->producer; -} - -extern const struct ethtool_ops qlcnic_ethtool_ops; - -struct qlcnic_nic_template { - int (*config_bridged_mode) (struct qlcnic_adapter *, u32); - int (*config_led) (struct qlcnic_adapter *, u32, u32); - int (*start_firmware) (struct qlcnic_adapter *); -}; - -#define QLCDB(adapter, lvl, _fmt, _args...) 
do { \ - if (NETIF_MSG_##lvl & adapter->msg_enable) \ - printk(KERN_INFO "%s: %s: " _fmt, \ - dev_name(&adapter->pdev->dev), \ - __func__, ##_args); \ - } while (0) - -#endif /* __QLCNIC_H_ */ diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c deleted file mode 100644 index b0d32ddd2ccb..000000000000 --- a/drivers/net/qlcnic/qlcnic_ctx.c +++ /dev/null @@ -1,1117 +0,0 @@ -/* - * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation - * - * See LICENSE.qlcnic for copyright and licensing details. - */ - -#include "qlcnic.h" - -static u32 -qlcnic_poll_rsp(struct qlcnic_adapter *adapter) -{ - u32 rsp; - int timeout = 0; - - do { - /* give atleast 1ms for firmware to respond */ - msleep(1); - - if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT) - return QLCNIC_CDRP_RSP_TIMEOUT; - - rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET); - } while (!QLCNIC_CDRP_IS_RSP(rsp)); - - return rsp; -} - -u32 -qlcnic_issue_cmd(struct qlcnic_adapter *adapter, - u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd) -{ - u32 rsp; - u32 signature; - u32 rcode = QLCNIC_RCODE_SUCCESS; - struct pci_dev *pdev = adapter->pdev; - - signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version); - - /* Acquire semaphore before accessing CRB */ - if (qlcnic_api_lock(adapter)) - return QLCNIC_RCODE_TIMEOUT; - - QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature); - QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1); - QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2); - QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3); - QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd)); - - rsp = qlcnic_poll_rsp(adapter); - - if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) { - dev_err(&pdev->dev, "card response timeout.\n"); - rcode = QLCNIC_RCODE_TIMEOUT; - } else if (rsp == QLCNIC_CDRP_RSP_FAIL) { - rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); - dev_err(&pdev->dev, "failed card response code:0x%x\n", - rcode); - } - - /* Release semaphore */ - qlcnic_api_unlock(adapter); - - return rcode; -} - -static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size) -{ - uint64_t sum = 0; - int count = temp_size / sizeof(uint32_t); - while (count-- > 0) - sum += *temp_buffer++; - while (sum >> 32) - sum = (sum & 0xFFFFFFFF) + (sum >> 32); - return ~sum; -} - -int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) -{ - int err, i; - u16 temp_size; - void *tmp_addr; - u32 version, csum, *template, *tmp_buf; - struct qlcnic_hardware_context *ahw; - struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl; - dma_addr_t tmp_addr_t = 0; - - ahw = adapter->ahw; - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - 0, - 0, - 0, - QLCNIC_CDRP_CMD_TEMP_SIZE); - if (err != QLCNIC_RCODE_SUCCESS) { - err = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); - dev_info(&adapter->pdev->dev, - "Can't get template size %d\n", err); - err = -EIO; - return err; - } - version = QLCRD32(adapter, QLCNIC_ARG3_CRB_OFFSET); - temp_size = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET); - if (!temp_size) - return -EIO; - - tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size, - &tmp_addr_t, GFP_KERNEL); - if (!tmp_addr) { - dev_err(&adapter->pdev->dev, - "Can't get memory for FW dump template\n"); - return -ENOMEM; - } - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - LSD(tmp_addr_t), - MSD(tmp_addr_t), - temp_size, - QLCNIC_CDRP_CMD_GET_TEMP_HDR); - - if (err != QLCNIC_RCODE_SUCCESS) { - dev_err(&adapter->pdev->dev, - "Failed to get mini dump 
template header %d\n", err); - err = -EIO; - goto error; - } - tmp_tmpl = tmp_addr; - csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size); - if (csum) { - dev_err(&adapter->pdev->dev, - "Template header checksum validation failed\n"); - err = -EIO; - goto error; - } - ahw->fw_dump.tmpl_hdr = vzalloc(temp_size); - if (!ahw->fw_dump.tmpl_hdr) { - err = -EIO; - goto error; - } - tmp_buf = tmp_addr; - template = (u32 *) ahw->fw_dump.tmpl_hdr; - for (i = 0; i < temp_size/sizeof(u32); i++) - *template++ = __le32_to_cpu(*tmp_buf++); - - tmpl_hdr = ahw->fw_dump.tmpl_hdr; - tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; - ahw->fw_dump.enable = 1; -error: - dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t); - return err; -} - -int -qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) -{ - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) { - if (qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - recv_ctx->context_id, - mtu, - 0, - QLCNIC_CDRP_CMD_SET_MTU)) { - - dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); - return -EIO; - } - } - - return 0; -} - -static int -qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) -{ - void *addr; - struct qlcnic_hostrq_rx_ctx *prq; - struct qlcnic_cardrsp_rx_ctx *prsp; - struct qlcnic_hostrq_rds_ring *prq_rds; - struct qlcnic_hostrq_sds_ring *prq_sds; - struct qlcnic_cardrsp_rds_ring *prsp_rds; - struct qlcnic_cardrsp_sds_ring *prsp_sds; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_host_sds_ring *sds_ring; - - dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; - u64 phys_addr; - - u8 i, nrds_rings, nsds_rings; - size_t rq_size, rsp_size; - u32 cap, reg, val, reg2; - int err; - - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - nrds_rings = adapter->max_rds_rings; - nsds_rings = adapter->max_sds_rings; - - rq_size = - SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings, - nsds_rings); - rsp_size = - SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings, - nsds_rings); - - addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, - &hostrq_phys_addr, GFP_KERNEL); - if (addr == NULL) - return -ENOMEM; - prq = addr; - - addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, - &cardrsp_phys_addr, GFP_KERNEL); - if (addr == NULL) { - err = -ENOMEM; - goto out_free_rq; - } - prsp = addr; - - prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); - - cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN - | QLCNIC_CAP0_VALIDOFF); - cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); - - prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx, - msix_handler); - prq->txrx_sds_binding = nsds_rings - 1; - - prq->capabilities[0] = cpu_to_le32(cap); - prq->host_int_crb_mode = - cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); - prq->host_rds_crb_mode = - cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE); - - prq->num_rds_rings = cpu_to_le16(nrds_rings); - prq->num_sds_rings = cpu_to_le16(nsds_rings); - prq->rds_ring_offset = 0; - - val = le32_to_cpu(prq->rds_ring_offset) + - (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings); - prq->sds_ring_offset = cpu_to_le32(val); - - prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data + - le32_to_cpu(prq->rds_ring_offset)); - - for (i = 0; i < nrds_rings; i++) { - - rds_ring = &recv_ctx->rds_rings[i]; - rds_ring->producer = 0; - - prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); - prq_rds[i].ring_size = 
cpu_to_le32(rds_ring->num_desc); - prq_rds[i].ring_kind = cpu_to_le32(i); - prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); - } - - prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data + - le32_to_cpu(prq->sds_ring_offset)); - - for (i = 0; i < nsds_rings; i++) { - - sds_ring = &recv_ctx->sds_rings[i]; - sds_ring->consumer = 0; - memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring)); - - prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); - prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); - prq_sds[i].msi_index = cpu_to_le16(i); - } - - phys_addr = hostrq_phys_addr; - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - (u32)(phys_addr >> 32), - (u32)(phys_addr & 0xffffffff), - rq_size, - QLCNIC_CDRP_CMD_CREATE_RX_CTX); - if (err) { - dev_err(&adapter->pdev->dev, - "Failed to create rx ctx in firmware%d\n", err); - goto out_free_rsp; - } - - - prsp_rds = ((struct qlcnic_cardrsp_rds_ring *) - &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); - - for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { - rds_ring = &recv_ctx->rds_rings[i]; - - reg = le32_to_cpu(prsp_rds[i].host_producer_crb); - rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg; - } - - prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) - &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); - - for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { - sds_ring = &recv_ctx->sds_rings[i]; - - reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); - reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); - - sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg; - sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2; - } - - recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); - recv_ctx->context_id = le16_to_cpu(prsp->context_id); - recv_ctx->virt_port = prsp->virt_port; - -out_free_rsp: - dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, - cardrsp_phys_addr); -out_free_rq: - dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); - return err; -} - -static void -qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - if (qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - recv_ctx->context_id, - QLCNIC_DESTROY_CTX_RESET, - 0, - QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) { - - dev_err(&adapter->pdev->dev, - "Failed to destroy rx ctx in firmware\n"); - } - - recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED; -} - -static int -qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) -{ - struct qlcnic_hostrq_tx_ctx *prq; - struct qlcnic_hostrq_cds_ring *prq_cds; - struct qlcnic_cardrsp_tx_ctx *prsp; - void *rq_addr, *rsp_addr; - size_t rq_size, rsp_size; - u32 temp; - int err; - u64 phys_addr; - dma_addr_t rq_phys_addr, rsp_phys_addr; - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; - - /* reset host resources */ - tx_ring->producer = 0; - tx_ring->sw_consumer = 0; - *(tx_ring->hw_consumer) = 0; - - rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); - rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, - &rq_phys_addr, GFP_KERNEL); - if (!rq_addr) - return -ENOMEM; - - rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); - rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, - &rsp_phys_addr, GFP_KERNEL); - if (!rsp_addr) { - err = -ENOMEM; - goto out_free_rq; - } - - memset(rq_addr, 0, rq_size); - prq = rq_addr; - - memset(rsp_addr, 0, rsp_size); - prsp = rsp_addr; - - prq->host_rsp_dma_addr = 
cpu_to_le64(rsp_phys_addr); - - temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN | - QLCNIC_CAP0_LSO); - prq->capabilities[0] = cpu_to_le32(temp); - - prq->host_int_crb_mode = - cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); - - prq->interrupt_ctl = 0; - prq->msi_index = 0; - prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr); - - prq_cds = &prq->cds_ring; - - prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); - prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); - - phys_addr = rq_phys_addr; - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - (u32)(phys_addr >> 32), - ((u32)phys_addr & 0xffffffff), - rq_size, - QLCNIC_CDRP_CMD_CREATE_TX_CTX); - - if (err == QLCNIC_RCODE_SUCCESS) { - temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); - tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; - - adapter->tx_context_id = - le16_to_cpu(prsp->context_id); - } else { - dev_err(&adapter->pdev->dev, - "Failed to create tx ctx in firmware%d\n", err); - err = -EIO; - } - - dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, - rsp_phys_addr); - -out_free_rq: - dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); - - return err; -} - -static void -qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter) -{ - if (qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - adapter->tx_context_id, - QLCNIC_DESTROY_CTX_RESET, - 0, - QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) { - - dev_err(&adapter->pdev->dev, - "Failed to destroy tx ctx in firmware\n"); - } -} - -int -qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config) -{ - return qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - config, - 0, - 0, - QLCNIC_CDRP_CMD_CONFIG_PORT); -} - -int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) -{ - void *addr; - int err; - int ring; - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_host_tx_ring *tx_ring; - - struct pci_dev *pdev = adapter->pdev; - - recv_ctx = adapter->recv_ctx; - tx_ring = adapter->tx_ring; - - tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev, - sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL); - if (tx_ring->hw_consumer == NULL) { - dev_err(&pdev->dev, "failed to allocate tx consumer\n"); - return -ENOMEM; - } - - /* cmd desc ring */ - addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), - &tx_ring->phys_addr, GFP_KERNEL); - - if (addr == NULL) { - dev_err(&pdev->dev, "failed to allocate tx desc ring\n"); - err = -ENOMEM; - goto err_out_free; - } - - tx_ring->desc_head = addr; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - addr = dma_alloc_coherent(&adapter->pdev->dev, - RCV_DESC_RINGSIZE(rds_ring), - &rds_ring->phys_addr, GFP_KERNEL); - if (addr == NULL) { - dev_err(&pdev->dev, - "failed to allocate rds ring [%d]\n", ring); - err = -ENOMEM; - goto err_out_free; - } - rds_ring->desc_head = addr; - - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - - addr = dma_alloc_coherent(&adapter->pdev->dev, - STATUS_DESC_RINGSIZE(sds_ring), - &sds_ring->phys_addr, GFP_KERNEL); - if (addr == NULL) { - dev_err(&pdev->dev, - "failed to allocate sds ring [%d]\n", ring); - err = -ENOMEM; - goto err_out_free; - } - sds_ring->desc_head = addr; - } - - return 0; - -err_out_free: - qlcnic_free_hw_resources(adapter); 
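A note on the error handling above: every descriptor ring in qlcnic_alloc_hw_resources() lives in DMA-coherent memory, and the single err_out_free label unwinds a partial allocation by delegating to qlcnic_free_hw_resources(), which checks each pointer before freeing. A minimal standalone sketch of the same allocate-or-unwind idiom; the example_* names and structure are illustrative only, not part of the driver:

/* Illustrative sketch only -- not qlcnic code. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

struct example_ring {
	void *desc;		/* CPU address of the descriptor area */
	dma_addr_t phys;	/* bus address handed to the firmware */
	size_t size;		/* ring size in bytes */
};

static int example_alloc_rings(struct pci_dev *pdev,
			       struct example_ring *rings, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		rings[i].desc = dma_alloc_coherent(&pdev->dev, rings[i].size,
						   &rings[i].phys, GFP_KERNEL);
		if (!rings[i].desc)
			goto unwind;	/* free whatever already succeeded */
	}
	return 0;

unwind:
	while (--i >= 0) {
		dma_free_coherent(&pdev->dev, rings[i].size,
				  rings[i].desc, rings[i].phys);
		rings[i].desc = NULL;
	}
	return -ENOMEM;
}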
- return err; -} - - -int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter) -{ - int err; - - if (adapter->flags & QLCNIC_NEED_FLR) { - pci_reset_function(adapter->pdev); - adapter->flags &= ~QLCNIC_NEED_FLR; - } - - err = qlcnic_fw_cmd_create_rx_ctx(adapter); - if (err) - return err; - - err = qlcnic_fw_cmd_create_tx_ctx(adapter); - if (err) { - qlcnic_fw_cmd_destroy_rx_ctx(adapter); - return err; - } - - set_bit(__QLCNIC_FW_ATTACHED, &adapter->state); - return 0; -} - -void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) -{ - if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { - qlcnic_fw_cmd_destroy_rx_ctx(adapter); - qlcnic_fw_cmd_destroy_tx_ctx(adapter); - - /* Allow dma queues to drain after context reset */ - msleep(20); - } -} - -void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_host_tx_ring *tx_ring; - int ring; - - recv_ctx = adapter->recv_ctx; - - tx_ring = adapter->tx_ring; - if (tx_ring->hw_consumer != NULL) { - dma_free_coherent(&adapter->pdev->dev, - sizeof(u32), - tx_ring->hw_consumer, - tx_ring->hw_cons_phys_addr); - tx_ring->hw_consumer = NULL; - } - - if (tx_ring->desc_head != NULL) { - dma_free_coherent(&adapter->pdev->dev, - TX_DESC_RINGSIZE(tx_ring), - tx_ring->desc_head, tx_ring->phys_addr); - tx_ring->desc_head = NULL; - } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - - if (rds_ring->desc_head != NULL) { - dma_free_coherent(&adapter->pdev->dev, - RCV_DESC_RINGSIZE(rds_ring), - rds_ring->desc_head, - rds_ring->phys_addr); - rds_ring->desc_head = NULL; - } - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - - if (sds_ring->desc_head != NULL) { - dma_free_coherent(&adapter->pdev->dev, - STATUS_DESC_RINGSIZE(sds_ring), - sds_ring->desc_head, - sds_ring->phys_addr); - sds_ring->desc_head = NULL; - } - } -} - - -/* Get MAC address of a NIC partition */ -int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) -{ - int err; - u32 arg1; - - arg1 = adapter->ahw->pci_func | BIT_8; - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - arg1, - 0, - 0, - QLCNIC_CDRP_CMD_MAC_ADDRESS); - - if (err == QLCNIC_RCODE_SUCCESS) - qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET, - QLCNIC_ARG2_CRB_OFFSET, 0, mac); - else { - dev_err(&adapter->pdev->dev, - "Failed to get mac address%d\n", err); - err = -EIO; - } - - return err; -} - -/* Get info of a NIC partition */ -int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, - struct qlcnic_info *npar_info, u8 func_id) -{ - int err; - dma_addr_t nic_dma_t; - struct qlcnic_info *nic_info; - void *nic_info_addr; - size_t nic_size = sizeof(struct qlcnic_info); - - nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, - &nic_dma_t, GFP_KERNEL); - if (!nic_info_addr) - return -ENOMEM; - memset(nic_info_addr, 0, nic_size); - - nic_info = nic_info_addr; - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - MSD(nic_dma_t), - LSD(nic_dma_t), - (func_id << 16 | nic_size), - QLCNIC_CDRP_CMD_GET_NIC_INFO); - - if (err == QLCNIC_RCODE_SUCCESS) { - npar_info->pci_func = le16_to_cpu(nic_info->pci_func); - npar_info->op_mode = le16_to_cpu(nic_info->op_mode); - npar_info->phys_port = le16_to_cpu(nic_info->phys_port); - npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode); - 
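A side note on the MSD()/LSD() arguments passed to qlcnic_issue_cmd() above: the firmware mailbox only has 32-bit argument registers, so a 64-bit DMA address travels as two dwords. A sketch of the split, assuming MSD/LSD denote the most and least significant dwords (the real macros are defined in qlcnic.h, outside this hunk):

/* Illustrative helpers only; the driver's MSD()/LSD() live in qlcnic.h. */
#include <linux/types.h>

static inline u32 example_msd(u64 addr)	/* most significant dword */
{
	return (u32)(addr >> 32);
}

static inline u32 example_lsd(u64 addr)	/* least significant dword */
{
	return (u32)(addr & 0xffffffff);
}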
npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques); - npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques); - npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw); - npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw); - npar_info->capabilities = le32_to_cpu(nic_info->capabilities); - npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); - - dev_info(&adapter->pdev->dev, - "phy port: %d switch_mode: %d,\n" - "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n" - "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n", - npar_info->phys_port, npar_info->switch_mode, - npar_info->max_tx_ques, npar_info->max_rx_ques, - npar_info->min_tx_bw, npar_info->max_tx_bw, - npar_info->max_mtu, npar_info->capabilities); - } else { - dev_err(&adapter->pdev->dev, - "Failed to get nic info%d\n", err); - err = -EIO; - } - - dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, - nic_dma_t); - return err; -} - -/* Configure a NIC partition */ -int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic) -{ - int err = -EIO; - dma_addr_t nic_dma_t; - void *nic_info_addr; - struct qlcnic_info *nic_info; - size_t nic_size = sizeof(struct qlcnic_info); - - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - return err; - - nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, - &nic_dma_t, GFP_KERNEL); - if (!nic_info_addr) - return -ENOMEM; - - memset(nic_info_addr, 0, nic_size); - nic_info = nic_info_addr; - - nic_info->pci_func = cpu_to_le16(nic->pci_func); - nic_info->op_mode = cpu_to_le16(nic->op_mode); - nic_info->phys_port = cpu_to_le16(nic->phys_port); - nic_info->switch_mode = cpu_to_le16(nic->switch_mode); - nic_info->capabilities = cpu_to_le32(nic->capabilities); - nic_info->max_mac_filters = nic->max_mac_filters; - nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques); - nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques); - nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw); - nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); - - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - MSD(nic_dma_t), - LSD(nic_dma_t), - ((nic->pci_func << 16) | nic_size), - QLCNIC_CDRP_CMD_SET_NIC_INFO); - - if (err != QLCNIC_RCODE_SUCCESS) { - dev_err(&adapter->pdev->dev, - "Failed to set nic info%d\n", err); - err = -EIO; - } - - dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, - nic_dma_t); - return err; -} - -/* Get PCI Info of a partition */ -int qlcnic_get_pci_info(struct qlcnic_adapter *adapter, - struct qlcnic_pci_info *pci_info) -{ - int err = 0, i; - dma_addr_t pci_info_dma_t; - struct qlcnic_pci_info *npar; - void *pci_info_addr; - size_t npar_size = sizeof(struct qlcnic_pci_info); - size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; - - pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, - &pci_info_dma_t, GFP_KERNEL); - if (!pci_info_addr) - return -ENOMEM; - memset(pci_info_addr, 0, pci_size); - - npar = pci_info_addr; - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - MSD(pci_info_dma_t), - LSD(pci_info_dma_t), - pci_size, - QLCNIC_CDRP_CMD_GET_PCI_INFO); - - if (err == QLCNIC_RCODE_SUCCESS) { - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) { - pci_info->id = le16_to_cpu(npar->id); - pci_info->active = le16_to_cpu(npar->active); - pci_info->type = le16_to_cpu(npar->type); - pci_info->default_port = - le16_to_cpu(npar->default_port); - pci_info->tx_min_bw = - le16_to_cpu(npar->tx_min_bw); - pci_info->tx_max_bw = - 
le16_to_cpu(npar->tx_max_bw); - memcpy(pci_info->mac, npar->mac, ETH_ALEN); - } - } else { - dev_err(&adapter->pdev->dev, - "Failed to get PCI Info %d\n", err); - err = -EIO; - } - - dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr, - pci_info_dma_t); - return err; -} - -/* Configure eSwitch for port mirroring */ -int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, - u8 enable_mirroring, u8 pci_func) -{ - int err = -EIO; - u32 arg1; - - if (adapter->op_mode != QLCNIC_MGMT_FUNC || - !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) - return err; - - arg1 = id | (enable_mirroring ? BIT_4 : 0); - arg1 |= pci_func << 8; - - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - arg1, - 0, - 0, - QLCNIC_CDRP_CMD_SET_PORTMIRRORING); - - if (err != QLCNIC_RCODE_SUCCESS) { - dev_err(&adapter->pdev->dev, - "Failed to configure port mirroring %d on eswitch:%d\n", - pci_func, id); - } else { - dev_info(&adapter->pdev->dev, - "Configured eSwitch %d for port mirroring:%d\n", - id, pci_func); - } - - return err; -} - -int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, - const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { - - size_t stats_size = sizeof(struct __qlcnic_esw_statistics); - struct __qlcnic_esw_statistics *stats; - dma_addr_t stats_dma_t; - void *stats_addr; - u32 arg1; - int err; - - if (esw_stats == NULL) - return -ENOMEM; - - if (adapter->op_mode != QLCNIC_MGMT_FUNC && - func != adapter->ahw->pci_func) { - dev_err(&adapter->pdev->dev, - "Not privileged to query stats for func=%d", func); - return -EIO; - } - - stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, - &stats_dma_t, GFP_KERNEL); - if (!stats_addr) { - dev_err(&adapter->pdev->dev, "Unable to allocate memory\n"); - return -ENOMEM; - } - memset(stats_addr, 0, stats_size); - - arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; - arg1 |= rx_tx << 15 | stats_size << 16; - - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - arg1, - MSD(stats_dma_t), - LSD(stats_dma_t), - QLCNIC_CDRP_CMD_GET_ESWITCH_STATS); - - if (!err) { - stats = stats_addr; - esw_stats->context_id = le16_to_cpu(stats->context_id); - esw_stats->version = le16_to_cpu(stats->version); - esw_stats->size = le16_to_cpu(stats->size); - esw_stats->multicast_frames = - le64_to_cpu(stats->multicast_frames); - esw_stats->broadcast_frames = - le64_to_cpu(stats->broadcast_frames); - esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames); - esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames); - esw_stats->local_frames = le64_to_cpu(stats->local_frames); - esw_stats->errors = le64_to_cpu(stats->errors); - esw_stats->numbytes = le64_to_cpu(stats->numbytes); - } - - dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, - stats_dma_t); - return err; -} - -int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch, - const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { - - struct __qlcnic_esw_statistics port_stats; - u8 i; - int ret = -EIO; - - if (esw_stats == NULL) - return -ENOMEM; - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - return -EIO; - if (adapter->npars == NULL) - return -EIO; - - memset(esw_stats, 0, sizeof(u64)); - esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - 
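Each aggregate counter is primed with the QLCNIC_ESW_STATS_NOT_AVAIL sentinel here (the remaining fields follow below) so that QLCNIC_ADD_ESW_STATS, used in the per-port loop just after, can distinguish "no port has reported yet" from a genuine zero. The macro itself is defined in qlcnic.h, outside this hunk; a sketch of the semantics it needs, with the sentinel value assumed for illustration:

/* Illustrative only; the driver's QLCNIC_ADD_ESW_STATS lives in qlcnic.h. */
#include <linux/types.h>

#define EXAMPLE_STATS_NOT_AVAIL	0xffffffffffffffffULL	/* assumed sentinel */

static inline void example_add_esw_stat(u64 *total, u64 sample)
{
	if (sample == EXAMPLE_STATS_NOT_AVAIL)
		return;			/* this port did not report the counter */
	if (*total == EXAMPLE_STATS_NOT_AVAIL)
		*total = sample;	/* first valid sample replaces the sentinel */
	else
		*total += sample;	/* accumulate across ports */
}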
esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->context_id = eswitch; - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - if (adapter->npars[i].phy_port != eswitch) - continue; - - memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics)); - if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats)) - continue; - - esw_stats->size = port_stats.size; - esw_stats->version = port_stats.version; - QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames, - port_stats.unicast_frames); - QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames, - port_stats.multicast_frames); - QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames, - port_stats.broadcast_frames); - QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames, - port_stats.dropped_frames); - QLCNIC_ADD_ESW_STATS(esw_stats->errors, - port_stats.errors); - QLCNIC_ADD_ESW_STATS(esw_stats->local_frames, - port_stats.local_frames); - QLCNIC_ADD_ESW_STATS(esw_stats->numbytes, - port_stats.numbytes); - ret = 0; - } - return ret; -} - -int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw, - const u8 port, const u8 rx_tx) -{ - - u32 arg1; - - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - return -EIO; - - if (func_esw == QLCNIC_STATS_PORT) { - if (port >= QLCNIC_MAX_PCI_FUNC) - goto err_ret; - } else if (func_esw == QLCNIC_STATS_ESWITCH) { - if (port >= QLCNIC_NIU_MAX_XG_PORTS) - goto err_ret; - } else { - goto err_ret; - } - - if (rx_tx > QLCNIC_QUERY_TX_COUNTER) - goto err_ret; - - arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12; - arg1 |= BIT_14 | rx_tx << 15; - - return qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - arg1, - 0, - 0, - QLCNIC_CDRP_CMD_GET_ESWITCH_STATS); - -err_ret: - dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d" " rx_tx=%d\n", func_esw, port, rx_tx); - return -EIO; -} - -static int -__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, - u32 *arg1, u32 *arg2) -{ - int err = -EIO; - u8 pci_func; - pci_func = (*arg1 >> 8); - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - *arg1, - 0, - 0, - QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG); - - if (err == QLCNIC_RCODE_SUCCESS) { - *arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); - *arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET); - dev_info(&adapter->pdev->dev, - "eSwitch port config for pci func %d\n", pci_func); - } else { - dev_err(&adapter->pdev->dev, - "Failed to get eswitch port config for pci func %d\n", - pci_func); - } - return err; -} -/* Configure eSwitch port -op_mode = 0 for setting default port behavior -op_mode = 1 for setting vlan id -op_mode = 2 for deleting vlan id -op_type = 0 for vlan_id -op_type = 1 for port vlan_id -*/ -int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, - struct qlcnic_esw_func_cfg *esw_cfg) -{ - int err = -EIO; - u32 arg1, arg2 = 0; - u8 pci_func; - - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - return err; - pci_func = esw_cfg->pci_func; - arg1 = (adapter->npars[pci_func].phy_port & BIT_0); - arg1 |= (pci_func << 8); - - if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) - return err; - arg1 &= ~(0x0ff << 8); - arg1 |= (pci_func << 8); - arg1 &= ~(BIT_2 | BIT_3); - switch (esw_cfg->op_mode) { - case QLCNIC_PORT_DEFAULTS: - arg1 |= (BIT_4 | BIT_6 | BIT_7); - arg2 |= (BIT_0 | BIT_1); - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) - arg2 |= (BIT_2 | BIT_3); - if (!(esw_cfg->discard_tagged)) - 
arg1 &= ~BIT_4; - if (!(esw_cfg->promisc_mode)) - arg1 &= ~BIT_6; - if (!(esw_cfg->mac_override)) - arg1 &= ~BIT_7; - if (!(esw_cfg->mac_anti_spoof)) - arg2 &= ~BIT_0; - if (!(esw_cfg->offload_flags & BIT_0)) - arg2 &= ~(BIT_1 | BIT_2 | BIT_3); - if (!(esw_cfg->offload_flags & BIT_1)) - arg2 &= ~BIT_2; - if (!(esw_cfg->offload_flags & BIT_2)) - arg2 &= ~BIT_3; - break; - case QLCNIC_ADD_VLAN: - arg1 |= (BIT_2 | BIT_5); - arg1 |= (esw_cfg->vlan_id << 16); - break; - case QLCNIC_DEL_VLAN: - arg1 |= (BIT_3 | BIT_5); - arg1 &= ~(0x0ffff << 16); - break; - default: - return err; - } - - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - arg1, - arg2, - 0, - QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH); - - if (err != QLCNIC_RCODE_SUCCESS) { - dev_err(&adapter->pdev->dev, - "Failed to configure eswitch pci func %d\n", pci_func); - } else { - dev_info(&adapter->pdev->dev, - "Configured eSwitch for pci func %d\n", pci_func); - } - - return err; -} - -int -qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, - struct qlcnic_esw_func_cfg *esw_cfg) -{ - u32 arg1, arg2; - u8 phy_port; - if (adapter->op_mode == QLCNIC_MGMT_FUNC) - phy_port = adapter->npars[esw_cfg->pci_func].phy_port; - else - phy_port = adapter->physical_port; - arg1 = phy_port; - arg1 |= (esw_cfg->pci_func << 8); - if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) - return -EIO; - - esw_cfg->discard_tagged = !!(arg1 & BIT_4); - esw_cfg->host_vlan_tag = !!(arg1 & BIT_5); - esw_cfg->promisc_mode = !!(arg1 & BIT_6); - esw_cfg->mac_override = !!(arg1 & BIT_7); - esw_cfg->vlan_id = LSW(arg1 >> 16); - esw_cfg->mac_anti_spoof = (arg2 & 0x1); - esw_cfg->offload_flags = ((arg2 >> 1) & 0x7); - - return 0; -} diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c deleted file mode 100644 index 7c64f2ffc219..000000000000 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ /dev/null @@ -1,1234 +0,0 @@ -/* - * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation - * - * See LICENSE.qlcnic for copyright and licensing details. 
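The deleted qlcnic_ethtool.c below opens with the table-driven statistics idiom: each exported counter name is paired with its size and byte offset inside struct qlcnic_adapter, so a single generic loop in qlcnic_get_ethtool_stats() can service ethtool -S. A self-contained sketch of the idiom, with a hypothetical demo_adapter standing in for the real structure:

/* Illustrative sketch only -- demo_* names are hypothetical. */
#include <linux/ethtool.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_adapter {
	struct {
		u64 xmitcalled;
		u32 rxdropped;
	} stats;
};

struct demo_stat {
	char name[ETH_GSTRING_LEN];	/* string reported to ethtool -S */
	int size;			/* field width: sizeof(u32) or sizeof(u64) */
	int offset;			/* byte offset inside demo_adapter */
};

static const struct demo_stat demo_stats[] = {
	{"xmit_called", sizeof(u64), offsetof(struct demo_adapter, stats.xmitcalled)},
	{"rx_dropped", sizeof(u32), offsetof(struct demo_adapter, stats.rxdropped)},
};

A reader loop then fetches (char *)adapter + offset and widens u32 fields to u64, which is what qlcnic_get_ethtool_stats() does further down in this file.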
- */ - -#include <linux/types.h> -#include <linux/delay.h> -#include <linux/pci.h> -#include <linux/io.h> -#include <linux/netdevice.h> -#include <linux/ethtool.h> - -#include "qlcnic.h" - -struct qlcnic_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; -}; - -#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m) -#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m) - -static const struct qlcnic_stats qlcnic_gstrings_stats[] = { - {"xmit_called", - QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)}, - {"xmit_finished", - QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)}, - {"rx_dropped", - QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)}, - {"tx_dropped", - QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)}, - {"csummed", - QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)}, - {"rx_pkts", - QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)}, - {"lro_pkts", - QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)}, - {"rx_bytes", - QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)}, - {"tx_bytes", - QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)}, - {"lrobytes", - QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)}, - {"lso_frames", - QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)}, - {"xmit_on", - QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)}, - {"xmit_off", - QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)}, - {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure), - QLC_OFF(stats.skb_alloc_failure)}, - {"null rxbuf", - QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)}, - {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error), - QLC_OFF(stats.rx_dma_map_error)}, - {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error), - QLC_OFF(stats.tx_dma_map_error)}, - -}; - -static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = { - "rx unicast frames", - "rx multicast frames", - "rx broadcast frames", - "rx dropped frames", - "rx errors", - "rx local frames", - "rx numbytes", - "tx unicast frames", - "tx multicast frames", - "tx broadcast frames", - "tx dropped frames", - "tx errors", - "tx local frames", - "tx numbytes", -}; - -#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) -#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats) - -static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { - "Register_Test_on_offline", - "Link_Test_on_offline", - "Interrupt_Test_offline", - "Internal_Loopback_offline", - "External_Loopback_offline" -}; - -#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) - -#define QLCNIC_RING_REGS_COUNT 20 -#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32)) -#define QLCNIC_MAX_EEPROM_LEN 1024 - -static const u32 diag_registers[] = { - CRB_CMDPEG_STATE, - CRB_RCVPEG_STATE, - CRB_XG_STATE_P3P, - CRB_FW_CAPABILITIES_1, - ISR_INT_STATE_REG, - QLCNIC_CRB_DRV_ACTIVE, - QLCNIC_CRB_DEV_STATE, - QLCNIC_CRB_DRV_STATE, - QLCNIC_CRB_DRV_SCRATCH, - QLCNIC_CRB_DEV_PARTITION_INFO, - QLCNIC_CRB_DRV_IDC_VER, - QLCNIC_PEG_ALIVE_COUNTER, - QLCNIC_PEG_HALT_STATUS1, - QLCNIC_PEG_HALT_STATUS2, - QLCNIC_CRB_PEG_NET_0+0x3c, - QLCNIC_CRB_PEG_NET_1+0x3c, - QLCNIC_CRB_PEG_NET_2+0x3c, - QLCNIC_CRB_PEG_NET_4+0x3c, - -1 -}; - -#define QLCNIC_MGMT_API_VERSION 2 -#define QLCNIC_DEV_INFO_SIZE 1 -#define QLCNIC_ETHTOOL_REGS_VER 2 -static int qlcnic_get_regs_len(struct net_device *dev) -{ - return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN + - QLCNIC_DEV_INFO_SIZE + 1; -} - -static int qlcnic_get_eeprom_len(struct net_device *dev) -{ - return QLCNIC_FLASH_TOTAL_SIZE; -} - -static void -qlcnic_get_drvinfo(struct net_device *dev, struct 
ethtool_drvinfo *drvinfo) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - u32 fw_major, fw_minor, fw_build; - - fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); - fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); - fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB); - sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); - - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - strlcpy(drvinfo->driver, qlcnic_driver_name, 32); - strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32); -} - -static int -qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - int check_sfp_module = 0; - u16 pcifn = adapter->ahw->pci_func; - - /* read which mode */ - if (adapter->ahw->port_type == QLCNIC_GBE) { - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); - - ecmd->advertising = (ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); - - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->duplex = adapter->link_duplex; - ecmd->autoneg = adapter->link_autoneg; - - } else if (adapter->ahw->port_type == QLCNIC_XGBE) { - u32 val; - - val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); - if (val == QLCNIC_PORT_MODE_802_3_AP) { - ecmd->supported = SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_1000baseT_Full; - } else { - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; - } - - if (netif_running(dev) && adapter->has_link_events) { - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->autoneg = adapter->link_autoneg; - ecmd->duplex = adapter->link_duplex; - goto skip; - } - - val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); - ethtool_cmd_speed_set(ecmd, P3P_LINK_SPEED_MHZ * - P3P_LINK_SPEED_VAL(pcifn, val)); - ecmd->duplex = DUPLEX_FULL; - ecmd->autoneg = AUTONEG_DISABLE; - } else - return -EIO; - -skip: - ecmd->phy_address = adapter->physical_port; - ecmd->transceiver = XCVR_EXTERNAL; - - switch (adapter->ahw->board_type) { - case QLCNIC_BRDTYPE_P3P_REF_QG: - case QLCNIC_BRDTYPE_P3P_4_GB: - case QLCNIC_BRDTYPE_P3P_4_GB_MM: - - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_Autoneg; - case QLCNIC_BRDTYPE_P3P_10G_CX4: - case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: - case QLCNIC_BRDTYPE_P3P_10000_BASE_T: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->autoneg = adapter->link_autoneg; - break; - case QLCNIC_BRDTYPE_P3P_IMEZ: - case QLCNIC_BRDTYPE_P3P_XG_LOM: - case QLCNIC_BRDTYPE_P3P_HMEZ: - ecmd->supported |= SUPPORTED_MII; - ecmd->advertising |= ADVERTISED_MII; - ecmd->port = PORT_MII; - ecmd->autoneg = AUTONEG_DISABLE; - break; - case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: - case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: - case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: - ecmd->advertising |= ADVERTISED_TP; - ecmd->supported |= SUPPORTED_TP; - check_sfp_module = netif_running(dev) && - adapter->has_link_events; - case QLCNIC_BRDTYPE_P3P_10G_XFP: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; - break; - case QLCNIC_BRDTYPE_P3P_10G_TP: - if (adapter->ahw->port_type == QLCNIC_XGBE) { - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); - ecmd->advertising |= - 
(ADVERTISED_FIBRE | ADVERTISED_TP); - ecmd->port = PORT_FIBRE; - check_sfp_module = netif_running(dev) && - adapter->has_link_events; - } else { - ecmd->autoneg = AUTONEG_ENABLE; - ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); - ecmd->advertising |= - (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; - } - break; - default: - dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", - adapter->ahw->board_type); - return -EIO; - } - - if (check_sfp_module) { - switch (adapter->module_type) { - case LINKEVENT_MODULE_OPTICAL_UNKNOWN: - case LINKEVENT_MODULE_OPTICAL_SRLR: - case LINKEVENT_MODULE_OPTICAL_LRM: - case LINKEVENT_MODULE_OPTICAL_SFP_1G: - ecmd->port = PORT_FIBRE; - break; - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: - case LINKEVENT_MODULE_TWINAX: - ecmd->port = PORT_TP; - break; - default: - ecmd->port = PORT_OTHER; - } - } - - return 0; -} - -static int -qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - u32 config = 0; - u32 ret = 0; - struct qlcnic_adapter *adapter = netdev_priv(dev); - - if (adapter->ahw->port_type != QLCNIC_GBE) - return -EOPNOTSUPP; - - /* read which mode */ - if (ecmd->duplex) - config |= 0x1; - - if (ecmd->autoneg) - config |= 0x2; - - switch (ethtool_cmd_speed(ecmd)) { - case SPEED_10: - config |= (0 << 8); - break; - case SPEED_100: - config |= (1 << 8); - break; - case SPEED_1000: - config |= (10 << 8); - break; - default: - return -EIO; - } - - ret = qlcnic_fw_cmd_set_port(adapter, config); - - if (ret == QLCNIC_RCODE_NOT_SUPPORTED) - return -EOPNOTSUPP; - else if (ret) - return -EIO; - - adapter->link_speed = ethtool_cmd_speed(ecmd); - adapter->link_duplex = ecmd->duplex; - adapter->link_autoneg = ecmd->autoneg; - - if (!netif_running(dev)) - return 0; - - dev->netdev_ops->ndo_stop(dev); - return dev->netdev_ops->ndo_open(dev); -} - -static void -qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - struct qlcnic_host_sds_ring *sds_ring; - u32 *regs_buff = p; - int ring, i = 0, j = 0; - - memset(p, 0, qlcnic_get_regs_len(dev)); - regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) | - (adapter->ahw->revision_id << 16) | (adapter->pdev)->device; - - regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff)); - regs_buff[1] = QLCNIC_MGMT_API_VERSION; - - for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++) - regs_buff[i] = QLCRD32(adapter, diag_registers[j]); - - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) - return; - - regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/ - - regs_buff[i++] = 1; /* No. of tx ring */ - regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); - regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer); - - regs_buff[i++] = 2; /* No. of rx ring */ - regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer); - regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer); - - regs_buff[i++] = adapter->max_sds_rings; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &(recv_ctx->sds_rings[ring]); - regs_buff[i++] = readl(sds_ring->crb_sts_consumer); - } -} - -static u32 qlcnic_test_link(struct net_device *dev) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - u32 val; - - val = QLCRD32(adapter, CRB_XG_STATE_P3P); - val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val); - return (val == XG_LINK_UP_P3P) ? 
0 : 1; -} - -static int -qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, - u8 *bytes) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - int offset; - int ret; - - if (eeprom->len == 0) - return -EINVAL; - - eeprom->magic = (adapter->pdev)->vendor | - ((adapter->pdev)->device << 16); - offset = eeprom->offset; - - ret = qlcnic_rom_fast_read_words(adapter, offset, bytes, - eeprom->len); - if (ret < 0) - return ret; - - return 0; -} - -static void -qlcnic_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - - ring->rx_pending = adapter->num_rxd; - ring->rx_jumbo_pending = adapter->num_jumbo_rxd; - ring->tx_pending = adapter->num_txd; - - ring->rx_max_pending = adapter->max_rxd; - ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd; - ring->tx_max_pending = MAX_CMD_DESCRIPTORS; - - ring->rx_mini_max_pending = 0; - ring->rx_mini_pending = 0; -} - -static u32 -qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) -{ - u32 num_desc; - num_desc = max(val, min); - num_desc = min(num_desc, max); - num_desc = roundup_pow_of_two(num_desc); - - if (val != num_desc) { - printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n", - qlcnic_driver_name, r_name, num_desc, val); - } - - return num_desc; -} - -static int -qlcnic_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - u16 num_rxd, num_jumbo_rxd, num_txd; - - if (ring->rx_mini_pending) - return -EOPNOTSUPP; - - num_rxd = qlcnic_validate_ringparam(ring->rx_pending, - MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx"); - - num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending, - MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd, - "rx jumbo"); - - num_txd = qlcnic_validate_ringparam(ring->tx_pending, - MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx"); - - if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd && - num_jumbo_rxd == adapter->num_jumbo_rxd) - return 0; - - adapter->num_rxd = num_rxd; - adapter->num_jumbo_rxd = num_jumbo_rxd; - adapter->num_txd = num_txd; - - return qlcnic_reset_context(adapter); -} - -static void qlcnic_get_channels(struct net_device *dev, - struct ethtool_channels *channel) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - - channel->max_rx = rounddown_pow_of_two(min_t(int, - adapter->max_rx_ques, num_online_cpus())); - channel->max_tx = adapter->max_tx_ques; - - channel->rx_count = adapter->max_sds_rings; - channel->tx_count = adapter->max_tx_ques; -} - -static int qlcnic_set_channels(struct net_device *dev, - struct ethtool_channels *channel) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - int err; - - if (channel->other_count || channel->combined_count || - channel->tx_count != channel->max_tx) - return -EINVAL; - - err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count); - if (err) - return err; - - err = qlcnic_set_max_rss(adapter, channel->rx_count); - netdev_info(dev, "allocated 0x%x sds rings\n", - adapter->max_sds_rings); - return err; -} - -static void -qlcnic_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - int port = adapter->physical_port; - __u32 val; - - if (adapter->ahw->port_type == QLCNIC_GBE) { - if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) - return; - /* get flow control settings */ - val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port)); - pause->rx_pause = 
qlcnic_gb_get_rx_flowctl(val); - val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); - switch (port) { - case 0: - pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val)); - break; - case 1: - pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val)); - break; - case 2: - pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val)); - break; - case 3: - default: - pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val)); - break; - } - } else if (adapter->ahw->port_type == QLCNIC_XGBE) { - if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) - return; - pause->rx_pause = 1; - val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); - if (port == 0) - pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val)); - else - pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val)); - } else { - dev_err(&netdev->dev, "Unknown board type: %x\n", - adapter->ahw->port_type); - } -} - -static int -qlcnic_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - int port = adapter->physical_port; - __u32 val; - - /* read mode */ - if (adapter->ahw->port_type == QLCNIC_GBE) { - if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) - return -EIO; - /* set flow control */ - val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port)); - - if (pause->rx_pause) - qlcnic_gb_rx_flowctl(val); - else - qlcnic_gb_unset_rx_flowctl(val); - - QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), - val); - /* set autoneg */ - val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); - switch (port) { - case 0: - if (pause->tx_pause) - qlcnic_gb_unset_gb0_mask(val); - else - qlcnic_gb_set_gb0_mask(val); - break; - case 1: - if (pause->tx_pause) - qlcnic_gb_unset_gb1_mask(val); - else - qlcnic_gb_set_gb1_mask(val); - break; - case 2: - if (pause->tx_pause) - qlcnic_gb_unset_gb2_mask(val); - else - qlcnic_gb_set_gb2_mask(val); - break; - case 3: - default: - if (pause->tx_pause) - qlcnic_gb_unset_gb3_mask(val); - else - qlcnic_gb_set_gb3_mask(val); - break; - } - QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val); - } else if (adapter->ahw->port_type == QLCNIC_XGBE) { - if (!pause->rx_pause || pause->autoneg) - return -EOPNOTSUPP; - - if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) - return -EIO; - - val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); - if (port == 0) { - if (pause->tx_pause) - qlcnic_xg_unset_xg0_mask(val); - else - qlcnic_xg_set_xg0_mask(val); - } else { - if (pause->tx_pause) - qlcnic_xg_unset_xg1_mask(val); - else - qlcnic_xg_set_xg1_mask(val); - } - QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val); - } else { - dev_err(&netdev->dev, "Unknown board type: %x\n", - adapter->ahw->port_type); - } - return 0; -} - -static int qlcnic_reg_test(struct net_device *dev) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - u32 data_read; - - data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0)); - if ((data_read & 0xffff) != adapter->pdev->vendor) - return 1; - - return 0; -} - -static int qlcnic_get_sset_count(struct net_device *dev, int sset) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - switch (sset) { - case ETH_SS_TEST: - return QLCNIC_TEST_LEN; - case ETH_SS_STATS: - if (adapter->flags & QLCNIC_ESWITCH_ENABLED) - return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN; - return QLCNIC_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static int qlcnic_irq_test(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - int max_sds_rings = adapter->max_sds_rings; - int ret; - - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return -EIO; - - ret = 
qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); - if (ret) - goto clear_it; - - adapter->diag_cnt = 0; - ret = qlcnic_issue_cmd(adapter, adapter->ahw->pci_func, - adapter->fw_hal_version, adapter->ahw->pci_func, - 0, 0, 0x00000011); - if (ret) - goto done; - - msleep(10); - - ret = !adapter->diag_cnt; - -done: - qlcnic_diag_free_res(netdev, max_sds_rings); - -clear_it: - adapter->max_sds_rings = max_sds_rings; - clear_bit(__QLCNIC_RESETTING, &adapter->state); - return ret; -} - -#define QLCNIC_ILB_PKT_SIZE 64 -#define QLCNIC_NUM_ILB_PKT 16 -#define QLCNIC_ILB_MAX_RCV_LOOP 10 - -static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[]) -{ - unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00}; - - memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE); - - memcpy(data, mac, ETH_ALEN); - memcpy(data + ETH_ALEN, mac, ETH_ALEN); - - memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data)); -} - -int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]) -{ - unsigned char buff[QLCNIC_ILB_PKT_SIZE]; - qlcnic_create_loopback_buff(buff, mac); - return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE); -} - -static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0]; - struct sk_buff *skb; - int i, loop, cnt = 0; - - for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { - skb = dev_alloc_skb(QLCNIC_ILB_PKT_SIZE); - qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); - skb_put(skb, QLCNIC_ILB_PKT_SIZE); - - adapter->diag_cnt = 0; - qlcnic_xmit_frame(skb, adapter->netdev); - - loop = 0; - do { - msleep(1); - qlcnic_process_rcv_ring_diag(sds_ring); - if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) - break; - } while (!adapter->diag_cnt); - - dev_kfree_skb_any(skb); - - if (!adapter->diag_cnt) - dev_warn(&adapter->pdev->dev, "LB Test: packet %d" - " not received\n", i + 1); - else - cnt++; - } - if (cnt != i) { - dev_warn(&adapter->pdev->dev, "LB Test failed\n"); - return -1; - } - return 0; -} - -static int qlcnic_loopback_test(struct net_device *netdev, u8 mode) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - int max_sds_rings = adapter->max_sds_rings; - struct qlcnic_host_sds_ring *sds_ring; - int loop = 0; - int ret; - - if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) { - netdev_info(netdev, "Firmware is not loopback test capable\n"); - return -EOPNOTSUPP; - } - - netdev_info(netdev, "%s loopback test in progress\n", - mode == QLCNIC_ILB_MODE ? 
"internal" : "external"); - if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { - netdev_warn(netdev, "Loopback test not supported for " - "non-privileged function\n"); - return 0; - } - - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return -EBUSY; - - ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); - if (ret) - goto clear_it; - - sds_ring = &adapter->recv_ctx->sds_rings[0]; - - ret = qlcnic_set_lb_mode(adapter, mode); - if (ret) - goto free_res; - - adapter->diag_cnt = 0; - do { - msleep(500); - qlcnic_process_rcv_ring_diag(sds_ring); - if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { - netdev_info(netdev, "firmware didn't respond to loopback" - " configure request\n"); - ret = -QLCNIC_FW_NOT_RESPOND; - goto free_res; - } else if (adapter->diag_cnt) { - ret = adapter->diag_cnt; - goto free_res; - } - } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state)); - - ret = qlcnic_do_lb_test(adapter); - - qlcnic_clear_lb_mode(adapter); - - free_res: - qlcnic_diag_free_res(netdev, max_sds_rings); - - clear_it: - adapter->max_sds_rings = max_sds_rings; - clear_bit(__QLCNIC_RESETTING, &adapter->state); - return ret; -} - -static void -qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, - u64 *data) -{ - memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN); - - data[0] = qlcnic_reg_test(dev); - if (data[0]) - eth_test->flags |= ETH_TEST_FL_FAILED; - - data[1] = (u64) qlcnic_test_link(dev); - if (data[1]) - eth_test->flags |= ETH_TEST_FL_FAILED; - - if (eth_test->flags & ETH_TEST_FL_OFFLINE) { - data[2] = qlcnic_irq_test(dev); - if (data[2]) - eth_test->flags |= ETH_TEST_FL_FAILED; - - data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE); - if (data[3]) - eth_test->flags |= ETH_TEST_FL_FAILED; - - if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) { - data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE); - if (data[4]) - eth_test->flags |= ETH_TEST_FL_FAILED; - eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; - } - } -} - -static void -qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - int index, i; - - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *qlcnic_gstrings_test, - QLCNIC_TEST_LEN * ETH_GSTRING_LEN); - break; - case ETH_SS_STATS: - for (index = 0; index < QLCNIC_STATS_LEN; index++) { - memcpy(data + index * ETH_GSTRING_LEN, - qlcnic_gstrings_stats[index].stat_string, - ETH_GSTRING_LEN); - } - if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) - return; - for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) { - memcpy(data + index * ETH_GSTRING_LEN, - qlcnic_device_gstrings_stats[i], - ETH_GSTRING_LEN); - } - } -} - -#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \ - (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ? 
0 : VAL1) - -static void -qlcnic_fill_device_stats(int *index, u64 *data, - struct __qlcnic_esw_statistics *stats) -{ - int ind = *index; - - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes); - - *index = ind; -} - -static void -qlcnic_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 * data) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - struct qlcnic_esw_statistics port_stats; - int index, ret; - - for (index = 0; index < QLCNIC_STATS_LEN; index++) { - char *p = - (char *)adapter + - qlcnic_gstrings_stats[index].stat_offset; - data[index] = - (qlcnic_gstrings_stats[index].sizeof_stat == - sizeof(u64)) ? *(u64 *)p:(*(u32 *)p); - } - - if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) - return; - - memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics)); - ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, - QLCNIC_QUERY_RX_COUNTER, &port_stats.rx); - if (ret) - return; - - qlcnic_fill_device_stats(&index, data, &port_stats.rx); - - ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, - QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); - if (ret) - return; - - qlcnic_fill_device_stats(&index, data, &port_stats.tx); -} - -static int qlcnic_set_led(struct net_device *dev, - enum ethtool_phys_id_state state) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - int max_sds_rings = adapter->max_sds_rings; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return -EIO; - - if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) { - clear_bit(__QLCNIC_RESETTING, &adapter->state); - return -EIO; - } - set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state); - } - - if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) - return 0; - - dev_err(&adapter->pdev->dev, - "Failed to set LED blink state.\n"); - break; - - case ETHTOOL_ID_INACTIVE: - if (adapter->nic_ops->config_led(adapter, 0, 0xf)) - dev_err(&adapter->pdev->dev, - "Failed to reset LED blink state.\n"); - - break; - - default: - return -EINVAL; - } - - if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) { - qlcnic_diag_free_res(dev, max_sds_rings); - clear_bit(__QLCNIC_RESETTING, &adapter->state); - } - - return -EIO; -} - -static void -qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - u32 wol_cfg; - - wol->supported = 0; - wol->wolopts = 0; - - wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); - if (wol_cfg & (1UL << adapter->portnum)) - wol->supported |= WAKE_MAGIC; - - wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); - if (wol_cfg & (1UL << adapter->portnum)) - wol->wolopts |= WAKE_MAGIC; -} - -static int -qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - u32 wol_cfg; - - if (wol->wolopts & ~WAKE_MAGIC) - return -EOPNOTSUPP; - - wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); - if (!(wol_cfg & (1 << adapter->portnum))) - return -EOPNOTSUPP; - - wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); - if (wol->wolopts & WAKE_MAGIC) - 
wol_cfg |= 1UL << adapter->portnum; - else - wol_cfg &= ~(1UL << adapter->portnum); - - QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg); - - return 0; -} - -/* - * Set the coalescing parameters. Currently only normal is supported. - * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the - * firmware coalescing to default. - */ -static int qlcnic_set_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) - return -EINVAL; - - /* - * Return Error if unsupported values or - * unsupported parameters are set. - */ - if (ethcoal->rx_coalesce_usecs > 0xffff || - ethcoal->rx_max_coalesced_frames > 0xffff || - ethcoal->tx_coalesce_usecs || - ethcoal->tx_max_coalesced_frames || - ethcoal->rx_coalesce_usecs_irq || - ethcoal->rx_max_coalesced_frames_irq || - ethcoal->tx_coalesce_usecs_irq || - ethcoal->tx_max_coalesced_frames_irq || - ethcoal->stats_block_coalesce_usecs || - ethcoal->use_adaptive_rx_coalesce || - ethcoal->use_adaptive_tx_coalesce || - ethcoal->pkt_rate_low || - ethcoal->rx_coalesce_usecs_low || - ethcoal->rx_max_coalesced_frames_low || - ethcoal->tx_coalesce_usecs_low || - ethcoal->tx_max_coalesced_frames_low || - ethcoal->pkt_rate_high || - ethcoal->rx_coalesce_usecs_high || - ethcoal->rx_max_coalesced_frames_high || - ethcoal->tx_coalesce_usecs_high || - ethcoal->tx_max_coalesced_frames_high) - return -EINVAL; - - if (!ethcoal->rx_coalesce_usecs || - !ethcoal->rx_max_coalesced_frames) { - adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT; - adapter->ahw->coal.rx_time_us = - QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; - adapter->ahw->coal.rx_packets = - QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; - } else { - adapter->ahw->coal.flag = 0; - adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs; - adapter->ahw->coal.rx_packets = - ethcoal->rx_max_coalesced_frames; - } - - qlcnic_config_intr_coalesce(adapter); - - return 0; -} - -static int qlcnic_get_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) - return -EINVAL; - - ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us; - ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets; - - return 0; -} - -static u32 qlcnic_get_msglevel(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - return adapter->msg_enable; -} - -static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - adapter->msg_enable = msglvl; -} - -static int -qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; - - if (fw_dump->clr) - dump->len = fw_dump->tmpl_hdr->size + fw_dump->size; - else - dump->len = 0; - dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; - dump->version = adapter->fw_version; - return 0; -} - -static int -qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, - void *buffer) -{ - int i, copy_sz; - u32 *hdr_ptr, *data; - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; - - if (!fw_dump->clr) { - netdev_info(netdev, "Dump not available\n"); - qlcnic_api_unlock(adapter); - return -EINVAL; - } - /* Copy template header first */ - copy_sz 
= fw_dump->tmpl_hdr->size; - hdr_ptr = (u32 *) fw_dump->tmpl_hdr; - data = buffer; - for (i = 0; i < copy_sz/sizeof(u32); i++) - *data++ = cpu_to_le32(*hdr_ptr++); - - /* Copy captured dump data */ - memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size); - dump->len = copy_sz + fw_dump->size; - dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; - - /* Free dump area once data has been captured */ - vfree(fw_dump->data); - fw_dump->data = NULL; - fw_dump->clr = 0; - - return 0; -} - -static int -qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) -{ - int ret = 0; - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; - - switch (val->flag) { - case QLCNIC_FORCE_FW_DUMP_KEY: - if (!fw_dump->enable) { - netdev_info(netdev, "FW dump not enabled\n"); - return ret; - } - if (fw_dump->clr) { - dev_info(&adapter->pdev->dev, - "Previous dump not cleared, not forcing dump\n"); - return ret; - } - netdev_info(netdev, "Forcing a FW dump\n"); - qlcnic_dev_request_reset(adapter); - break; - case QLCNIC_DISABLE_FW_DUMP: - if (fw_dump->enable) { - netdev_info(netdev, "Disabling FW dump\n"); - fw_dump->enable = 0; - } - break; - case QLCNIC_ENABLE_FW_DUMP: - if (!fw_dump->enable && fw_dump->tmpl_hdr) { - netdev_info(netdev, "Enabling FW dump\n"); - fw_dump->enable = 1; - } - break; - case QLCNIC_FORCE_FW_RESET: - netdev_info(netdev, "Forcing a FW reset\n"); - qlcnic_dev_request_reset(adapter); - adapter->flags &= ~QLCNIC_FW_RESET_OWNER; - break; - default: - if (val->flag > QLCNIC_DUMP_MASK_MAX || - val->flag < QLCNIC_DUMP_MASK_MIN) { - netdev_info(netdev, - "Invalid dump level: 0x%x\n", val->flag); - ret = -EINVAL; - goto out; - } - fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff; - netdev_info(netdev, "Driver mask changed to: 0x%x\n", - fw_dump->tmpl_hdr->drv_cap_mask); - } -out: - return ret; -} - -const struct ethtool_ops qlcnic_ethtool_ops = { - .get_settings = qlcnic_get_settings, - .set_settings = qlcnic_set_settings, - .get_drvinfo = qlcnic_get_drvinfo, - .get_regs_len = qlcnic_get_regs_len, - .get_regs = qlcnic_get_regs, - .get_link = ethtool_op_get_link, - .get_eeprom_len = qlcnic_get_eeprom_len, - .get_eeprom = qlcnic_get_eeprom, - .get_ringparam = qlcnic_get_ringparam, - .set_ringparam = qlcnic_set_ringparam, - .get_channels = qlcnic_get_channels, - .set_channels = qlcnic_set_channels, - .get_pauseparam = qlcnic_get_pauseparam, - .set_pauseparam = qlcnic_set_pauseparam, - .get_wol = qlcnic_get_wol, - .set_wol = qlcnic_set_wol, - .self_test = qlcnic_diag_test, - .get_strings = qlcnic_get_strings, - .get_ethtool_stats = qlcnic_get_ethtool_stats, - .get_sset_count = qlcnic_get_sset_count, - .get_coalesce = qlcnic_get_intr_coalesce, - .set_coalesce = qlcnic_set_intr_coalesce, - .set_phys_id = qlcnic_set_led, - .set_msglevel = qlcnic_set_msglevel, - .get_msglevel = qlcnic_get_msglevel, - .get_dump_flag = qlcnic_get_dump_flag, - .get_dump_data = qlcnic_get_dump_data, - .set_dump = qlcnic_set_dump, -}; diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h deleted file mode 100644 index d14506f764e0..000000000000 --- a/drivers/net/qlcnic/qlcnic_hdr.h +++ /dev/null @@ -1,1023 +0,0 @@ -/* - * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation - * - * See LICENSE.qlcnic for copyright and licensing details. - */ - -#ifndef __QLCNIC_HDR_H_ -#define __QLCNIC_HDR_H_ - -#include <linux/kernel.h> -#include <linux/types.h> - -/* - * The basic unit of access when reading/writing control registers. 
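A note on the register map that follows in this header: the CRB space is organized as hubs, each hosting numbered agents, and the window address for an agent is composed as (hub << 7) | agent, which is what the QLCNIC_HW_CRB_HUB_AGT_ADR_* macros further down expand to. A sketch of the composition (the helper name is illustrative only):

/* Illustrative only; mirrors the (hub << 7) | agent pattern of the
 * QLCNIC_HW_CRB_HUB_AGT_ADR_* macros defined below.
 */
#include <linux/types.h>

static inline u32 example_crb_hub_agt_adr(u32 hub, u32 agent)
{
	return (hub << 7) | agent;	/* CRB adr [31:20] selector */
}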
- */ - -enum { - QLCNIC_HW_H0_CH_HUB_ADR = 0x05, - QLCNIC_HW_H1_CH_HUB_ADR = 0x0E, - QLCNIC_HW_H2_CH_HUB_ADR = 0x03, - QLCNIC_HW_H3_CH_HUB_ADR = 0x01, - QLCNIC_HW_H4_CH_HUB_ADR = 0x06, - QLCNIC_HW_H5_CH_HUB_ADR = 0x07, - QLCNIC_HW_H6_CH_HUB_ADR = 0x08 -}; - -/* Hub 0 */ -enum { - QLCNIC_HW_MN_CRB_AGT_ADR = 0x15, - QLCNIC_HW_MS_CRB_AGT_ADR = 0x25 -}; - -/* Hub 1 */ -enum { - QLCNIC_HW_PS_CRB_AGT_ADR = 0x73, - QLCNIC_HW_SS_CRB_AGT_ADR = 0x20, - QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b, - QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00, - QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01, - QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02, - QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03, - QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04, - QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58, - QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59, - QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a, - QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a, - QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c, - QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f, - QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12, - QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18 -}; - -/* Hub 2 */ -enum { - QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31, - QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19, - QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29, - - QLCNIC_HW_SN_CRB_AGT_ADR = 0x10, - QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20, - QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22, - QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21, - QLCNIC_HW_QM_CRB_AGT_ADR = 0x66, - QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60, - QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61, - QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62, - QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63, - QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09, - QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d, - QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e, - QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11 -}; - -/* Hub 3 */ -enum { - QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A, - QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50, - QLCNIC_HW_EG_CRB_AGT_ADR = 0x51, - QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08 -}; - -/* Hub 4 */ -enum { - QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40, - QLCNIC_HW_PEGN1_CRB_AGT_ADR, - QLCNIC_HW_PEGN2_CRB_AGT_ADR, - QLCNIC_HW_PEGN3_CRB_AGT_ADR, - QLCNIC_HW_PEGNI_CRB_AGT_ADR, - QLCNIC_HW_PEGND_CRB_AGT_ADR, - QLCNIC_HW_PEGNC_CRB_AGT_ADR, - QLCNIC_HW_PEGR0_CRB_AGT_ADR, - QLCNIC_HW_PEGR1_CRB_AGT_ADR, - QLCNIC_HW_PEGR2_CRB_AGT_ADR, - QLCNIC_HW_PEGR3_CRB_AGT_ADR, - QLCNIC_HW_PEGN4_CRB_AGT_ADR -}; - -/* Hub 5 */ -enum { - QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40, - QLCNIC_HW_PEGS1_CRB_AGT_ADR, - QLCNIC_HW_PEGS2_CRB_AGT_ADR, - QLCNIC_HW_PEGS3_CRB_AGT_ADR, - QLCNIC_HW_PEGSI_CRB_AGT_ADR, - QLCNIC_HW_PEGSD_CRB_AGT_ADR, - QLCNIC_HW_PEGSC_CRB_AGT_ADR -}; - -/* Hub 6 */ -enum { - QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46, - QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47, - QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48, - QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49, - QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16, - QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17, - QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05, - QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06, - QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07 -}; - -/* Floaters - non existent modules */ -#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67 - -/* This field defines PCI/X adr [25:20] of agents on the CRB */ -enum { - QLCNIC_HW_PX_MAP_CRB_PH = 0, - QLCNIC_HW_PX_MAP_CRB_PS, - QLCNIC_HW_PX_MAP_CRB_MN, - QLCNIC_HW_PX_MAP_CRB_MS, - QLCNIC_HW_PX_MAP_CRB_PGR1, - QLCNIC_HW_PX_MAP_CRB_SRE, - QLCNIC_HW_PX_MAP_CRB_NIU, - QLCNIC_HW_PX_MAP_CRB_QMN, - QLCNIC_HW_PX_MAP_CRB_SQN0, - QLCNIC_HW_PX_MAP_CRB_SQN1, - QLCNIC_HW_PX_MAP_CRB_SQN2, - QLCNIC_HW_PX_MAP_CRB_SQN3, - QLCNIC_HW_PX_MAP_CRB_QMS, - QLCNIC_HW_PX_MAP_CRB_SQS0, - QLCNIC_HW_PX_MAP_CRB_SQS1, - QLCNIC_HW_PX_MAP_CRB_SQS2, - QLCNIC_HW_PX_MAP_CRB_SQS3, - QLCNIC_HW_PX_MAP_CRB_PGN0, - QLCNIC_HW_PX_MAP_CRB_PGN1, - QLCNIC_HW_PX_MAP_CRB_PGN2, - QLCNIC_HW_PX_MAP_CRB_PGN3, - 
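- /*
-  * Illustrative aside, derived from the window macros further down
-  * rather than from original text: each QLCNIC_HW_PX_MAP_CRB_* value
-  * indexes one of the 1MB CRB windows, so an agent's window base
-  * works out to QLCNIC_PCI_CRBSPACE + index * QLCNIC_PCI_CRB_WINDOWSIZE,
-  * which is exactly what QLCNIC_PCI_CRB_WINDOW() computes below.
-  */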
QLCNIC_HW_PX_MAP_CRB_PGND, - QLCNIC_HW_PX_MAP_CRB_PGNI, - QLCNIC_HW_PX_MAP_CRB_PGS0, - QLCNIC_HW_PX_MAP_CRB_PGS1, - QLCNIC_HW_PX_MAP_CRB_PGS2, - QLCNIC_HW_PX_MAP_CRB_PGS3, - QLCNIC_HW_PX_MAP_CRB_PGSD, - QLCNIC_HW_PX_MAP_CRB_PGSI, - QLCNIC_HW_PX_MAP_CRB_SN, - QLCNIC_HW_PX_MAP_CRB_PGR2, - QLCNIC_HW_PX_MAP_CRB_EG, - QLCNIC_HW_PX_MAP_CRB_PH2, - QLCNIC_HW_PX_MAP_CRB_PS2, - QLCNIC_HW_PX_MAP_CRB_CAM, - QLCNIC_HW_PX_MAP_CRB_CAS0, - QLCNIC_HW_PX_MAP_CRB_CAS1, - QLCNIC_HW_PX_MAP_CRB_CAS2, - QLCNIC_HW_PX_MAP_CRB_C2C0, - QLCNIC_HW_PX_MAP_CRB_C2C1, - QLCNIC_HW_PX_MAP_CRB_TIMR, - QLCNIC_HW_PX_MAP_CRB_PGR3, - QLCNIC_HW_PX_MAP_CRB_RPMX1, - QLCNIC_HW_PX_MAP_CRB_RPMX2, - QLCNIC_HW_PX_MAP_CRB_RPMX3, - QLCNIC_HW_PX_MAP_CRB_RPMX4, - QLCNIC_HW_PX_MAP_CRB_RPMX5, - QLCNIC_HW_PX_MAP_CRB_RPMX6, - QLCNIC_HW_PX_MAP_CRB_RPMX7, - QLCNIC_HW_PX_MAP_CRB_XDMA, - QLCNIC_HW_PX_MAP_CRB_I2Q, - QLCNIC_HW_PX_MAP_CRB_ROMUSB, - QLCNIC_HW_PX_MAP_CRB_CAS3, - QLCNIC_HW_PX_MAP_CRB_RPMX0, - QLCNIC_HW_PX_MAP_CRB_RPMX8, - QLCNIC_HW_PX_MAP_CRB_RPMX9, - QLCNIC_HW_PX_MAP_CRB_OCM0, - QLCNIC_HW_PX_MAP_CRB_OCM1, - QLCNIC_HW_PX_MAP_CRB_SMB, - QLCNIC_HW_PX_MAP_CRB_I2C0, - QLCNIC_HW_PX_MAP_CRB_I2C1, - QLCNIC_HW_PX_MAP_CRB_LPC, - QLCNIC_HW_PX_MAP_CRB_PGNC, - QLCNIC_HW_PX_MAP_CRB_PGR0 -}; - -#define BIT_0 0x1 -#define BIT_1 0x2 -#define BIT_2 0x4 -#define BIT_3 0x8 -#define BIT_4 0x10 -#define BIT_5 0x20 -#define BIT_6 0x40 -#define BIT_7 0x80 -#define BIT_8 0x100 -#define BIT_9 0x200 -#define BIT_10 0x400 -#define BIT_11 0x800 -#define BIT_12 0x1000 -#define BIT_13 0x2000 -#define BIT_14 0x4000 -#define BIT_15 0x8000 -#define BIT_16 0x10000 -#define BIT_17 0x20000 -#define BIT_18 0x40000 -#define BIT_19 0x80000 -#define BIT_20 0x100000 -#define BIT_21 0x200000 -#define BIT_22 0x400000 -#define BIT_23 0x800000 -#define BIT_24 0x1000000 -#define BIT_25 0x2000000 -#define BIT_26 0x4000000 -#define BIT_27 0x8000000 -#define BIT_28 0x10000000 -#define BIT_29 0x20000000 -#define BIT_30 0x40000000 -#define BIT_31 0x80000000 - -/* This field defines CRB adr [31:20] of the agents */ - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \ - ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \ - ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \ - ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR) - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR) -#define 
QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR) - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \ - ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \ - ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \ - ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR) - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR) - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \ - 
((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR) - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR) - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR) - -#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c) - -#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034) - -#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000) -#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000) - -#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) -#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) -#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c) -#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) -#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044) -#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) -#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8) - -#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) - -#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) -#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) -#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) -#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) -#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) -#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) - -/* Lock IDs for ROM lock */ -#define ROM_LOCK_DRIVER 0x0d417340 - -/****************************************************************************** -* -* Definitions specific to M25P flash -* -******************************************************************************* -*/ - -/* all are 1MB windows */ - -#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000 -#define QLCNIC_PCI_CRB_WINDOW(A) \ - (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE) - -#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU) -#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE) -#define QLCNIC_CRB_ROMUSB \ - 
QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB) -#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q) -#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0) -#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB) -#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64) - -#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH) -#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2) -#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0) -#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1) -#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2) -#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3) -#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2) -#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND) -#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI) -#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN) -#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN) - -#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS) -#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD - -#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR)) -#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK)) -#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK)) -#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS)) -#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK)) -#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1)) -#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1)) -#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2)) -#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) -#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) -#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) -#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4)) -#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4)) -#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5)) -#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5)) -#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6)) -#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6)) -#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) -#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) - -#define QLCNIC_PCI_MN_2M (0) -#define QLCNIC_PCI_MS_2M (0x80000) -#define QLCNIC_PCI_OCM0_2M (0x000c0000UL) -#define QLCNIC_PCI_CRBSPACE (0x06000000UL) -#define QLCNIC_PCI_CAMQM (0x04800000UL) -#define QLCNIC_PCI_CAMQM_END (0x04800800UL) -#define QLCNIC_PCI_2MB_SIZE (0x00200000UL) -#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL) - -#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM) - -#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL) -#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL) -#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL) -#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL) -#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL) -#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL) -#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL) -#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL) - -/* - * Register offsets 
for MN - */ -#define QLCNIC_MIU_CONTROL (0x000) -#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL) - -/* 200ms delay in each loop */ -#define QLCNIC_NIU_PHY_WAITLEN 200000 -/* 10 seconds before we give up */ -#define QLCNIC_NIU_PHY_WAITMAX 50 -#define QLCNIC_NIU_MAX_GBE_PORTS 4 -#define QLCNIC_NIU_MAX_XG_PORTS 2 - -#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000) -#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c) -#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098) - -#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \ - (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000) -#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \ - (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000) - - -#define TEST_AGT_CTRL (0x00) - -#define TA_CTL_START BIT_0 -#define TA_CTL_ENABLE BIT_1 -#define TA_CTL_WRITE BIT_2 -#define TA_CTL_BUSY BIT_3 - -/* - * Register offsets for MN - */ -#define MIU_TEST_AGT_BASE (0x90) - -#define MIU_TEST_AGT_ADDR_LO (0x04) -#define MIU_TEST_AGT_ADDR_HI (0x08) -#define MIU_TEST_AGT_WRDATA_LO (0x10) -#define MIU_TEST_AGT_WRDATA_HI (0x14) -#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20) -#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24) -#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1))) -#define MIU_TEST_AGT_RDDATA_LO (0x18) -#define MIU_TEST_AGT_RDDATA_HI (0x1c) -#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28) -#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c) -#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1))) - -#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 -#define MIU_TEST_AGT_UPPER_ADDR(off) (0) - -/* - * Register offsets for MS - */ -#define SIU_TEST_AGT_BASE (0x60) - -#define SIU_TEST_AGT_ADDR_LO (0x04) -#define SIU_TEST_AGT_ADDR_HI (0x18) -#define SIU_TEST_AGT_WRDATA_LO (0x08) -#define SIU_TEST_AGT_WRDATA_HI (0x0c) -#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i))) -#define SIU_TEST_AGT_RDDATA_LO (0x10) -#define SIU_TEST_AGT_RDDATA_HI (0x14) -#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i))) - -#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 -#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) - -/* XG Link status */ -#define XG_LINK_UP 0x10 -#define XG_LINK_DOWN 0x20 - -#define XG_LINK_UP_P3P 0x01 -#define XG_LINK_DOWN_P3P 0x02 -#define XG_LINK_STATE_P3P_MASK 0xf -#define XG_LINK_STATE_P3P(pcifn, val) \ - (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK) - -#define P3P_LINK_SPEED_MHZ 100 -#define P3P_LINK_SPEED_MASK 0xff -#define P3P_LINK_SPEED_REG(pcifn) \ - (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) -#define P3P_LINK_SPEED_VAL(pcifn, reg) \ - (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK) - -#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000) -#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg)) -#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150)) -#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154)) -#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158)) -#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100)) -#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120)) -#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124)) - -#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200)) -#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700)) -#define QLCNIC_REG(X) (NIC_CRB_BASE+(X)) -#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X)) - -#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18)) -#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c)) -#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20)) -#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24)) -#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28)) - -#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50)) -#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c)) - -#define CRB_XG_STATE_P3P 
(QLCNIC_REG(0x98)) -#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8)) -#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec)) - -#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4)) - -#define CRB_V2P_0 (QLCNIC_REG(0x290)) -#define CRB_V2P(port) (CRB_V2P_0+((port)*4)) -#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0)) - -#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128)) -#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0)) - -/* - * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address - * which can be read by the Phantom host to get producer/consumer indexes from - * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following - * registers will be used for the addresses of the ring's shared memory - * on the Phantom. - */ - -#define qlcnic_get_temp_val(x) ((x) >> 16) -#define qlcnic_get_temp_state(x) ((x) & 0xffff) -#define qlcnic_encode_temp(val, state) (((val) << 16) | (state)) - -/* - * Temperature control. - */ -enum { - QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */ - QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */ - QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */ -}; - -/* Lock IDs for PHY lock */ -#define PHY_LOCK_DRIVER 0x44524956 - -/* Used for PS PCI Memory access */ -#define PCIX_PS_OP_ADDR_LO (0x10000) -/* via CRB (PS side only) */ -#define PCIX_PS_OP_ADDR_HI (0x10004) - -#define PCIX_INT_VECTOR (0x10100) -#define PCIX_INT_MASK (0x10104) - -#define PCIX_OCM_WINDOW (0x10800) -#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func)) - -#define PCIX_TARGET_STATUS (0x10118) -#define PCIX_TARGET_STATUS_F1 (0x10160) -#define PCIX_TARGET_STATUS_F2 (0x10164) -#define PCIX_TARGET_STATUS_F3 (0x10168) -#define PCIX_TARGET_STATUS_F4 (0x10360) -#define PCIX_TARGET_STATUS_F5 (0x10364) -#define PCIX_TARGET_STATUS_F6 (0x10368) -#define PCIX_TARGET_STATUS_F7 (0x1036c) - -#define PCIX_TARGET_MASK (0x10128) -#define PCIX_TARGET_MASK_F1 (0x10170) -#define PCIX_TARGET_MASK_F2 (0x10174) -#define PCIX_TARGET_MASK_F3 (0x10178) -#define PCIX_TARGET_MASK_F4 (0x10370) -#define PCIX_TARGET_MASK_F5 (0x10374) -#define PCIX_TARGET_MASK_F6 (0x10378) -#define PCIX_TARGET_MASK_F7 (0x1037c) - -#define PCIX_MSI_F(i) (0x13000+((i)*4)) - -#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg)) -#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg)) -#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg)) - -#define PCIE_SEM0_LOCK (0x1c000) -#define PCIE_SEM0_UNLOCK (0x1c004) -#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N)) -#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N)) - -#define PCIE_SETUP_FUNCTION (0x12040) -#define PCIE_SETUP_FUNCTION2 (0x12048) -#define PCIE_MISCCFG_RC (0x1206c) -#define PCIE_TGT_SPLIT_CHICKEN (0x12080) -#define PCIE_CHICKEN3 (0x120c8) - -#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC)) -#define PCIE_MAX_MASTER_SPLIT (0x14048) - -#define QLCNIC_PORT_MODE_NONE 0 -#define QLCNIC_PORT_MODE_XG 1 -#define QLCNIC_PORT_MODE_GB 2 -#define QLCNIC_PORT_MODE_802_3_AP 3 -#define QLCNIC_PORT_MODE_AUTO_NEG 4 -#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5 -#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6 -#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24)) -#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198)) - -#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184)) -#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188)) - -#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1 -#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c)) - -#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14)) -#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0)) -#define 
QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8)) -#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac)) -#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138)) -#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140)) - -#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) -#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) -#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) -#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174)) -#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c)) -#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) -#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) - -/* Device State */ -#define QLCNIC_DEV_COLD 0x1 -#define QLCNIC_DEV_INITIALIZING 0x2 -#define QLCNIC_DEV_READY 0x3 -#define QLCNIC_DEV_NEED_RESET 0x4 -#define QLCNIC_DEV_NEED_QUISCENT 0x5 -#define QLCNIC_DEV_FAILED 0x6 -#define QLCNIC_DEV_QUISCENT 0x7 - -#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */ -#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ -#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ - -#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4))) -#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) -#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) -#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) -#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4))) -#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4))) - -#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4))) -#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4)) - -#define QLCNIC_TYPE_NIC 1 -#define QLCNIC_TYPE_FCOE 2 -#define QLCNIC_TYPE_ISCSI 3 - -#define QLCNIC_RCODE_DRIVER_INFO 0x20000000 -#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30 -#define QLCNIC_RCODE_FATAL_ERROR BIT_31 -#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff) -#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff) - -#define FW_POLL_DELAY (1 * HZ) -#define FW_FAIL_THRESH 2 - -#define QLCNIC_RESET_TIMEOUT_SECS 10 -#define QLCNIC_INIT_TIMEOUT_SECS 30 -#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000 -#define QLCNIC_RCVPEG_CHECK_DELAY 10 -#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60 -#define QLCNIC_CMDPEG_CHECK_DELAY 500 -#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200 -#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45 - -#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) -#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) - -/* - * PCI Interrupt Vector Values. 
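- *
- * Each PCI function owns one bit of this shared vector; a legacy
- * interrupt handler would typically test its function's bit before
- * claiming the interrupt, roughly as follows (an illustrative sketch,
- * not code from the original file; "li" stands for the per-function
- * entry taken from the QLCNIC_LEGACY_INTR_CONFIG table below):
- *
- *	struct qlcnic_legacy_intr_set *li = &legacy_intr[pci_func];
- *	if (!(QLCRD32(adapter, ISR_INT_VECTOR) & li->int_vec_bit))
- *		return IRQ_NONE;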
- */ -#define PCIX_INT_VECTOR_BIT_F0 0x0080 -#define PCIX_INT_VECTOR_BIT_F1 0x0100 -#define PCIX_INT_VECTOR_BIT_F2 0x0200 -#define PCIX_INT_VECTOR_BIT_F3 0x0400 -#define PCIX_INT_VECTOR_BIT_F4 0x0800 -#define PCIX_INT_VECTOR_BIT_F5 0x1000 -#define PCIX_INT_VECTOR_BIT_F6 0x2000 -#define PCIX_INT_VECTOR_BIT_F7 0x4000 - -struct qlcnic_legacy_intr_set { - u32 int_vec_bit; - u32 tgt_status_reg; - u32 tgt_mask_reg; - u32 pci_int_reg; -}; - -#define QLCNIC_FW_API 0x1b216c -#define QLCNIC_DRV_OP_MODE 0x1b2170 -#define QLCNIC_MSIX_BASE 0x132110 -#define QLCNIC_MAX_PCI_FUNC 8 -#define QLCNIC_MAX_VLAN_FILTERS 64 - -/* FW dump defines */ -#define MIU_TEST_CTR 0x41000090 -#define MIU_TEST_ADDR_LO 0x41000094 -#define MIU_TEST_ADDR_HI 0x41000098 -#define FLASH_ROM_WINDOW 0x42110030 -#define FLASH_ROM_DATA 0x42150000 - -static const u32 MIU_TEST_READ_DATA[] = { - 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, }; - -#define QLCNIC_FW_DUMP_REG1 0x00130060 -#define QLCNIC_FW_DUMP_REG2 0x001e0000 -#define QLCNIC_FLASH_SEM2_LK 0x0013C010 -#define QLCNIC_FLASH_SEM2_ULK 0x0013C014 -#define QLCNIC_FLASH_LOCK_ID 0x001B2100 - -#define QLCNIC_RD_DUMP_REG(addr, bar0, data) do { \ - writel((addr & 0xFFFF0000), (void *) (bar0 + \ - QLCNIC_FW_DUMP_REG1)); \ - readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \ - *data = readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + \ - LSW(addr))); \ -} while (0) - -#define QLCNIC_WR_DUMP_REG(addr, bar0, data) do { \ - writel((addr & 0xFFFF0000), (void *) (bar0 + \ - QLCNIC_FW_DUMP_REG1)); \ - readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \ - writel(data, (void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr)));\ - readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr))); \ -} while (0) - -/* PCI function operational mode */ -enum { - QLCNIC_MGMT_FUNC = 0, - QLCNIC_PRIV_FUNC = 1, - QLCNIC_NON_PRIV_FUNC = 2 -}; - -enum { - QLCNIC_PORT_DEFAULTS = 0, - QLCNIC_ADD_VLAN = 1, - QLCNIC_DEL_VLAN = 2 -}; - -#define QLC_DEV_DRV_DEFAULT 0x11111111 - -#define LSB(x) ((uint8_t)(x)) -#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8)) - -#define LSW(x) ((uint16_t)((uint32_t)(x))) -#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16)) - -#define LSD(x) ((uint32_t)((uint64_t)(x))) -#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) - -#define QLCNIC_LEGACY_INTR_CONFIG \ -{ \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ - .tgt_mask_reg = 
ISR_INT_TARGET_MASK_F6, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \ -} - -/* NIU REGS */ - -#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1) - -/* - * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3) - * - * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable - * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream - * Bit 2 : enable_rx => 1:enable frame recv, 0:disable - * Bit 3 : rx_synced => R/O: recv enable synched to recv stream - * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable - * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore - * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal - * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op - * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op - * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op - * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op - * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op - */ -#define qlcnic_gb_rx_flowctl(config_word) \ - ((config_word) |= 1 << 5) -#define qlcnic_gb_get_rx_flowctl(config_word) \ - _qlcnic_crb_get_bit((config_word), 5) -#define qlcnic_gb_unset_rx_flowctl(config_word) \ - ((config_word) &= ~(1 << 5)) - -/* - * NIU GB Pause Ctl Register - */ - -#define qlcnic_gb_set_gb0_mask(config_word) \ - ((config_word) |= 1 << 0) -#define qlcnic_gb_set_gb1_mask(config_word) \ - ((config_word) |= 1 << 2) -#define qlcnic_gb_set_gb2_mask(config_word) \ - ((config_word) |= 1 << 4) -#define qlcnic_gb_set_gb3_mask(config_word) \ - ((config_word) |= 1 << 6) - -#define qlcnic_gb_get_gb0_mask(config_word) \ - _qlcnic_crb_get_bit((config_word), 0) -#define qlcnic_gb_get_gb1_mask(config_word) \ - _qlcnic_crb_get_bit((config_word), 2) -#define qlcnic_gb_get_gb2_mask(config_word) \ - _qlcnic_crb_get_bit((config_word), 4) -#define qlcnic_gb_get_gb3_mask(config_word) \ - _qlcnic_crb_get_bit((config_word), 6) - -#define qlcnic_gb_unset_gb0_mask(config_word) \ - ((config_word) &= ~(1 << 0)) -#define qlcnic_gb_unset_gb1_mask(config_word) \ - ((config_word) &= ~(1 << 2)) -#define qlcnic_gb_unset_gb2_mask(config_word) \ - ((config_word) &= ~(1 << 4)) -#define qlcnic_gb_unset_gb3_mask(config_word) \ - ((config_word) &= ~(1 << 6)) - -/* - * NIU XG Pause Ctl Register - * - * Bit 0 : xg0_mask => 1:disable tx pause frames - * Bit 1 : xg0_request => 1:request single pause frame - * Bit 2 : xg0_on_off => 1:request is pause on, 0:off - * Bit 3 : xg1_mask => 1:disable tx pause frames - * Bit 4 : xg1_request => 1:request single pause frame - * Bit 5 : xg1_on_off => 1:request is pause on, 0:off - */ - -#define qlcnic_xg_set_xg0_mask(config_word) \ - ((config_word) |= 1 << 0) -#define qlcnic_xg_set_xg1_mask(config_word) \ - ((config_word) |= 1 << 3) - -#define qlcnic_xg_get_xg0_mask(config_word) \ - _qlcnic_crb_get_bit((config_word), 0) -#define qlcnic_xg_get_xg1_mask(config_word) \ - _qlcnic_crb_get_bit((config_word), 3) - -#define qlcnic_xg_unset_xg0_mask(config_word) \ - ((config_word) &= ~(1 << 0)) -#define qlcnic_xg_unset_xg1_mask(config_word) \ - ((config_word) &= ~(1 << 3)) - -/* - * NIU XG Pause Ctl Register - * - * Bit 0 : xg0_mask => 1:disable tx pause frames - * Bit 1 : xg0_request => 1:request single pause frame - * Bit 2 : xg0_on_off => 1:request is pause on, 0:off - * Bit 3 : xg1_mask => 
1:disable tx pause frames - * Bit 4 : xg1_request => 1:request single pause frame - * Bit 5 : xg1_on_off => 1:request is pause on, 0:off - */ - -/* - * PHY-Specific MII control/status registers. - */ -#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4 -#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17 - -/* - * PHY-Specific Status Register (reg 17). - * - * Bit 0 : jabber => 1:jabber detected, 0:not - * Bit 1 : polarity => 1:polarity reversed, 0:normal - * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled - * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled - * Bit 4 : energydetect => 1:sleep, 0:active - * Bit 5 : downshift => 1:downshift, 0:no downshift - * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover) - * Bits 7-9 : cablelen => not valid in 10Mb/s mode - * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m - * Bit 10 : link => 1:link up, 0:link down - * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet - * Bit 12 : pagercvd => 1:page received, 0:page not received - * Bit 13 : duplex => 1:full duplex, 0:half duplex - * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd - */ - -#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03) - -#define qlcnic_set_phy_speed(config_word, val) \ - ((config_word) |= ((val & 0x03) << 14)) -#define qlcnic_set_phy_duplex(config_word) \ - ((config_word) |= 1 << 13) -#define qlcnic_clear_phy_duplex(config_word) \ - ((config_word) &= ~(1 << 13)) - -#define qlcnic_get_phy_link(config_word) \ - _qlcnic_crb_get_bit(config_word, 10) -#define qlcnic_get_phy_duplex(config_word) \ - _qlcnic_crb_get_bit(config_word, 13) - -#define QLCNIC_NIU_NON_PROMISC_MODE 0 -#define QLCNIC_NIU_PROMISC_MODE 1 -#define QLCNIC_NIU_ALLMULTI_MODE 2 - -struct crb_128M_2M_sub_block_map { - unsigned valid; - unsigned start_128M; - unsigned end_128M; - unsigned start_2M; -}; - -struct crb_128M_2M_block_map{ - struct crb_128M_2M_sub_block_map sub_block[16]; -}; -#endif /* __QLCNIC_HDR_H_ */ diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c deleted file mode 100644 index 74e9d7b94965..000000000000 --- a/drivers/net/qlcnic/qlcnic_hw.c +++ /dev/null @@ -1,1787 +0,0 @@ -/* - * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation - * - * See LICENSE.qlcnic for copyright and licensing details. 
- */ - -#include "qlcnic.h" - -#include <linux/slab.h> -#include <net/ip.h> -#include <linux/bitops.h> - -#define MASK(n) ((1ULL<<(n))-1) -#define OCM_WIN_P3P(addr) (addr & 0xffc0000) - -#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) - -#define CRB_BLK(off) ((off >> 20) & 0x3f) -#define CRB_SUBBLK(off) ((off >> 16) & 0xf) -#define CRB_WINDOW_2M (0x130060) -#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) -#define CRB_INDIRECT_2M (0x1e0000UL) - - -#ifndef readq -static inline u64 readq(void __iomem *addr) -{ - return readl(addr) | (((u64) readl(addr + 4)) << 32LL); -} -#endif - -#ifndef writeq -static inline void writeq(u64 val, void __iomem *addr) -{ - writel(((u32) (val)), (addr)); - writel(((u32) (val >> 32)), (addr + 4)); -} -#endif - -static const struct crb_128M_2M_block_map -crb_128M_2M_map[64] __cacheline_aligned_in_smp = { - {{{0, 0, 0, 0} } }, /* 0: PCI */ - {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ - {1, 0x0110000, 0x0120000, 0x130000}, - {1, 0x0120000, 0x0122000, 0x124000}, - {1, 0x0130000, 0x0132000, 0x126000}, - {1, 0x0140000, 0x0142000, 0x128000}, - {1, 0x0150000, 0x0152000, 0x12a000}, - {1, 0x0160000, 0x0170000, 0x110000}, - {1, 0x0170000, 0x0172000, 0x12e000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x01e0000, 0x01e0800, 0x122000}, - {0, 0x0000000, 0x0000000, 0x000000} } }, - {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ - {{{0, 0, 0, 0} } }, /* 3: */ - {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ - {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ - {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ - {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ - {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x08f0000, 0x08f2000, 0x172000} } }, - {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x09f0000, 0x09f2000, 0x176000} } }, - {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000},
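- /*
-  * Illustrative aside (not part of the original table): each row is a
-  * struct crb_128M_2M_sub_block_map, i.e. {valid, start_128M,
-  * end_128M, start_2M}. qlcnic_pci_get_crb_addr_2M() below walks this
-  * table to translate a CRB offset in the legacy 128MB PCI map into
-  * an offset in the 2MB BAR0 map.
-  */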
- {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x0af0000, 0x0af2000, 0x17a000} } }, - {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, - {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ - {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ - {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ - {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ - {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ - {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ - {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ - {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ - {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ - {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ - {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ - {{{0, 0, 0, 0} } }, /* 23: */ - {{{0, 0, 0, 0} } }, /* 24: */ - {{{0, 0, 0, 0} } }, /* 25: */ - {{{0, 0, 0, 0} } }, /* 26: */ - {{{0, 0, 0, 0} } }, /* 27: */ - {{{0, 0, 0, 0} } }, /* 28: */ - {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ - {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ - {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ - {{{0} } }, /* 32: PCI */ - {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ - {1, 0x2110000, 0x2120000, 0x130000}, - {1, 0x2120000, 0x2122000, 0x124000}, - {1, 0x2130000, 0x2132000, 0x126000}, - {1, 0x2140000, 0x2142000, 0x128000}, - {1, 0x2150000, 0x2152000, 0x12a000}, - {1, 0x2160000, 0x2170000, 0x110000}, - {1, 0x2170000, 0x2172000, 0x12e000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000} } }, - {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ - {{{0} } }, /* 35: */ - {{{0} } }, /* 36: */ - {{{0} } }, /* 37: */ - {{{0} } }, /* 38: */ - {{{0} } }, /* 39: */ - {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ - {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ - {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ - {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ - {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ - {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ - {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ - {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ - {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ - {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ - {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ - {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ - {{{0} } }, /* 52: */ - {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ - {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: 
RPMX8 */ - {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ - {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ - {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ - {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ - {{{0} } }, /* 59: I2C0 */ - {{{0} } }, /* 60: I2C1 */ - {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ - {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ - {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ -}; - -/* - * top 12 bits of crb internal address (hub, agent) - */ -static const unsigned crb_hub_agt[64] = { - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_PS, - QLCNIC_HW_CRB_HUB_AGT_ADR_MN, - QLCNIC_HW_CRB_HUB_AGT_ADR_MS, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_SRE, - QLCNIC_HW_CRB_HUB_AGT_ADR_NIU, - QLCNIC_HW_CRB_HUB_AGT_ADR_QMN, - QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0, - QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1, - QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2, - QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3, - QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, - QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, - QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4, - QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGND, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI, - QLCNIC_HW_CRB_HUB_AGT_ADR_SN, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_EG, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_PS, - QLCNIC_HW_CRB_HUB_AGT_ADR_CAM, - 0, - 0, - 0, - 0, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7, - QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, - QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, - QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9, - QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_SMB, - QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0, - QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC, - 0, -}; - -/* PCI Windowing for DDR regions. */ - -#define QLCNIC_PCIE_SEM_TIMEOUT 10000 - -int -qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) -{ - int done = 0, timeout = 0; - - while (!done) { - done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem))); - if (done == 1) - break; - if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { - dev_err(&adapter->pdev->dev, - "Failed to acquire sem=%d lock; holdby=%d\n", - sem, id_reg ? 
QLCRD32(adapter, id_reg) : -1); - return -EIO; - } - msleep(1); - } - - if (id_reg) - QLCWR32(adapter, id_reg, adapter->portnum); - - return 0; -} - -void -qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem) -{ - QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem))); -} - -static int -qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, - struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) -{ - u32 i, producer, consumer; - struct qlcnic_cmd_buffer *pbuf; - struct cmd_desc_type0 *cmd_desc; - struct qlcnic_host_tx_ring *tx_ring; - - i = 0; - - if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) - return -EIO; - - tx_ring = adapter->tx_ring; - __netif_tx_lock_bh(tx_ring->txq); - - producer = tx_ring->producer; - consumer = tx_ring->sw_consumer; - - if (nr_desc >= qlcnic_tx_avail(tx_ring)) { - netif_tx_stop_queue(tx_ring->txq); - smp_mb(); - if (qlcnic_tx_avail(tx_ring) > nr_desc) { - if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) - netif_tx_wake_queue(tx_ring->txq); - } else { - adapter->stats.xmit_off++; - __netif_tx_unlock_bh(tx_ring->txq); - return -EBUSY; - } - } - - do { - cmd_desc = &cmd_desc_arr[i]; - - pbuf = &tx_ring->cmd_buf_arr[producer]; - pbuf->skb = NULL; - pbuf->frag_count = 0; - - memcpy(&tx_ring->desc_head[producer], - &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); - - producer = get_next_index(producer, tx_ring->num_desc); - i++; - - } while (i != nr_desc); - - tx_ring->producer = producer; - - qlcnic_update_cmd_producer(adapter, tx_ring); - - __netif_tx_unlock_bh(tx_ring->txq); - - return 0; -} - -static int -qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, - __le16 vlan_id, unsigned op) -{ - struct qlcnic_nic_req req; - struct qlcnic_mac_req *mac_req; - struct qlcnic_vlan_req *vlan_req; - u64 word; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); - - word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - mac_req = (struct qlcnic_mac_req *)&req.words[0]; - mac_req->op = op; - memcpy(mac_req->mac_addr, addr, 6); - - vlan_req = (struct qlcnic_vlan_req *)&req.words[1]; - vlan_req->vlan_id = vlan_id; - - return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); -} - -static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr) -{ - struct list_head *head; - struct qlcnic_mac_list_s *cur; - - /* look up if already exists */ - list_for_each(head, &adapter->mac_list) { - cur = list_entry(head, struct qlcnic_mac_list_s, list); - if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) - return 0; - } - - cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC); - if (cur == NULL) { - dev_err(&adapter->netdev->dev, - "failed to add mac address filter\n"); - return -ENOMEM; - } - memcpy(cur->mac_addr, addr, ETH_ALEN); - - if (qlcnic_sre_macaddr_change(adapter, - cur->mac_addr, 0, QLCNIC_MAC_ADD)) { - kfree(cur); - return -EIO; - } - - list_add_tail(&cur->list, &adapter->mac_list); - return 0; -} - -void qlcnic_set_multi(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct netdev_hw_addr *ha; - static const u8 bcast_addr[ETH_ALEN] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff - }; - u32 mode = VPORT_MISS_MODE_DROP; - - if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) - return; - - qlcnic_nic_add_mac(adapter, adapter->mac_addr); - qlcnic_nic_add_mac(adapter, bcast_addr); - - if (netdev->flags & IFF_PROMISC) { - if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) - mode = 
VPORT_MISS_MODE_ACCEPT_ALL; - goto send_fw_cmd; - } - - if ((netdev->flags & IFF_ALLMULTI) || - (netdev_mc_count(netdev) > adapter->max_mc_count)) { - mode = VPORT_MISS_MODE_ACCEPT_MULTI; - goto send_fw_cmd; - } - - if (!netdev_mc_empty(netdev)) { - netdev_for_each_mc_addr(ha, netdev) { - qlcnic_nic_add_mac(adapter, ha->addr); - } - } - -send_fw_cmd: - if (mode == VPORT_MISS_MODE_ACCEPT_ALL) { - qlcnic_alloc_lb_filters_mem(adapter); - adapter->mac_learn = 1; - } else { - adapter->mac_learn = 0; - } - - qlcnic_nic_set_promisc(adapter, mode); -} - -int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) -{ - struct qlcnic_nic_req req; - u64 word; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE | - ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(mode); - - return qlcnic_send_cmd_descs(adapter, - (struct cmd_desc_type0 *)&req, 1); -} - -void qlcnic_free_mac_list(struct qlcnic_adapter *adapter) -{ - struct qlcnic_mac_list_s *cur; - struct list_head *head = &adapter->mac_list; - - while (!list_empty(head)) { - cur = list_entry(head->next, struct qlcnic_mac_list_s, list); - qlcnic_sre_macaddr_change(adapter, - cur->mac_addr, 0, QLCNIC_MAC_DEL); - list_del(&cur->list); - kfree(cur); - } -} - -void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) -{ - struct qlcnic_filter *tmp_fil; - struct hlist_node *tmp_hnode, *n; - struct hlist_head *head; - int i; - - for (i = 0; i < adapter->fhash.fmax; i++) { - head = &(adapter->fhash.fhead[i]); - - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) - { - if (jiffies > - (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) { - qlcnic_sre_macaddr_change(adapter, - tmp_fil->faddr, tmp_fil->vlan_id, - tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : - QLCNIC_MAC_DEL); - spin_lock_bh(&adapter->mac_learn_lock); - adapter->fhash.fnum--; - hlist_del(&tmp_fil->fnode); - spin_unlock_bh(&adapter->mac_learn_lock); - kfree(tmp_fil); - } - } - } -} - -void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter) -{ - struct qlcnic_filter *tmp_fil; - struct hlist_node *tmp_hnode, *n; - struct hlist_head *head; - int i; - - for (i = 0; i < adapter->fhash.fmax; i++) { - head = &(adapter->fhash.fhead[i]); - - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { - qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr, - tmp_fil->vlan_id, tmp_fil->vlan_id ? - QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL); - spin_lock_bh(&adapter->mac_learn_lock); - adapter->fhash.fnum--; - hlist_del(&tmp_fil->fnode); - spin_unlock_bh(&adapter->mac_learn_lock); - kfree(tmp_fil); - } - } -} - -int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag) -{ - struct qlcnic_nic_req req; - int rv; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK | - ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32)); - - req.words[0] = cpu_to_le64(flag); - - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) - dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n", - flag ? 
"Set" : "Reset"); - return rv; -} - -int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) -{ - if (qlcnic_set_fw_loopback(adapter, mode)) - return -EIO; - - if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) { - qlcnic_set_fw_loopback(adapter, mode); - return -EIO; - } - - msleep(1000); - return 0; -} - -void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter) -{ - int mode = VPORT_MISS_MODE_DROP; - struct net_device *netdev = adapter->netdev; - - qlcnic_set_fw_loopback(adapter, 0); - - if (netdev->flags & IFF_PROMISC) - mode = VPORT_MISS_MODE_ACCEPT_ALL; - else if (netdev->flags & IFF_ALLMULTI) - mode = VPORT_MISS_MODE_ACCEPT_MULTI; - - qlcnic_nic_set_promisc(adapter, mode); - msleep(1000); -} - -/* - * Send the interrupt coalescing parameter set by ethtool to the card. - */ -int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter) -{ - struct qlcnic_nic_req req; - int rv; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE | - ((u64) adapter->portnum << 16)); - - req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32); - req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets | - ((u64) adapter->ahw->coal.rx_time_us) << 16); - req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out | - ((u64) adapter->ahw->coal.type) << 32 | - ((u64) adapter->ahw->coal.sts_ring_mask) << 40); - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) - dev_err(&adapter->netdev->dev, - "Could not send interrupt coalescing parameters\n"); - return rv; -} - -int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable) -{ - struct qlcnic_nic_req req; - u64 word; - int rv; - - if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) - return 0; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(enable); - - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) - dev_err(&adapter->netdev->dev, - "Could not send configure hw lro request\n"); - - return rv; -} - -int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) -{ - struct qlcnic_nic_req req; - u64 word; - int rv; - - if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable) - return 0; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING | - ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(enable); - - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) - dev_err(&adapter->netdev->dev, - "Could not send configure bridge mode request\n"); - - adapter->flags ^= QLCNIC_BRIDGE_ENABLED; - - return rv; -} - - -#define RSS_HASHTYPE_IP_TCP 0x3 - -int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable) -{ - struct qlcnic_nic_req req; - u64 word; - int i, rv; - - static const u64 key[] = { - 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, - 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, - 0x255b0ec26d5a56daULL - }; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - /* - * RSS request: - 
* bits 3-0: hash_method - * 5-4: hash_type_ipv4 - * 7-6: hash_type_ipv6 - * 8: enable - * 9: use indirection table - * 47-10: reserved - * 63-48: indirection table mask - */ - word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | - ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | - ((u64)(enable & 0x1) << 8) | - ((0x7ULL) << 48); - req.words[0] = cpu_to_le64(word); - for (i = 0; i < 5; i++) - req.words[i+1] = cpu_to_le64(key[i]); - - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) - dev_err(&adapter->netdev->dev, "could not configure RSS\n"); - - return rv; -} - -int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd) -{ - struct qlcnic_nic_req req; - struct qlcnic_ipaddr *ipa; - u64 word; - int rv; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(cmd); - ipa = (struct qlcnic_ipaddr *)&req.words[1]; - ipa->ipv4 = ip; - - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) - dev_err(&adapter->netdev->dev, - "could not notify %s IP 0x%x request\n", - (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip); - - return rv; -} - -int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable) -{ - struct qlcnic_nic_req req; - u64 word; - int rv; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - req.words[0] = cpu_to_le64(enable | (enable << 8)); - - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) - dev_err(&adapter->netdev->dev, - "could not configure link notification\n"); - - return rv; -} - -int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter) -{ - struct qlcnic_nic_req req; - u64 word; - int rv; - - if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) - return 0; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_LRO_REQUEST | - ((u64)adapter->portnum << 16) | - ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56); - - req.req_hdr = cpu_to_le64(word); - - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) - dev_err(&adapter->netdev->dev, - "could not clean up lro flows\n"); - - return rv; -} - -/* - * qlcnic_change_mtu - Change the Maximum Transmission Unit - * @returns 0 on success, negative on failure - */ - -int qlcnic_change_mtu(struct net_device *netdev, int mtu) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - int rc = 0; - - if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) { - dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes" - " not supported\n", P3P_MIN_MTU, P3P_MAX_MTU); - return -EINVAL; - } - - rc = qlcnic_fw_cmd_set_mtu(adapter, mtu); - - if (!rc) - netdev->mtu = mtu; - - return rc; -} - - -u32 qlcnic_fix_features(struct net_device *netdev, u32 features) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) { - u32 changed = features ^ netdev->features; - features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); - } - - if (!(features & NETIF_F_RXCSUM)) - features &= ~NETIF_F_LRO; - - return features; -} - - -int qlcnic_set_features(struct net_device *netdev, u32 features) -{ - struct qlcnic_adapter *adapter =
netdev_priv(netdev); - u32 changed = netdev->features ^ features; - int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0; - - if (!(changed & NETIF_F_LRO)) - return 0; - - netdev->features = features ^ NETIF_F_LRO; - - if (qlcnic_config_hw_lro(adapter, hw_lro)) - return -EIO; - - if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter)) - return -EIO; - - return 0; -} - -/* - * Changes the CRB window to the specified window. - */ - /* Returns < 0 if off is not valid, - * 1 if window access is needed. 'off' is set to offset from - * CRB space in 128M pci map - * 0 if no window access is needed. 'off' is set to 2M addr - * In: 'off' is offset from base in 128M pci map - */ -static int -qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter, - ulong off, void __iomem **addr) -{ - const struct crb_128M_2M_sub_block_map *m; - - if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE)) - return -EINVAL; - - off -= QLCNIC_PCI_CRBSPACE; - - /* - * Try direct map - */ - m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; - - if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { - *addr = adapter->ahw->pci_base0 + m->start_2M + - (off - m->start_128M); - return 0; - } - - /* - * Not in direct map, use crb window - */ - *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16)); - return 1; -} - -/* - * In: 'off' is offset from CRB space in 128M pci map - * Out: 'off' is 2M pci map addr - * side effect: lock crb window - */ -static int -qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) -{ - u32 window; - void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M; - - off -= QLCNIC_PCI_CRBSPACE; - - window = CRB_HI(off); - if (window == 0) { - dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off); - return -EIO; - } - - writel(window, addr); - if (readl(addr) != window) { - if (printk_ratelimit()) - dev_warn(&adapter->pdev->dev, - "failed to set CRB window to %d off 0x%lx\n", - window, off); - return -EIO; - } - return 0; -} - -int -qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data) -{ - unsigned long flags; - int rv; - void __iomem *addr = NULL; - - rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); - - if (rv == 0) { - writel(data, addr); - return 0; - } - - if (rv > 0) { - /* indirect access */ - write_lock_irqsave(&adapter->ahw->crb_lock, flags); - crb_win_lock(adapter); - rv = qlcnic_pci_set_crbwindow_2M(adapter, off); - if (!rv) - writel(data, addr); - crb_win_unlock(adapter); - write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); - return rv; - } - - dev_err(&adapter->pdev->dev, - "%s: invalid offset: 0x%016lx\n", __func__, off); - dump_stack(); - return -EIO; -} - -u32 -qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off) -{ - unsigned long flags; - int rv; - u32 data = -1; - void __iomem *addr = NULL; - - rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); - - if (rv == 0) - return readl(addr); - - if (rv > 0) { - /* indirect access */ - write_lock_irqsave(&adapter->ahw->crb_lock, flags); - crb_win_lock(adapter); - if (!qlcnic_pci_set_crbwindow_2M(adapter, off)) - data = readl(addr); - crb_win_unlock(adapter); - write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); - return data; - } - - dev_err(&adapter->pdev->dev, - "%s: invalid offset: 0x%016lx\n", __func__, off); - dump_stack(); - return -1; -} - - -void __iomem * -qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset) -{ - void __iomem *addr = NULL; - - WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr)); 
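/* Note: qlcnic_pci_get_crb_addr_2M() returns 0 only for directly
 * mapped CRB offsets; the WARN_ON above therefore fires if the
 * offset is invalid or would need windowed (indirect) access. */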
- - return addr; -} - - -static int -qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter, - u64 addr, u32 *start) -{ - u32 window; - - window = OCM_WIN_P3P(addr); - - writel(window, adapter->ahw->ocm_win_crb); - /* read back to flush */ - readl(adapter->ahw->ocm_win_crb); - - *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); - return 0; -} - -static int -qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off, - u64 *data, int op) -{ - void __iomem *addr; - int ret; - u32 start; - - mutex_lock(&adapter->ahw->mem_lock); - - ret = qlcnic_pci_set_window_2M(adapter, off, &start); - if (ret != 0) - goto unlock; - - addr = adapter->ahw->pci_base0 + start; - - if (op == 0) /* read */ - *data = readq(addr); - else /* write */ - writeq(*data, addr); - -unlock: - mutex_unlock(&adapter->ahw->mem_lock); - - return ret; -} - -void -qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) -{ - void __iomem *addr = adapter->ahw->pci_base0 + - QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); - - mutex_lock(&adapter->ahw->mem_lock); - *data = readq(addr); - mutex_unlock(&adapter->ahw->mem_lock); -} - -void -qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) -{ - void __iomem *addr = adapter->ahw->pci_base0 + - QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); - - mutex_lock(&adapter->ahw->mem_lock); - writeq(data, addr); - mutex_unlock(&adapter->ahw->mem_lock); -} - -#define MAX_CTL_CHECK 1000 - -int -qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, - u64 off, u64 data) -{ - int i, j, ret; - u32 temp, off8; - void __iomem *mem_crb; - - /* Only 64-bit aligned access */ - if (off & 7) - return -EIO; - - /* P3 onward, test agent base for MIU and SIU is same */ - if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, - QLCNIC_ADDR_QDR_NET_MAX)) { - mem_crb = qlcnic_get_ioaddr(adapter, - QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) { - mem_crb = qlcnic_get_ioaddr(adapter, - QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) - return qlcnic_pci_mem_access_direct(adapter, off, &data, 1); - - return -EIO; - -correct: - off8 = off & ~0xf; - - mutex_lock(&adapter->ahw->mem_lock); - - writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); - writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); - - i = 0; - writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE), - (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - ret = -EIO; - goto done; - } - - i = (off & 0xf) ? 0 : 2; - writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)), - mem_crb + MIU_TEST_AGT_WRDATA(i)); - writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)), - mem_crb + MIU_TEST_AGT_WRDATA(i+1)); - i = (off & 0xf) ? 
2 : 0; - - writel(data & 0xffffffff, - mem_crb + MIU_TEST_AGT_WRDATA(i)); - writel((data >> 32) & 0xffffffff, - mem_crb + MIU_TEST_AGT_WRDATA(i+1)); - - writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), - (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&adapter->pdev->dev, - "failed to write through agent\n"); - ret = -EIO; - } else - ret = 0; - -done: - mutex_unlock(&adapter->ahw->mem_lock); - - return ret; -} - -int -qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, - u64 off, u64 *data) -{ - int j, ret; - u32 temp, off8; - u64 val; - void __iomem *mem_crb; - - /* Only 64-bit aligned access */ - if (off & 7) - return -EIO; - - /* P3 onward, test agent base for MIU and SIU is same */ - if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, - QLCNIC_ADDR_QDR_NET_MAX)) { - mem_crb = qlcnic_get_ioaddr(adapter, - QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) { - mem_crb = qlcnic_get_ioaddr(adapter, - QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) { - return qlcnic_pci_mem_access_direct(adapter, - off, data, 0); - } - - return -EIO; - -correct: - off8 = off & ~0xf; - - mutex_lock(&adapter->ahw->mem_lock); - - writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); - writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); - writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&adapter->pdev->dev, - "failed to read through agent\n"); - ret = -EIO; - } else { - off8 = MIU_TEST_AGT_RDDATA_LO; - if (off & 0xf) - off8 = MIU_TEST_AGT_RDDATA_UPPER_LO; - - temp = readl(mem_crb + off8 + 4); - val = (u64)temp << 32; - val |= readl(mem_crb + off8); - *data = val; - ret = 0; - } - - mutex_unlock(&adapter->ahw->mem_lock); - - return ret; -} - -int qlcnic_get_board_info(struct qlcnic_adapter *adapter) -{ - int offset, board_type, magic; - struct pci_dev *pdev = adapter->pdev; - - offset = QLCNIC_FW_MAGIC_OFFSET; - if (qlcnic_rom_fast_read(adapter, offset, &magic)) - return -EIO; - - if (magic != QLCNIC_BDINFO_MAGIC) { - dev_err(&pdev->dev, "invalid board config, magic=%08x\n", - magic); - return -EIO; - } - - offset = QLCNIC_BRDTYPE_OFFSET; - if (qlcnic_rom_fast_read(adapter, offset, &board_type)) - return -EIO; - - adapter->ahw->board_type = board_type; - - if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) { - u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I); - if ((gpio & 0x8000) == 0) - board_type = QLCNIC_BRDTYPE_P3P_10G_TP; - } - - switch (board_type) { - case QLCNIC_BRDTYPE_P3P_HMEZ: - case QLCNIC_BRDTYPE_P3P_XG_LOM: - case QLCNIC_BRDTYPE_P3P_10G_CX4: - case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: - case QLCNIC_BRDTYPE_P3P_IMEZ: - case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: - case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: - case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: - case QLCNIC_BRDTYPE_P3P_10G_XFP: - case QLCNIC_BRDTYPE_P3P_10000_BASE_T: - adapter->ahw->port_type = QLCNIC_XGBE; - break; - case QLCNIC_BRDTYPE_P3P_REF_QG: - case QLCNIC_BRDTYPE_P3P_4_GB: - case QLCNIC_BRDTYPE_P3P_4_GB_MM: - 
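/* 1G SKUs: REF_QG, 4_GB and 4_GB_MM all map to the GbE port type */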
adapter->ahw->port_type = QLCNIC_GBE; - break; - case QLCNIC_BRDTYPE_P3P_10G_TP: - adapter->ahw->port_type = (adapter->portnum < 2) ? - QLCNIC_XGBE : QLCNIC_GBE; - break; - default: - dev_err(&pdev->dev, "unknown board type %x\n", board_type); - adapter->ahw->port_type = QLCNIC_XGBE; - break; - } - - return 0; -} - -int -qlcnic_wol_supported(struct qlcnic_adapter *adapter) -{ - u32 wol_cfg; - - wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); - if (wol_cfg & (1UL << adapter->portnum)) { - wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); - if (wol_cfg & (1 << adapter->portnum)) - return 1; - } - - return 0; -} - -int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) -{ - struct qlcnic_nic_req req; - int rv; - u64 word; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64((u64)rate << 32); - req.words[1] = cpu_to_le64(state); - - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv) - dev_err(&adapter->pdev->dev, "LED configuration failed.\n"); - - return rv; -} - -/* FW dump related functions */ -static u32 -qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, - u32 *buffer) -{ - int i; - u32 addr, data; - struct __crb *crb = &entry->region.crb; - void __iomem *base = adapter->ahw->pci_base0; - - addr = crb->addr; - - for (i = 0; i < crb->no_ops; i++) { - QLCNIC_RD_DUMP_REG(addr, base, &data); - *buffer++ = cpu_to_le32(addr); - *buffer++ = cpu_to_le32(data); - addr += crb->stride; - } - return crb->no_ops * 2 * sizeof(u32); -} - -static u32 -qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, - struct qlcnic_dump_entry *entry, u32 *buffer) -{ - int i, k, timeout = 0; - void __iomem *base = adapter->ahw->pci_base0; - u32 addr, data; - u8 opcode, no_ops; - struct __ctrl *ctr = &entry->region.ctrl; - struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr; - - addr = ctr->addr; - no_ops = ctr->no_ops; - - for (i = 0; i < no_ops; i++) { - k = 0; - opcode = 0; - for (k = 0; k < 8; k++) { - if (!(ctr->opcode & (1 << k))) - continue; - switch (1 << k) { - case QLCNIC_DUMP_WCRB: - QLCNIC_WR_DUMP_REG(addr, base, ctr->val1); - break; - case QLCNIC_DUMP_RWCRB: - QLCNIC_RD_DUMP_REG(addr, base, &data); - QLCNIC_WR_DUMP_REG(addr, base, data); - break; - case QLCNIC_DUMP_ANDCRB: - QLCNIC_RD_DUMP_REG(addr, base, &data); - QLCNIC_WR_DUMP_REG(addr, base, - (data & ctr->val2)); - break; - case QLCNIC_DUMP_ORCRB: - QLCNIC_RD_DUMP_REG(addr, base, &data); - QLCNIC_WR_DUMP_REG(addr, base, - (data | ctr->val3)); - break; - case QLCNIC_DUMP_POLLCRB: - while (timeout <= ctr->timeout) { - QLCNIC_RD_DUMP_REG(addr, base, &data); - if ((data & ctr->val2) == ctr->val1) - break; - msleep(1); - timeout++; - } - if (timeout > ctr->timeout) { - dev_info(&adapter->pdev->dev, - "Timed out, aborting poll CRB\n"); - return -EINVAL; - } - break; - case QLCNIC_DUMP_RD_SAVE: - if (ctr->index_a) - addr = t_hdr->saved_state[ctr->index_a]; - QLCNIC_RD_DUMP_REG(addr, base, &data); - t_hdr->saved_state[ctr->index_v] = data; - break; - case QLCNIC_DUMP_WRT_SAVED: - if (ctr->index_v) - data = t_hdr->saved_state[ctr->index_v]; - else - data = ctr->val1; - if (ctr->index_a) - addr = t_hdr->saved_state[ctr->index_a]; - QLCNIC_WR_DUMP_REG(addr, base, data); - break; - case QLCNIC_DUMP_MOD_SAVE_ST: - data = t_hdr->saved_state[ctr->index_v]; - data <<= ctr->shl_val; - 
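/* MOD_SAVE_ST rewrites a saved-state word in place: shift left,
 * then right, AND with val2 (if nonzero), OR in val3, add val1,
 * and store the result back into saved_state[index_v]. */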
data >>= ctr->shr_val; - if (ctr->val2) - data &= ctr->val2; - data |= ctr->val3; - data += ctr->val1; - t_hdr->saved_state[ctr->index_v] = data; - break; - default: - dev_info(&adapter->pdev->dev, - "Unknown opcode\n"); - break; - } - } - addr += ctr->stride; - } - return 0; -} - -static u32 -qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, - u32 *buffer) -{ - int loop; - u32 val, data = 0; - struct __mux *mux = &entry->region.mux; - void __iomem *base = adapter->ahw->pci_base0; - - val = mux->val; - for (loop = 0; loop < mux->no_ops; loop++) { - QLCNIC_WR_DUMP_REG(mux->addr, base, val); - QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data); - *buffer++ = cpu_to_le32(val); - *buffer++ = cpu_to_le32(data); - val += mux->val_stride; - } - return 2 * mux->no_ops * sizeof(u32); -} - -static u32 -qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, - u32 *buffer) -{ - int i, loop; - u32 cnt, addr, data, que_id = 0; - void __iomem *base = adapter->ahw->pci_base0; - struct __queue *que = &entry->region.que; - - addr = que->read_addr; - cnt = que->read_addr_cnt; - - for (loop = 0; loop < que->no_ops; loop++) { - QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id); - addr = que->read_addr; - for (i = 0; i < cnt; i++) { - QLCNIC_RD_DUMP_REG(addr, base, &data); - *buffer++ = cpu_to_le32(data); - addr += que->read_addr_stride; - } - que_id += que->stride; - } - return que->no_ops * cnt * sizeof(u32); -} - -static u32 -qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, - u32 *buffer) -{ - int i; - u32 data; - void __iomem *addr; - struct __ocm *ocm = &entry->region.ocm; - - addr = adapter->ahw->pci_base0 + ocm->read_addr; - for (i = 0; i < ocm->no_ops; i++) { - data = readl(addr); - *buffer++ = cpu_to_le32(data); - addr += ocm->read_addr_stride; - } - return ocm->no_ops * sizeof(u32); -} - -static u32 -qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, - u32 *buffer) -{ - int i, count = 0; - u32 fl_addr, size, val, lck_val, addr; - struct __mem *rom = &entry->region.mem; - void __iomem *base = adapter->ahw->pci_base0; - - fl_addr = rom->addr; - size = rom->size/4; -lock_try: - lck_val = readl(base + QLCNIC_FLASH_SEM2_LK); - if (!lck_val && count < MAX_CTL_CHECK) { - msleep(10); - count++; - goto lock_try; - } - writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID)); - for (i = 0; i < size; i++) { - addr = fl_addr & 0xFFFF0000; - QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr); - addr = LSW(fl_addr) + FLASH_ROM_DATA; - QLCNIC_RD_DUMP_REG(addr, base, &val); - fl_addr += 4; - *buffer++ = cpu_to_le32(val); - } - readl(base + QLCNIC_FLASH_SEM2_ULK); - return rom->size; -} - -static u32 -qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter, - struct qlcnic_dump_entry *entry, u32 *buffer) -{ - int i; - u32 cnt, val, data, addr; - void __iomem *base = adapter->ahw->pci_base0; - struct __cache *l1 = &entry->region.cache; - - val = l1->init_tag_val; - - for (i = 0; i < l1->no_ops; i++) { - QLCNIC_WR_DUMP_REG(l1->addr, base, val); - QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val)); - addr = l1->read_addr; - cnt = l1->read_addr_num; - while (cnt) { - QLCNIC_RD_DUMP_REG(addr, base, &data); - *buffer++ = cpu_to_le32(data); - addr += l1->read_addr_stride; - cnt--; - } - val += l1->stride; - } - return l1->no_ops * l1->read_addr_num * sizeof(u32); -} - -static u32 -qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter, - struct qlcnic_dump_entry *entry, u32 *buffer) -{ - int i; - u32 cnt, 
val, data, addr; - u8 poll_mask, poll_to, time_out = 0; - void __iomem *base = adapter->ahw->pci_base0; - struct __cache *l2 = &entry->region.cache; - - val = l2->init_tag_val; - poll_mask = LSB(MSW(l2->ctrl_val)); - poll_to = MSB(MSW(l2->ctrl_val)); - - for (i = 0; i < l2->no_ops; i++) { - QLCNIC_WR_DUMP_REG(l2->addr, base, val); - if (LSW(l2->ctrl_val)) - QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base, - LSW(l2->ctrl_val)); - if (!poll_mask) - goto skip_poll; - do { - QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data); - if (!(data & poll_mask)) - break; - msleep(1); - time_out++; - } while (time_out <= poll_to); - - if (time_out > poll_to) { - dev_err(&adapter->pdev->dev, - "Timeout exceeded in %s, aborting dump\n", - __func__); - return -EINVAL; - } -skip_poll: - addr = l2->read_addr; - cnt = l2->read_addr_num; - while (cnt) { - QLCNIC_RD_DUMP_REG(addr, base, &data); - *buffer++ = cpu_to_le32(data); - addr += l2->read_addr_stride; - cnt--; - } - val += l2->stride; - } - return l2->no_ops * l2->read_addr_num * sizeof(u32); -} - -static u32 -qlcnic_read_memory(struct qlcnic_adapter *adapter, - struct qlcnic_dump_entry *entry, u32 *buffer) -{ - u32 addr, data, test, ret = 0; - int i, reg_read; - struct __mem *mem = &entry->region.mem; - void __iomem *base = adapter->ahw->pci_base0; - - reg_read = mem->size; - addr = mem->addr; - /* check for data size of multiple of 16 and 16 byte alignment */ - if ((addr & 0xf) || (reg_read%16)) { - dev_info(&adapter->pdev->dev, - "Unaligned memory addr:0x%x size:0x%x\n", - addr, reg_read); - return -EINVAL; - } - - mutex_lock(&adapter->ahw->mem_lock); - - while (reg_read != 0) { - QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr); - QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0); - QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base, - TA_CTL_ENABLE | TA_CTL_START); - - for (i = 0; i < MAX_CTL_CHECK; i++) { - QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test); - if (!(test & TA_CTL_BUSY)) - break; - } - if (i == MAX_CTL_CHECK) { - if (printk_ratelimit()) { - dev_err(&adapter->pdev->dev, - "failed to read through agent\n"); - ret = -EINVAL; - goto out; - } - } - for (i = 0; i < 4; i++) { - QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data); - *buffer++ = cpu_to_le32(data); - } - addr += 16; - reg_read -= 16; - ret += 16; - } -out: - mutex_unlock(&adapter->ahw->mem_lock); - return mem->size; -} - -static u32 -qlcnic_dump_nop(struct qlcnic_adapter *adapter, - struct qlcnic_dump_entry *entry, u32 *buffer) -{ - entry->hdr.flags |= QLCNIC_DUMP_SKIP; - return 0; -} - -struct qlcnic_dump_operations fw_dump_ops[] = { - { QLCNIC_DUMP_NOP, qlcnic_dump_nop }, - { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb }, - { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux }, - { QLCNIC_DUMP_QUEUE, qlcnic_dump_que }, - { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom }, - { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm }, - { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl }, - { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache }, - { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache }, - { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache }, - { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache }, - { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache }, - { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache }, - { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache }, - { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache }, - { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom }, - { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory }, - { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl }, - { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop }, - { QLCNIC_DUMP_RDEND, qlcnic_dump_nop }, -}; - -/* Walk the template and collect dump for each entry in the 
dump template */ -static int -qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry, - u32 size) -{ - int ret = 1; - if (size != entry->hdr.cap_size) { - dev_info(dev, - "Invalid dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n", - entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size); - dev_info(dev, "Aborting further dump capture\n"); - ret = 0; - } - return ret; -} - -int qlcnic_dump_fw(struct qlcnic_adapter *adapter) -{ - u32 *buffer; - char mesg[64]; - char *msg[] = {mesg, NULL}; - int i, k, ops_cnt, ops_index, dump_size = 0; - u32 entry_offset, dump, no_entries, buf_offset = 0; - struct qlcnic_dump_entry *entry; - struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; - struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr; - - if (fw_dump->clr) { - dev_info(&adapter->pdev->dev, - "Previous dump not cleared, not capturing dump\n"); - return -EIO; - } - /* Calculate the size for dump data area only */ - for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++) - if (i & tmpl_hdr->drv_cap_mask) - dump_size += tmpl_hdr->cap_sizes[k]; - if (!dump_size) - return -EIO; - - fw_dump->data = vzalloc(dump_size); - if (!fw_dump->data) { - dev_info(&adapter->pdev->dev, - "Unable to allocate (%d KB) for fw dump\n", - dump_size/1024); - return -ENOMEM; - } - buffer = fw_dump->data; - fw_dump->size = dump_size; - no_entries = tmpl_hdr->num_entries; - ops_cnt = ARRAY_SIZE(fw_dump_ops); - entry_offset = tmpl_hdr->offset; - tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION; - tmpl_hdr->sys_info[1] = adapter->fw_version; - - for (i = 0; i < no_entries; i++) { - entry = (void *)tmpl_hdr + entry_offset; - if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) { - entry->hdr.flags |= QLCNIC_DUMP_SKIP; - entry_offset += entry->hdr.offset; - continue; - } - /* Find the handler for this entry */ - ops_index = 0; - while (ops_index < ops_cnt) { - if (entry->hdr.type == fw_dump_ops[ops_index].opcode) - break; - ops_index++; - } - if (ops_index == ops_cnt) { - dev_info(&adapter->pdev->dev, - "Invalid entry type %d, exiting dump\n", - entry->hdr.type); - goto error; - } - /* Collect dump for this entry */ - dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer); - if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, - dump)) - entry->hdr.flags |= QLCNIC_DUMP_SKIP; - buf_offset += entry->hdr.cap_size; - entry_offset += entry->hdr.offset; - buffer = fw_dump->data + buf_offset; - } - if (dump_size != buf_offset) { - dev_info(&adapter->pdev->dev, - "Captured(%d) and expected size(%d) do not match\n", - buf_offset, dump_size); - goto error; - } else { - fw_dump->clr = 1; - snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", - adapter->netdev->name); - dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n", - fw_dump->size); - /* Send a udev event to notify availability of FW dump */ - kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg); - return 0; - } -error: - vfree(fw_dump->data); - return -EINVAL; -} diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c deleted file mode 100644 index 3b6741e4754d..000000000000 --- a/drivers/net/qlcnic/qlcnic_init.c +++ /dev/null @@ -1,1898 +0,0 @@ -/* - * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation - * - * See LICENSE.qlcnic for copyright and licensing details.
- */ - -#include -#include -#include -#include -#include "qlcnic.h" - -struct crb_addr_pair { - u32 addr; - u32 data; -}; - -#define QLCNIC_MAX_CRB_XFORM 60 -static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM]; - -#define crb_addr_transform(name) \ - (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \ - QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20) - -#define QLCNIC_ADDR_ERROR (0xffffffff) - -static void -qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring); - -static int -qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter); - -static void crb_addr_transform_setup(void) -{ - crb_addr_transform(XDMA); - crb_addr_transform(TIMR); - crb_addr_transform(SRE); - crb_addr_transform(SQN3); - crb_addr_transform(SQN2); - crb_addr_transform(SQN1); - crb_addr_transform(SQN0); - crb_addr_transform(SQS3); - crb_addr_transform(SQS2); - crb_addr_transform(SQS1); - crb_addr_transform(SQS0); - crb_addr_transform(RPMX7); - crb_addr_transform(RPMX6); - crb_addr_transform(RPMX5); - crb_addr_transform(RPMX4); - crb_addr_transform(RPMX3); - crb_addr_transform(RPMX2); - crb_addr_transform(RPMX1); - crb_addr_transform(RPMX0); - crb_addr_transform(ROMUSB); - crb_addr_transform(SN); - crb_addr_transform(QMN); - crb_addr_transform(QMS); - crb_addr_transform(PGNI); - crb_addr_transform(PGND); - crb_addr_transform(PGN3); - crb_addr_transform(PGN2); - crb_addr_transform(PGN1); - crb_addr_transform(PGN0); - crb_addr_transform(PGSI); - crb_addr_transform(PGSD); - crb_addr_transform(PGS3); - crb_addr_transform(PGS2); - crb_addr_transform(PGS1); - crb_addr_transform(PGS0); - crb_addr_transform(PS); - crb_addr_transform(PH); - crb_addr_transform(NIU); - crb_addr_transform(I2Q); - crb_addr_transform(EG); - crb_addr_transform(MN); - crb_addr_transform(MS); - crb_addr_transform(CAS2); - crb_addr_transform(CAS1); - crb_addr_transform(CAS0); - crb_addr_transform(CAM); - crb_addr_transform(C2C1); - crb_addr_transform(C2C0); - crb_addr_transform(SMB); - crb_addr_transform(OCM0); - crb_addr_transform(I2C0); -} - -void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_rx_buffer *rx_buf; - int i, ring; - - recv_ctx = adapter->recv_ctx; - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - for (i = 0; i < rds_ring->num_desc; ++i) { - rx_buf = &(rds_ring->rx_buf_arr[i]); - if (rx_buf->skb == NULL) - continue; - - pci_unmap_single(adapter->pdev, - rx_buf->dma, - rds_ring->dma_size, - PCI_DMA_FROMDEVICE); - - dev_kfree_skb_any(rx_buf->skb); - } - } -} - -void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_rx_buffer *rx_buf; - int i, ring; - - recv_ctx = adapter->recv_ctx; - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - - INIT_LIST_HEAD(&rds_ring->free_list); - - rx_buf = rds_ring->rx_buf_arr; - for (i = 0; i < rds_ring->num_desc; i++) { - list_add_tail(&rx_buf->list, - &rds_ring->free_list); - rx_buf++; - } - } -} - -void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter) -{ - struct qlcnic_cmd_buffer *cmd_buf; - struct qlcnic_skb_frag *buffrag; - int i, j; - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; - - cmd_buf = tx_ring->cmd_buf_arr; - for (i = 0; i < tx_ring->num_desc; i++) { - buffrag = cmd_buf->frag_array; - if (buffrag->dma) { - 
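/* frag 0 is the linear skb head (mapped with pci_map_single);
 * the page fragments that follow are released with
 * pci_unmap_page() below. */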
pci_unmap_single(adapter->pdev, buffrag->dma, - buffrag->length, PCI_DMA_TODEVICE); - buffrag->dma = 0ULL; - } - for (j = 0; j < cmd_buf->frag_count; j++) { - buffrag++; - if (buffrag->dma) { - pci_unmap_page(adapter->pdev, buffrag->dma, - buffrag->length, - PCI_DMA_TODEVICE); - buffrag->dma = 0ULL; - } - } - if (cmd_buf->skb) { - dev_kfree_skb_any(cmd_buf->skb); - cmd_buf->skb = NULL; - } - cmd_buf++; - } -} - -void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_host_tx_ring *tx_ring; - int ring; - - recv_ctx = adapter->recv_ctx; - - if (recv_ctx->rds_rings == NULL) - goto skip_rds; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - vfree(rds_ring->rx_buf_arr); - rds_ring->rx_buf_arr = NULL; - } - kfree(recv_ctx->rds_rings); - -skip_rds: - if (adapter->tx_ring == NULL) - return; - - tx_ring = adapter->tx_ring; - vfree(tx_ring->cmd_buf_arr); - tx_ring->cmd_buf_arr = NULL; - kfree(adapter->tx_ring); - adapter->tx_ring = NULL; -} - -int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_host_tx_ring *tx_ring; - struct qlcnic_rx_buffer *rx_buf; - int ring, i, size; - - struct qlcnic_cmd_buffer *cmd_buf_arr; - struct net_device *netdev = adapter->netdev; - - size = sizeof(struct qlcnic_host_tx_ring); - tx_ring = kzalloc(size, GFP_KERNEL); - if (tx_ring == NULL) { - dev_err(&netdev->dev, "failed to allocate tx ring struct\n"); - return -ENOMEM; - } - adapter->tx_ring = tx_ring; - - tx_ring->num_desc = adapter->num_txd; - tx_ring->txq = netdev_get_tx_queue(netdev, 0); - - cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); - if (cmd_buf_arr == NULL) { - dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n"); - goto err_out; - } - tx_ring->cmd_buf_arr = cmd_buf_arr; - - recv_ctx = adapter->recv_ctx; - - size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring); - rds_ring = kzalloc(size, GFP_KERNEL); - if (rds_ring == NULL) { - dev_err(&netdev->dev, "failed to allocate rds ring struct\n"); - goto err_out; - } - recv_ctx->rds_rings = rds_ring; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - switch (ring) { - case RCV_RING_NORMAL: - rds_ring->num_desc = adapter->num_rxd; - rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN; - rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; - break; - - case RCV_RING_JUMBO: - rds_ring->num_desc = adapter->num_jumbo_rxd; - rds_ring->dma_size = - QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN; - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) - rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA; - - rds_ring->skb_size = - rds_ring->dma_size + NET_IP_ALIGN; - break; - } - rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); - if (rds_ring->rx_buf_arr == NULL) { - dev_err(&netdev->dev, "Failed to allocate " - "rx buffer ring %d\n", ring); - goto err_out; - } - INIT_LIST_HEAD(&rds_ring->free_list); - /* - * Now go through all of them, set reference handles - * and put them in the queues. 
- */ - rx_buf = rds_ring->rx_buf_arr; - for (i = 0; i < rds_ring->num_desc; i++) { - list_add_tail(&rx_buf->list, - &rds_ring->free_list); - rx_buf->ref_handle = i; - rx_buf++; - } - spin_lock_init(&rds_ring->lock); - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - sds_ring->irq = adapter->msix_entries[ring].vector; - sds_ring->adapter = adapter; - sds_ring->num_desc = adapter->num_rxd; - - for (i = 0; i < NUM_RCV_DESC_RINGS; i++) - INIT_LIST_HEAD(&sds_ring->free_list[i]); - } - - return 0; - -err_out: - qlcnic_free_sw_resources(adapter); - return -ENOMEM; -} - -/* - * Utility to translate from internal Phantom CRB address - * to external PCI CRB address. - */ -static u32 qlcnic_decode_crb_addr(u32 addr) -{ - int i; - u32 base_addr, offset, pci_base; - - crb_addr_transform_setup(); - - pci_base = QLCNIC_ADDR_ERROR; - base_addr = addr & 0xfff00000; - offset = addr & 0x000fffff; - - for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) { - if (crb_addr_xform[i] == base_addr) { - pci_base = i << 20; - break; - } - } - if (pci_base == QLCNIC_ADDR_ERROR) - return pci_base; - else - return pci_base + offset; -} - -#define QLCNIC_MAX_ROM_WAIT_USEC 100 - -static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter) -{ - long timeout = 0; - long done = 0; - - cond_resched(); - - while (done == 0) { - done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS); - done &= 2; - if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) { - dev_err(&adapter->pdev->dev, - "Timeout reached waiting for rom done"); - return -EIO; - } - udelay(1); - } - return 0; -} - -static int do_rom_fast_read(struct qlcnic_adapter *adapter, - u32 addr, u32 *valp) -{ - QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr); - QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); - QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3); - QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb); - if (qlcnic_wait_rom_done(adapter)) { - dev_err(&adapter->pdev->dev, "Error waiting for rom done\n"); - return -EIO; - } - /* reset abyte_cnt and dummy_byte_cnt */ - QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0); - udelay(10); - QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); - - *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA); - return 0; -} - -static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, - u8 *bytes, size_t size) -{ - int addridx; - int ret = 0; - - for (addridx = addr; addridx < (addr + size); addridx += 4) { - int v; - ret = do_rom_fast_read(adapter, addridx, &v); - if (ret != 0) - break; - *(__le32 *)bytes = cpu_to_le32(v); - bytes += 4; - } - - return ret; -} - -int -qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, - u8 *bytes, size_t size) -{ - int ret; - - ret = qlcnic_rom_lock(adapter); - if (ret < 0) - return ret; - - ret = do_rom_fast_read_words(adapter, addr, bytes, size); - - qlcnic_rom_unlock(adapter); - return ret; -} - -int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp) -{ - int ret; - - if (qlcnic_rom_lock(adapter) != 0) - return -EIO; - - ret = do_rom_fast_read(adapter, addr, valp); - qlcnic_rom_unlock(adapter); - return ret; -} - -int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) -{ - int addr, val; - int i, n, init_delay; - struct crb_addr_pair *buf; - unsigned offset; - u32 off; - struct pci_dev *pdev = adapter->pdev; - - QLCWR32(adapter, CRB_CMDPEG_STATE, 0); - QLCWR32(adapter, CRB_RCVPEG_STATE, 0); - - qlcnic_rom_lock(adapter); - QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff); - 
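/* global software reset, issued under the ROM lock; the CRB
 * init table read back from flash below is then replayed to
 * reinitialize the hardware blocks. */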
qlcnic_rom_unlock(adapter); - - /* Init HW CRB block */ - if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || - qlcnic_rom_fast_read(adapter, 4, &n) != 0) { - dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n); - return -EIO; - } - offset = n & 0xffffU; - n = (n >> 16) & 0xffffU; - - if (n >= 1024) { - dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n"); - return -EIO; - } - - buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); - if (buf == NULL) { - dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n"); - return -ENOMEM; - } - - for (i = 0; i < n; i++) { - if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || - qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { - kfree(buf); - return -EIO; - } - - buf[i].addr = addr; - buf[i].data = val; - } - - for (i = 0; i < n; i++) { - - off = qlcnic_decode_crb_addr(buf[i].addr); - if (off == QLCNIC_ADDR_ERROR) { - dev_err(&pdev->dev, "CRB init value out of range %x\n", - buf[i].addr); - continue; - } - off += QLCNIC_PCI_CRBSPACE; - - if (off & 1) - continue; - - /* skipping cold reboot MAGIC */ - if (off == QLCNIC_CAM_RAM(0x1fc)) - continue; - if (off == (QLCNIC_CRB_I2C0 + 0x1c)) - continue; - if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */ - continue; - if (off == (ROMUSB_GLB + 0xa8)) - continue; - if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ - continue; - if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ - continue; - if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ - continue; - if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET) - continue; - /* skip the function enable register */ - if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION)) - continue; - if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2)) - continue; - if ((off & 0x0ff00000) == QLCNIC_CRB_SMB) - continue; - - init_delay = 1; - /* After writing this register, HW needs time for CRB */ - /* to quiet down (else crb_window returns 0xffffffff) */ - if (off == QLCNIC_ROMUSB_GLB_SW_RESET) - init_delay = 1000; - - QLCWR32(adapter, off, buf[i].data); - - msleep(init_delay); - } - kfree(buf); - - /* Initialize protocol process engine */ - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0); - msleep(1); - QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0); - QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); - return 0; -} - -static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter) -{ - u32 val; - int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT; - - do { - val = QLCRD32(adapter, CRB_CMDPEG_STATE); - - switch (val) { - case PHAN_INITIALIZE_COMPLETE: - case PHAN_INITIALIZE_ACK: - return 0; - case PHAN_INITIALIZE_FAILED: - goto out_err; - default: - break; - } - - msleep(QLCNIC_CMDPEG_CHECK_DELAY); - - } while (--retries); - - QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); - -out_err: - dev_err(&adapter->pdev->dev, "Command Peg initialization not " - "complete, state: 0x%x.\n", val); - return -EIO; -} - -static int 
-qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter) -{ - u32 val; - int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT; - - do { - val = QLCRD32(adapter, CRB_RCVPEG_STATE); - - if (val == PHAN_PEG_RCV_INITIALIZED) - return 0; - - msleep(QLCNIC_RCVPEG_CHECK_DELAY); - - } while (--retries); - - if (!retries) { - dev_err(&adapter->pdev->dev, "Receive Peg initialization not " - "complete, state: 0x%x.\n", val); - return -EIO; - } - - return 0; -} - -int -qlcnic_check_fw_status(struct qlcnic_adapter *adapter) -{ - int err; - - err = qlcnic_cmd_peg_ready(adapter); - if (err) - return err; - - err = qlcnic_receive_peg_ready(adapter); - if (err) - return err; - - QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); - - return err; -} - -int -qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { - - int timeo; - u32 val; - - val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); - val = QLC_DEV_GET_DRV(val, adapter->portnum); - if ((val & 0x3) != QLCNIC_TYPE_NIC) { - dev_err(&adapter->pdev->dev, - "Not an Ethernet NIC func=%u\n", val); - return -EIO; - } - adapter->physical_port = (val >> 2); - if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) - timeo = QLCNIC_INIT_TIMEOUT_SECS; - - adapter->dev_init_timeo = timeo; - - if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo)) - timeo = QLCNIC_RESET_TIMEOUT_SECS; - - adapter->reset_ack_timeo = timeo; - - return 0; -} - -static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region, - struct qlcnic_flt_entry *region_entry) -{ - struct qlcnic_flt_header flt_hdr; - struct qlcnic_flt_entry *flt_entry; - int i = 0, ret; - u32 entry_size; - - memset(region_entry, 0, sizeof(struct qlcnic_flt_entry)); - ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION, - (u8 *)&flt_hdr, - sizeof(struct qlcnic_flt_header)); - if (ret) { - dev_warn(&adapter->pdev->dev, - "error reading flash layout header\n"); - return -EIO; - } - - entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); - flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size); - if (flt_entry == NULL) { - dev_warn(&adapter->pdev->dev, "error allocating memory\n"); - return -EIO; - } - - ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + - sizeof(struct qlcnic_flt_header), - (u8 *)flt_entry, entry_size); - if (ret) { - dev_warn(&adapter->pdev->dev, - "error reading flash layout entries\n"); - goto err_out; - } - - while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) { - if (flt_entry[i].region == region) - break; - i++; - } - if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) { - dev_warn(&adapter->pdev->dev, - "region=%x not found in %d regions\n", region, i); - ret = -EIO; - goto err_out; - } - memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry)); - -err_out: - vfree(flt_entry); - return ret; -} - -int -qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) -{ - struct qlcnic_flt_entry fw_entry; - u32 ver = -1, min_ver; - int ret; - - ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry); - if (!ret) - /* 0-4:-signature, 4-8:-fw version */ - qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4, - (int *)&ver); - else - qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, - (int *)&ver); - - ver = QLCNIC_DECODE_VERSION(ver); - min_ver = QLCNIC_MIN_FW_VERSION; - - if (ver < min_ver) { - dev_err(&adapter->pdev->dev, - "firmware version %d.%d.%d unsupported." 
- "Min supported version %d.%d.%d\n", - _major(ver), _minor(ver), _build(ver), - _major(min_ver), _minor(min_ver), _build(min_ver)); - return -EINVAL; - } - - return 0; -} - -static int -qlcnic_has_mn(struct qlcnic_adapter *adapter) -{ - u32 capability; - capability = 0; - - capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY); - if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) - return 1; - - return 0; -} - -static -struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section) -{ - u32 i; - struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; - __le32 entries = cpu_to_le32(directory->num_entries); - - for (i = 0; i < entries; i++) { - - __le32 offs = cpu_to_le32(directory->findex) + - (i * cpu_to_le32(directory->entry_size)); - __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); - - if (tab_type == section) - return (struct uni_table_desc *) &unirom[offs]; - } - - return NULL; -} - -#define FILEHEADER_SIZE (14 * 4) - -static int -qlcnic_validate_header(struct qlcnic_adapter *adapter) -{ - const u8 *unirom = adapter->fw->data; - struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; - __le32 fw_file_size = adapter->fw->size; - __le32 entries; - __le32 entry_size; - __le32 tab_size; - - if (fw_file_size < FILEHEADER_SIZE) - return -EINVAL; - - entries = cpu_to_le32(directory->num_entries); - entry_size = cpu_to_le32(directory->entry_size); - tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); - - if (fw_file_size < tab_size) - return -EINVAL; - - return 0; -} - -static int -qlcnic_validate_bootld(struct qlcnic_adapter *adapter) -{ - struct uni_table_desc *tab_desc; - struct uni_data_desc *descr; - const u8 *unirom = adapter->fw->data; - int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + - QLCNIC_UNI_BOOTLD_IDX_OFF)); - __le32 offs; - __le32 tab_size; - __le32 data_size; - - tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD); - - if (!tab_desc) - return -EINVAL; - - tab_size = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); - - if (adapter->fw->size < tab_size) - return -EINVAL; - - offs = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx)); - descr = (struct uni_data_desc *)&unirom[offs]; - - data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); - - if (adapter->fw->size < data_size) - return -EINVAL; - - return 0; -} - -static int -qlcnic_validate_fw(struct qlcnic_adapter *adapter) -{ - struct uni_table_desc *tab_desc; - struct uni_data_desc *descr; - const u8 *unirom = adapter->fw->data; - int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + - QLCNIC_UNI_FIRMWARE_IDX_OFF)); - __le32 offs; - __le32 tab_size; - __le32 data_size; - - tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW); - - if (!tab_desc) - return -EINVAL; - - tab_size = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); - - if (adapter->fw->size < tab_size) - return -EINVAL; - - offs = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx)); - descr = (struct uni_data_desc *)&unirom[offs]; - data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); - - if (adapter->fw->size < data_size) - return -EINVAL; - - return 0; -} - -static int -qlcnic_validate_product_offs(struct qlcnic_adapter *adapter) -{ - struct uni_table_desc *ptab_descr; - const u8 *unirom = adapter->fw->data; - int mn_present = qlcnic_has_mn(adapter); - __le32 entries; - __le32 entry_size; - 
__le32 tab_size; - u32 i; - - ptab_descr = qlcnic_get_table_desc(unirom, - QLCNIC_UNI_DIR_SECT_PRODUCT_TBL); - if (!ptab_descr) - return -EINVAL; - - entries = cpu_to_le32(ptab_descr->num_entries); - entry_size = cpu_to_le32(ptab_descr->entry_size); - tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); - - if (adapter->fw->size < tab_size) - return -EINVAL; - -nomn: - for (i = 0; i < entries; i++) { - - __le32 flags, file_chiprev, offs; - u8 chiprev = adapter->ahw->revision_id; - u32 flagbit; - - offs = cpu_to_le32(ptab_descr->findex) + - (i * cpu_to_le32(ptab_descr->entry_size)); - flags = cpu_to_le32(*((int *)&unirom[offs] + - QLCNIC_UNI_FLAGS_OFF)); - file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + - QLCNIC_UNI_CHIP_REV_OFF)); - - flagbit = mn_present ? 1 : 2; - - if ((chiprev == file_chiprev) && - ((1ULL << flagbit) & flags)) { - adapter->file_prd_off = offs; - return 0; - } - } - if (mn_present) { - mn_present = 0; - goto nomn; - } - return -EINVAL; -} - -static int -qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter) -{ - if (qlcnic_validate_header(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: header validation failed\n"); - return -EINVAL; - } - - if (qlcnic_validate_product_offs(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: product validation failed\n"); - return -EINVAL; - } - - if (qlcnic_validate_bootld(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: bootld validation failed\n"); - return -EINVAL; - } - - if (qlcnic_validate_fw(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: firmware validation failed\n"); - return -EINVAL; - } - - return 0; -} - -static -struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter, - u32 section, u32 idx_offset) -{ - const u8 *unirom = adapter->fw->data; - int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + - idx_offset)); - struct uni_table_desc *tab_desc; - __le32 offs; - - tab_desc = qlcnic_get_table_desc(unirom, section); - - if (tab_desc == NULL) - return NULL; - - offs = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * idx); - - return (struct uni_data_desc *)&unirom[offs]; -} - -static u8 * -qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter) -{ - u32 offs = QLCNIC_BOOTLD_START; - - if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) - offs = cpu_to_le32((qlcnic_get_data_desc(adapter, - QLCNIC_UNI_DIR_SECT_BOOTLD, - QLCNIC_UNI_BOOTLD_IDX_OFF))->findex); - - return (u8 *)&adapter->fw->data[offs]; -} - -static u8 * -qlcnic_get_fw_offs(struct qlcnic_adapter *adapter) -{ - u32 offs = QLCNIC_IMAGE_START; - - if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) - offs = cpu_to_le32((qlcnic_get_data_desc(adapter, - QLCNIC_UNI_DIR_SECT_FW, - QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex); - - return (u8 *)&adapter->fw->data[offs]; -} - -static __le32 -qlcnic_get_fw_size(struct qlcnic_adapter *adapter) -{ - if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) - return cpu_to_le32((qlcnic_get_data_desc(adapter, - QLCNIC_UNI_DIR_SECT_FW, - QLCNIC_UNI_FIRMWARE_IDX_OFF))->size); - else - return cpu_to_le32( - *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]); -} - -static __le32 -qlcnic_get_fw_version(struct qlcnic_adapter *adapter) -{ - struct uni_data_desc *fw_data_desc; - const struct firmware *fw = adapter->fw; - __le32 major, minor, sub; - const u8 *ver_str; - int i, ret; - - if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) - return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]); - - fw_data_desc = 
qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, - QLCNIC_UNI_FIRMWARE_IDX_OFF); - ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) + - cpu_to_le32(fw_data_desc->size) - 17; - - for (i = 0; i < 12; i++) { - if (!strncmp(&ver_str[i], "REV=", 4)) { - ret = sscanf(&ver_str[i+4], "%u.%u.%u ", - &major, &minor, &sub); - if (ret != 3) - return 0; - else - return major + (minor << 8) + (sub << 16); - } - } - - return 0; -} - -static __le32 -qlcnic_get_bios_version(struct qlcnic_adapter *adapter) -{ - const struct firmware *fw = adapter->fw; - __le32 bios_ver, prd_off = adapter->file_prd_off; - - if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) - return cpu_to_le32( - *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]); - - bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) - + QLCNIC_UNI_BIOS_VERSION_OFF)); - - return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); -} - -static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter) -{ - if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID)) - dev_info(&adapter->pdev->dev, "Resetting rom_lock\n"); - - qlcnic_pcie_sem_unlock(adapter, 2); -} - -static int -qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter) -{ - u32 heartbeat, ret = -EIO; - int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; - - adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); - - do { - msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS); - heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); - if (heartbeat != adapter->heartbeat) { - ret = QLCNIC_RCODE_SUCCESS; - break; - } - } while (--retries); - - return ret; -} - -int -qlcnic_need_fw_reset(struct qlcnic_adapter *adapter) -{ - if ((adapter->flags & QLCNIC_FW_HANG) || - qlcnic_check_fw_hearbeat(adapter)) { - qlcnic_rom_lock_recovery(adapter); - return 1; - } - - if (adapter->need_fw_reset) - return 1; - - if (adapter->fw) - return 1; - - return 0; -} - -static const char *fw_name[] = { - QLCNIC_UNIFIED_ROMIMAGE_NAME, - QLCNIC_FLASH_ROMIMAGE_NAME, -}; - -int -qlcnic_load_firmware(struct qlcnic_adapter *adapter) -{ - u64 *ptr64; - u32 i, flashaddr, size; - const struct firmware *fw = adapter->fw; - struct pci_dev *pdev = adapter->pdev; - - dev_info(&pdev->dev, "loading firmware from %s\n", - fw_name[adapter->fw_type]); - - if (fw) { - __le64 data; - - size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; - - ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter); - flashaddr = QLCNIC_BOOTLD_START; - - for (i = 0; i < size; i++) { - data = cpu_to_le64(ptr64[i]); - - if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) - return -EIO; - - flashaddr += 8; - } - - size = (__force u32)qlcnic_get_fw_size(adapter) / 8; - - ptr64 = (u64 *)qlcnic_get_fw_offs(adapter); - flashaddr = QLCNIC_IMAGE_START; - - for (i = 0; i < size; i++) { - data = cpu_to_le64(ptr64[i]); - - if (qlcnic_pci_mem_write_2M(adapter, - flashaddr, data)) - return -EIO; - - flashaddr += 8; - } - - size = (__force u32)qlcnic_get_fw_size(adapter) % 8; - if (size) { - data = cpu_to_le64(ptr64[i]); - - if (qlcnic_pci_mem_write_2M(adapter, - flashaddr, data)) - return -EIO; - } - - } else { - u64 data; - u32 hi, lo; - int ret; - struct qlcnic_flt_entry bootld_entry; - - ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION, - &bootld_entry); - if (!ret) { - size = bootld_entry.size / 8; - flashaddr = bootld_entry.start_addr; - } else { - size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; - flashaddr = QLCNIC_BOOTLD_START; - dev_info(&pdev->dev, - "using legacy method to get flash fw region"); - } - - for (i = 0; i < size; i++) { 
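/* legacy flash path: build each 64-bit firmware word from two
 * 32-bit ROM reads (low word first) and write it to adapter
 * memory at flashaddr. */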
- if (qlcnic_rom_fast_read(adapter, - flashaddr, (int *)&lo) != 0) - return -EIO; - if (qlcnic_rom_fast_read(adapter, - flashaddr + 4, (int *)&hi) != 0) - return -EIO; - - data = (((u64)hi << 32) | lo); - - if (qlcnic_pci_mem_write_2M(adapter, - flashaddr, data)) - return -EIO; - - flashaddr += 8; - } - } - msleep(1); - - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020); - QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e); - return 0; -} - -static int -qlcnic_validate_firmware(struct qlcnic_adapter *adapter) -{ - __le32 val; - u32 ver, bios, min_size; - struct pci_dev *pdev = adapter->pdev; - const struct firmware *fw = adapter->fw; - u8 fw_type = adapter->fw_type; - - if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) { - if (qlcnic_validate_unified_romimage(adapter)) - return -EINVAL; - - min_size = QLCNIC_UNI_FW_MIN_SIZE; - } else { - val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]); - if ((__force u32)val != QLCNIC_BDINFO_MAGIC) - return -EINVAL; - - min_size = QLCNIC_FW_MIN_SIZE; - } - - if (fw->size < min_size) - return -EINVAL; - - val = qlcnic_get_fw_version(adapter); - ver = QLCNIC_DECODE_VERSION(val); - - if (ver < QLCNIC_MIN_FW_VERSION) { - dev_err(&pdev->dev, - "%s: firmware version %d.%d.%d unsupported\n", - fw_name[fw_type], _major(ver), _minor(ver), _build(ver)); - return -EINVAL; - } - - val = qlcnic_get_bios_version(adapter); - qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios); - if ((__force u32)val != bios) { - dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", - fw_name[fw_type]); - return -EINVAL; - } - - QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC); - return 0; -} - -static void -qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter) -{ - u8 fw_type; - - switch (adapter->fw_type) { - case QLCNIC_UNKNOWN_ROMIMAGE: - fw_type = QLCNIC_UNIFIED_ROMIMAGE; - break; - - case QLCNIC_UNIFIED_ROMIMAGE: - default: - fw_type = QLCNIC_FLASH_ROMIMAGE; - break; - } - - adapter->fw_type = fw_type; -} - - - -void qlcnic_request_firmware(struct qlcnic_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - int rc; - - adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE; - -next: - qlcnic_get_next_fwtype(adapter); - - if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) { - adapter->fw = NULL; - } else { - rc = request_firmware(&adapter->fw, - fw_name[adapter->fw_type], &pdev->dev); - if (rc != 0) - goto next; - - rc = qlcnic_validate_firmware(adapter); - if (rc != 0) { - release_firmware(adapter->fw); - msleep(1); - goto next; - } - } -} - - -void -qlcnic_release_firmware(struct qlcnic_adapter *adapter) -{ - if (adapter->fw) - release_firmware(adapter->fw); - adapter->fw = NULL; -} - -static void -qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, - struct qlcnic_fw_msg *msg) -{ - u32 cable_OUI; - u16 cable_len; - u16 link_speed; - u8 link_status, module, duplex, autoneg; - u8 lb_status = 0; - struct net_device *netdev = adapter->netdev; - - adapter->has_link_events = 1; - - cable_OUI = msg->body[1] & 0xffffffff; - cable_len = (msg->body[1] >> 32) & 0xffff; - link_speed = (msg->body[1] >> 48) & 0xffff; - - link_status = msg->body[2] & 0xff; - duplex = (msg->body[2] >> 16) & 0xff; - autoneg = (msg->body[2] >> 24) & 0xff; - lb_status = (msg->body[2] >> 32) & 0x3; - - module = (msg->body[2] >> 8) & 0xff; - if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) - dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, " - "length %d\n", cable_OUI, cable_len); - else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) - 
dev_info(&netdev->dev, "unsupported cable length %d\n", - cable_len); - - if (!link_status && (lb_status == QLCNIC_ILB_MODE || - lb_status == QLCNIC_ELB_MODE)) - adapter->ahw->loopback_state |= QLCNIC_LINKEVENT; - - qlcnic_advert_link_change(adapter, link_status); - - if (duplex == LINKEVENT_FULL_DUPLEX) - adapter->link_duplex = DUPLEX_FULL; - else - adapter->link_duplex = DUPLEX_HALF; - - adapter->module_type = module; - adapter->link_autoneg = autoneg; - adapter->link_speed = link_speed; -} - -static void -qlcnic_handle_fw_message(int desc_cnt, int index, - struct qlcnic_host_sds_ring *sds_ring) -{ - struct qlcnic_fw_msg msg; - struct status_desc *desc; - struct qlcnic_adapter *adapter; - struct device *dev; - int i = 0, opcode, ret; - - while (desc_cnt > 0 && i < 8) { - desc = &sds_ring->desc_head[index]; - msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); - msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); - - index = get_next_index(index, sds_ring->num_desc); - desc_cnt--; - } - - adapter = sds_ring->adapter; - dev = &adapter->pdev->dev; - opcode = qlcnic_get_nic_msg_opcode(msg.body[0]); - - switch (opcode) { - case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: - qlcnic_handle_linkevent(adapter, &msg); - break; - case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK: - ret = (u32)(msg.body[1]); - switch (ret) { - case 0: - adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE; - break; - case 1: - dev_info(dev, "loopback already in progress\n"); - adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS; - break; - case 2: - dev_info(dev, "loopback cable is not connected\n"); - adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN; - break; - default: - dev_info(dev, "loopback configure request failed," - " ret %x\n", ret); - adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR; - break; - } - break; - default: - break; - } -} - -static int -qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring, - struct qlcnic_rx_buffer *buffer) -{ - struct sk_buff *skb; - dma_addr_t dma; - struct pci_dev *pdev = adapter->pdev; - - skb = dev_alloc_skb(rds_ring->skb_size); - if (!skb) { - adapter->stats.skb_alloc_failure++; - return -ENOMEM; - } - - skb_reserve(skb, NET_IP_ALIGN); - - dma = pci_map_single(pdev, skb->data, - rds_ring->dma_size, PCI_DMA_FROMDEVICE); - - if (pci_dma_mapping_error(pdev, dma)) { - adapter->stats.rx_dma_map_error++; - dev_kfree_skb_any(skb); - return -ENOMEM; - } - - buffer->skb = skb; - buffer->dma = dma; - - return 0; -} - -static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum) -{ - struct qlcnic_rx_buffer *buffer; - struct sk_buff *skb; - - buffer = &rds_ring->rx_buf_arr[index]; - - if (unlikely(buffer->skb == NULL)) { - WARN_ON(1); - return NULL; - } - - pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, - PCI_DMA_FROMDEVICE); - - skb = buffer->skb; - - if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && - (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) { - adapter->stats.csummed++; - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { - skb_checksum_none_assert(skb); - } - - skb->dev = adapter->netdev; - - buffer->skb = NULL; - - return skb; -} - -static inline int -qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb, - u16 *vlan_tag) -{ - struct ethhdr *eth_hdr; - - if (!__vlan_get_tag(skb, vlan_tag)) { - eth_hdr = (struct ethhdr *) skb->data; - memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); - skb_pull(skb, VLAN_HLEN); - } 
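/*
 * PVID policy for received frames, as implemented below: with no port
 * VLAN configured the extracted tag is kept as-is; a tag matching
 * adapter->pvid is the outer eswitch tag, so *vlan_tag is set to the
 * 0xffff sentinel and the frame takes the non-VLAN path; any other
 * tag is accepted only while QLCNIC_TAGGING_ENABLED is set, otherwise
 * -EINVAL tells the caller to drop the frame (counted as rxdropped).
 */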
- if (!adapter->pvid) - return 0; - - if (*vlan_tag == adapter->pvid) { - /* Outer vlan tag. Packet should follow non-vlan path */ - *vlan_tag = 0xffff; - return 0; - } - if (adapter->flags & QLCNIC_TAGGING_ENABLED) - return 0; - - return -EINVAL; -} - -static struct qlcnic_rx_buffer * -qlcnic_process_rcv(struct qlcnic_adapter *adapter, - struct qlcnic_host_sds_ring *sds_ring, - int ring, u64 sts_data0) -{ - struct net_device *netdev = adapter->netdev; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - struct qlcnic_rx_buffer *buffer; - struct sk_buff *skb; - struct qlcnic_host_rds_ring *rds_ring; - int index, length, cksum, pkt_offset; - u16 vid = 0xffff; - - if (unlikely(ring >= adapter->max_rds_rings)) - return NULL; - - rds_ring = &recv_ctx->rds_rings[ring]; - - index = qlcnic_get_sts_refhandle(sts_data0); - if (unlikely(index >= rds_ring->num_desc)) - return NULL; - - buffer = &rds_ring->rx_buf_arr[index]; - - length = qlcnic_get_sts_totallength(sts_data0); - cksum = qlcnic_get_sts_status(sts_data0); - pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); - - skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); - if (!skb) - return buffer; - - if (length > rds_ring->skb_size) - skb_put(skb, rds_ring->skb_size); - else - skb_put(skb, length); - - if (pkt_offset) - skb_pull(skb, pkt_offset); - - if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { - adapter->stats.rxdropped++; - dev_kfree_skb(skb); - return buffer; - } - - skb->protocol = eth_type_trans(skb, netdev); - - if (vid != 0xffff) - __vlan_hwaccel_put_tag(skb, vid); - - napi_gro_receive(&sds_ring->napi, skb); - - adapter->stats.rx_pkts++; - adapter->stats.rxbytes += length; - - return buffer; -} - -#define QLC_TCP_HDR_SIZE 20 -#define QLC_TCP_TS_OPTION_SIZE 12 -#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE) - -static struct qlcnic_rx_buffer * -qlcnic_process_lro(struct qlcnic_adapter *adapter, - struct qlcnic_host_sds_ring *sds_ring, - int ring, u64 sts_data0, u64 sts_data1) -{ - struct net_device *netdev = adapter->netdev; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - struct qlcnic_rx_buffer *buffer; - struct sk_buff *skb; - struct qlcnic_host_rds_ring *rds_ring; - struct iphdr *iph; - struct tcphdr *th; - bool push, timestamp; - int l2_hdr_offset, l4_hdr_offset; - int index; - u16 lro_length, length, data_offset; - u32 seq_number; - u16 vid = 0xffff; - - if (unlikely(ring > adapter->max_rds_rings)) - return NULL; - - rds_ring = &recv_ctx->rds_rings[ring]; - - index = qlcnic_get_lro_sts_refhandle(sts_data0); - if (unlikely(index > rds_ring->num_desc)) - return NULL; - - buffer = &rds_ring->rx_buf_arr[index]; - - timestamp = qlcnic_get_lro_sts_timestamp(sts_data0); - lro_length = qlcnic_get_lro_sts_length(sts_data0); - l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0); - l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0); - push = qlcnic_get_lro_sts_push_flag(sts_data0); - seq_number = qlcnic_get_lro_sts_seq_number(sts_data1); - - skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); - if (!skb) - return buffer; - - if (timestamp) - data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE; - else - data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE; - - skb_put(skb, lro_length + data_offset); - - skb_pull(skb, l2_hdr_offset); - - if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { - adapter->stats.rxdropped++; - dev_kfree_skb(skb); - return buffer; - } - - skb->protocol = eth_type_trans(skb, netdev); - - iph = (struct iphdr 
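/*
 * LRO fixup follows: firmware has already coalesced several TCP
 * segments into this buffer, so before the frame reaches the stack
 * the host rewrites the IP total length and checksum plus the TCP
 * sequence/PSH bits to describe the aggregate, roughly:
 *
 *     length       = ip_hdrlen + tcp_hdrlen + lro_length;
 *     iph->tot_len = htons(length);
 *     iph->check   = ip_fast_csum((unsigned char *)iph, iph->ihl);
 *     th->seq      = htonl(seq_number);
 */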
*)skb->data; - th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); - - length = (iph->ihl << 2) + (th->doff << 2) + lro_length; - iph->tot_len = htons(length); - iph->check = 0; - iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); - th->psh = push; - th->seq = htonl(seq_number); - - length = skb->len; - - if (vid != 0xffff) - __vlan_hwaccel_put_tag(skb, vid); - netif_receive_skb(skb); - - adapter->stats.lro_pkts++; - adapter->stats.lrobytes += length; - - return buffer; -} - -int -qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) -{ - struct qlcnic_adapter *adapter = sds_ring->adapter; - struct list_head *cur; - struct status_desc *desc; - struct qlcnic_rx_buffer *rxbuf; - u64 sts_data0, sts_data1; - - int count = 0; - int opcode, ring, desc_cnt; - u32 consumer = sds_ring->consumer; - - while (count < max) { - desc = &sds_ring->desc_head[consumer]; - sts_data0 = le64_to_cpu(desc->status_desc_data[0]); - - if (!(sts_data0 & STATUS_OWNER_HOST)) - break; - - desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); - opcode = qlcnic_get_sts_opcode(sts_data0); - - switch (opcode) { - case QLCNIC_RXPKT_DESC: - case QLCNIC_OLD_RXPKT_DESC: - case QLCNIC_SYN_OFFLOAD: - ring = qlcnic_get_sts_type(sts_data0); - rxbuf = qlcnic_process_rcv(adapter, sds_ring, - ring, sts_data0); - break; - case QLCNIC_LRO_DESC: - ring = qlcnic_get_lro_sts_type(sts_data0); - sts_data1 = le64_to_cpu(desc->status_desc_data[1]); - rxbuf = qlcnic_process_lro(adapter, sds_ring, - ring, sts_data0, sts_data1); - break; - case QLCNIC_RESPONSE_DESC: - qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); - default: - goto skip; - } - - WARN_ON(desc_cnt > 1); - - if (likely(rxbuf)) - list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); - else - adapter->stats.null_rxbuf++; - -skip: - for (; desc_cnt > 0; desc_cnt--) { - desc = &sds_ring->desc_head[consumer]; - desc->status_desc_data[0] = - cpu_to_le64(STATUS_OWNER_PHANTOM); - consumer = get_next_index(consumer, sds_ring->num_desc); - } - count++; - } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - struct qlcnic_host_rds_ring *rds_ring = - &adapter->recv_ctx->rds_rings[ring]; - - if (!list_empty(&sds_ring->free_list[ring])) { - list_for_each(cur, &sds_ring->free_list[ring]) { - rxbuf = list_entry(cur, - struct qlcnic_rx_buffer, list); - qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf); - } - spin_lock(&rds_ring->lock); - list_splice_tail_init(&sds_ring->free_list[ring], - &rds_ring->free_list); - spin_unlock(&rds_ring->lock); - } - - qlcnic_post_rx_buffers_nodb(adapter, rds_ring); - } - - if (count) { - sds_ring->consumer = consumer; - writel(consumer, sds_ring->crb_sts_consumer); - } - - return count; -} - -void -qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring) -{ - struct rcv_desc *pdesc; - struct qlcnic_rx_buffer *buffer; - int count = 0; - u32 producer; - struct list_head *head; - - producer = rds_ring->producer; - - head = &rds_ring->free_list; - while (!list_empty(head)) { - - buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); - - if (!buffer->skb) { - if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) - break; - } - - count++; - list_del(&buffer->list); - - /* make a rcv descriptor */ - pdesc = &rds_ring->desc_head[producer]; - pdesc->addr_buffer = cpu_to_le64(buffer->dma); - pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); - pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); - - producer = get_next_index(producer, rds_ring->num_desc); - } - - if (count) { - 
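/*
 * Doorbell convention used here and in the _nodb variant below: the
 * producer index advances modulo the ring size, and the value posted
 * to crb_rcv_producer is (producer - 1) masked with (num_desc - 1),
 * i.e. the last descriptor actually handed to hardware; num_desc is
 * a power of two, so the mask is a cheap modulo.
 */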
rds_ring->producer = producer; - writel((producer-1) & (rds_ring->num_desc-1), - rds_ring->crb_rcv_producer); - } -} - -static void -qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring) -{ - struct rcv_desc *pdesc; - struct qlcnic_rx_buffer *buffer; - int count = 0; - uint32_t producer; - struct list_head *head; - - if (!spin_trylock(&rds_ring->lock)) - return; - - producer = rds_ring->producer; - - head = &rds_ring->free_list; - while (!list_empty(head)) { - - buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); - - if (!buffer->skb) { - if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) - break; - } - - count++; - list_del(&buffer->list); - - /* make a rcv descriptor */ - pdesc = &rds_ring->desc_head[producer]; - pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); - pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); - pdesc->addr_buffer = cpu_to_le64(buffer->dma); - - producer = get_next_index(producer, rds_ring->num_desc); - } - - if (count) { - rds_ring->producer = producer; - writel((producer - 1) & (rds_ring->num_desc - 1), - rds_ring->crb_rcv_producer); - } - spin_unlock(&rds_ring->lock); -} - -static void dump_skb(struct sk_buff *skb) -{ - int i; - unsigned char *data = skb->data; - - printk(KERN_INFO "\n"); - for (i = 0; i < skb->len; i++) { - printk(KERN_INFO "%02x ", data[i]); - if ((i & 0x0f) == 8) - printk(KERN_INFO "\n"); - } -} - -void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, - struct qlcnic_host_sds_ring *sds_ring, - int ring, u64 sts_data0) -{ - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - struct sk_buff *skb; - struct qlcnic_host_rds_ring *rds_ring; - int index, length, cksum, pkt_offset; - - if (unlikely(ring >= adapter->max_rds_rings)) - return; - - rds_ring = &recv_ctx->rds_rings[ring]; - - index = qlcnic_get_sts_refhandle(sts_data0); - length = qlcnic_get_sts_totallength(sts_data0); - if (unlikely(index >= rds_ring->num_desc)) - return; - - cksum = qlcnic_get_sts_status(sts_data0); - pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); - - skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); - if (!skb) - return; - - if (length > rds_ring->skb_size) - skb_put(skb, rds_ring->skb_size); - else - skb_put(skb, length); - - if (pkt_offset) - skb_pull(skb, pkt_offset); - - if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr)) - adapter->diag_cnt++; - else - dump_skb(skb); - - dev_kfree_skb_any(skb); - adapter->stats.rx_pkts++; - adapter->stats.rxbytes += length; - - return; -} - -void -qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) -{ - struct qlcnic_adapter *adapter = sds_ring->adapter; - struct status_desc *desc; - u64 sts_data0; - int ring, opcode, desc_cnt; - - u32 consumer = sds_ring->consumer; - - desc = &sds_ring->desc_head[consumer]; - sts_data0 = le64_to_cpu(desc->status_desc_data[0]); - - if (!(sts_data0 & STATUS_OWNER_HOST)) - return; - - desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); - opcode = qlcnic_get_sts_opcode(sts_data0); - switch (opcode) { - case QLCNIC_RESPONSE_DESC: - qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); - break; - default: - ring = qlcnic_get_sts_type(sts_data0); - qlcnic_process_rcv_diag(adapter, sds_ring, ring, sts_data0); - break; - } - - for (; desc_cnt > 0; desc_cnt--) { - desc = &sds_ring->desc_head[consumer]; - desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); - consumer = get_next_index(consumer, sds_ring->num_desc); - } - - sds_ring->consumer = consumer; - writel(consumer, 
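/*
 * This store completes the status-ring handshake used throughout the
 * driver: descriptors stamped STATUS_OWNER_HOST are processed, handed
 * back by writing STATUS_OWNER_PHANTOM, and the advanced consumer
 * index is posted to crb_sts_consumer so firmware knows how far the
 * host has read.
 */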
sds_ring->crb_sts_consumer); -} - -void -qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2, - u8 alt_mac, u8 *mac) -{ - u32 mac_low, mac_high; - int i; - - mac_low = QLCRD32(adapter, off1); - mac_high = QLCRD32(adapter, off2); - - if (alt_mac) { - mac_low |= (mac_low >> 16) | (mac_high << 16); - mac_high >>= 16; - } - - for (i = 0; i < 2; i++) - mac[i] = (u8)(mac_high >> ((1 - i) * 8)); - for (i = 2; i < 6; i++) - mac[i] = (u8)(mac_low >> ((5 - i) * 8)); -} diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c deleted file mode 100644 index ec8ef72d38d3..000000000000 --- a/drivers/net/qlcnic/qlcnic_main.c +++ /dev/null @@ -1,4390 +0,0 @@ -/* - * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation - * - * See LICENSE.qlcnic for copyright and licensing details. - */ - -#include -#include -#include - -#include "qlcnic.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(QLCNIC_LINUX_VERSIONID); -MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME); - -char qlcnic_driver_name[] = "qlcnic"; -static const char qlcnic_driver_string[] = "QLogic 1/10 GbE " - "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID; - -static struct workqueue_struct *qlcnic_wq; -static int qlcnic_mac_learn; -module_param(qlcnic_mac_learn, int, 0444); -MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); - -static int use_msi = 1; -module_param(use_msi, int, 0444); -MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); - -static int use_msi_x = 1; -module_param(use_msi_x, int, 0444); -MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); - -static int auto_fw_reset = 1; -module_param(auto_fw_reset, int, 0644); -MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); - -static int load_fw_file; -module_param(load_fw_file, int, 0444); -MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); - -static int qlcnic_config_npars; -module_param(qlcnic_config_npars, int, 0444); -MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); - -static int __devinit qlcnic_probe(struct pci_dev *pdev, - const struct pci_device_id *ent); -static void __devexit qlcnic_remove(struct pci_dev *pdev); -static int qlcnic_open(struct net_device *netdev); -static int qlcnic_close(struct net_device *netdev); -static void qlcnic_tx_timeout(struct net_device *netdev); -static void qlcnic_attach_work(struct work_struct *work); -static void qlcnic_fwinit_work(struct work_struct *work); -static void qlcnic_fw_poll_work(struct work_struct *work); -static void qlcnic_schedule_work(struct qlcnic_adapter *adapter, - work_func_t func, int delay); -static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter); -static int qlcnic_poll(struct napi_struct *napi, int budget); -static int qlcnic_rx_poll(struct napi_struct *napi, int budget); -#ifdef CONFIG_NET_POLL_CONTROLLER -static void qlcnic_poll_controller(struct net_device *netdev); -#endif - -static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter); -static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter); -static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter); -static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); - -static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); -static void 
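/*
 * Note on qlcnic_fetch_mac() closing qlcnic_init.c above: the address
 * lives in two CRB words and is unpacked most-significant byte first,
 * mac[0..1] from the high word and mac[2..5] from the low one:
 *
 *     mac[i] = (u8)(mac_high >> ((1 - i) * 8));    i = 0, 1
 *     mac[i] = (u8)(mac_low  >> ((5 - i) * 8));    i = 2..5
 *
 * The alt_mac case first folds the register pair right by 16 bits
 * (mac_high's low half into mac_low) to select the alternate address
 * stored at that offset.
 */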
qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8); -static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); - -static irqreturn_t qlcnic_tmp_intr(int irq, void *data); -static irqreturn_t qlcnic_intr(int irq, void *data); -static irqreturn_t qlcnic_msi_intr(int irq, void *data); -static irqreturn_t qlcnic_msix_intr(int irq, void *data); - -static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev); -static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long); -static int qlcnic_start_firmware(struct qlcnic_adapter *); - -static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter); -static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); -static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); -static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); -static int qlcnicvf_start_firmware(struct qlcnic_adapter *); -static void qlcnic_set_netdev_features(struct qlcnic_adapter *, - struct qlcnic_esw_func_cfg *); -static void qlcnic_vlan_rx_add(struct net_device *, u16); -static void qlcnic_vlan_rx_del(struct net_device *, u16); - -/* PCI Device ID Table */ -#define ENTRY(device) \ - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ - .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} - -#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 - -static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = { - ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), - {0,} -}; - -MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl); - - -inline void -qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, - struct qlcnic_host_tx_ring *tx_ring) -{ - writel(tx_ring->producer, tx_ring->crb_cmd_producer); -} - -static const u32 msi_tgt_status[8] = { - ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, - ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, - ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, - ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 -}; - -static const -struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG; - -static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring) -{ - writel(0, sds_ring->crb_intr_mask); -} - -static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring) -{ - struct qlcnic_adapter *adapter = sds_ring->adapter; - - writel(0x1, sds_ring->crb_intr_mask); - - if (!QLCNIC_IS_MSI_FAMILY(adapter)) - writel(0xfbff, adapter->tgt_mask_reg); -} - -static int -qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count) -{ - int size = sizeof(struct qlcnic_host_sds_ring) * count; - - recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); - - return recv_ctx->sds_rings == NULL; -} - -static void -qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx) -{ - if (recv_ctx->sds_rings != NULL) - kfree(recv_ctx->sds_rings); - - recv_ctx->sds_rings = NULL; -} - -static int -qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) -{ - int ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) - return -ENOMEM; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - - if (ring == adapter->max_sds_rings - 1) - netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, - QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings); - else - netif_napi_add(netdev, &sds_ring->napi, - qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2); - } - - return 0; -} - -static void -qlcnic_napi_del(struct qlcnic_adapter *adapter) -{ 
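/*
 * Teardown counterpart of qlcnic_napi_add() above. Note the weight
 * split chosen at add time: the last SDS ring polls with qlcnic_poll
 * (tx completion + rx) at QLCNIC_NETDEV_WEIGHT divided by the ring
 * count, while every other ring runs rx-only qlcnic_rx_poll at double
 * weight, skewing the NAPI budget toward pure receive work.
 */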
- int ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - netif_napi_del(&sds_ring->napi); - } - - qlcnic_free_sds_rings(adapter->recv_ctx); -} - -static void -qlcnic_napi_enable(struct qlcnic_adapter *adapter) -{ - int ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) - return; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - napi_enable(&sds_ring->napi); - qlcnic_enable_int(sds_ring); - } -} - -static void -qlcnic_napi_disable(struct qlcnic_adapter *adapter) -{ - int ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) - return; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - qlcnic_disable_int(sds_ring); - napi_synchronize(&sds_ring->napi); - napi_disable(&sds_ring->napi); - } -} - -static void qlcnic_clear_stats(struct qlcnic_adapter *adapter) -{ - memset(&adapter->stats, 0, sizeof(adapter->stats)); -} - -static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable) -{ - u32 control; - int pos; - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - if (pos) { - pci_read_config_dword(pdev, pos, &control); - if (enable) - control |= PCI_MSIX_FLAGS_ENABLE; - else - control = 0; - pci_write_config_dword(pdev, pos, control); - } -} - -static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count) -{ - int i; - - for (i = 0; i < count; i++) - adapter->msix_entries[i].entry = i; -} - -static int -qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) -{ - u8 mac_addr[ETH_ALEN]; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - - if (qlcnic_get_mac_address(adapter, mac_addr) != 0) - return -EIO; - - memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); - memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); - memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); - - /* set station address */ - - if (!is_valid_ether_addr(netdev->perm_addr)) - dev_warn(&pdev->dev, "Bad MAC address %pM.\n", - netdev->dev_addr); - - return 0; -} - -static int qlcnic_set_mac(struct net_device *netdev, void *p) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct sockaddr *addr = p; - - if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED)) - return -EOPNOTSUPP; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; - - if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { - netif_device_detach(netdev); - qlcnic_napi_disable(adapter); - } - - memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - qlcnic_set_multi(adapter->netdev); - - if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { - netif_device_attach(netdev); - qlcnic_napi_enable(adapter); - } - return 0; -} - -static const struct net_device_ops qlcnic_netdev_ops = { - .ndo_open = qlcnic_open, - .ndo_stop = qlcnic_close, - .ndo_start_xmit = qlcnic_xmit_frame, - .ndo_get_stats = qlcnic_get_stats, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = qlcnic_set_multi, - .ndo_set_mac_address = qlcnic_set_mac, - .ndo_change_mtu = qlcnic_change_mtu, - .ndo_fix_features = qlcnic_fix_features, - .ndo_set_features = 
qlcnic_set_features, - .ndo_tx_timeout = qlcnic_tx_timeout, - .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add, - .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = qlcnic_poll_controller, -#endif -}; - -static struct qlcnic_nic_template qlcnic_ops = { - .config_bridged_mode = qlcnic_config_bridged_mode, - .config_led = qlcnic_config_led, - .start_firmware = qlcnic_start_firmware -}; - -static struct qlcnic_nic_template qlcnic_vf_ops = { - .config_bridged_mode = qlcnicvf_config_bridged_mode, - .config_led = qlcnicvf_config_led, - .start_firmware = qlcnicvf_start_firmware -}; - -static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) -{ - struct pci_dev *pdev = adapter->pdev; - int err = -1; - - adapter->max_sds_rings = 1; - adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED); - qlcnic_set_msix_bit(pdev, 0); - - if (adapter->msix_supported) { - enable_msix: - qlcnic_init_msix_entries(adapter, num_msix); - err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); - if (err == 0) { - adapter->flags |= QLCNIC_MSIX_ENABLED; - qlcnic_set_msix_bit(pdev, 1); - - adapter->max_sds_rings = num_msix; - - dev_info(&pdev->dev, "using msi-x interrupts\n"); - return err; - } - if (err > 0) { - num_msix = rounddown_pow_of_two(err); - if (num_msix) - goto enable_msix; - } - } - return err; -} - - -static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) -{ - const struct qlcnic_legacy_intr_set *legacy_intrp; - struct pci_dev *pdev = adapter->pdev; - - if (use_msi && !pci_enable_msi(pdev)) { - adapter->flags |= QLCNIC_MSI_ENABLED; - adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, - msi_tgt_status[adapter->ahw->pci_func]); - dev_info(&pdev->dev, "using msi interrupts\n"); - adapter->msix_entries[0].vector = pdev->irq; - return; - } - - legacy_intrp = &legacy_intr[adapter->ahw->pci_func]; - - adapter->int_vec_bit = legacy_intrp->int_vec_bit; - adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, - legacy_intrp->tgt_status_reg); - adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter, - legacy_intrp->tgt_mask_reg); - adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR); - - adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter, - ISR_INT_STATE_REG); - dev_info(&pdev->dev, "using legacy interrupts\n"); - adapter->msix_entries[0].vector = pdev->irq; -} - -static void -qlcnic_setup_intr(struct qlcnic_adapter *adapter) -{ - int num_msix; - - if (adapter->msix_supported) { - num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(), - QLCNIC_DEF_NUM_STS_DESC_RINGS)); - } else - num_msix = 1; - - if (!qlcnic_enable_msix(adapter, num_msix)) - return; - - qlcnic_enable_msi_legacy(adapter); -} - -static void -qlcnic_teardown_intr(struct qlcnic_adapter *adapter) -{ - if (adapter->flags & QLCNIC_MSIX_ENABLED) - pci_disable_msix(adapter->pdev); - if (adapter->flags & QLCNIC_MSI_ENABLED) - pci_disable_msi(adapter->pdev); -} - -static void -qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter) -{ - if (adapter->ahw->pci_base0 != NULL) - iounmap(adapter->ahw->pci_base0); -} - -static int -qlcnic_init_pci_info(struct qlcnic_adapter *adapter) -{ - struct qlcnic_pci_info *pci_info; - int i, ret = 0; - u8 pfn; - - pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL); - if (!pci_info) - return -ENOMEM; - - adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) * - QLCNIC_MAX_PCI_FUNC, GFP_KERNEL); - if (!adapter->npars) { - ret = -ENOMEM; - goto err_pci_info; - } - - adapter->eswitch = 
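/*
 * MSI-X negotiation in qlcnic_enable_msix() above: on partial failure
 * pci_enable_msix() returns the number of vectors it could grant, and
 * the driver rounds that down to a power of two and retries, so
 * max_sds_rings always ends up a power of two:
 *
 *     if (err > 0) {
 *         num_msix = rounddown_pow_of_two(err);
 *         if (num_msix)
 *             goto enable_msix;
 *     }
 */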
kzalloc(sizeof(struct qlcnic_eswitch) * - QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL); - if (!adapter->eswitch) { - ret = -ENOMEM; - goto err_npars; - } - - ret = qlcnic_get_pci_info(adapter, pci_info); - if (ret) - goto err_eswitch; - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - pfn = pci_info[i].id; - if (pfn > QLCNIC_MAX_PCI_FUNC) { - ret = QL_STATUS_INVALID_PARAM; - goto err_eswitch; - } - adapter->npars[pfn].active = (u8)pci_info[i].active; - adapter->npars[pfn].type = (u8)pci_info[i].type; - adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port; - adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw; - adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw; - } - - for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) - adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE; - - kfree(pci_info); - return 0; - -err_eswitch: - kfree(adapter->eswitch); - adapter->eswitch = NULL; -err_npars: - kfree(adapter->npars); - adapter->npars = NULL; -err_pci_info: - kfree(pci_info); - - return ret; -} - -static int -qlcnic_set_function_modes(struct qlcnic_adapter *adapter) -{ - u8 id; - u32 ref_count; - int i, ret = 1; - u32 data = QLCNIC_MGMT_FUNC; - void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; - - /* If other drivers are not in use set their privilege level */ - ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); - ret = qlcnic_api_lock(adapter); - if (ret) - goto err_lock; - - if (qlcnic_config_npars) { - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - id = i; - if (adapter->npars[i].type != QLCNIC_TYPE_NIC || - id == adapter->ahw->pci_func) - continue; - data |= (qlcnic_config_npars & - QLC_DEV_SET_DRV(0xf, id)); - } - } else { - data = readl(priv_op); - data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) | - (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, - adapter->ahw->pci_func)); - } - writel(data, priv_op); - qlcnic_api_unlock(adapter); -err_lock: - return ret; -} - -static void -qlcnic_check_vf(struct qlcnic_adapter *adapter) -{ - void __iomem *msix_base_addr; - void __iomem *priv_op; - u32 func; - u32 msix_base; - u32 op_mode, priv_level; - - /* Determine FW API version */ - adapter->fw_hal_version = readl(adapter->ahw->pci_base0 + - QLCNIC_FW_API); - - /* Find PCI function number */ - pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); - msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE; - msix_base = readl(msix_base_addr); - func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE; - adapter->ahw->pci_func = func; - - /* Determine function privilege level */ - priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; - op_mode = readl(priv_op); - if (op_mode == QLC_DEV_DRV_DEFAULT) - priv_level = QLCNIC_MGMT_FUNC; - else - priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); - - if (priv_level == QLCNIC_NON_PRIV_FUNC) { - adapter->op_mode = QLCNIC_NON_PRIV_FUNC; - dev_info(&adapter->pdev->dev, - "HAL Version: %d Non Privileged function\n", - adapter->fw_hal_version); - adapter->nic_ops = &qlcnic_vf_ops; - } else - adapter->nic_ops = &qlcnic_ops; -} - -static int -qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) -{ - void __iomem *mem_ptr0 = NULL; - resource_size_t mem_base; - unsigned long mem_len, pci_len0 = 0; - - struct pci_dev *pdev = adapter->pdev; - - /* remap phys address */ - mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ - mem_len = pci_resource_len(pdev, 0); - - if (mem_len == QLCNIC_PCI_2MB_SIZE) { - - mem_ptr0 = pci_ioremap_bar(pdev, 0); - if (mem_ptr0 == NULL) { - dev_err(&pdev->dev, "failed to map PCI bar 0\n"); - 
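/*
 * Mapping policy: BAR 0 must be exactly QLCNIC_PCI_2MB_SIZE or probe
 * fails, and the whole window is ioremapped once, with every CRB/OCM
 * access going through offsets from pci_base0. qlcnic_check_vf()
 * above also derives the PCI function number from the MSI-X table
 * offset found in config space, roughly:
 *
 *     func = (msix_table_off - msix_base) / QLCNIC_MSIX_TBL_PGSIZE;
 *
 * where msix_table_off names the dword read from
 * QLCNIC_MSIX_TABLE_OFFSET.
 */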
return -EIO; - } - pci_len0 = mem_len; - } else { - return -EIO; - } - - dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); - - adapter->ahw->pci_base0 = mem_ptr0; - adapter->ahw->pci_len0 = pci_len0; - - qlcnic_check_vf(adapter); - - adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter, - QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG( - adapter->ahw->pci_func))); - - return 0; -} - -static void get_brd_name(struct qlcnic_adapter *adapter, char *name) -{ - struct pci_dev *pdev = adapter->pdev; - int i, found = 0; - - for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { - if (qlcnic_boards[i].vendor == pdev->vendor && - qlcnic_boards[i].device == pdev->device && - qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor && - qlcnic_boards[i].sub_device == pdev->subsystem_device) { - sprintf(name, "%pM: %s" , - adapter->mac_addr, - qlcnic_boards[i].short_name); - found = 1; - break; - } - - } - - if (!found) - sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr); -} - -static void -qlcnic_check_options(struct qlcnic_adapter *adapter) -{ - u32 fw_major, fw_minor, fw_build, prev_fw_version; - struct pci_dev *pdev = adapter->pdev; - struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; - - prev_fw_version = adapter->fw_version; - - fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); - fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); - fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB); - - adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build); - - if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC) { - if (fw_dump->tmpl_hdr == NULL || - adapter->fw_version > prev_fw_version) { - if (fw_dump->tmpl_hdr) - vfree(fw_dump->tmpl_hdr); - if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) - dev_info(&pdev->dev, - "Supports FW dump capability\n"); - } - } - - dev_info(&pdev->dev, "firmware v%d.%d.%d\n", - fw_major, fw_minor, fw_build); - if (adapter->ahw->port_type == QLCNIC_XGBE) { - if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { - adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF; - adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF; - } else { - adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; - adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; - } - - adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; - adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; - - } else if (adapter->ahw->port_type == QLCNIC_GBE) { - adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; - adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; - adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; - adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; - } - - adapter->msix_supported = !!use_msi_x; - - adapter->num_txd = MAX_CMD_DESCRIPTORS; - - adapter->max_rds_rings = MAX_RDS_RINGS; -} - -static int -qlcnic_initialize_nic(struct qlcnic_adapter *adapter) -{ - int err; - struct qlcnic_info nic_info; - - err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func); - if (err) - return err; - - adapter->physical_port = (u8)nic_info.phys_port; - adapter->switch_mode = nic_info.switch_mode; - adapter->max_tx_ques = nic_info.max_tx_ques; - adapter->max_rx_ques = nic_info.max_rx_ques; - adapter->capabilities = nic_info.capabilities; - adapter->max_mac_filters = nic_info.max_mac_filters; - adapter->max_mtu = nic_info.max_mtu; - - if (adapter->capabilities & BIT_6) - adapter->flags |= QLCNIC_ESWITCH_ENABLED; - else - adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; - - return err; -} - -static void -qlcnic_set_vlan_config(struct qlcnic_adapter *adapter, - struct qlcnic_esw_func_cfg *esw_cfg) -{ - if (esw_cfg->discard_tagged) - 
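/*
 * Receive-ring sizing from qlcnic_check_options() above depends on
 * the port type: 10G ports take DEFAULT_RCV_DESCRIPTORS_10G, or the
 * smaller _VF figures when an eswitch manages the function, and 1G
 * ports take the _1G values, each with matching jumbo maxima; tx
 * descriptors stay fixed at MAX_CMD_DESCRIPTORS either way.
 */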
adapter->flags &= ~QLCNIC_TAGGING_ENABLED; - else - adapter->flags |= QLCNIC_TAGGING_ENABLED; - - if (esw_cfg->vlan_id) - adapter->pvid = esw_cfg->vlan_id; - else - adapter->pvid = 0; -} - -static void -qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - set_bit(vid, adapter->vlans); -} - -static void -qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - qlcnic_restore_indev_addr(netdev, NETDEV_DOWN); - clear_bit(vid, adapter->vlans); -} - -static void -qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, - struct qlcnic_esw_func_cfg *esw_cfg) -{ - adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED | - QLCNIC_PROMISC_DISABLED); - - if (esw_cfg->mac_anti_spoof) - adapter->flags |= QLCNIC_MACSPOOF; - - if (!esw_cfg->mac_override) - adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED; - - if (!esw_cfg->promisc_mode) - adapter->flags |= QLCNIC_PROMISC_DISABLED; - - qlcnic_set_netdev_features(adapter, esw_cfg); -} - -static int -qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) -{ - struct qlcnic_esw_func_cfg esw_cfg; - - if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) - return 0; - - esw_cfg.pci_func = adapter->ahw->pci_func; - if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg)) - return -EIO; - qlcnic_set_vlan_config(adapter, &esw_cfg); - qlcnic_set_eswitch_port_features(adapter, &esw_cfg); - - return 0; -} - -static void -qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, - struct qlcnic_esw_func_cfg *esw_cfg) -{ - struct net_device *netdev = adapter->netdev; - unsigned long features, vlan_features; - - features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_IPV6_CSUM | NETIF_F_GRO); - vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER); - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) { - features |= (NETIF_F_TSO | NETIF_F_TSO6); - vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); - } - - if (netdev->features & NETIF_F_LRO) - features |= NETIF_F_LRO; - - if (esw_cfg->offload_flags & BIT_0) { - netdev->features |= features; - if (!(esw_cfg->offload_flags & BIT_1)) - netdev->features &= ~NETIF_F_TSO; - if (!(esw_cfg->offload_flags & BIT_2)) - netdev->features &= ~NETIF_F_TSO6; - } else { - netdev->features &= ~features; - } - - netdev->vlan_features = (features & vlan_features); -} - -static int -qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter) -{ - void __iomem *priv_op; - u32 op_mode, priv_level; - int err = 0; - - err = qlcnic_initialize_nic(adapter); - if (err) - return err; - - if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED) - return 0; - - priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; - op_mode = readl(priv_op); - priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); - - if (op_mode == QLC_DEV_DRV_DEFAULT) - priv_level = QLCNIC_MGMT_FUNC; - else - priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); - - if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { - if (priv_level == QLCNIC_MGMT_FUNC) { - adapter->op_mode = QLCNIC_MGMT_FUNC; - err = qlcnic_init_pci_info(adapter); - if (err) - return err; - /* Set privilege level for other functions */ - qlcnic_set_function_modes(adapter); - dev_info(&adapter->pdev->dev, - "HAL Version: %d, Management function\n", - adapter->fw_hal_version); - } else if (priv_level == QLCNIC_PRIV_FUNC) { - adapter->op_mode = QLCNIC_PRIV_FUNC; - dev_info(&adapter->pdev->dev, - "HAL Version: %d, 
Privileged function\n", - adapter->fw_hal_version); - } - } - - adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; - - return err; -} - -static int -qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter) -{ - struct qlcnic_esw_func_cfg esw_cfg; - struct qlcnic_npar_info *npar; - u8 i; - - if (adapter->need_fw_reset) - return 0; - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - if (adapter->npars[i].type != QLCNIC_TYPE_NIC) - continue; - memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg)); - esw_cfg.pci_func = i; - esw_cfg.offload_flags = BIT_0; - esw_cfg.mac_override = BIT_0; - esw_cfg.promisc_mode = BIT_0; - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) - esw_cfg.offload_flags |= (BIT_1 | BIT_2); - if (qlcnic_config_switch_port(adapter, &esw_cfg)) - return -EIO; - npar = &adapter->npars[i]; - npar->pvid = esw_cfg.vlan_id; - npar->mac_override = esw_cfg.mac_override; - npar->mac_anti_spoof = esw_cfg.mac_anti_spoof; - npar->discard_tagged = esw_cfg.discard_tagged; - npar->promisc_mode = esw_cfg.promisc_mode; - npar->offload_flags = esw_cfg.offload_flags; - } - - return 0; -} - -static int -qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter, - struct qlcnic_npar_info *npar, int pci_func) -{ - struct qlcnic_esw_func_cfg esw_cfg; - esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS; - esw_cfg.pci_func = pci_func; - esw_cfg.vlan_id = npar->pvid; - esw_cfg.mac_override = npar->mac_override; - esw_cfg.discard_tagged = npar->discard_tagged; - esw_cfg.mac_anti_spoof = npar->mac_anti_spoof; - esw_cfg.offload_flags = npar->offload_flags; - esw_cfg.promisc_mode = npar->promisc_mode; - if (qlcnic_config_switch_port(adapter, &esw_cfg)) - return -EIO; - - esw_cfg.op_mode = QLCNIC_ADD_VLAN; - if (qlcnic_config_switch_port(adapter, &esw_cfg)) - return -EIO; - - return 0; -} - -static int -qlcnic_reset_npar_config(struct qlcnic_adapter *adapter) -{ - int i, err; - struct qlcnic_npar_info *npar; - struct qlcnic_info nic_info; - - if (!adapter->need_fw_reset) - return 0; - - /* Set the NPAR config data after FW reset */ - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - npar = &adapter->npars[i]; - if (npar->type != QLCNIC_TYPE_NIC) - continue; - err = qlcnic_get_nic_info(adapter, &nic_info, i); - if (err) - return err; - nic_info.min_tx_bw = npar->min_bw; - nic_info.max_tx_bw = npar->max_bw; - err = qlcnic_set_nic_info(adapter, &nic_info); - if (err) - return err; - - if (npar->enable_pm) { - err = qlcnic_config_port_mirroring(adapter, - npar->dest_npar, 1, i); - if (err) - return err; - } - err = qlcnic_reset_eswitch_config(adapter, npar, i); - if (err) - return err; - } - return 0; -} - -static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter) -{ - u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO; - u32 npar_state; - - if (adapter->op_mode == QLCNIC_MGMT_FUNC) - return 0; - - npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); - while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) { - msleep(1000); - npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); - } - if (!npar_opt_timeo) { - dev_err(&adapter->pdev->dev, - "Waiting for NPAR state to opertional timeout\n"); - return -EIO; - } - return 0; -} - -static int -qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter) -{ - int err; - - if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || - adapter->op_mode != QLCNIC_MGMT_FUNC) - return 0; - - err = qlcnic_set_default_offload_settings(adapter); - if (err) - return err; - - err = qlcnic_reset_npar_config(adapter); - if (err) - return err; - - 
qlcnic_dev_set_npar_ready(adapter); - - return err; -} - -static int -qlcnic_start_firmware(struct qlcnic_adapter *adapter) -{ - int err; - - err = qlcnic_can_start_firmware(adapter); - if (err < 0) - return err; - else if (!err) - goto check_fw_status; - - if (load_fw_file) - qlcnic_request_firmware(adapter); - else { - err = qlcnic_check_flash_fw_ver(adapter); - if (err) - goto err_out; - - adapter->fw_type = QLCNIC_FLASH_ROMIMAGE; - } - - err = qlcnic_need_fw_reset(adapter); - if (err == 0) - goto check_fw_status; - - err = qlcnic_pinit_from_rom(adapter); - if (err) - goto err_out; - - err = qlcnic_load_firmware(adapter); - if (err) - goto err_out; - - qlcnic_release_firmware(adapter); - QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION); - -check_fw_status: - err = qlcnic_check_fw_status(adapter); - if (err) - goto err_out; - - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); - qlcnic_idc_debug_info(adapter, 1); - - err = qlcnic_check_eswitch_mode(adapter); - if (err) { - dev_err(&adapter->pdev->dev, - "Memory allocation failed for eswitch\n"); - goto err_out; - } - err = qlcnic_set_mgmt_operations(adapter); - if (err) - goto err_out; - - qlcnic_check_options(adapter); - adapter->need_fw_reset = 0; - - qlcnic_release_firmware(adapter); - return 0; - -err_out: - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); - dev_err(&adapter->pdev->dev, "Device state set to failed\n"); - - qlcnic_release_firmware(adapter); - return err; -} - -static int -qlcnic_request_irq(struct qlcnic_adapter *adapter) -{ - irq_handler_t handler; - struct qlcnic_host_sds_ring *sds_ring; - int err, ring; - - unsigned long flags = 0; - struct net_device *netdev = adapter->netdev; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { - handler = qlcnic_tmp_intr; - if (!QLCNIC_IS_MSI_FAMILY(adapter)) - flags |= IRQF_SHARED; - - } else { - if (adapter->flags & QLCNIC_MSIX_ENABLED) - handler = qlcnic_msix_intr; - else if (adapter->flags & QLCNIC_MSI_ENABLED) - handler = qlcnic_msi_intr; - else { - flags |= IRQF_SHARED; - handler = qlcnic_intr; - } - } - adapter->irq = netdev->irq; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - sprintf(sds_ring->name, "%s[%d]", netdev->name, ring); - err = request_irq(sds_ring->irq, handler, - flags, sds_ring->name, sds_ring); - if (err) - return err; - } - - return 0; -} - -static void -qlcnic_free_irq(struct qlcnic_adapter *adapter) -{ - int ring; - struct qlcnic_host_sds_ring *sds_ring; - - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - free_irq(sds_ring->irq, sds_ring); - } -} - -static int -__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) -{ - int ring; - struct qlcnic_host_rds_ring *rds_ring; - - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) - return -EIO; - - if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) - return 0; - if (qlcnic_set_eswitch_port_config(adapter)) - return -EIO; - - if (qlcnic_fw_create_ctx(adapter)) - return -EIO; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &adapter->recv_ctx->rds_rings[ring]; - qlcnic_post_rx_buffers(adapter, rds_ring); - } - - qlcnic_set_multi(netdev); - qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu); - - adapter->ahw->linkup = 0; - - if (adapter->max_sds_rings > 1) - qlcnic_config_rss(adapter, 1); - - qlcnic_config_intr_coalesce(adapter); - 
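/*
 * Bring-up so far in __qlcnic_up(): firmware context created, rx
 * rings replenished, multicast list and MTU programmed, RSS enabled
 * whenever more than one SDS ring exists, interrupt coalescing set.
 * Hardware LRO and NAPI are switched on below, and the link event is
 * requested last so link state arrives once the rings can service it.
 */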
- if (netdev->features & NETIF_F_LRO) - qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED); - - qlcnic_napi_enable(adapter); - - qlcnic_linkevent_request(adapter, 1); - - adapter->reset_context = 0; - set_bit(__QLCNIC_DEV_UP, &adapter->state); - return 0; -} - -/* Usage: During resume and firmware recovery module.*/ - -static int -qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) -{ - int err = 0; - - rtnl_lock(); - if (netif_running(netdev)) - err = __qlcnic_up(adapter, netdev); - rtnl_unlock(); - - return err; -} - -static void -__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) -{ - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) - return; - - if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state)) - return; - - smp_mb(); - spin_lock(&adapter->tx_clean_lock); - netif_carrier_off(netdev); - netif_tx_disable(netdev); - - qlcnic_free_mac_list(adapter); - - if (adapter->fhash.fnum) - qlcnic_delete_lb_filters(adapter); - - qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE); - - qlcnic_napi_disable(adapter); - - qlcnic_fw_destroy_ctx(adapter); - - qlcnic_reset_rx_buffers_list(adapter); - qlcnic_release_tx_buffers(adapter); - spin_unlock(&adapter->tx_clean_lock); -} - -/* Usage: During suspend and firmware recovery module */ - -static void -qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) -{ - rtnl_lock(); - if (netif_running(netdev)) - __qlcnic_down(adapter, netdev); - rtnl_unlock(); - -} - -static int -qlcnic_attach(struct qlcnic_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - int err; - - if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) - return 0; - - err = qlcnic_napi_add(adapter, netdev); - if (err) - return err; - - err = qlcnic_alloc_sw_resources(adapter); - if (err) { - dev_err(&pdev->dev, "Error in setting sw resources\n"); - goto err_out_napi_del; - } - - err = qlcnic_alloc_hw_resources(adapter); - if (err) { - dev_err(&pdev->dev, "Error in setting hw resources\n"); - goto err_out_free_sw; - } - - err = qlcnic_request_irq(adapter); - if (err) { - dev_err(&pdev->dev, "failed to setup interrupt\n"); - goto err_out_free_hw; - } - - qlcnic_create_sysfs_entries(adapter); - - adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; - return 0; - -err_out_free_hw: - qlcnic_free_hw_resources(adapter); -err_out_free_sw: - qlcnic_free_sw_resources(adapter); -err_out_napi_del: - qlcnic_napi_del(adapter); - return err; -} - -static void -qlcnic_detach(struct qlcnic_adapter *adapter) -{ - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) - return; - - qlcnic_remove_sysfs_entries(adapter); - - qlcnic_free_hw_resources(adapter); - qlcnic_release_rx_buffers(adapter); - qlcnic_free_irq(adapter); - qlcnic_napi_del(adapter); - qlcnic_free_sw_resources(adapter); - - adapter->is_up = 0; -} - -void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_host_sds_ring *sds_ring; - int ring; - - clear_bit(__QLCNIC_DEV_UP, &adapter->state); - if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &adapter->recv_ctx->sds_rings[ring]; - qlcnic_disable_int(sds_ring); - } - } - - qlcnic_fw_destroy_ctx(adapter); - - qlcnic_detach(adapter); - - adapter->diag_test = 0; - adapter->max_sds_rings = max_sds_rings; - - if (qlcnic_attach(adapter)) - goto out; - - if (netif_running(netdev)) - __qlcnic_up(adapter, netdev); -out: - 
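/*
 * qlcnic_attach()/qlcnic_detach() above form the usual acquisition
 * ladder with goto-based unwind: napi -> sw resources -> hw resources
 * -> irq, torn down in reverse on any failure. is_up becomes
 * QLCNIC_ADAPTER_UP_MAGIC only after every step succeeds, which is
 * what lets both functions bail out early and stay idempotent.
 */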
netif_device_attach(netdev); -} - -static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter) -{ - int err = 0; - adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context), - GFP_KERNEL); - if (!adapter->ahw) { - dev_err(&adapter->pdev->dev, - "Failed to allocate recv ctx resources for adapter\n"); - err = -ENOMEM; - goto err_out; - } - adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context), - GFP_KERNEL); - if (!adapter->recv_ctx) { - dev_err(&adapter->pdev->dev, - "Failed to allocate recv ctx resources for adapter\n"); - kfree(adapter->ahw); - adapter->ahw = NULL; - err = -ENOMEM; - goto err_out; - } - /* Initialize interrupt coalesce parameters */ - adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT; - adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; - adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; -err_out: - return err; -} - -static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter) -{ - kfree(adapter->recv_ctx); - adapter->recv_ctx = NULL; - - if (adapter->ahw->fw_dump.tmpl_hdr) { - vfree(adapter->ahw->fw_dump.tmpl_hdr); - adapter->ahw->fw_dump.tmpl_hdr = NULL; - } - kfree(adapter->ahw); - adapter->ahw = NULL; -} - -int qlcnic_diag_alloc_res(struct net_device *netdev, int test) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_host_rds_ring *rds_ring; - int ring; - int ret; - - netif_device_detach(netdev); - - if (netif_running(netdev)) - __qlcnic_down(adapter, netdev); - - qlcnic_detach(adapter); - - adapter->max_sds_rings = 1; - adapter->diag_test = test; - - ret = qlcnic_attach(adapter); - if (ret) { - netif_device_attach(netdev); - return ret; - } - - ret = qlcnic_fw_create_ctx(adapter); - if (ret) { - qlcnic_detach(adapter); - netif_device_attach(netdev); - return ret; - } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &adapter->recv_ctx->rds_rings[ring]; - qlcnic_post_rx_buffers(adapter, rds_ring); - } - - if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &adapter->recv_ctx->sds_rings[ring]; - qlcnic_enable_int(sds_ring); - } - } - - if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) { - adapter->ahw->loopback_state = 0; - qlcnic_linkevent_request(adapter, 1); - } - - set_bit(__QLCNIC_DEV_UP, &adapter->state); - - return 0; -} - -/* Reset context in hardware only */ -static int -qlcnic_reset_hw_context(struct qlcnic_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return -EBUSY; - - netif_device_detach(netdev); - - qlcnic_down(adapter, netdev); - - qlcnic_up(adapter, netdev); - - netif_device_attach(netdev); - - clear_bit(__QLCNIC_RESETTING, &adapter->state); - return 0; -} - -int -qlcnic_reset_context(struct qlcnic_adapter *adapter) -{ - int err = 0; - struct net_device *netdev = adapter->netdev; - - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return -EBUSY; - - if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) { - - netif_device_detach(netdev); - - if (netif_running(netdev)) - __qlcnic_down(adapter, netdev); - - qlcnic_detach(adapter); - - if (netif_running(netdev)) { - err = qlcnic_attach(adapter); - if (!err) - __qlcnic_up(adapter, netdev); - } - - netif_device_attach(netdev); - } - - clear_bit(__QLCNIC_RESETTING, &adapter->state); - return err; -} - -static int -qlcnic_setup_netdev(struct qlcnic_adapter *adapter, - struct net_device 
*netdev, u8 pci_using_dac) -{ - int err; - struct pci_dev *pdev = adapter->pdev; - - adapter->mc_enabled = 0; - adapter->max_mc_count = 38; - - netdev->netdev_ops = &qlcnic_netdev_ops; - netdev->watchdog_timeo = 5*HZ; - - qlcnic_change_mtu(netdev, netdev->mtu); - - SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); - - netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) - netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; - if (pci_using_dac) - netdev->hw_features |= NETIF_F_HIGHDMA; - - netdev->vlan_features = netdev->hw_features; - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX) - netdev->hw_features |= NETIF_F_HW_VLAN_TX; - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) - netdev->hw_features |= NETIF_F_LRO; - - netdev->features |= netdev->hw_features | - NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; - - netdev->irq = adapter->msix_entries[0].vector; - - err = register_netdev(netdev); - if (err) { - dev_err(&pdev->dev, "failed to register net device\n"); - return err; - } - - return 0; -} - -static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac) -{ - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) - *pci_using_dac = 1; - else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) - *pci_using_dac = 0; - else { - dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n"); - return -EIO; - } - - return 0; -} - -static int -qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count) -{ - adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry), - GFP_KERNEL); - - if (adapter->msix_entries) - return 0; - - dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n"); - return -ENOMEM; -} - -static int __devinit -qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct net_device *netdev = NULL; - struct qlcnic_adapter *adapter = NULL; - int err; - uint8_t revision_id; - uint8_t pci_using_dac; - char brd_name[QLCNIC_MAX_BOARD_NAME_LEN]; - - err = pci_enable_device(pdev); - if (err) - return err; - - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - err = -ENODEV; - goto err_out_disable_pdev; - } - - err = qlcnic_set_dma_mask(pdev, &pci_using_dac); - if (err) - goto err_out_disable_pdev; - - err = pci_request_regions(pdev, qlcnic_driver_name); - if (err) - goto err_out_disable_pdev; - - pci_set_master(pdev); - pci_enable_pcie_error_reporting(pdev); - - netdev = alloc_etherdev(sizeof(struct qlcnic_adapter)); - if (!netdev) { - dev_err(&pdev->dev, "failed to allocate net_device\n"); - err = -ENOMEM; - goto err_out_free_res; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - - if (qlcnic_alloc_adapter_resources(adapter)) - goto err_out_free_netdev; - - adapter->dev_rst_time = jiffies; - revision_id = pdev->revision; - adapter->ahw->revision_id = revision_id; - adapter->mac_learn = qlcnic_mac_learn; - - rwlock_init(&adapter->ahw->crb_lock); - mutex_init(&adapter->ahw->mem_lock); - - spin_lock_init(&adapter->tx_clean_lock); - INIT_LIST_HEAD(&adapter->mac_list); - - err = qlcnic_setup_pci_map(adapter); - if (err) - goto err_out_free_hw; - - /* This will be reset for mezz cards */ - adapter->portnum = adapter->ahw->pci_func; - - err = qlcnic_get_board_info(adapter); - if (err) { - dev_err(&pdev->dev, "Error getting board config info.\n"); 
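/*
 * DMA policy from qlcnic_set_dma_mask() above: a 64-bit mask is tried
 * for both streaming and consistent mappings, with a 32-bit fallback,
 * and probe aborts if neither sticks; pci_using_dac then gates
 * NETIF_F_HIGHDMA in qlcnic_setup_netdev():
 *
 *     if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
 *         !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
 *         *pci_using_dac = 1;
 */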
- goto err_out_iounmap; - } - - err = qlcnic_setup_idc_param(adapter); - if (err) - goto err_out_iounmap; - - adapter->flags |= QLCNIC_NEED_FLR; - - err = adapter->nic_ops->start_firmware(adapter); - if (err) { - dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); - goto err_out_decr_ref; - } - - if (qlcnic_read_mac_addr(adapter)) - dev_warn(&pdev->dev, "failed to read mac addr\n"); - - if (adapter->portnum == 0) { - get_brd_name(adapter, brd_name); - - pr_info("%s: %s Board Chip rev 0x%x\n", - module_name(THIS_MODULE), - brd_name, adapter->ahw->revision_id); - } - - qlcnic_clear_stats(adapter); - - err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques); - if (err) - goto err_out_decr_ref; - - qlcnic_setup_intr(adapter); - - err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac); - if (err) - goto err_out_disable_msi; - - pci_set_drvdata(pdev, adapter); - - qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); - - switch (adapter->ahw->port_type) { - case QLCNIC_GBE: - dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", - adapter->netdev->name); - break; - case QLCNIC_XGBE: - dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", - adapter->netdev->name); - break; - } - - if (adapter->mac_learn) - qlcnic_alloc_lb_filters_mem(adapter); - - qlcnic_create_diag_entries(adapter); - - return 0; - -err_out_disable_msi: - qlcnic_teardown_intr(adapter); - kfree(adapter->msix_entries); - -err_out_decr_ref: - qlcnic_clr_all_drv_state(adapter, 0); - -err_out_iounmap: - qlcnic_cleanup_pci_map(adapter); - -err_out_free_hw: - qlcnic_free_adapter_resources(adapter); - -err_out_free_netdev: - free_netdev(netdev); - -err_out_free_res: - pci_release_regions(pdev); - -err_out_disable_pdev: - pci_set_drvdata(pdev, NULL); - pci_disable_device(pdev); - return err; -} - -static void __devexit qlcnic_remove(struct pci_dev *pdev) -{ - struct qlcnic_adapter *adapter; - struct net_device *netdev; - - adapter = pci_get_drvdata(pdev); - if (adapter == NULL) - return; - - netdev = adapter->netdev; - - qlcnic_cancel_fw_work(adapter); - - unregister_netdev(netdev); - - qlcnic_detach(adapter); - - if (adapter->npars != NULL) - kfree(adapter->npars); - if (adapter->eswitch != NULL) - kfree(adapter->eswitch); - - qlcnic_clr_all_drv_state(adapter, 0); - - clear_bit(__QLCNIC_RESETTING, &adapter->state); - - qlcnic_free_lb_filters_mem(adapter); - - qlcnic_teardown_intr(adapter); - kfree(adapter->msix_entries); - - qlcnic_remove_diag_entries(adapter); - - qlcnic_cleanup_pci_map(adapter); - - qlcnic_release_firmware(adapter); - - pci_disable_pcie_error_reporting(pdev); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - - qlcnic_free_adapter_resources(adapter); - free_netdev(netdev); -} -static int __qlcnic_shutdown(struct pci_dev *pdev) -{ - struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - int retval; - - netif_device_detach(netdev); - - qlcnic_cancel_fw_work(adapter); - - if (netif_running(netdev)) - qlcnic_down(adapter, netdev); - - qlcnic_clr_all_drv_state(adapter, 0); - - clear_bit(__QLCNIC_RESETTING, &adapter->state); - - retval = pci_save_state(pdev); - if (retval) - return retval; - - if (qlcnic_wol_supported(adapter)) { - pci_enable_wake(pdev, PCI_D3cold, 1); - pci_enable_wake(pdev, PCI_D3hot, 1); - } - - return 0; -} - -static void qlcnic_shutdown(struct pci_dev *pdev) -{ - if (__qlcnic_shutdown(pdev)) - return; - - pci_disable_device(pdev); -} - -#ifdef CONFIG_PM -static int 
-qlcnic_suspend(struct pci_dev *pdev, pm_message_t state) -{ - int retval; - - retval = __qlcnic_shutdown(pdev); - if (retval) - return retval; - - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return 0; -} - -static int -qlcnic_resume(struct pci_dev *pdev) -{ - struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - int err; - - err = pci_enable_device(pdev); - if (err) - return err; - - pci_set_power_state(pdev, PCI_D0); - pci_set_master(pdev); - pci_restore_state(pdev); - - err = adapter->nic_ops->start_firmware(adapter); - if (err) { - dev_err(&pdev->dev, "failed to start firmware\n"); - return err; - } - - if (netif_running(netdev)) { - err = qlcnic_up(adapter, netdev); - if (err) - goto done; - - qlcnic_restore_indev_addr(netdev, NETDEV_UP); - } -done: - netif_device_attach(netdev); - qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); - return 0; -} -#endif - -static int qlcnic_open(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - int err; - - netif_carrier_off(netdev); - - err = qlcnic_attach(adapter); - if (err) - return err; - - err = __qlcnic_up(adapter, netdev); - if (err) - goto err_out; - - netif_start_queue(netdev); - - return 0; - -err_out: - qlcnic_detach(adapter); - return err; -} - -/* - * qlcnic_close - Disables a network interface entry point - */ -static int qlcnic_close(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - __qlcnic_down(adapter, netdev); - return 0; -} - -void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter) -{ - void *head; - int i; - - if (adapter->fhash.fmax && adapter->fhash.fhead) - return; - - spin_lock_init(&adapter->mac_learn_lock); - - head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head), - GFP_KERNEL); - if (!head) - return; - - adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS; - adapter->fhash.fhead = head; - - for (i = 0; i < adapter->fhash.fmax; i++) - INIT_HLIST_HEAD(&adapter->fhash.fhead[i]); -} - -static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter) -{ - if (adapter->fhash.fmax && adapter->fhash.fhead) - kfree(adapter->fhash.fhead); - - adapter->fhash.fhead = NULL; - adapter->fhash.fmax = 0; -} - -static void qlcnic_change_filter(struct qlcnic_adapter *adapter, - u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring) -{ - struct cmd_desc_type0 *hwdesc; - struct qlcnic_nic_req *req; - struct qlcnic_mac_req *mac_req; - struct qlcnic_vlan_req *vlan_req; - u32 producer; - u64 word; - - producer = tx_ring->producer; - hwdesc = &tx_ring->desc_head[tx_ring->producer]; - - req = (struct qlcnic_nic_req *)hwdesc; - memset(req, 0, sizeof(struct qlcnic_nic_req)); - req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); - - word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16); - req->req_hdr = cpu_to_le64(word); - - mac_req = (struct qlcnic_mac_req *)&(req->words[0]); - mac_req->op = vlan_id ? 
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; - memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN); - - vlan_req = (struct qlcnic_vlan_req *)&req->words[1]; - vlan_req->vlan_id = vlan_id; - - tx_ring->producer = get_next_index(producer, tx_ring->num_desc); - smp_mb(); -} - -#define QLCNIC_MAC_HASH(MAC)\ - ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25)) - -static void -qlcnic_send_filter(struct qlcnic_adapter *adapter, - struct qlcnic_host_tx_ring *tx_ring, - struct cmd_desc_type0 *first_desc, - struct sk_buff *skb) -{ - struct ethhdr *phdr = (struct ethhdr *)(skb->data); - struct qlcnic_filter *fil, *tmp_fil; - struct hlist_node *tmp_hnode, *n; - struct hlist_head *head; - u64 src_addr = 0; - __le16 vlan_id = 0; - u8 hindex; - - if (!compare_ether_addr(phdr->h_source, adapter->mac_addr)) - return; - - if (adapter->fhash.fnum >= adapter->fhash.fmax) - return; - - /* Only NPAR capable devices support vlan based learning*/ - if (adapter->flags & QLCNIC_ESWITCH_ENABLED) - vlan_id = first_desc->vlan_TCI; - memcpy(&src_addr, phdr->h_source, ETH_ALEN); - hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1); - head = &(adapter->fhash.fhead[hindex]); - - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { - if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && - tmp_fil->vlan_id == vlan_id) { - - if (jiffies > - (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) - qlcnic_change_filter(adapter, src_addr, vlan_id, - tx_ring); - tmp_fil->ftime = jiffies; - return; - } - } - - fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); - if (!fil) - return; - - qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring); - - fil->ftime = jiffies; - fil->vlan_id = vlan_id; - memcpy(fil->faddr, &src_addr, ETH_ALEN); - spin_lock(&adapter->mac_learn_lock); - hlist_add_head(&(fil->fnode), head); - adapter->fhash.fnum++; - spin_unlock(&adapter->mac_learn_lock); -} - -static int -qlcnic_tx_pkt(struct qlcnic_adapter *adapter, - struct cmd_desc_type0 *first_desc, - struct sk_buff *skb) -{ - u8 opcode = 0, hdr_len = 0; - u16 flags = 0, vlan_tci = 0; - int copied, offset, copy_len; - struct cmd_desc_type0 *hwdesc; - struct vlan_ethhdr *vh; - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; - u16 protocol = ntohs(skb->protocol); - u32 producer = tx_ring->producer; - - if (protocol == ETH_P_8021Q) { - vh = (struct vlan_ethhdr *)skb->data; - flags = FLAGS_VLAN_TAGGED; - vlan_tci = vh->h_vlan_TCI; - } else if (vlan_tx_tag_present(skb)) { - flags = FLAGS_VLAN_OOB; - vlan_tci = vlan_tx_tag_get(skb); - } - if (unlikely(adapter->pvid)) { - if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) - return -EIO; - if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED)) - goto set_flags; - - flags = FLAGS_VLAN_OOB; - vlan_tci = adapter->pvid; - } -set_flags: - qlcnic_set_tx_vlan_tci(first_desc, vlan_tci); - qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); - - if (*(skb->data) & BIT_0) { - flags |= BIT_0; - memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); - } - opcode = TX_ETHER_PKT; - if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && - skb_shinfo(skb)->gso_size > 0) { - - hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - - first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); - first_desc->total_hdr_length = hdr_len; - - opcode = (protocol == ETH_P_IPV6) ? 
TX_TCP_LSO6 : TX_TCP_LSO; - - /* For LSO, we need to copy the MAC/IP/TCP headers into - * the descriptor ring */ - copied = 0; - offset = 2; - - if (flags & FLAGS_VLAN_OOB) { - first_desc->total_hdr_length += VLAN_HLEN; - first_desc->tcp_hdr_offset = VLAN_HLEN; - first_desc->ip_hdr_offset = VLAN_HLEN; - /* Only in case of TSO on vlan device */ - flags |= FLAGS_VLAN_TAGGED; - - /* Create a TSO vlan header template for firmware */ - - hwdesc = &tx_ring->desc_head[producer]; - tx_ring->cmd_buf_arr[producer].skb = NULL; - - copy_len = min((int)sizeof(struct cmd_desc_type0) - - offset, hdr_len + VLAN_HLEN); - - vh = (struct vlan_ethhdr *)((char *) hwdesc + 2); - skb_copy_from_linear_data(skb, vh, 12); - vh->h_vlan_proto = htons(ETH_P_8021Q); - vh->h_vlan_TCI = htons(vlan_tci); - - skb_copy_from_linear_data_offset(skb, 12, - (char *)vh + 16, copy_len - 16); - - copied = copy_len - VLAN_HLEN; - offset = 0; - - producer = get_next_index(producer, tx_ring->num_desc); - } - - while (copied < hdr_len) { - - copy_len = min((int)sizeof(struct cmd_desc_type0) - - offset, (hdr_len - copied)); - - hwdesc = &tx_ring->desc_head[producer]; - tx_ring->cmd_buf_arr[producer].skb = NULL; - - skb_copy_from_linear_data_offset(skb, copied, - (char *) hwdesc + offset, copy_len); - - copied += copy_len; - offset = 0; - - producer = get_next_index(producer, tx_ring->num_desc); - } - - tx_ring->producer = producer; - smp_mb(); - adapter->stats.lso_frames++; - - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - u8 l4proto; - - if (protocol == ETH_P_IP) { - l4proto = ip_hdr(skb)->protocol; - - if (l4proto == IPPROTO_TCP) - opcode = TX_TCP_PKT; - else if (l4proto == IPPROTO_UDP) - opcode = TX_UDP_PKT; - } else if (protocol == ETH_P_IPV6) { - l4proto = ipv6_hdr(skb)->nexthdr; - - if (l4proto == IPPROTO_TCP) - opcode = TX_TCPV6_PKT; - else if (l4proto == IPPROTO_UDP) - opcode = TX_UDPV6_PKT; - } - } - first_desc->tcp_hdr_offset += skb_transport_offset(skb); - first_desc->ip_hdr_offset += skb_network_offset(skb); - qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); - - return 0; -} - -static int -qlcnic_map_tx_skb(struct pci_dev *pdev, - struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf) -{ - struct qlcnic_skb_frag *nf; - struct skb_frag_struct *frag; - int i, nr_frags; - dma_addr_t map; - - nr_frags = skb_shinfo(skb)->nr_frags; - nf = &pbuf->frag_array[0]; - - map = pci_map_single(pdev, skb->data, - skb_headlen(skb), PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, map)) - goto out_err; - - nf->dma = map; - nf->length = skb_headlen(skb); - - for (i = 0; i < nr_frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; - nf = &pbuf->frag_array[i+1]; - - map = pci_map_page(pdev, frag->page, frag->page_offset, - frag->size, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, map)) - goto unwind; - - nf->dma = map; - nf->length = frag->size; - } - - return 0; - -unwind: - while (--i >= 0) { - nf = &pbuf->frag_array[i+1]; - pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); - } - - nf = &pbuf->frag_array[0]; - pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); - -out_err: - return -ENOMEM; -} - -static void -qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb, - struct qlcnic_cmd_buffer *pbuf) -{ - struct qlcnic_skb_frag *nf = &pbuf->frag_array[0]; - int nr_frags = skb_shinfo(skb)->nr_frags; - int i; - - for (i = 0; i < nr_frags; i++) { - nf = &pbuf->frag_array[i+1]; - pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); - } - - nf = &pbuf->frag_array[0]; - 
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); - pbuf->skb = NULL; -} - -static inline void -qlcnic_clear_cmddesc(u64 *desc) -{ - desc[0] = 0ULL; - desc[2] = 0ULL; - desc[7] = 0ULL; -} - -netdev_tx_t -qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; - struct qlcnic_cmd_buffer *pbuf; - struct qlcnic_skb_frag *buffrag; - struct cmd_desc_type0 *hwdesc, *first_desc; - struct pci_dev *pdev; - struct ethhdr *phdr; - int delta = 0; - int i, k; - - u32 producer; - int frag_count; - u32 num_txd = tx_ring->num_desc; - - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { - netif_stop_queue(netdev); - return NETDEV_TX_BUSY; - } - - if (adapter->flags & QLCNIC_MACSPOOF) { - phdr = (struct ethhdr *)skb->data; - if (compare_ether_addr(phdr->h_source, - adapter->mac_addr)) - goto drop_packet; - } - - frag_count = skb_shinfo(skb)->nr_frags + 1; - /* 14 frags supported for normal packet and - * 32 frags supported for TSO packet - */ - if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) { - - for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++) - delta += skb_shinfo(skb)->frags[i].size; - - if (!__pskb_pull_tail(skb, delta)) - goto drop_packet; - - frag_count = 1 + skb_shinfo(skb)->nr_frags; - } - - if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { - netif_stop_queue(netdev); - if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) - netif_start_queue(netdev); - else { - adapter->stats.xmit_off++; - return NETDEV_TX_BUSY; - } - } - - producer = tx_ring->producer; - pbuf = &tx_ring->cmd_buf_arr[producer]; - - pdev = adapter->pdev; - - first_desc = hwdesc = &tx_ring->desc_head[producer]; - qlcnic_clear_cmddesc((u64 *)hwdesc); - - if (qlcnic_map_tx_skb(pdev, skb, pbuf)) { - adapter->stats.tx_dma_map_error++; - goto drop_packet; - } - - pbuf->skb = skb; - pbuf->frag_count = frag_count; - - qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len); - qlcnic_set_tx_port(first_desc, adapter->portnum); - - for (i = 0; i < frag_count; i++) { - - k = i % 4; - - if ((k == 0) && (i > 0)) { - /* move to next desc.*/ - producer = get_next_index(producer, num_txd); - hwdesc = &tx_ring->desc_head[producer]; - qlcnic_clear_cmddesc((u64 *)hwdesc); - tx_ring->cmd_buf_arr[producer].skb = NULL; - } - - buffrag = &pbuf->frag_array[i]; - - hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); - switch (k) { - case 0: - hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); - break; - case 1: - hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); - break; - case 2: - hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma); - break; - case 3: - hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); - break; - } - } - - tx_ring->producer = get_next_index(producer, num_txd); - smp_mb(); - - if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb))) - goto unwind_buff; - - if (adapter->mac_learn) - qlcnic_send_filter(adapter, tx_ring, first_desc, skb); - - adapter->stats.txbytes += skb->len; - adapter->stats.xmitcalled++; - - qlcnic_update_cmd_producer(adapter, tx_ring); - - return NETDEV_TX_OK; - -unwind_buff: - qlcnic_unmap_buffers(pdev, skb, pbuf); -drop_packet: - adapter->stats.txdropped++; - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; -} - -static int qlcnic_check_temp(struct qlcnic_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - u32 temp, temp_state, temp_val; - int rv = 0; - - temp = QLCRD32(adapter, CRB_TEMP_STATE); - - temp_state = 
qlcnic_get_temp_state(temp); - temp_val = qlcnic_get_temp_val(temp); - - if (temp_state == QLCNIC_TEMP_PANIC) { - dev_err(&netdev->dev, - "Device temperature %d degrees C exceeds" - " maximum allowed. Hardware has been shut down.\n", - temp_val); - rv = 1; - } else if (temp_state == QLCNIC_TEMP_WARN) { - if (adapter->temp == QLCNIC_TEMP_NORMAL) { - dev_err(&netdev->dev, - "Device temperature %d degrees C " - "exceeds operating range." - " Immediate action needed.\n", - temp_val); - } - } else { - if (adapter->temp == QLCNIC_TEMP_WARN) { - dev_info(&netdev->dev, - "Device temperature is now %d degrees C" - " in normal range.\n", temp_val); - } - } - adapter->temp = temp_state; - return rv; -} - -void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) -{ - struct net_device *netdev = adapter->netdev; - - if (adapter->ahw->linkup && !linkup) { - netdev_info(netdev, "NIC Link is down\n"); - adapter->ahw->linkup = 0; - if (netif_running(netdev)) { - netif_carrier_off(netdev); - netif_stop_queue(netdev); - } - } else if (!adapter->ahw->linkup && linkup) { - netdev_info(netdev, "NIC Link is up\n"); - adapter->ahw->linkup = 1; - if (netif_running(netdev)) { - netif_carrier_on(netdev); - netif_wake_queue(netdev); - } - } -} - -static void qlcnic_tx_timeout(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - if (test_bit(__QLCNIC_RESETTING, &adapter->state)) - return; - - dev_err(&netdev->dev, "transmit timeout, resetting.\n"); - - if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) - adapter->need_fw_reset = 1; - else - adapter->reset_context = 1; -} - -static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct net_device_stats *stats = &netdev->stats; - - stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; - stats->tx_packets = adapter->stats.xmitfinished; - stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; - stats->tx_bytes = adapter->stats.txbytes; - stats->rx_dropped = adapter->stats.rxdropped; - stats->tx_dropped = adapter->stats.txdropped; - - return stats; -} - -static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter) -{ - u32 status; - - status = readl(adapter->isr_int_vec); - - if (!(status & adapter->int_vec_bit)) - return IRQ_NONE; - - /* check interrupt state machine, to be sure */ - status = readl(adapter->crb_int_state_reg); - if (!ISR_LEGACY_INT_TRIGGERED(status)) - return IRQ_NONE; - - writel(0xffffffff, adapter->tgt_status_reg); - /* read twice to ensure write is flushed */ - readl(adapter->isr_int_vec); - readl(adapter->isr_int_vec); - - return IRQ_HANDLED; -} - -static irqreturn_t qlcnic_tmp_intr(int irq, void *data) -{ - struct qlcnic_host_sds_ring *sds_ring = data; - struct qlcnic_adapter *adapter = sds_ring->adapter; - - if (adapter->flags & QLCNIC_MSIX_ENABLED) - goto done; - else if (adapter->flags & QLCNIC_MSI_ENABLED) { - writel(0xffffffff, adapter->tgt_status_reg); - goto done; - } - - if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE) - return IRQ_NONE; - -done: - adapter->diag_cnt++; - qlcnic_enable_int(sds_ring); - return IRQ_HANDLED; -} - -static irqreturn_t qlcnic_intr(int irq, void *data) -{ - struct qlcnic_host_sds_ring *sds_ring = data; - struct qlcnic_adapter *adapter = sds_ring->adapter; - - if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE) - return IRQ_NONE; - - napi_schedule(&sds_ring->napi); - - return IRQ_HANDLED; -} - -static irqreturn_t qlcnic_msi_intr(int 
irq, void *data) -{ - struct qlcnic_host_sds_ring *sds_ring = data; - struct qlcnic_adapter *adapter = sds_ring->adapter; - - /* clear interrupt */ - writel(0xffffffff, adapter->tgt_status_reg); - - napi_schedule(&sds_ring->napi); - return IRQ_HANDLED; -} - -static irqreturn_t qlcnic_msix_intr(int irq, void *data) -{ - struct qlcnic_host_sds_ring *sds_ring = data; - - napi_schedule(&sds_ring->napi); - return IRQ_HANDLED; -} - -static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) -{ - u32 sw_consumer, hw_consumer; - int count = 0, i; - struct qlcnic_cmd_buffer *buffer; - struct pci_dev *pdev = adapter->pdev; - struct net_device *netdev = adapter->netdev; - struct qlcnic_skb_frag *frag; - int done; - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; - - if (!spin_trylock(&adapter->tx_clean_lock)) - return 1; - - sw_consumer = tx_ring->sw_consumer; - hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); - - while (sw_consumer != hw_consumer) { - buffer = &tx_ring->cmd_buf_arr[sw_consumer]; - if (buffer->skb) { - frag = &buffer->frag_array[0]; - pci_unmap_single(pdev, frag->dma, frag->length, - PCI_DMA_TODEVICE); - frag->dma = 0ULL; - for (i = 1; i < buffer->frag_count; i++) { - frag++; - pci_unmap_page(pdev, frag->dma, frag->length, - PCI_DMA_TODEVICE); - frag->dma = 0ULL; - } - - adapter->stats.xmitfinished++; - dev_kfree_skb_any(buffer->skb); - buffer->skb = NULL; - } - - sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); - if (++count >= MAX_STATUS_HANDLE) - break; - } - - if (count && netif_running(netdev)) { - tx_ring->sw_consumer = sw_consumer; - - smp_mb(); - - if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { - if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { - netif_wake_queue(netdev); - adapter->stats.xmit_on++; - } - } - adapter->tx_timeo_cnt = 0; - } - /* - * If everything is freed up to consumer then check if the ring is full - * If the ring is full then check if more needs to be freed and - * schedule the call back again. - * - * This happens when there are 2 CPUs. One could be freeing and the - * other filling it. If the ring is full when we get out of here and - * the card has already interrupted the host then the host can miss the - * interrupt. - * - * There is still a possible race condition and the host could miss an - * interrupt. The card has to take care of this. 
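[Editorial aside, not part of the patch.] The reclaim loop in qlcnic_process_cmd_ring() above advances the software consumer index toward the hardware-reported consumer, unmapping and freeing one TX buffer per slot; the long comment here explains why hw_consumer is re-read at the end. Below is a user-space sketch of just the index walk. It assumes a power-of-two ring advanced with a mask, which is one plausible reading of get_next_index(), not necessarily the driver's exact implementation.

/* User-space sketch of the consumer-side reclaim loop in
 * qlcnic_process_cmd_ring(): walk sw_consumer toward the
 * hardware-reported hw_consumer, freeing one buffer per slot.
 * Assumes a power-of-two ring so the index wraps with a mask. */
#include <stdio.h>

#define RING_SIZE 8                    /* must be a power of two here */

static unsigned int next_index(unsigned int i)
{
	return (i + 1) & (RING_SIZE - 1);
}

int main(void)
{
	unsigned int sw_consumer = 6;  /* where software left off */
	unsigned int hw_consumer = 2;  /* hardware has wrapped past slot 7 */
	unsigned int count = 0;

	while (sw_consumer != hw_consumer) {
		printf("reclaim slot %u\n", sw_consumer);
		sw_consumer = next_index(sw_consumer);
		count++;
	}
	printf("reclaimed %u descriptors\n", count);
	return 0;
}

With these sample indices the loop reclaims slots 6, 7, 0, 1: the wrap across the end of the ring is handled entirely by the mask, which is why the two indices are compared for equality rather than with less-than.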
- */ - hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); - done = (sw_consumer == hw_consumer); - spin_unlock(&adapter->tx_clean_lock); - - return done; -} - -static int qlcnic_poll(struct napi_struct *napi, int budget) -{ - struct qlcnic_host_sds_ring *sds_ring = - container_of(napi, struct qlcnic_host_sds_ring, napi); - - struct qlcnic_adapter *adapter = sds_ring->adapter; - - int tx_complete; - int work_done; - - tx_complete = qlcnic_process_cmd_ring(adapter); - - work_done = qlcnic_process_rcv_ring(sds_ring, budget); - - if ((work_done < budget) && tx_complete) { - napi_complete(&sds_ring->napi); - if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) - qlcnic_enable_int(sds_ring); - } - - return work_done; -} - -static int qlcnic_rx_poll(struct napi_struct *napi, int budget) -{ - struct qlcnic_host_sds_ring *sds_ring = - container_of(napi, struct qlcnic_host_sds_ring, napi); - - struct qlcnic_adapter *adapter = sds_ring->adapter; - int work_done; - - work_done = qlcnic_process_rcv_ring(sds_ring, budget); - - if (work_done < budget) { - napi_complete(&sds_ring->napi); - if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) - qlcnic_enable_int(sds_ring); - } - - return work_done; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void qlcnic_poll_controller(struct net_device *netdev) -{ - int ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - disable_irq(adapter->irq); - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - qlcnic_intr(adapter->irq, sds_ring); - } - enable_irq(adapter->irq); -} -#endif - -static void -qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding) -{ - u32 val; - - val = adapter->portnum & 0xf; - val |= encoding << 7; - val |= (jiffies - adapter->dev_rst_time) << 8; - - QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val); - adapter->dev_rst_time = jiffies; -} - -static int -qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state) -{ - u32 val; - - WARN_ON(state != QLCNIC_DEV_NEED_RESET && - state != QLCNIC_DEV_NEED_QUISCENT); - - if (qlcnic_api_lock(adapter)) - return -EIO; - - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); - - if (state == QLCNIC_DEV_NEED_RESET) - QLC_DEV_SET_RST_RDY(val, adapter->portnum); - else if (state == QLCNIC_DEV_NEED_QUISCENT) - QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum); - - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); - - qlcnic_api_unlock(adapter); - - return 0; -} - -static int -qlcnic_clr_drv_state(struct qlcnic_adapter *adapter) -{ - u32 val; - - if (qlcnic_api_lock(adapter)) - return -EBUSY; - - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); - QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); - - qlcnic_api_unlock(adapter); - - return 0; -} - -static void -qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed) -{ - u32 val; - - if (qlcnic_api_lock(adapter)) - goto err; - - val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); - QLC_DEV_CLR_REF_CNT(val, adapter->portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); - - if (failed) { - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); - dev_info(&adapter->pdev->dev, - "Device state set to Failed. 
Please Reboot\n"); - } else if (!(val & 0x11111111)) - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD); - - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); - QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); - - qlcnic_api_unlock(adapter); -err: - adapter->fw_fail_cnt = 0; - adapter->flags &= ~QLCNIC_FW_HANG; - clear_bit(__QLCNIC_START_FW, &adapter->state); - clear_bit(__QLCNIC_RESETTING, &adapter->state); -} - -/* Grab api lock, before checking state */ -static int -qlcnic_check_drv_state(struct qlcnic_adapter *adapter) -{ - int act, state, active_mask; - - state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); - act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); - - if (adapter->flags & QLCNIC_FW_RESET_OWNER) { - active_mask = (~(1 << (adapter->ahw->pci_func * 4))); - act = act & active_mask; - } - - if (((state & 0x11111111) == (act & 0x11111111)) || - ((act & 0x11111111) == ((state >> 1) & 0x11111111))) - return 0; - else - return 1; -} - -static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter) -{ - u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER); - - if (val != QLCNIC_DRV_IDC_VER) { - dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's" - " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val); - } - - return 0; -} - -static int -qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) -{ - u32 val, prev_state; - u8 dev_init_timeo = adapter->dev_init_timeo; - u8 portnum = adapter->portnum; - u8 ret; - - if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) - return 1; - - if (qlcnic_api_lock(adapter)) - return -1; - - val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); - if (!(val & (1 << (portnum * 4)))) { - QLC_DEV_SET_REF_CNT(val, portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); - } - - prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); - QLCDB(adapter, HW, "Device state = %u\n", prev_state); - - switch (prev_state) { - case QLCNIC_DEV_COLD: - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); - QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER); - qlcnic_idc_debug_info(adapter, 0); - qlcnic_api_unlock(adapter); - return 1; - - case QLCNIC_DEV_READY: - ret = qlcnic_check_idc_ver(adapter); - qlcnic_api_unlock(adapter); - return ret; - - case QLCNIC_DEV_NEED_RESET: - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); - QLC_DEV_SET_RST_RDY(val, portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); - break; - - case QLCNIC_DEV_NEED_QUISCENT: - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); - QLC_DEV_SET_QSCNT_RDY(val, portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); - break; - - case QLCNIC_DEV_FAILED: - dev_err(&adapter->pdev->dev, "Device in failed state.\n"); - qlcnic_api_unlock(adapter); - return -1; - - case QLCNIC_DEV_INITIALIZING: - case QLCNIC_DEV_QUISCENT: - break; - } - - qlcnic_api_unlock(adapter); - - do { - msleep(1000); - prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); - - if (prev_state == QLCNIC_DEV_QUISCENT) - continue; - } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo); - - if (!dev_init_timeo) { - dev_err(&adapter->pdev->dev, - "Waiting for device to initialize timeout\n"); - return -1; - } - - if (qlcnic_api_lock(adapter)) - return -1; - - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); - QLC_DEV_CLR_RST_QSCNT(val, portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); - - ret = qlcnic_check_idc_ver(adapter); - qlcnic_api_unlock(adapter); - - return ret; -} - -static void -qlcnic_fwinit_work(struct work_struct *work) -{ - struct qlcnic_adapter 
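[Editorial aside, not part of the patch.] qlcnic_can_start_firmware() and qlcnic_check_drv_state() above coordinate multiple PCI functions through shared CRB registers: judging by the (1 << (portnum * 4)) reference-count test and the 0x11111111 masks, each function appears to own a 4-bit field, with bit 0 of every nibble acting as its active/acked flag. The sketch below works through that bit arithmetic; the field meanings are inferred from the masks here, and the real layout is whatever the QLC_DEV_SET_*/QLC_DEV_CLR_* macros define.

/* Sketch of the per-function nibble layout inferred from the
 * 0x11111111 masks above: function N owns bits [4N+3:4N] of the
 * shared register, and bit 4N flags it active/ready.  This is an
 * illustration of the arithmetic, not the driver's macro set. */
#include <stdint.h>
#include <stdio.h>

static uint32_t set_ready(uint32_t val, unsigned int portnum)
{
	return val | (1u << (portnum * 4));     /* like QLC_DEV_SET_RST_RDY */
}

static uint32_t clear_func(uint32_t val, unsigned int portnum)
{
	return val & ~(0xfu << (portnum * 4));  /* drop the whole nibble */
}

int main(void)
{
	uint32_t active = 0, state = 0;

	active = set_ready(active, 0);          /* functions 0 and 2 active */
	active = set_ready(active, 2);
	state  = set_ready(state, 0);           /* only function 0 has acked */

	/* The "everyone acked" comparison from qlcnic_check_drv_state(). */
	printf("all acked: %s\n",
	       ((state & 0x11111111u) == (active & 0x11111111u)) ? "yes" : "no");

	state = set_ready(state, 2);            /* function 2 acks too */
	printf("all acked: %s\n",
	       ((state & 0x11111111u) == (active & 0x11111111u)) ? "yes" : "no");

	active = clear_func(active, 0);         /* function 0 unloads */
	printf("active after unload: %#x\n", active);
	return 0;
}

Packing eight functions into one 32-bit register is what lets the reset owner poll a single read to decide whether every active function has acknowledged a NEED_RESET or NEED_QUISCENT transition.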
*adapter = container_of(work, - struct qlcnic_adapter, fw_work.work); - u32 dev_state = 0xf; - u32 val; - - if (qlcnic_api_lock(adapter)) - goto err_ret; - - dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); - if (dev_state == QLCNIC_DEV_QUISCENT || - dev_state == QLCNIC_DEV_NEED_QUISCENT) { - qlcnic_api_unlock(adapter); - qlcnic_schedule_work(adapter, qlcnic_fwinit_work, - FW_POLL_DELAY * 2); - return; - } - - if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { - qlcnic_api_unlock(adapter); - goto wait_npar; - } - - if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { - dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n", - adapter->reset_ack_timeo); - goto skip_ack_check; - } - - if (!qlcnic_check_drv_state(adapter)) { -skip_ack_check: - dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); - - if (dev_state == QLCNIC_DEV_NEED_RESET) { - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, - QLCNIC_DEV_INITIALIZING); - set_bit(__QLCNIC_START_FW, &adapter->state); - QLCDB(adapter, DRV, "Restarting fw\n"); - qlcnic_idc_debug_info(adapter, 0); - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); - QLC_DEV_SET_RST_RDY(val, adapter->portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); - } - - qlcnic_api_unlock(adapter); - - rtnl_lock(); - if (adapter->ahw->fw_dump.enable && - (adapter->flags & QLCNIC_FW_RESET_OWNER)) { - QLCDB(adapter, DRV, "Take FW dump\n"); - qlcnic_dump_fw(adapter); - adapter->flags |= QLCNIC_FW_HANG; - } - rtnl_unlock(); - - adapter->flags &= ~QLCNIC_FW_RESET_OWNER; - if (!adapter->nic_ops->start_firmware(adapter)) { - qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); - adapter->fw_wait_cnt = 0; - return; - } - goto err_ret; - } - - qlcnic_api_unlock(adapter); - -wait_npar: - dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); - QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); - - switch (dev_state) { - case QLCNIC_DEV_READY: - if (!adapter->nic_ops->start_firmware(adapter)) { - qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); - adapter->fw_wait_cnt = 0; - return; - } - case QLCNIC_DEV_FAILED: - break; - default: - qlcnic_schedule_work(adapter, - qlcnic_fwinit_work, FW_POLL_DELAY); - return; - } - -err_ret: - dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u " - "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt); - netif_device_attach(adapter->netdev); - qlcnic_clr_all_drv_state(adapter, 0); -} - -static void -qlcnic_detach_work(struct work_struct *work) -{ - struct qlcnic_adapter *adapter = container_of(work, - struct qlcnic_adapter, fw_work.work); - struct net_device *netdev = adapter->netdev; - u32 status; - - netif_device_detach(netdev); - - /* Dont grab rtnl lock during Quiscent mode */ - if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) { - if (netif_running(netdev)) - __qlcnic_down(adapter, netdev); - } else - qlcnic_down(adapter, netdev); - - status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1); - - if (status & QLCNIC_RCODE_FATAL_ERROR) - goto err_ret; - - if (adapter->temp == QLCNIC_TEMP_PANIC) - goto err_ret; - /* Dont ack if this instance is the reset owner */ - if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) { - if (qlcnic_set_drv_state(adapter, adapter->dev_state)) - goto err_ret; - } - - adapter->fw_wait_cnt = 0; - - qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY); - - return; - -err_ret: - dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n", - status, adapter->temp); - netif_device_attach(netdev); - qlcnic_clr_all_drv_state(adapter, 1); -} - -/*Transit NPAR state to NON Operational */ -static 
void -qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter) -{ - u32 state; - - state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); - if (state == QLCNIC_DEV_NPAR_NON_OPER) - return; - - if (qlcnic_api_lock(adapter)) - return; - QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER); - qlcnic_api_unlock(adapter); -} - -/*Transit to RESET state from READY state only */ -void -qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) -{ - u32 state; - - adapter->need_fw_reset = 1; - if (qlcnic_api_lock(adapter)) - return; - - state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); - - if (state == QLCNIC_DEV_READY) { - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET); - adapter->flags |= QLCNIC_FW_RESET_OWNER; - QLCDB(adapter, DRV, "NEED_RESET state set\n"); - qlcnic_idc_debug_info(adapter, 0); - } - - QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER); - qlcnic_api_unlock(adapter); -} - -/* Transit to NPAR READY state from NPAR NOT READY state */ -static void -qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter) -{ - if (qlcnic_api_lock(adapter)) - return; - - QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER); - QLCDB(adapter, DRV, "NPAR operational state set\n"); - - qlcnic_api_unlock(adapter); -} - -static void -qlcnic_schedule_work(struct qlcnic_adapter *adapter, - work_func_t func, int delay) -{ - if (test_bit(__QLCNIC_AER, &adapter->state)) - return; - - INIT_DELAYED_WORK(&adapter->fw_work, func); - queue_delayed_work(qlcnic_wq, &adapter->fw_work, - round_jiffies_relative(delay)); -} - -static void -qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter) -{ - while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - msleep(10); - - cancel_delayed_work_sync(&adapter->fw_work); -} - -static void -qlcnic_attach_work(struct work_struct *work) -{ - struct qlcnic_adapter *adapter = container_of(work, - struct qlcnic_adapter, fw_work.work); - struct net_device *netdev = adapter->netdev; - u32 npar_state; - - if (adapter->op_mode != QLCNIC_MGMT_FUNC) { - npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); - if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO) - qlcnic_clr_all_drv_state(adapter, 0); - else if (npar_state != QLCNIC_DEV_NPAR_OPER) - qlcnic_schedule_work(adapter, qlcnic_attach_work, - FW_POLL_DELAY); - else - goto attach; - QLCDB(adapter, DRV, "Waiting for NPAR state to operational\n"); - return; - } -attach: - if (netif_running(netdev)) { - if (qlcnic_up(adapter, netdev)) - goto done; - - qlcnic_restore_indev_addr(netdev, NETDEV_UP); - } - -done: - netif_device_attach(netdev); - adapter->fw_fail_cnt = 0; - adapter->flags &= ~QLCNIC_FW_HANG; - clear_bit(__QLCNIC_RESETTING, &adapter->state); - - if (!qlcnic_clr_drv_state(adapter)) - qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, - FW_POLL_DELAY); -} - -static int -qlcnic_check_health(struct qlcnic_adapter *adapter) -{ - u32 state = 0, heartbeat; - struct net_device *netdev = adapter->netdev; - - if (qlcnic_check_temp(adapter)) - goto detach; - - if (adapter->need_fw_reset) - qlcnic_dev_request_reset(adapter); - - state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); - if (state == QLCNIC_DEV_NEED_RESET) { - qlcnic_set_npar_non_operational(adapter); - adapter->need_fw_reset = 1; - } else if (state == QLCNIC_DEV_NEED_QUISCENT) - goto detach; - - heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); - if (heartbeat != adapter->heartbeat) { - adapter->heartbeat = heartbeat; - adapter->fw_fail_cnt = 0; - if (adapter->need_fw_reset) - goto detach; - - if 
(adapter->reset_context && auto_fw_reset) { - qlcnic_reset_hw_context(adapter); - adapter->netdev->trans_start = jiffies; - } - - return 0; - } - - if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) - return 0; - - adapter->flags |= QLCNIC_FW_HANG; - - qlcnic_dev_request_reset(adapter); - - if (auto_fw_reset) - clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); - - dev_info(&netdev->dev, "firmware hang detected\n"); - dev_info(&adapter->pdev->dev, "Dumping hw/fw registers\n" - "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" - "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" - "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" - "PEG_NET_4_PC: 0x%x\n", - QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1), - QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2), - QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c), - QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c), - QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c), - QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c), - QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c)); -detach: - adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state : - QLCNIC_DEV_NEED_RESET; - - if (auto_fw_reset && - !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) { - - qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); - QLCDB(adapter, DRV, "fw recovery scheduled.\n"); - } - - return 1; -} - -static void -qlcnic_fw_poll_work(struct work_struct *work) -{ - struct qlcnic_adapter *adapter = container_of(work, - struct qlcnic_adapter, fw_work.work); - - if (test_bit(__QLCNIC_RESETTING, &adapter->state)) - goto reschedule; - - - if (qlcnic_check_health(adapter)) - return; - - if (adapter->fhash.fnum) - qlcnic_prune_lb_filters(adapter); - -reschedule: - qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); -} - -static int qlcnic_is_first_func(struct pci_dev *pdev) -{ - struct pci_dev *oth_pdev; - int val = pdev->devfn; - - while (val-- > 0) { - oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr - (pdev->bus), pdev->bus->number, - PCI_DEVFN(PCI_SLOT(pdev->devfn), val)); - if (!oth_pdev) - continue; - - if (oth_pdev->current_state != PCI_D3cold) { - pci_dev_put(oth_pdev); - return 0; - } - pci_dev_put(oth_pdev); - } - return 1; -} - -static int qlcnic_attach_func(struct pci_dev *pdev) -{ - int err, first_func; - struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - - pdev->error_state = pci_channel_io_normal; - - err = pci_enable_device(pdev); - if (err) - return err; - - pci_set_power_state(pdev, PCI_D0); - pci_set_master(pdev); - pci_restore_state(pdev); - - first_func = qlcnic_is_first_func(pdev); - - if (qlcnic_api_lock(adapter)) - return -EINVAL; - - if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) { - adapter->need_fw_reset = 1; - set_bit(__QLCNIC_START_FW, &adapter->state); - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); - QLCDB(adapter, DRV, "Restarting fw\n"); - } - qlcnic_api_unlock(adapter); - - err = adapter->nic_ops->start_firmware(adapter); - if (err) - return err; - - qlcnic_clr_drv_state(adapter); - qlcnic_setup_intr(adapter); - - if (netif_running(netdev)) { - err = qlcnic_attach(adapter); - if (err) { - qlcnic_clr_all_drv_state(adapter, 1); - clear_bit(__QLCNIC_AER, &adapter->state); - netif_device_attach(netdev); - return err; - } - - err = qlcnic_up(adapter, netdev); - if (err) - goto done; - - qlcnic_restore_indev_addr(netdev, NETDEV_UP); - } - done: - netif_device_attach(netdev); - return err; -} - -static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t 
state) -{ - struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - if (state == pci_channel_io_normal) - return PCI_ERS_RESULT_RECOVERED; - - set_bit(__QLCNIC_AER, &adapter->state); - netif_device_detach(netdev); - - cancel_delayed_work_sync(&adapter->fw_work); - - if (netif_running(netdev)) - qlcnic_down(adapter, netdev); - - qlcnic_detach(adapter); - qlcnic_teardown_intr(adapter); - - clear_bit(__QLCNIC_RESETTING, &adapter->state); - - pci_save_state(pdev); - pci_disable_device(pdev); - - return PCI_ERS_RESULT_NEED_RESET; -} - -static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev) -{ - return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT : - PCI_ERS_RESULT_RECOVERED; -} - -static void qlcnic_io_resume(struct pci_dev *pdev) -{ - struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - - pci_cleanup_aer_uncorrect_error_status(pdev); - - if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY && - test_and_clear_bit(__QLCNIC_AER, &adapter->state)) - qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, - FW_POLL_DELAY); -} - -static int -qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) -{ - int err; - - err = qlcnic_can_start_firmware(adapter); - if (err) - return err; - - err = qlcnic_check_npar_opertional(adapter); - if (err) - return err; - - err = qlcnic_initialize_nic(adapter); - if (err) - return err; - - qlcnic_check_options(adapter); - - err = qlcnic_set_eswitch_port_config(adapter); - if (err) - return err; - - adapter->need_fw_reset = 0; - - return err; -} - -static int -qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) -{ - return -EOPNOTSUPP; -} - -static int -qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) -{ - return -EOPNOTSUPP; -} - -static ssize_t -qlcnic_store_bridged_mode(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - unsigned long new; - int ret = -EINVAL; - - if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)) - goto err_out; - - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) - goto err_out; - - if (strict_strtoul(buf, 2, &new)) - goto err_out; - - if (!adapter->nic_ops->config_bridged_mode(adapter, !!new)) - ret = len; - -err_out: - return ret; -} - -static ssize_t -qlcnic_show_bridged_mode(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - int bridged_mode = 0; - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) - bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED); - - return sprintf(buf, "%d\n", bridged_mode); -} - -static struct device_attribute dev_attr_bridged_mode = { - .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, - .show = qlcnic_show_bridged_mode, - .store = qlcnic_store_bridged_mode, -}; - -static ssize_t -qlcnic_store_diag_mode(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - unsigned long new; - - if (strict_strtoul(buf, 2, &new)) - return -EINVAL; - - if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED)) - adapter->flags ^= QLCNIC_DIAG_ENABLED; - - return len; -} - -static ssize_t -qlcnic_show_diag_mode(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - - return 
sprintf(buf, "%d\n", - !!(adapter->flags & QLCNIC_DIAG_ENABLED)); -} - -static struct device_attribute dev_attr_diag_mode = { - .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, - .show = qlcnic_show_diag_mode, - .store = qlcnic_store_diag_mode, -}; - -int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val) -{ - if (!use_msi_x && !use_msi) { - netdev_info(netdev, "no msix or msi support, hence no rss\n"); - return -EINVAL; - } - - if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) { - netdev_info(netdev, "rss_ring valid range [2 - %x] in " - " powers of 2\n", max_hw); - return -EINVAL; - } - return 0; - -} - -int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data) -{ - struct net_device *netdev = adapter->netdev; - int err = 0; - - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return -EBUSY; - - netif_device_detach(netdev); - if (netif_running(netdev)) - __qlcnic_down(adapter, netdev); - qlcnic_detach(adapter); - qlcnic_teardown_intr(adapter); - - if (qlcnic_enable_msix(adapter, data)) { - netdev_info(netdev, "failed setting max_rss; rss disabled\n"); - qlcnic_enable_msi_legacy(adapter); - } - - if (netif_running(netdev)) { - err = qlcnic_attach(adapter); - if (err) - goto done; - err = __qlcnic_up(adapter, netdev); - if (err) - goto done; - qlcnic_restore_indev_addr(netdev, NETDEV_UP); - } - done: - netif_device_attach(netdev); - clear_bit(__QLCNIC_RESETTING, &adapter->state); - return err; -} - -static int -qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter, - loff_t offset, size_t size) -{ - size_t crb_size = 4; - - if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) - return -EIO; - - if (offset < QLCNIC_PCI_CRBSPACE) { - if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, - QLCNIC_PCI_CAMQM_END)) - crb_size = 8; - else - return -EINVAL; - } - - if ((size != crb_size) || (offset & (crb_size-1))) - return -EINVAL; - - return 0; -} - -static ssize_t -qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u32 data; - u64 qmdata; - int ret; - - ret = qlcnic_sysfs_validate_crb(adapter, offset, size); - if (ret != 0) - return ret; - - if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { - qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata); - memcpy(buf, &qmdata, size); - } else { - data = QLCRD32(adapter, offset); - memcpy(buf, &data, size); - } - return size; -} - -static ssize_t -qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u32 data; - u64 qmdata; - int ret; - - ret = qlcnic_sysfs_validate_crb(adapter, offset, size); - if (ret != 0) - return ret; - - if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { - memcpy(&qmdata, buf, size); - qlcnic_pci_camqm_write_2M(adapter, offset, qmdata); - } else { - memcpy(&data, buf, size); - QLCWR32(adapter, offset, data); - } - return size; -} - -static int -qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter, - loff_t offset, size_t size) -{ - if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) - return -EIO; - - if ((size != 8) || (offset & 0x7)) - return -EIO; - - return 0; -} - -static ssize_t -qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj, - 
struct bin_attribute *attr, - char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u64 data; - int ret; - - ret = qlcnic_sysfs_validate_mem(adapter, offset, size); - if (ret != 0) - return ret; - - if (qlcnic_pci_mem_read_2M(adapter, offset, &data)) - return -EIO; - - memcpy(buf, &data, size); - - return size; -} - -static ssize_t -qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u64 data; - int ret; - - ret = qlcnic_sysfs_validate_mem(adapter, offset, size); - if (ret != 0) - return ret; - - memcpy(&data, buf, size); - - if (qlcnic_pci_mem_write_2M(adapter, offset, data)) - return -EIO; - - return size; -} - -static struct bin_attribute bin_attr_crb = { - .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_read_crb, - .write = qlcnic_sysfs_write_crb, -}; - -static struct bin_attribute bin_attr_mem = { - .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_read_mem, - .write = qlcnic_sysfs_write_mem, -}; - -static int -validate_pm_config(struct qlcnic_adapter *adapter, - struct qlcnic_pm_func_cfg *pm_cfg, int count) -{ - - u8 src_pci_func, s_esw_id, d_esw_id; - u8 dest_pci_func; - int i; - - for (i = 0; i < count; i++) { - src_pci_func = pm_cfg[i].pci_func; - dest_pci_func = pm_cfg[i].dest_npar; - if (src_pci_func >= QLCNIC_MAX_PCI_FUNC - || dest_pci_func >= QLCNIC_MAX_PCI_FUNC) - return QL_STATUS_INVALID_PARAM; - - if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC) - return QL_STATUS_INVALID_PARAM; - - if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC) - return QL_STATUS_INVALID_PARAM; - - s_esw_id = adapter->npars[src_pci_func].phy_port; - d_esw_id = adapter->npars[dest_pci_func].phy_port; - - if (s_esw_id != d_esw_id) - return QL_STATUS_INVALID_PARAM; - - } - return 0; - -} - -static ssize_t -qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_pm_func_cfg *pm_cfg; - u32 id, action, pci_func; - int count, rem, i, ret; - - count = size / sizeof(struct qlcnic_pm_func_cfg); - rem = size % sizeof(struct qlcnic_pm_func_cfg); - if (rem) - return QL_STATUS_INVALID_PARAM; - - pm_cfg = (struct qlcnic_pm_func_cfg *) buf; - - ret = validate_pm_config(adapter, pm_cfg, count); - if (ret) - return ret; - for (i = 0; i < count; i++) { - pci_func = pm_cfg[i].pci_func; - action = !!pm_cfg[i].action; - id = adapter->npars[pci_func].phy_port; - ret = qlcnic_config_port_mirroring(adapter, id, - action, pci_func); - if (ret) - return ret; - } - - for (i = 0; i < count; i++) { - pci_func = pm_cfg[i].pci_func; - id = adapter->npars[pci_func].phy_port; - adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action; - adapter->npars[pci_func].dest_npar = id; - } - return size; -} - -static ssize_t -qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_pm_func_cfg 
pm_cfg[QLCNIC_MAX_PCI_FUNC]; - int i; - - if (size != sizeof(pm_cfg)) - return QL_STATUS_INVALID_PARAM; - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - if (adapter->npars[i].type != QLCNIC_TYPE_NIC) - continue; - pm_cfg[i].action = adapter->npars[i].enable_pm; - pm_cfg[i].dest_npar = 0; - pm_cfg[i].pci_func = i; - } - memcpy(buf, &pm_cfg, size); - - return size; -} - -static int -validate_esw_config(struct qlcnic_adapter *adapter, - struct qlcnic_esw_func_cfg *esw_cfg, int count) -{ - u32 op_mode; - u8 pci_func; - int i; - - op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE); - - for (i = 0; i < count; i++) { - pci_func = esw_cfg[i].pci_func; - if (pci_func >= QLCNIC_MAX_PCI_FUNC) - return QL_STATUS_INVALID_PARAM; - - if (adapter->op_mode == QLCNIC_MGMT_FUNC) - if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) - return QL_STATUS_INVALID_PARAM; - - switch (esw_cfg[i].op_mode) { - case QLCNIC_PORT_DEFAULTS: - if (QLC_DEV_GET_DRV(op_mode, pci_func) != - QLCNIC_NON_PRIV_FUNC) { - if (esw_cfg[i].mac_anti_spoof != 0) - return QL_STATUS_INVALID_PARAM; - if (esw_cfg[i].mac_override != 1) - return QL_STATUS_INVALID_PARAM; - if (esw_cfg[i].promisc_mode != 1) - return QL_STATUS_INVALID_PARAM; - } - break; - case QLCNIC_ADD_VLAN: - if (!IS_VALID_VLAN(esw_cfg[i].vlan_id)) - return QL_STATUS_INVALID_PARAM; - if (!esw_cfg[i].op_type) - return QL_STATUS_INVALID_PARAM; - break; - case QLCNIC_DEL_VLAN: - if (!esw_cfg[i].op_type) - return QL_STATUS_INVALID_PARAM; - break; - default: - return QL_STATUS_INVALID_PARAM; - } - } - return 0; -} - -static ssize_t -qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_esw_func_cfg *esw_cfg; - struct qlcnic_npar_info *npar; - int count, rem, i, ret; - u8 pci_func, op_mode = 0; - - count = size / sizeof(struct qlcnic_esw_func_cfg); - rem = size % sizeof(struct qlcnic_esw_func_cfg); - if (rem) - return QL_STATUS_INVALID_PARAM; - - esw_cfg = (struct qlcnic_esw_func_cfg *) buf; - ret = validate_esw_config(adapter, esw_cfg, count); - if (ret) - return ret; - - for (i = 0; i < count; i++) { - if (adapter->op_mode == QLCNIC_MGMT_FUNC) - if (qlcnic_config_switch_port(adapter, &esw_cfg[i])) - return QL_STATUS_INVALID_PARAM; - - if (adapter->ahw->pci_func != esw_cfg[i].pci_func) - continue; - - op_mode = esw_cfg[i].op_mode; - qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]); - esw_cfg[i].op_mode = op_mode; - esw_cfg[i].pci_func = adapter->ahw->pci_func; - - switch (esw_cfg[i].op_mode) { - case QLCNIC_PORT_DEFAULTS: - qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]); - break; - case QLCNIC_ADD_VLAN: - qlcnic_set_vlan_config(adapter, &esw_cfg[i]); - break; - case QLCNIC_DEL_VLAN: - esw_cfg[i].vlan_id = 0; - qlcnic_set_vlan_config(adapter, &esw_cfg[i]); - break; - } - } - - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - goto out; - - for (i = 0; i < count; i++) { - pci_func = esw_cfg[i].pci_func; - npar = &adapter->npars[pci_func]; - switch (esw_cfg[i].op_mode) { - case QLCNIC_PORT_DEFAULTS: - npar->promisc_mode = esw_cfg[i].promisc_mode; - npar->mac_override = esw_cfg[i].mac_override; - npar->offload_flags = esw_cfg[i].offload_flags; - npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof; - npar->discard_tagged = esw_cfg[i].discard_tagged; - break; - case QLCNIC_ADD_VLAN: - npar->pvid = esw_cfg[i].vlan_id; - break; - case 
QLCNIC_DEL_VLAN: - npar->pvid = 0; - break; - } - } -out: - return size; -} - -static ssize_t -qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC]; - u8 i; - - if (size != sizeof(esw_cfg)) - return QL_STATUS_INVALID_PARAM; - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - if (adapter->npars[i].type != QLCNIC_TYPE_NIC) - continue; - esw_cfg[i].pci_func = i; - if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i])) - return QL_STATUS_INVALID_PARAM; - } - memcpy(buf, &esw_cfg, size); - - return size; -} - -static int -validate_npar_config(struct qlcnic_adapter *adapter, - struct qlcnic_npar_func_cfg *np_cfg, int count) -{ - u8 pci_func, i; - - for (i = 0; i < count; i++) { - pci_func = np_cfg[i].pci_func; - if (pci_func >= QLCNIC_MAX_PCI_FUNC) - return QL_STATUS_INVALID_PARAM; - - if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) - return QL_STATUS_INVALID_PARAM; - - if (!IS_VALID_BW(np_cfg[i].min_bw) || - !IS_VALID_BW(np_cfg[i].max_bw)) - return QL_STATUS_INVALID_PARAM; - } - return 0; -} - -static ssize_t -qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_info nic_info; - struct qlcnic_npar_func_cfg *np_cfg; - int i, count, rem, ret; - u8 pci_func; - - count = size / sizeof(struct qlcnic_npar_func_cfg); - rem = size % sizeof(struct qlcnic_npar_func_cfg); - if (rem) - return QL_STATUS_INVALID_PARAM; - - np_cfg = (struct qlcnic_npar_func_cfg *) buf; - ret = validate_npar_config(adapter, np_cfg, count); - if (ret) - return ret; - - for (i = 0; i < count ; i++) { - pci_func = np_cfg[i].pci_func; - ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func); - if (ret) - return ret; - nic_info.pci_func = pci_func; - nic_info.min_tx_bw = np_cfg[i].min_bw; - nic_info.max_tx_bw = np_cfg[i].max_bw; - ret = qlcnic_set_nic_info(adapter, &nic_info); - if (ret) - return ret; - adapter->npars[i].min_bw = nic_info.min_tx_bw; - adapter->npars[i].max_bw = nic_info.max_tx_bw; - } - - return size; - -} -static ssize_t -qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_info nic_info; - struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC]; - int i, ret; - - if (size != sizeof(np_cfg)) - return QL_STATUS_INVALID_PARAM; - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) { - if (adapter->npars[i].type != QLCNIC_TYPE_NIC) - continue; - ret = qlcnic_get_nic_info(adapter, &nic_info, i); - if (ret) - return ret; - - np_cfg[i].pci_func = i; - np_cfg[i].op_mode = (u8)nic_info.op_mode; - np_cfg[i].port_num = nic_info.phys_port; - np_cfg[i].fw_capab = nic_info.capabilities; - np_cfg[i].min_bw = nic_info.min_tx_bw ; - np_cfg[i].max_bw = nic_info.max_tx_bw; - np_cfg[i].max_tx_queues = nic_info.max_tx_ques; - np_cfg[i].max_rx_queues = nic_info.max_rx_ques; - } - memcpy(buf, &np_cfg, size); - return size; -} - -static ssize_t -qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj, - struct 
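[Editorial aside, not part of the patch.] The binary sysfs handlers above all share one validation convention: a write must be a whole number of fixed-size records (count = size / sizeof(rec), any remainder rejected), and a read must request exactly the full per-function table (size != sizeof(array) fails). A small portable sketch of that record-count check follows, using a made-up record type rather than qlcnic_pm_func_cfg.

/* Sketch of the record-count validation used by the sysfs config
 * handlers: accept only whole multiples of the record size.  The
 * record layout here is invented for the demo. */
#include <stddef.h>
#include <stdio.h>

struct demo_cfg {                       /* stand-in for qlcnic_pm_func_cfg */
	unsigned char pci_func;
	unsigned char action;
	unsigned char reserved[6];
};

static int validate_size(size_t size, size_t *count)
{
	if (size == 0 || size % sizeof(struct demo_cfg))
		return -22;             /* -EINVAL-style rejection */
	*count = size / sizeof(struct demo_cfg);
	return 0;
}

int main(void)
{
	size_t sizes[] = { 8, 16, 12 };
	size_t count;

	for (size_t i = 0; i < 3; i++) {
		if (validate_size(sizes[i], &count) == 0)
			printf("size %zu -> %zu record(s)\n", sizes[i], count);
		else
			printf("size %zu -> rejected\n", sizes[i]);
	}
	return 0;
}

Rejecting partial records up front means the per-record validate_*_config() loops that follow can index the buffer as a plain array without ever reading past the caller's data.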
bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_esw_statistics port_stats; - int ret; - - if (size != sizeof(struct qlcnic_esw_statistics)) - return QL_STATUS_INVALID_PARAM; - - if (offset >= QLCNIC_MAX_PCI_FUNC) - return QL_STATUS_INVALID_PARAM; - - memset(&port_stats, 0, size); - ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, - &port_stats.rx); - if (ret) - return ret; - - ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER, - &port_stats.tx); - if (ret) - return ret; - - memcpy(buf, &port_stats, size); - return size; -} - -static ssize_t -qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_esw_statistics esw_stats; - int ret; - - if (size != sizeof(struct qlcnic_esw_statistics)) - return QL_STATUS_INVALID_PARAM; - - if (offset >= QLCNIC_NIU_MAX_XG_PORTS) - return QL_STATUS_INVALID_PARAM; - - memset(&esw_stats, 0, size); - ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, - &esw_stats.rx); - if (ret) - return ret; - - ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER, - &esw_stats.tx); - if (ret) - return ret; - - memcpy(buf, &esw_stats, size); - return size; -} - -static ssize_t -qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - int ret; - - if (offset >= QLCNIC_NIU_MAX_XG_PORTS) - return QL_STATUS_INVALID_PARAM; - - ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, - QLCNIC_QUERY_RX_COUNTER); - if (ret) - return ret; - - ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, - QLCNIC_QUERY_TX_COUNTER); - if (ret) - return ret; - - return size; -} - -static ssize_t -qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - int ret; - - if (offset >= QLCNIC_MAX_PCI_FUNC) - return QL_STATUS_INVALID_PARAM; - - ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, - QLCNIC_QUERY_RX_COUNTER); - if (ret) - return ret; - - ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, - QLCNIC_QUERY_TX_COUNTER); - if (ret) - return ret; - - return size; -} - -static ssize_t -qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC]; - struct qlcnic_pci_info *pci_info; - int i, ret; - - if (size != sizeof(pci_cfg)) - return QL_STATUS_INVALID_PARAM; - - pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL); - if (!pci_info) - return -ENOMEM; - - ret = qlcnic_get_pci_info(adapter, pci_info); - if (ret) { - kfree(pci_info); - return ret; - } - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) { - 
pci_cfg[i].pci_func = pci_info[i].id; - pci_cfg[i].func_type = pci_info[i].type; - pci_cfg[i].port_num = pci_info[i].default_port; - pci_cfg[i].min_bw = pci_info[i].tx_min_bw; - pci_cfg[i].max_bw = pci_info[i].tx_max_bw; - memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN); - } - memcpy(buf, &pci_cfg, size); - kfree(pci_info); - return size; -} -static struct bin_attribute bin_attr_npar_config = { - .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_read_npar_config, - .write = qlcnic_sysfs_write_npar_config, -}; - -static struct bin_attribute bin_attr_pci_config = { - .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_read_pci_config, - .write = NULL, -}; - -static struct bin_attribute bin_attr_port_stats = { - .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_get_port_stats, - .write = qlcnic_sysfs_clear_port_stats, -}; - -static struct bin_attribute bin_attr_esw_stats = { - .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_get_esw_stats, - .write = qlcnic_sysfs_clear_esw_stats, -}; - -static struct bin_attribute bin_attr_esw_config = { - .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_read_esw_config, - .write = qlcnic_sysfs_write_esw_config, -}; - -static struct bin_attribute bin_attr_pm_config = { - .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_read_pm_config, - .write = qlcnic_sysfs_write_pm_config, -}; - -static void -qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) - if (device_create_file(dev, &dev_attr_bridged_mode)) - dev_warn(dev, - "failed to create bridged_mode sysfs entry\n"); -} - -static void -qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) - device_remove_file(dev, &dev_attr_bridged_mode); -} - -static void -qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - - if (device_create_bin_file(dev, &bin_attr_port_stats)) - dev_info(dev, "failed to create port stats sysfs entry"); - - if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) - return; - if (device_create_file(dev, &dev_attr_diag_mode)) - dev_info(dev, "failed to create diag_mode sysfs entry\n"); - if (device_create_bin_file(dev, &bin_attr_crb)) - dev_info(dev, "failed to create crb sysfs entry\n"); - if (device_create_bin_file(dev, &bin_attr_mem)) - dev_info(dev, "failed to create mem sysfs entry\n"); - if (device_create_bin_file(dev, &bin_attr_pci_config)) - dev_info(dev, "failed to create pci config sysfs entry"); - if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) - return; - if (device_create_bin_file(dev, &bin_attr_esw_config)) - dev_info(dev, "failed to create esw config sysfs entry"); - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - return; - if (device_create_bin_file(dev, &bin_attr_npar_config)) - dev_info(dev, "failed to create npar config sysfs entry"); - if (device_create_bin_file(dev, &bin_attr_pm_config)) - dev_info(dev, "failed to create pm config sysfs entry"); - if (device_create_bin_file(dev, &bin_attr_esw_stats)) - dev_info(dev, "failed to create eswitch stats sysfs entry"); -} - -static void -qlcnic_remove_diag_entries(struct 
qlcnic_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - - device_remove_bin_file(dev, &bin_attr_port_stats); - - if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) - return; - device_remove_file(dev, &dev_attr_diag_mode); - device_remove_bin_file(dev, &bin_attr_crb); - device_remove_bin_file(dev, &bin_attr_mem); - device_remove_bin_file(dev, &bin_attr_pci_config); - if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) - return; - device_remove_bin_file(dev, &bin_attr_esw_config); - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - return; - device_remove_bin_file(dev, &bin_attr_npar_config); - device_remove_bin_file(dev, &bin_attr_pm_config); - device_remove_bin_file(dev, &bin_attr_esw_stats); -} - -#ifdef CONFIG_INET - -#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops) - -static void -qlcnic_config_indev_addr(struct qlcnic_adapter *adapter, - struct net_device *dev, unsigned long event) -{ - struct in_device *indev; - - indev = in_dev_get(dev); - if (!indev) - return; - - for_ifa(indev) { - switch (event) { - case NETDEV_UP: - qlcnic_config_ipaddr(adapter, - ifa->ifa_address, QLCNIC_IP_UP); - break; - case NETDEV_DOWN: - qlcnic_config_ipaddr(adapter, - ifa->ifa_address, QLCNIC_IP_DOWN); - break; - default: - break; - } - } endfor_ifa(indev); - - in_dev_put(indev); -} - -static void -qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct net_device *dev; - u16 vid; - - qlcnic_config_indev_addr(adapter, netdev, event); - - for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) { - dev = __vlan_find_dev_deep(netdev, vid); - if (!dev) - continue; - qlcnic_config_indev_addr(adapter, dev, event); - } -} - -static int qlcnic_netdev_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct qlcnic_adapter *adapter; - struct net_device *dev = (struct net_device *)ptr; - -recheck: - if (dev == NULL) - goto done; - - if (dev->priv_flags & IFF_802_1Q_VLAN) { - dev = vlan_dev_real_dev(dev); - goto recheck; - } - - if (!is_qlcnic_netdev(dev)) - goto done; - - adapter = netdev_priv(dev); - - if (!adapter) - goto done; - - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) - goto done; - - qlcnic_config_indev_addr(adapter, dev, event); -done: - return NOTIFY_DONE; -} - -static int -qlcnic_inetaddr_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct qlcnic_adapter *adapter; - struct net_device *dev; - - struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; - - dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; - -recheck: - if (dev == NULL) - goto done; - - if (dev->priv_flags & IFF_802_1Q_VLAN) { - dev = vlan_dev_real_dev(dev); - goto recheck; - } - - if (!is_qlcnic_netdev(dev)) - goto done; - - adapter = netdev_priv(dev); - - if (!adapter) - goto done; - - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) - goto done; - - switch (event) { - case NETDEV_UP: - qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP); - break; - case NETDEV_DOWN: - qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN); - break; - default: - break; - } - -done: - return NOTIFY_DONE; -} - -static struct notifier_block qlcnic_netdev_cb = { - .notifier_call = qlcnic_netdev_event, -}; - -static struct notifier_block qlcnic_inetaddr_cb = { - .notifier_call = qlcnic_inetaddr_event, -}; -#else -static void -qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event) -{ } -#endif -static struct pci_error_handlers qlcnic_err_handler = { - .error_detected = qlcnic_io_error_detected, - .slot_reset = qlcnic_io_slot_reset, - .resume = qlcnic_io_resume, -}; - -static struct pci_driver qlcnic_driver = { - .name = qlcnic_driver_name, - .id_table = qlcnic_pci_tbl, - .probe = qlcnic_probe, - .remove = __devexit_p(qlcnic_remove), -#ifdef CONFIG_PM - .suspend = qlcnic_suspend, - .resume = qlcnic_resume, -#endif - .shutdown = qlcnic_shutdown, - .err_handler = &qlcnic_err_handler - -}; - -static int __init qlcnic_init_module(void) -{ - int ret; - - printk(KERN_INFO "%s\n", qlcnic_driver_string); - - qlcnic_wq = create_singlethread_workqueue("qlcnic"); - if (qlcnic_wq == NULL) { - printk(KERN_ERR "qlcnic: cannot create workqueue\n"); - return -ENOMEM; - } - -#ifdef CONFIG_INET - register_netdevice_notifier(&qlcnic_netdev_cb); - register_inetaddr_notifier(&qlcnic_inetaddr_cb); -#endif - - ret = pci_register_driver(&qlcnic_driver); - if (ret) { -#ifdef CONFIG_INET - unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); - unregister_netdevice_notifier(&qlcnic_netdev_cb); -#endif - destroy_workqueue(qlcnic_wq); - } - - return ret; -} - -module_init(qlcnic_init_module); - -static void __exit qlcnic_exit_module(void) -{ - - pci_unregister_driver(&qlcnic_driver); - -#ifdef CONFIG_INET - unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); - unregister_netdevice_notifier(&qlcnic_netdev_cb); -#endif - destroy_workqueue(qlcnic_wq); -} - -module_exit(qlcnic_exit_module); diff --git a/drivers/net/qlge/Makefile b/drivers/net/qlge/Makefile deleted file mode 100644 index 8a197658d76f..000000000000 --- a/drivers/net/qlge/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for the Qlogic 10GbE PCI Express ethernet driver -# - -obj-$(CONFIG_QLGE) += qlge.o - -qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h deleted file mode 100644 index 8731f79c9efc..000000000000 --- a/drivers/net/qlge/qlge.h +++ /dev/null @@ -1,2334 +0,0 @@ -/* - * QLogic QLA41xx NIC HBA Driver - * Copyright (c) 2003-2006 QLogic Corporation - * - * See LICENSE.qlge for copyright and licensing details. - */ -#ifndef _QLGE_H_ -#define _QLGE_H_ - -#include -#include -#include -#include -#include - -/* - * General definitions... 
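The qlcnic bin_attribute handlers earlier in this patch implement a small binary ABI: the pread() offset selects a PCI function or eswitch port, and the transfer size must equal the kernel structure size or the handler rejects the request. A minimal userspace sketch under those assumptions (the device path and the 176-byte stand-in size are hypothetical; the authoritative layout is struct qlcnic_esw_statistics in qlcnic.h):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in only: must equal sizeof(struct qlcnic_esw_statistics) from
 * qlcnic.h, or the kernel side returns an invalid-parameter status. */
#define QLCNIC_ESW_STATS_LEN 176

int main(void)
{
        unsigned char buf[QLCNIC_ESW_STATS_LEN];
        const off_t pci_func = 0;       /* the read offset selects the function */
        int fd = open("/sys/bus/pci/devices/0000:03:00.0/port_stats", O_RDONLY);

        if (fd < 0)
                return 1;
        if (pread(fd, buf, sizeof(buf), pci_func) != (ssize_t)sizeof(buf)) {
                close(fd);
                return 1;
        }
        /* decode per struct qlcnic_esw_statistics {rx, tx} from qlcnic.h */
        close(fd);
        return 0;
}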
- */ -#define DRV_NAME "qlge" -#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " -#define DRV_VERSION "v1.00.00.29.00.00-01" - -#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ - -#define QLGE_VENDOR_ID 0x1077 -#define QLGE_DEVICE_ID_8012 0x8012 -#define QLGE_DEVICE_ID_8000 0x8000 -#define MAX_CPUS 8 -#define MAX_TX_RINGS MAX_CPUS -#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1) - -#define NUM_TX_RING_ENTRIES 256 -#define NUM_RX_RING_ENTRIES 256 - -#define NUM_SMALL_BUFFERS 512 -#define NUM_LARGE_BUFFERS 512 -#define DB_PAGE_SIZE 4096 - -/* Calculate the number of (4k) pages required to - * contain a buffer queue of the given length. - */ -#define MAX_DB_PAGES_PER_BQ(x) \ - (((x * sizeof(u64)) / DB_PAGE_SIZE) + \ - (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0)) - -#define RX_RING_SHADOW_SPACE (sizeof(u64) + \ - MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ - MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) -#define LARGE_BUFFER_MAX_SIZE 8192 -#define LARGE_BUFFER_MIN_SIZE 2048 - -#define MAX_CQ 128 -#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */ -#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */ -#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2) -#define UDELAY_COUNT 3 -#define UDELAY_DELAY 100 - - -#define TX_DESC_PER_IOCB 8 -/* The maximum number of frags we handle is based - * on PAGE_SIZE... - */ -#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */ -#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) -#else /* all other page sizes */ -#define TX_DESC_PER_OAL 0 -#endif - -/* Word shifting for converting 64-bit - * address to a series of 16-bit words. - * This is used for some MPI firmware - * mailbox commands. - */ -#define LSW(x) ((u16)(x)) -#define MSW(x) ((u16)((u32)(x) >> 16)) -#define LSD(x) ((u32)((u64)(x))) -#define MSD(x) ((u32)((((u64)(x)) >> 32))) - -/* MPI test register definitions. This register - * is used for determining alternate NIC function's - * PCI->func number. - */ -enum { - MPI_TEST_FUNC_PORT_CFG = 0x1002, - MPI_TEST_FUNC_PRB_CTL = 0x100e, - MPI_TEST_FUNC_PRB_EN = 0x18a20000, - MPI_TEST_FUNC_RST_STS = 0x100a, - MPI_TEST_FUNC_RST_FRC = 0x00000003, - MPI_TEST_NIC_FUNC_MASK = 0x00000007, - MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0), - MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e, - MPI_TEST_NIC1_FUNC_SHIFT = 1, - MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4), - MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0, - MPI_TEST_NIC2_FUNC_SHIFT = 5, - MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8), - MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00, - MPI_TEST_FC1_FUNCTION_SHIFT = 9, - MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12), - MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000, - MPI_TEST_FC2_FUNCTION_SHIFT = 13, - - MPI_NIC_READ = 0x00000000, - MPI_NIC_REG_BLOCK = 0x00020000, - MPI_NIC_FUNCTION_SHIFT = 6, -}; - -/* - * Processor Address Register (PROC_ADDR) bit definitions. - */ -enum { - - /* Misc. stuff */ - MAILBOX_COUNT = 16, - MAILBOX_TIMEOUT = 5, - - PROC_ADDR_RDY = (1 << 31), - PROC_ADDR_R = (1 << 30), - PROC_ADDR_ERR = (1 << 29), - PROC_ADDR_DA = (1 << 28), - PROC_ADDR_FUNC0_MBI = 0x00001180, - PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT), - PROC_ADDR_FUNC0_CTL = 0x000011a1, - PROC_ADDR_FUNC2_MBI = 0x00001280, - PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT), - PROC_ADDR_FUNC2_CTL = 0x000012a1, - PROC_ADDR_MPI_RISC = 0x00000000, - PROC_ADDR_MDE = 0x00010000, - PROC_ADDR_REGBLOCK = 0x00020000, - PROC_ADDR_RISC_REG = 0x00030000, -}; - -/* - * System Register (SYS) bit definitions. 
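The buffer-queue page math and the LSW/MSW/LSD/MSD word-splitting macros above are easy to sanity-check in isolation; a self-contained userspace translation (uint64_t stands in for the kernel's u64):

#include <stdint.h>
#include <stdio.h>

#define DB_PAGE_SIZE 4096
#define MAX_DB_PAGES_PER_BQ(x) \
        (((x * sizeof(uint64_t)) / DB_PAGE_SIZE) + \
        (((x * sizeof(uint64_t)) % DB_PAGE_SIZE) ? 1 : 0))

#define LSW(x) ((uint16_t)(x))
#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
#define LSD(x) ((uint32_t)((uint64_t)(x)))
#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 32)))

int main(void)
{
        uint64_t dma = 0x123456789abcdef0ULL;

        /* 512 8-byte buffer-queue entries fit in exactly one 4k page */
        printf("pages for 512 entries: %zu\n", MAX_DB_PAGES_PER_BQ(512));
        /* a 64-bit DMA address split into four 16-bit mailbox words */
        printf("%04x %04x %04x %04x\n",
               MSW(MSD(dma)), LSW(MSD(dma)), MSW(LSD(dma)), LSW(LSD(dma)));
        return 0;
}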
- */ -enum { - SYS_EFE = (1 << 0), - SYS_FAE = (1 << 1), - SYS_MDC = (1 << 2), - SYS_DST = (1 << 3), - SYS_DWC = (1 << 4), - SYS_EVW = (1 << 5), - SYS_OMP_DLY_MASK = 0x3f000000, - /* - * There are no values defined as of edit #15. - */ - SYS_ODI = (1 << 14), -}; - -/* - * Reset/Failover Register (RST_FO) bit definitions. - */ -enum { - RST_FO_TFO = (1 << 0), - RST_FO_RR_MASK = 0x00060000, - RST_FO_RR_CQ_CAM = 0x00000000, - RST_FO_RR_DROP = 0x00000002, - RST_FO_RR_DQ = 0x00000004, - RST_FO_RR_RCV_FUNC_CQ = 0x00000006, - RST_FO_FRB = (1 << 12), - RST_FO_MOP = (1 << 13), - RST_FO_REG = (1 << 14), - RST_FO_FR = (1 << 15), -}; - -/* - * Function Specific Control Register (FSC) bit definitions. - */ -enum { - FSC_DBRST_MASK = 0x00070000, - FSC_DBRST_256 = 0x00000000, - FSC_DBRST_512 = 0x00000001, - FSC_DBRST_768 = 0x00000002, - FSC_DBRST_1024 = 0x00000003, - FSC_DBL_MASK = 0x00180000, - FSC_DBL_DBRST = 0x00000000, - FSC_DBL_MAX_PLD = 0x00000008, - FSC_DBL_MAX_BRST = 0x00000010, - FSC_DBL_128_BYTES = 0x00000018, - FSC_EC = (1 << 5), - FSC_EPC_MASK = 0x00c00000, - FSC_EPC_INBOUND = (1 << 6), - FSC_EPC_OUTBOUND = (1 << 7), - FSC_VM_PAGESIZE_MASK = 0x07000000, - FSC_VM_PAGE_2K = 0x00000100, - FSC_VM_PAGE_4K = 0x00000200, - FSC_VM_PAGE_8K = 0x00000300, - FSC_VM_PAGE_64K = 0x00000600, - FSC_SH = (1 << 11), - FSC_DSB = (1 << 12), - FSC_STE = (1 << 13), - FSC_FE = (1 << 15), -}; - -/* - * Host Command Status Register (CSR) bit definitions. - */ -enum { - CSR_ERR_STS_MASK = 0x0000003f, - /* - * There are no valued defined as of edit #15. - */ - CSR_RR = (1 << 8), - CSR_HRI = (1 << 9), - CSR_RP = (1 << 10), - CSR_CMD_PARM_SHIFT = 22, - CSR_CMD_NOP = 0x00000000, - CSR_CMD_SET_RST = 0x10000000, - CSR_CMD_CLR_RST = 0x20000000, - CSR_CMD_SET_PAUSE = 0x30000000, - CSR_CMD_CLR_PAUSE = 0x40000000, - CSR_CMD_SET_H2R_INT = 0x50000000, - CSR_CMD_CLR_H2R_INT = 0x60000000, - CSR_CMD_PAR_EN = 0x70000000, - CSR_CMD_SET_BAD_PAR = 0x80000000, - CSR_CMD_CLR_BAD_PAR = 0x90000000, - CSR_CMD_CLR_R2PCI_INT = 0xa0000000, -}; - -/* - * Configuration Register (CFG) bit definitions. - */ -enum { - CFG_LRQ = (1 << 0), - CFG_DRQ = (1 << 1), - CFG_LR = (1 << 2), - CFG_DR = (1 << 3), - CFG_LE = (1 << 5), - CFG_LCQ = (1 << 6), - CFG_DCQ = (1 << 7), - CFG_Q_SHIFT = 8, - CFG_Q_MASK = 0x7f000000, -}; - -/* - * Status Register (STS) bit definitions. - */ -enum { - STS_FE = (1 << 0), - STS_PI = (1 << 1), - STS_PL0 = (1 << 2), - STS_PL1 = (1 << 3), - STS_PI0 = (1 << 4), - STS_PI1 = (1 << 5), - STS_FUNC_ID_MASK = 0x000000c0, - STS_FUNC_ID_SHIFT = 6, - STS_F0E = (1 << 8), - STS_F1E = (1 << 9), - STS_F2E = (1 << 10), - STS_F3E = (1 << 11), - STS_NFE = (1 << 12), -}; - -/* - * Interrupt Enable Register (INTR_EN) bit definitions. - */ -enum { - INTR_EN_INTR_MASK = 0x007f0000, - INTR_EN_TYPE_MASK = 0x03000000, - INTR_EN_TYPE_ENABLE = 0x00000100, - INTR_EN_TYPE_DISABLE = 0x00000200, - INTR_EN_TYPE_READ = 0x00000300, - INTR_EN_IHD = (1 << 13), - INTR_EN_IHD_MASK = (INTR_EN_IHD << 16), - INTR_EN_EI = (1 << 14), - INTR_EN_EN = (1 << 15), -}; - -/* - * Interrupt Mask Register (INTR_MASK) bit definitions. - */ -enum { - INTR_MASK_PI = (1 << 0), - INTR_MASK_HL0 = (1 << 1), - INTR_MASK_LH0 = (1 << 2), - INTR_MASK_HL1 = (1 << 3), - INTR_MASK_LH1 = (1 << 4), - INTR_MASK_SE = (1 << 5), - INTR_MASK_LSC = (1 << 6), - INTR_MASK_MC = (1 << 7), - INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC, -}; - -/* - * Register (REV_ID) bit definitions. 
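The SEM comment above gives the idiom for the XGMAC0 field; generalized, each resource owns a two-bit value field plus a write-enable mask 16 bits up. A hedged sketch of a try-acquire built on the qlge register accessors (ql_read32/ql_write32 are the driver's inline MMIO helpers; the success test here is illustrative, not the exact in-tree code):

static int ql_sem_take(struct ql_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
        /* sem_mask enables the write to this field and sem_bits is
         * SEM_SET shifted into it, e.g. SEM_FLASH_MASK with
         * SEM_SET << SEM_FLASH_SHIFT for the flash semaphore. */
        ql_write32(qdev, SEM, sem_mask | sem_bits);
        /* ownership holds if our SET value stuck in the field */
        return (ql_read32(qdev, SEM) & sem_bits) == sem_bits;
}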
- */ -enum { - REV_ID_MASK = 0x0000000f, - REV_ID_NICROLL_SHIFT = 0, - REV_ID_NICREV_SHIFT = 4, - REV_ID_XGROLL_SHIFT = 8, - REV_ID_XGREV_SHIFT = 12, - REV_ID_CHIPREV_SHIFT = 28, -}; - -/* - * Force ECC Error Register (FRC_ECC_ERR) bit definitions. - */ -enum { - FRC_ECC_ERR_VW = (1 << 12), - FRC_ECC_ERR_VB = (1 << 13), - FRC_ECC_ERR_NI = (1 << 14), - FRC_ECC_ERR_NO = (1 << 15), - FRC_ECC_PFE_SHIFT = 16, - FRC_ECC_ERR_DO = (1 << 18), - FRC_ECC_P14 = (1 << 19), -}; - -/* - * Error Status Register (ERR_STS) bit definitions. - */ -enum { - ERR_STS_NOF = (1 << 0), - ERR_STS_NIF = (1 << 1), - ERR_STS_DRP = (1 << 2), - ERR_STS_XGP = (1 << 3), - ERR_STS_FOU = (1 << 4), - ERR_STS_FOC = (1 << 5), - ERR_STS_FOF = (1 << 6), - ERR_STS_FIU = (1 << 7), - ERR_STS_FIC = (1 << 8), - ERR_STS_FIF = (1 << 9), - ERR_STS_MOF = (1 << 10), - ERR_STS_TA = (1 << 11), - ERR_STS_MA = (1 << 12), - ERR_STS_MPE = (1 << 13), - ERR_STS_SCE = (1 << 14), - ERR_STS_STE = (1 << 15), - ERR_STS_FOW = (1 << 16), - ERR_STS_UE = (1 << 17), - ERR_STS_MCH = (1 << 26), - ERR_STS_LOC_SHIFT = 27, -}; - -/* - * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions. - */ -enum { - RAM_DBG_ADDR_FW = (1 << 30), - RAM_DBG_ADDR_FR = (1 << 31), -}; - -/* - * Semaphore Register (SEM) bit definitions. - */ -enum { - /* - * Example: - * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT) - */ - SEM_CLEAR = 0, - SEM_SET = 1, - SEM_FORCE = 3, - SEM_XGMAC0_SHIFT = 0, - SEM_XGMAC1_SHIFT = 2, - SEM_ICB_SHIFT = 4, - SEM_MAC_ADDR_SHIFT = 6, - SEM_FLASH_SHIFT = 8, - SEM_PROBE_SHIFT = 10, - SEM_RT_IDX_SHIFT = 12, - SEM_PROC_REG_SHIFT = 14, - SEM_XGMAC0_MASK = 0x00030000, - SEM_XGMAC1_MASK = 0x000c0000, - SEM_ICB_MASK = 0x00300000, - SEM_MAC_ADDR_MASK = 0x00c00000, - SEM_FLASH_MASK = 0x03000000, - SEM_PROBE_MASK = 0x0c000000, - SEM_RT_IDX_MASK = 0x30000000, - SEM_PROC_REG_MASK = 0xc0000000, -}; - -/* - * 10G MAC Address Register (XGMAC_ADDR) bit definitions. 
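XGMAC_ADDR_RDY/_R above imply the usual indirect-access dance: post the offset with the read bit, poll ready, then fetch XGMAC_DATA. A sketch, assuming a ql_wait_reg_rdy() helper that polls a register for the ready bit and fails on the error bit (the in-tree driver has a helper of this shape):

static int ql_xgmac_read(struct ql_adapter *qdev, u32 reg, u32 *data)
{
        int status;

        /* wait for any previous XGMAC operation to finish */
        status = ql_wait_reg_rdy(qdev, XGMAC_ADDR, XGMAC_ADDR_RDY,
                                 XGMAC_ADDR_XME);
        if (status)
                return status;
        /* post the register offset with the read strobe */
        ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
        status = ql_wait_reg_rdy(qdev, XGMAC_ADDR, XGMAC_ADDR_RDY,
                                 XGMAC_ADDR_XME);
        if (status)
                return status;
        *data = ql_read32(qdev, XGMAC_DATA);
        return 0;
}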
- */ -enum { - XGMAC_ADDR_RDY = (1 << 31), - XGMAC_ADDR_R = (1 << 30), - XGMAC_ADDR_XME = (1 << 29), - - /* XGMAC control registers */ - PAUSE_SRC_LO = 0x00000100, - PAUSE_SRC_HI = 0x00000104, - GLOBAL_CFG = 0x00000108, - GLOBAL_CFG_RESET = (1 << 0), - GLOBAL_CFG_JUMBO = (1 << 6), - GLOBAL_CFG_TX_STAT_EN = (1 << 10), - GLOBAL_CFG_RX_STAT_EN = (1 << 11), - TX_CFG = 0x0000010c, - TX_CFG_RESET = (1 << 0), - TX_CFG_EN = (1 << 1), - TX_CFG_PREAM = (1 << 2), - RX_CFG = 0x00000110, - RX_CFG_RESET = (1 << 0), - RX_CFG_EN = (1 << 1), - RX_CFG_PREAM = (1 << 2), - FLOW_CTL = 0x0000011c, - PAUSE_OPCODE = 0x00000120, - PAUSE_TIMER = 0x00000124, - PAUSE_FRM_DEST_LO = 0x00000128, - PAUSE_FRM_DEST_HI = 0x0000012c, - MAC_TX_PARAMS = 0x00000134, - MAC_TX_PARAMS_JUMBO = (1 << 31), - MAC_TX_PARAMS_SIZE_SHIFT = 16, - MAC_RX_PARAMS = 0x00000138, - MAC_SYS_INT = 0x00000144, - MAC_SYS_INT_MASK = 0x00000148, - MAC_MGMT_INT = 0x0000014c, - MAC_MGMT_IN_MASK = 0x00000150, - EXT_ARB_MODE = 0x000001fc, - - /* XGMAC TX statistics registers */ - TX_PKTS = 0x00000200, - TX_BYTES = 0x00000208, - TX_MCAST_PKTS = 0x00000210, - TX_BCAST_PKTS = 0x00000218, - TX_UCAST_PKTS = 0x00000220, - TX_CTL_PKTS = 0x00000228, - TX_PAUSE_PKTS = 0x00000230, - TX_64_PKT = 0x00000238, - TX_65_TO_127_PKT = 0x00000240, - TX_128_TO_255_PKT = 0x00000248, - TX_256_511_PKT = 0x00000250, - TX_512_TO_1023_PKT = 0x00000258, - TX_1024_TO_1518_PKT = 0x00000260, - TX_1519_TO_MAX_PKT = 0x00000268, - TX_UNDERSIZE_PKT = 0x00000270, - TX_OVERSIZE_PKT = 0x00000278, - - /* XGMAC statistics control registers */ - RX_HALF_FULL_DET = 0x000002a0, - TX_HALF_FULL_DET = 0x000002a4, - RX_OVERFLOW_DET = 0x000002a8, - TX_OVERFLOW_DET = 0x000002ac, - RX_HALF_FULL_MASK = 0x000002b0, - TX_HALF_FULL_MASK = 0x000002b4, - RX_OVERFLOW_MASK = 0x000002b8, - TX_OVERFLOW_MASK = 0x000002bc, - STAT_CNT_CTL = 0x000002c0, - STAT_CNT_CTL_CLEAR_TX = (1 << 0), - STAT_CNT_CTL_CLEAR_RX = (1 << 1), - AUX_RX_HALF_FULL_DET = 0x000002d0, - AUX_TX_HALF_FULL_DET = 0x000002d4, - AUX_RX_OVERFLOW_DET = 0x000002d8, - AUX_TX_OVERFLOW_DET = 0x000002dc, - AUX_RX_HALF_FULL_MASK = 0x000002f0, - AUX_TX_HALF_FULL_MASK = 0x000002f4, - AUX_RX_OVERFLOW_MASK = 0x000002f8, - AUX_TX_OVERFLOW_MASK = 0x000002fc, - - /* XGMAC RX statistics registers */ - RX_BYTES = 0x00000300, - RX_BYTES_OK = 0x00000308, - RX_PKTS = 0x00000310, - RX_PKTS_OK = 0x00000318, - RX_BCAST_PKTS = 0x00000320, - RX_MCAST_PKTS = 0x00000328, - RX_UCAST_PKTS = 0x00000330, - RX_UNDERSIZE_PKTS = 0x00000338, - RX_OVERSIZE_PKTS = 0x00000340, - RX_JABBER_PKTS = 0x00000348, - RX_UNDERSIZE_FCERR_PKTS = 0x00000350, - RX_DROP_EVENTS = 0x00000358, - RX_FCERR_PKTS = 0x00000360, - RX_ALIGN_ERR = 0x00000368, - RX_SYMBOL_ERR = 0x00000370, - RX_MAC_ERR = 0x00000378, - RX_CTL_PKTS = 0x00000380, - RX_PAUSE_PKTS = 0x00000388, - RX_64_PKTS = 0x00000390, - RX_65_TO_127_PKTS = 0x00000398, - RX_128_255_PKTS = 0x000003a0, - RX_256_511_PKTS = 0x000003a8, - RX_512_TO_1023_PKTS = 0x000003b0, - RX_1024_TO_1518_PKTS = 0x000003b8, - RX_1519_TO_MAX_PKTS = 0x000003c0, - RX_LEN_ERR_PKTS = 0x000003c8, - - /* XGMAC MDIO control registers */ - MDIO_TX_DATA = 0x00000400, - MDIO_RX_DATA = 0x00000410, - MDIO_CMD = 0x00000420, - MDIO_PHY_ADDR = 0x00000430, - MDIO_PORT = 0x00000440, - MDIO_STATUS = 0x00000450, - - XGMAC_REGISTER_END = 0x00000740, -}; - -/* - * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions. 
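The TX/RX statistics registers above are 64-bit counters on 8-byte strides (TX_PKTS at 0x200, TX_BYTES at 0x208, and so on), so a 64-bit read is two 32-bit indirect reads. A sketch on top of the ql_xgmac_read() example from before (low word first; an assumption, not verified against hardware):

static int ql_xgmac_read64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
        u32 lo = 0, hi = 0;
        int status;

        status = ql_xgmac_read(qdev, reg, &lo);
        if (status)
                return status;
        status = ql_xgmac_read(qdev, reg + 4, &hi);
        if (status)
                return status;
        *data = ((u64)hi << 32) | lo;
        return 0;
}

/* e.g. ql_xgmac_read64(qdev, TX_PKTS, &qdev->nic_stats.tx_pkts); */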
- */ -enum { - ETS_QUEUE_SHIFT = 29, - ETS_REF = (1 << 26), - ETS_RS = (1 << 27), - ETS_P = (1 << 28), - ETS_FC_COS_SHIFT = 23, -}; - -/* - * Flash Address Register (FLASH_ADDR) bit definitions. - */ -enum { - FLASH_ADDR_RDY = (1 << 31), - FLASH_ADDR_R = (1 << 30), - FLASH_ADDR_ERR = (1 << 29), -}; - -/* - * Stop CQ Processing Register (CQ_STOP) bit definitions. - */ -enum { - CQ_STOP_QUEUE_MASK = (0x007f0000), - CQ_STOP_TYPE_MASK = (0x03000000), - CQ_STOP_TYPE_START = 0x00000100, - CQ_STOP_TYPE_STOP = 0x00000200, - CQ_STOP_TYPE_READ = 0x00000300, - CQ_STOP_EN = (1 << 15), -}; - -/* - * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions. - */ -enum { - MAC_ADDR_IDX_SHIFT = 4, - MAC_ADDR_TYPE_SHIFT = 16, - MAC_ADDR_TYPE_COUNT = 10, - MAC_ADDR_TYPE_MASK = 0x000f0000, - MAC_ADDR_TYPE_CAM_MAC = 0x00000000, - MAC_ADDR_TYPE_MULTI_MAC = 0x00010000, - MAC_ADDR_TYPE_VLAN = 0x00020000, - MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000, - MAC_ADDR_TYPE_FC_MAC = 0x00040000, - MAC_ADDR_TYPE_MGMT_MAC = 0x00050000, - MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000, - MAC_ADDR_TYPE_MGMT_V4 = 0x00070000, - MAC_ADDR_TYPE_MGMT_V6 = 0x00080000, - MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000, - MAC_ADDR_ADR = (1 << 25), - MAC_ADDR_RS = (1 << 26), - MAC_ADDR_E = (1 << 27), - MAC_ADDR_MR = (1 << 30), - MAC_ADDR_MW = (1 << 31), - MAX_MULTICAST_ENTRIES = 32, - - /* Entry count and words per entry - * for each address type in the filter. - */ - MAC_ADDR_MAX_CAM_ENTRIES = 512, - MAC_ADDR_MAX_CAM_WCOUNT = 3, - MAC_ADDR_MAX_MULTICAST_ENTRIES = 32, - MAC_ADDR_MAX_MULTICAST_WCOUNT = 2, - MAC_ADDR_MAX_VLAN_ENTRIES = 4096, - MAC_ADDR_MAX_VLAN_WCOUNT = 1, - MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096, - MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1, - MAC_ADDR_MAX_FC_MAC_ENTRIES = 4, - MAC_ADDR_MAX_FC_MAC_WCOUNT = 2, - MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8, - MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2, - MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16, - MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1, - MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4, - MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1, - MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4, - MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4, - MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4, - MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1, -}; - -/* - * MAC Protocol Address Index Register (SPLT_HDR) bit definitions. - */ -enum { - SPLT_HDR_EP = (1 << 31), -}; - -/* - * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions. - */ -enum { - FC_RCV_CFG_ECT = (1 << 15), - FC_RCV_CFG_DFH = (1 << 20), - FC_RCV_CFG_DVF = (1 << 21), - FC_RCV_CFG_RCE = (1 << 27), - FC_RCV_CFG_RFE = (1 << 28), - FC_RCV_CFG_TEE = (1 << 29), - FC_RCV_CFG_TCE = (1 << 30), - FC_RCV_CFG_TFE = (1 << 31), -}; - -/* - * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions. - */ -enum { - NIC_RCV_CFG_PPE = (1 << 0), - NIC_RCV_CFG_VLAN_MASK = 0x00060000, - NIC_RCV_CFG_VLAN_ALL = 0x00000000, - NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002, - NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004, - NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006, - NIC_RCV_CFG_RV = (1 << 3), - NIC_RCV_CFG_DFQ_MASK = (0x7f000000), - NIC_RCV_CFG_DFQ_SHIFT = 8, - NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */ -}; - -/* - * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions. 
- */ -enum { - MGMT_RCV_CFG_ARP = (1 << 0), - MGMT_RCV_CFG_DHC = (1 << 1), - MGMT_RCV_CFG_DHS = (1 << 2), - MGMT_RCV_CFG_NP = (1 << 3), - MGMT_RCV_CFG_I6N = (1 << 4), - MGMT_RCV_CFG_I6R = (1 << 5), - MGMT_RCV_CFG_DH6 = (1 << 6), - MGMT_RCV_CFG_UD1 = (1 << 7), - MGMT_RCV_CFG_UD0 = (1 << 8), - MGMT_RCV_CFG_BCT = (1 << 9), - MGMT_RCV_CFG_MCT = (1 << 10), - MGMT_RCV_CFG_DM = (1 << 11), - MGMT_RCV_CFG_RM = (1 << 12), - MGMT_RCV_CFG_STL = (1 << 13), - MGMT_RCV_CFG_VLAN_MASK = 0xc0000000, - MGMT_RCV_CFG_VLAN_ALL = 0x00000000, - MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000, - MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000, - MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000, -}; - -/* - * Routing Index Register (RT_IDX) bit definitions. - */ -enum { - RT_IDX_IDX_SHIFT = 8, - RT_IDX_TYPE_MASK = 0x000f0000, - RT_IDX_TYPE_SHIFT = 16, - RT_IDX_TYPE_RT = 0x00000000, - RT_IDX_TYPE_RT_INV = 0x00010000, - RT_IDX_TYPE_NICQ = 0x00020000, - RT_IDX_TYPE_NICQ_INV = 0x00030000, - RT_IDX_DST_MASK = 0x00700000, - RT_IDX_DST_RSS = 0x00000000, - RT_IDX_DST_CAM_Q = 0x00100000, - RT_IDX_DST_COS_Q = 0x00200000, - RT_IDX_DST_DFLT_Q = 0x00300000, - RT_IDX_DST_DEST_Q = 0x00400000, - RT_IDX_RS = (1 << 26), - RT_IDX_E = (1 << 27), - RT_IDX_MR = (1 << 30), - RT_IDX_MW = (1 << 31), - - /* Nic Queue format - type 2 bits */ - RT_IDX_BCAST = (1 << 0), - RT_IDX_MCAST = (1 << 1), - RT_IDX_MCAST_MATCH = (1 << 2), - RT_IDX_MCAST_REG_MATCH = (1 << 3), - RT_IDX_MCAST_HASH_MATCH = (1 << 4), - RT_IDX_FC_MACH = (1 << 5), - RT_IDX_ETH_FCOE = (1 << 6), - RT_IDX_CAM_HIT = (1 << 7), - RT_IDX_CAM_BIT0 = (1 << 8), - RT_IDX_CAM_BIT1 = (1 << 9), - RT_IDX_VLAN_TAG = (1 << 10), - RT_IDX_VLAN_MATCH = (1 << 11), - RT_IDX_VLAN_FILTER = (1 << 12), - RT_IDX_ETH_SKIP1 = (1 << 13), - RT_IDX_ETH_SKIP2 = (1 << 14), - RT_IDX_BCAST_MCAST_MATCH = (1 << 15), - RT_IDX_802_3 = (1 << 16), - RT_IDX_LLDP = (1 << 17), - RT_IDX_UNUSED018 = (1 << 18), - RT_IDX_UNUSED019 = (1 << 19), - RT_IDX_UNUSED20 = (1 << 20), - RT_IDX_UNUSED21 = (1 << 21), - RT_IDX_ERR = (1 << 22), - RT_IDX_VALID = (1 << 23), - RT_IDX_TU_CSUM_ERR = (1 << 24), - RT_IDX_IP_CSUM_ERR = (1 << 25), - RT_IDX_MAC_ERR = (1 << 26), - RT_IDX_RSS_TCP6 = (1 << 27), - RT_IDX_RSS_TCP4 = (1 << 28), - RT_IDX_RSS_IPV6 = (1 << 29), - RT_IDX_RSS_IPV4 = (1 << 30), - RT_IDX_RSS_MATCH = (1 << 31), - - /* Hierarchy for the NIC Queue Mask */ - RT_IDX_ALL_ERR_SLOT = 0, - RT_IDX_MAC_ERR_SLOT = 0, - RT_IDX_IP_CSUM_ERR_SLOT = 1, - RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2, - RT_IDX_BCAST_SLOT = 3, - RT_IDX_MCAST_MATCH_SLOT = 4, - RT_IDX_ALLMULTI_SLOT = 5, - RT_IDX_UNUSED6_SLOT = 6, - RT_IDX_UNUSED7_SLOT = 7, - RT_IDX_RSS_MATCH_SLOT = 8, - RT_IDX_RSS_IPV4_SLOT = 8, - RT_IDX_RSS_IPV6_SLOT = 9, - RT_IDX_RSS_TCP4_SLOT = 10, - RT_IDX_RSS_TCP6_SLOT = 11, - RT_IDX_CAM_HIT_SLOT = 12, - RT_IDX_UNUSED013 = 13, - RT_IDX_UNUSED014 = 14, - RT_IDX_PROMISCUOUS_SLOT = 15, - RT_IDX_MAX_RT_SLOTS = 8, - RT_IDX_MAX_NIC_SLOTS = 16, -}; - -/* - * Serdes Address Register (XG_SERDES_ADDR) bit definitions. - */ -enum { - XG_SERDES_ADDR_RDY = (1 << 31), - XG_SERDES_ADDR_R = (1 << 30), - - XG_SERDES_ADDR_STS = 0x00001E06, - XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005, - XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a, - XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001, - - /* Serdes coredump definitions. 
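A routing-register value per the RT_IDX layout above combines a type, a destination, a slot index, and the enable bit. A sketch of just the value construction, assuming the caller handles the RT_IDX semaphore and the RT_IDX/RT_DATA write sequence:

static u32 ql_route_value(int slot, u32 dest, int enable)
{
        u32 value = RT_IDX_TYPE_NICQ |          /* NIC queue format */
                    dest |                      /* e.g. RT_IDX_DST_DFLT_Q */
                    (slot << RT_IDX_IDX_SHIFT); /* e.g. RT_IDX_BCAST_SLOT */

        if (enable)
                value |= RT_IDX_E;
        return value;
}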
*/ - XG_SERDES_XAUI_AN_START = 0x00000000, - XG_SERDES_XAUI_AN_END = 0x00000034, - XG_SERDES_XAUI_HSS_PCS_START = 0x00000800, - XG_SERDES_XAUI_HSS_PCS_END = 0x0000880, - XG_SERDES_XFI_AN_START = 0x00001000, - XG_SERDES_XFI_AN_END = 0x00001034, - XG_SERDES_XFI_TRAIN_START = 0x10001050, - XG_SERDES_XFI_TRAIN_END = 0x1000107C, - XG_SERDES_XFI_HSS_PCS_START = 0x00001800, - XG_SERDES_XFI_HSS_PCS_END = 0x00001838, - XG_SERDES_XFI_HSS_TX_START = 0x00001c00, - XG_SERDES_XFI_HSS_TX_END = 0x00001c1f, - XG_SERDES_XFI_HSS_RX_START = 0x00001c40, - XG_SERDES_XFI_HSS_RX_END = 0x00001c5f, - XG_SERDES_XFI_HSS_PLL_START = 0x00001e00, - XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f, -}; - -/* - * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions. - */ -enum { - PRB_MX_ADDR_ARE = (1 << 16), - PRB_MX_ADDR_UP = (1 << 15), - PRB_MX_ADDR_SWP = (1 << 14), - - /* Module select values. */ - PRB_MX_ADDR_MAX_MODS = 21, - PRB_MX_ADDR_MOD_SEL_SHIFT = 9, - PRB_MX_ADDR_MOD_SEL_TBD = 0, - PRB_MX_ADDR_MOD_SEL_IDE1 = 1, - PRB_MX_ADDR_MOD_SEL_IDE2 = 2, - PRB_MX_ADDR_MOD_SEL_FRB = 3, - PRB_MX_ADDR_MOD_SEL_ODE1 = 4, - PRB_MX_ADDR_MOD_SEL_ODE2 = 5, - PRB_MX_ADDR_MOD_SEL_DA1 = 6, - PRB_MX_ADDR_MOD_SEL_DA2 = 7, - PRB_MX_ADDR_MOD_SEL_IMP1 = 8, - PRB_MX_ADDR_MOD_SEL_IMP2 = 9, - PRB_MX_ADDR_MOD_SEL_OMP1 = 10, - PRB_MX_ADDR_MOD_SEL_OMP2 = 11, - PRB_MX_ADDR_MOD_SEL_ORS1 = 12, - PRB_MX_ADDR_MOD_SEL_ORS2 = 13, - PRB_MX_ADDR_MOD_SEL_REG = 14, - PRB_MX_ADDR_MOD_SEL_MAC1 = 16, - PRB_MX_ADDR_MOD_SEL_MAC2 = 17, - PRB_MX_ADDR_MOD_SEL_VQM1 = 18, - PRB_MX_ADDR_MOD_SEL_VQM2 = 19, - PRB_MX_ADDR_MOD_SEL_MOP = 20, - /* Bit fields indicating which modules - * are valid for each clock domain. - */ - PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7, - PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1, - PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309, - PRB_MX_ADDR_VALID_FC_MOD = 0x00003001, - PRB_MX_ADDR_VALID_TOTAL = 34, - - /* Clock domain values. 
*/ - PRB_MX_ADDR_CLOCK_SHIFT = 6, - PRB_MX_ADDR_SYS_CLOCK = 0, - PRB_MX_ADDR_PCI_CLOCK = 2, - PRB_MX_ADDR_FC_CLOCK = 5, - PRB_MX_ADDR_XGM_CLOCK = 6, - - PRB_MX_ADDR_MAX_MUX = 64, -}; - -/* - * Control Register Set Map - */ -enum { - PROC_ADDR = 0, /* Use semaphore */ - PROC_DATA = 0x04, /* Use semaphore */ - SYS = 0x08, - RST_FO = 0x0c, - FSC = 0x10, - CSR = 0x14, - LED = 0x18, - ICB_RID = 0x1c, /* Use semaphore */ - ICB_L = 0x20, /* Use semaphore */ - ICB_H = 0x24, /* Use semaphore */ - CFG = 0x28, - BIOS_ADDR = 0x2c, - STS = 0x30, - INTR_EN = 0x34, - INTR_MASK = 0x38, - ISR1 = 0x3c, - ISR2 = 0x40, - ISR3 = 0x44, - ISR4 = 0x48, - REV_ID = 0x4c, - FRC_ECC_ERR = 0x50, - ERR_STS = 0x54, - RAM_DBG_ADDR = 0x58, - RAM_DBG_DATA = 0x5c, - ECC_ERR_CNT = 0x60, - SEM = 0x64, - GPIO_1 = 0x68, /* Use semaphore */ - GPIO_2 = 0x6c, /* Use semaphore */ - GPIO_3 = 0x70, /* Use semaphore */ - RSVD2 = 0x74, - XGMAC_ADDR = 0x78, /* Use semaphore */ - XGMAC_DATA = 0x7c, /* Use semaphore */ - NIC_ETS = 0x80, - CNA_ETS = 0x84, - FLASH_ADDR = 0x88, /* Use semaphore */ - FLASH_DATA = 0x8c, /* Use semaphore */ - CQ_STOP = 0x90, - PAGE_TBL_RID = 0x94, - WQ_PAGE_TBL_LO = 0x98, - WQ_PAGE_TBL_HI = 0x9c, - CQ_PAGE_TBL_LO = 0xa0, - CQ_PAGE_TBL_HI = 0xa4, - MAC_ADDR_IDX = 0xa8, /* Use semaphore */ - MAC_ADDR_DATA = 0xac, /* Use semaphore */ - COS_DFLT_CQ1 = 0xb0, - COS_DFLT_CQ2 = 0xb4, - ETYPE_SKIP1 = 0xb8, - ETYPE_SKIP2 = 0xbc, - SPLT_HDR = 0xc0, - FC_PAUSE_THRES = 0xc4, - NIC_PAUSE_THRES = 0xc8, - FC_ETHERTYPE = 0xcc, - FC_RCV_CFG = 0xd0, - NIC_RCV_CFG = 0xd4, - FC_COS_TAGS = 0xd8, - NIC_COS_TAGS = 0xdc, - MGMT_RCV_CFG = 0xe0, - RT_IDX = 0xe4, - RT_DATA = 0xe8, - RSVD7 = 0xec, - XG_SERDES_ADDR = 0xf0, - XG_SERDES_DATA = 0xf4, - PRB_MX_ADDR = 0xf8, /* Use semaphore */ - PRB_MX_DATA = 0xfc, /* Use semaphore */ -}; - -#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -#define SMALL_BUFFER_SIZE 256 -#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE -#define SPLT_SETTING FSC_DBRST_1024 -#define SPLT_LEN 0 -#define QLGE_SB_PAD 0 -#else -#define SMALL_BUFFER_SIZE 512 -#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2) -#define SPLT_SETTING FSC_SH -#define SPLT_LEN (SPLT_HDR_EP | \ - min(SMALL_BUF_MAP_SIZE, 1023)) -#define QLGE_SB_PAD 32 -#endif - -/* - * CAM output format. - */ -enum { - CAM_OUT_ROUTE_FC = 0, - CAM_OUT_ROUTE_NIC = 1, - CAM_OUT_FUNC_SHIFT = 2, - CAM_OUT_RV = (1 << 4), - CAM_OUT_SH = (1 << 15), - CAM_OUT_CQ_ID_SHIFT = 5, -}; - -/* - * Mailbox definitions - */ -enum { - /* Asynchronous Event Notifications */ - AEN_SYS_ERR = 0x00008002, - AEN_LINK_UP = 0x00008011, - AEN_LINK_DOWN = 0x00008012, - AEN_IDC_CMPLT = 0x00008100, - AEN_IDC_REQ = 0x00008101, - AEN_IDC_EXT = 0x00008102, - AEN_DCBX_CHG = 0x00008110, - AEN_AEN_LOST = 0x00008120, - AEN_AEN_SFP_IN = 0x00008130, - AEN_AEN_SFP_OUT = 0x00008131, - AEN_FW_INIT_DONE = 0x00008400, - AEN_FW_INIT_FAIL = 0x00008401, - - /* Mailbox Command Opcodes. 
*/ - MB_CMD_NOP = 0x00000000, - MB_CMD_EX_FW = 0x00000002, - MB_CMD_MB_TEST = 0x00000006, - MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */ - MB_CMD_ABOUT_FW = 0x00000008, - MB_CMD_COPY_RISC_RAM = 0x0000000a, - MB_CMD_LOAD_RISC_RAM = 0x0000000b, - MB_CMD_DUMP_RISC_RAM = 0x0000000c, - MB_CMD_WRITE_RAM = 0x0000000d, - MB_CMD_INIT_RISC_RAM = 0x0000000e, - MB_CMD_READ_RAM = 0x0000000f, - MB_CMD_STOP_FW = 0x00000014, - MB_CMD_MAKE_SYS_ERR = 0x0000002a, - MB_CMD_WRITE_SFP = 0x00000030, - MB_CMD_READ_SFP = 0x00000031, - MB_CMD_INIT_FW = 0x00000060, - MB_CMD_GET_IFCB = 0x00000061, - MB_CMD_GET_FW_STATE = 0x00000069, - MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */ - MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */ - MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */ - MB_WOL_DISABLE = 0, - MB_WOL_MAGIC_PKT = (1 << 1), - MB_WOL_FLTR = (1 << 2), - MB_WOL_UCAST = (1 << 3), - MB_WOL_MCAST = (1 << 4), - MB_WOL_BCAST = (1 << 5), - MB_WOL_LINK_UP = (1 << 6), - MB_WOL_LINK_DOWN = (1 << 7), - MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */ - MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */ - MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */ - MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */ - MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */ - MB_CMD_SET_WOL_IMMED = 0x00000115, - MB_CMD_PORT_RESET = 0x00000120, - MB_CMD_SET_PORT_CFG = 0x00000122, - MB_CMD_GET_PORT_CFG = 0x00000123, - MB_CMD_GET_LINK_STS = 0x00000124, - MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */ - QL_LED_BLINK = 0x03e803e8, - MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */ - MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ - MB_SET_MPI_TFK_STOP = (1 << 0), - MB_SET_MPI_TFK_RESUME = (1 << 1), - MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ - MB_GET_MPI_TFK_STOPPED = (1 << 0), - MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), - /* Sub-commands for IDC request. - * This describes the reason for the - * IDC request. - */ - MB_CMD_IOP_NONE = 0x0000, - MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001, - MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002, - MB_CMD_IOP_PREP_LINK_DOWN = 0x0010, - MB_CMD_IOP_DVR_START = 0x0100, - MB_CMD_IOP_FLASH_ACC = 0x0101, - MB_CMD_IOP_RESTART_MPI = 0x0102, - MB_CMD_IOP_CORE_DUMP_MPI = 0x0103, - - /* Mailbox Command Status. */ - MB_CMD_STS_GOOD = 0x00004000, /* Success. */ - MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */ - MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */ - MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */ - MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */ - MB_CMD_STS_ERR = 0x00004005, /* System Error. */ - MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */ -}; - -struct mbox_params { - u32 mbox_in[MAILBOX_COUNT]; - u32 mbox_out[MAILBOX_COUNT]; - int in_count; - int out_count; -}; - -struct flash_params_8012 { - u8 dev_id_str[4]; - __le16 size; - __le16 csum; - __le16 ver; - __le16 sub_dev_id; - u8 mac_addr[6]; - __le16 res; -}; - -/* 8000 device's flash is a different structure - * at a different offset in flash. - */ -#define FUNC0_FLASH_OFFSET 0x140200 -#define FUNC1_FLASH_OFFSET 0x140600 - -/* Flash related data structures. 
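struct mbox_params above is the whole mailbox calling convention: opcode and arguments in mbox_in[], in_count/out_count sized per command, status in mbox_out[0]. A schematic ABOUT_FW exchange, assuming the driver's ql_mailbox_command() doorbell/poll primitive from qlge_mpi.c (the out-word meanings here are illustrative):

static int ql_about_fw_example(struct ql_adapter *qdev, u32 *fw_rev)
{
        struct mbox_params mbc;
        int status;

        memset(&mbc, 0, sizeof(mbc));
        mbc.in_count = 1;
        mbc.out_count = 3;
        mbc.mbox_in[0] = MB_CMD_ABOUT_FW;

        status = ql_mailbox_command(qdev, &mbc);
        if (status)
                return status;
        if (mbc.mbox_out[0] != MB_CMD_STS_GOOD)
                return -EIO;
        *fw_rev = mbc.mbox_out[1];      /* assumed: revision in word 1 */
        return 0;
}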
*/ -struct flash_params_8000 { - u8 dev_id_str[4]; /* "8000" */ - __le16 ver; - __le16 size; - __le16 csum; - __le16 reserved0; - __le16 total_size; - __le16 entry_count; - u8 data_type0; - u8 data_size0; - u8 mac_addr[6]; - u8 data_type1; - u8 data_size1; - u8 mac_addr1[6]; - u8 data_type2; - u8 data_size2; - __le16 vlan_id; - u8 data_type3; - u8 data_size3; - __le16 last; - u8 reserved1[464]; - __le16 subsys_ven_id; - __le16 subsys_dev_id; - u8 reserved2[4]; -}; - -union flash_params { - struct flash_params_8012 flash_params_8012; - struct flash_params_8000 flash_params_8000; -}; - -/* - * doorbell space for the rx ring context - */ -struct rx_doorbell_context { - u32 cnsmr_idx; /* 0x00 */ - u32 valid; /* 0x04 */ - u32 reserved[4]; /* 0x08-0x14 */ - u32 lbq_prod_idx; /* 0x18 */ - u32 sbq_prod_idx; /* 0x1c */ -}; - -/* - * doorbell space for the tx ring context - */ -struct tx_doorbell_context { - u32 prod_idx; /* 0x00 */ - u32 valid; /* 0x04 */ - u32 reserved[4]; /* 0x08-0x14 */ - u32 lbq_prod_idx; /* 0x18 */ - u32 sbq_prod_idx; /* 0x1c */ -}; - -/* DATA STRUCTURES SHARED WITH HARDWARE. */ -struct tx_buf_desc { - __le64 addr; - __le32 len; -#define TX_DESC_LEN_MASK 0x000fffff -#define TX_DESC_C 0x40000000 -#define TX_DESC_E 0x80000000 -} __packed; - -/* - * IOCB Definitions... - */ - -#define OPCODE_OB_MAC_IOCB 0x01 -#define OPCODE_OB_MAC_TSO_IOCB 0x02 -#define OPCODE_IB_MAC_IOCB 0x20 -#define OPCODE_IB_MPI_IOCB 0x21 -#define OPCODE_IB_AE_IOCB 0x3f - -struct ob_mac_iocb_req { - u8 opcode; - u8 flags1; -#define OB_MAC_IOCB_REQ_OI 0x01 -#define OB_MAC_IOCB_REQ_I 0x02 -#define OB_MAC_IOCB_REQ_D 0x08 -#define OB_MAC_IOCB_REQ_F 0x10 - u8 flags2; - u8 flags3; -#define OB_MAC_IOCB_DFP 0x02 -#define OB_MAC_IOCB_V 0x04 - __le32 reserved1[2]; - __le16 frame_len; -#define OB_MAC_IOCB_LEN_MASK 0x3ffff - __le16 reserved2; - u32 tid; - u32 txq_idx; - __le32 reserved3; - __le16 vlan_tci; - __le16 reserved4; - struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; -} __packed; - -struct ob_mac_iocb_rsp { - u8 opcode; /* */ - u8 flags1; /* */ -#define OB_MAC_IOCB_RSP_OI 0x01 /* */ -#define OB_MAC_IOCB_RSP_I 0x02 /* */ -#define OB_MAC_IOCB_RSP_E 0x08 /* */ -#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */ -#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */ -#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */ - u8 flags2; /* */ - u8 flags3; /* */ -#define OB_MAC_IOCB_RSP_B 0x80 /* */ - u32 tid; - u32 txq_idx; - __le32 reserved[13]; -} __packed; - -struct ob_mac_tso_iocb_req { - u8 opcode; - u8 flags1; -#define OB_MAC_TSO_IOCB_OI 0x01 -#define OB_MAC_TSO_IOCB_I 0x02 -#define OB_MAC_TSO_IOCB_D 0x08 -#define OB_MAC_TSO_IOCB_IP4 0x40 -#define OB_MAC_TSO_IOCB_IP6 0x80 - u8 flags2; -#define OB_MAC_TSO_IOCB_LSO 0x20 -#define OB_MAC_TSO_IOCB_UC 0x40 -#define OB_MAC_TSO_IOCB_TC 0x80 - u8 flags3; -#define OB_MAC_TSO_IOCB_IC 0x01 -#define OB_MAC_TSO_IOCB_DFP 0x02 -#define OB_MAC_TSO_IOCB_V 0x04 - __le32 reserved1[2]; - __le32 frame_len; - u32 tid; - u32 txq_idx; - __le16 total_hdrs_len; - __le16 net_trans_offset; -#define OB_MAC_TRANSPORT_HDR_SHIFT 6 - __le16 vlan_tci; - __le16 mss; - struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; -} __packed; - -struct ob_mac_tso_iocb_rsp { - u8 opcode; - u8 flags1; -#define OB_MAC_TSO_IOCB_RSP_OI 0x01 -#define OB_MAC_TSO_IOCB_RSP_I 0x02 -#define OB_MAC_TSO_IOCB_RSP_E 0x08 -#define OB_MAC_TSO_IOCB_RSP_S 0x10 -#define OB_MAC_TSO_IOCB_RSP_L 0x20 -#define OB_MAC_TSO_IOCB_RSP_P 0x40 - u8 flags2; /* */ - u8 flags3; /* */ -#define OB_MAC_TSO_IOCB_RSP_B 0x8000 - u32 tid; - u32 txq_idx; - __le32 reserved2[13]; -} 
__packed; - -struct ib_mac_iocb_rsp { - u8 opcode; /* 0x20 */ - u8 flags1; -#define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */ -#define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */ -#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ -#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ -#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ -#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */ -#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */ -#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */ -#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */ -#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */ -#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */ -#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */ - u8 flags2; -#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */ -#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */ -#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */ -#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04 -#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08 -#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10 -#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14 -#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18 -#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c -#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */ -#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */ -#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */ - u8 flags3; -#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */ -#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */ -#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */ -#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */ -#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */ -#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */ -#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */ -#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */ -#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */ -#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */ -#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */ - __le32 data_len; /* */ - __le64 data_addr; /* */ - __le32 rss; /* */ - __le16 vlan_id; /* 12 bits */ -#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */ -#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */ -#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff - - __le16 reserved1; - __le32 reserved2[6]; - u8 reserved3[3]; - u8 flags4; -#define IB_MAC_IOCB_RSP_HV 0x20 -#define IB_MAC_IOCB_RSP_HS 0x40 -#define IB_MAC_IOCB_RSP_HL 0x80 - __le32 hdr_len; /* */ - __le64 hdr_addr; /* */ -} __packed; - -struct ib_ae_iocb_rsp { - u8 opcode; - u8 flags1; -#define IB_AE_IOCB_RSP_OI 0x01 -#define IB_AE_IOCB_RSP_I 0x02 - u8 event; -#define LINK_UP_EVENT 0x00 -#define LINK_DOWN_EVENT 0x01 -#define CAM_LOOKUP_ERR_EVENT 0x06 -#define SOFT_ECC_ERROR_EVENT 0x07 -#define MGMT_ERR_EVENT 0x08 -#define TEN_GIG_MAC_EVENT 0x09 -#define GPI0_H2L_EVENT 0x10 -#define GPI0_L2H_EVENT 0x20 -#define GPI1_H2L_EVENT 0x11 -#define GPI1_L2H_EVENT 0x21 -#define PCI_ERR_ANON_BUF_RD 0x40 - u8 q_id; - __le32 reserved[15]; -} __packed; - -/* - * These three structures are for generic - * handling of ib and ob iocbs. - */ -struct ql_net_rsp_iocb { - u8 opcode; - u8 flags0; - __le16 length; - __le32 tid; - __le32 reserved[14]; -} __packed; - -struct net_req_iocb { - u8 opcode; - u8 flags0; - __le16 flags1; - __le32 tid; - __le32 reserved1[30]; -} __packed; - -/* - * tx ring initialization control block for chip. 
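The flags in ib_mac_iocb_rsp drive the rx checksum decision: offload is only trusted when none of the IB_MAC_CSUM_ERR_MASK bits are set and the frame is TCP (the real path also accepts non-fragmented UDP). A condensed sketch of that test in the shape of the qlge rx path:

static void ql_rx_csum_example(struct sk_buff *skb,
                               struct ib_mac_iocb_rsp *rsp)
{
        skb_checksum_none_assert(skb);
        /* IB_MAC_CSUM_ERR_MASK covers RSP_TE, RSP_NU and RSP_IE */
        if (rsp->flags1 & IB_MAC_CSUM_ERR_MASK)
                return;
        if (rsp->flags2 & IB_MAC_IOCB_RSP_T)    /* TCP: hw verified it */
                skb->ip_summed = CHECKSUM_UNNECESSARY;
}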
- * It is defined as: - * "Work Queue Initialization Control Block" - */ -struct wqicb { - __le16 len; -#define Q_LEN_V (1 << 4) -#define Q_LEN_CPP_CONT 0x0000 -#define Q_LEN_CPP_16 0x0001 -#define Q_LEN_CPP_32 0x0002 -#define Q_LEN_CPP_64 0x0003 -#define Q_LEN_CPP_512 0x0006 - __le16 flags; -#define Q_PRI_SHIFT 1 -#define Q_FLAGS_LC 0x1000 -#define Q_FLAGS_LB 0x2000 -#define Q_FLAGS_LI 0x4000 -#define Q_FLAGS_LO 0x8000 - __le16 cq_id_rss; -#define Q_CQ_ID_RSS_RV 0x8000 - __le16 rid; - __le64 addr; - __le64 cnsmr_idx_addr; -} __packed; - -/* - * rx ring initialization control block for chip. - * It is defined as: - * "Completion Queue Initialization Control Block" - */ -struct cqicb { - u8 msix_vect; - u8 reserved1; - u8 reserved2; - u8 flags; -#define FLAGS_LV 0x08 -#define FLAGS_LS 0x10 -#define FLAGS_LL 0x20 -#define FLAGS_LI 0x40 -#define FLAGS_LC 0x80 - __le16 len; -#define LEN_V (1 << 4) -#define LEN_CPP_CONT 0x0000 -#define LEN_CPP_32 0x0001 -#define LEN_CPP_64 0x0002 -#define LEN_CPP_128 0x0003 - __le16 rid; - __le64 addr; - __le64 prod_idx_addr; - __le16 pkt_delay; - __le16 irq_delay; - __le64 lbq_addr; - __le16 lbq_buf_size; - __le16 lbq_len; /* entry count */ - __le64 sbq_addr; - __le16 sbq_buf_size; - __le16 sbq_len; /* entry count */ -} __packed; - -struct ricb { - u8 base_cq; -#define RSS_L4K 0x80 - u8 flags; -#define RSS_L6K 0x01 -#define RSS_LI 0x02 -#define RSS_LB 0x04 -#define RSS_LM 0x08 -#define RSS_RI4 0x10 -#define RSS_RT4 0x20 -#define RSS_RI6 0x40 -#define RSS_RT6 0x80 - __le16 mask; - u8 hash_cq_id[1024]; - __le32 ipv6_hash_key[10]; - __le32 ipv4_hash_key[4]; -} __packed; - -/* SOFTWARE/DRIVER DATA STRUCTURES. */ - -struct oal { - struct tx_buf_desc oal[TX_DESC_PER_OAL]; -}; - -struct map_list { - DEFINE_DMA_UNMAP_ADDR(mapaddr); - DEFINE_DMA_UNMAP_LEN(maplen); -}; - -struct tx_ring_desc { - struct sk_buff *skb; - struct ob_mac_iocb_req *queue_entry; - u32 index; - struct oal oal; - struct map_list map[MAX_SKB_FRAGS + 1]; - int map_cnt; - struct tx_ring_desc *next; -}; - -struct page_chunk { - struct page *page; /* master page */ - char *va; /* virt addr for this chunk */ - u64 map; /* mapping for master */ - unsigned int offset; /* offset for this chunk */ - unsigned int last_flag; /* flag set for last chunk in page */ -}; - -struct bq_desc { - union { - struct page_chunk pg_chunk; - struct sk_buff *skb; - } p; - __le64 *addr; - u32 index; - DEFINE_DMA_UNMAP_ADDR(mapaddr); - DEFINE_DMA_UNMAP_LEN(maplen); -}; - -#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) - -struct tx_ring { - /* - * queue info. - */ - struct wqicb wqicb; /* structure used to inform chip of new queue */ - void *wq_base; /* pci_alloc:virtual addr for tx */ - dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */ - __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */ - dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */ - u32 wq_size; /* size in bytes of queue area */ - u32 wq_len; /* number of entries in queue */ - void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */ - void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */ - u16 prod_idx; /* current value for prod idx */ - u16 cq_id; /* completion (rx) queue for tx completions */ - u8 wq_id; /* queue id for this entry */ - u8 reserved1[3]; - struct tx_ring_desc *q; /* descriptor list for the queue */ - spinlock_t lock; - atomic_t tx_count; /* counts down for every outstanding IO */ - atomic_t queue_stopped; /* Turns queue off when full. 
*/ - struct delayed_work tx_work; - struct ql_adapter *qdev; - u64 tx_packets; - u64 tx_bytes; - u64 tx_errors; -}; - -/* - * Type of inbound queue. - */ -enum { - DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */ - TX_Q = 3, /* Handles outbound completions. */ - RX_Q = 4, /* Handles inbound completions. */ -}; - -struct rx_ring { - struct cqicb cqicb; /* The chip's completion queue init control block. */ - - /* Completion queue elements. */ - void *cq_base; - dma_addr_t cq_base_dma; - u32 cq_size; - u32 cq_len; - u16 cq_id; - __le32 *prod_idx_sh_reg; /* Shadowed producer register. */ - dma_addr_t prod_idx_sh_reg_dma; - void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */ - u32 cnsmr_idx; /* current sw idx */ - struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */ - void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */ - - /* Large buffer queue elements. */ - u32 lbq_len; /* entry count */ - u32 lbq_size; /* size in bytes of queue */ - u32 lbq_buf_size; - void *lbq_base; - dma_addr_t lbq_base_dma; - void *lbq_base_indirect; - dma_addr_t lbq_base_indirect_dma; - struct page_chunk pg_chunk; /* current page for chunks */ - struct bq_desc *lbq; /* array of control blocks */ - void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */ - u32 lbq_prod_idx; /* current sw prod idx */ - u32 lbq_curr_idx; /* next entry we expect */ - u32 lbq_clean_idx; /* beginning of new descs */ - u32 lbq_free_cnt; /* free buffer desc cnt */ - - /* Small buffer queue elements. */ - u32 sbq_len; /* entry count */ - u32 sbq_size; /* size in bytes of queue */ - u32 sbq_buf_size; - void *sbq_base; - dma_addr_t sbq_base_dma; - void *sbq_base_indirect; - dma_addr_t sbq_base_indirect_dma; - struct bq_desc *sbq; /* array of control blocks */ - void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */ - u32 sbq_prod_idx; /* current sw prod idx */ - u32 sbq_curr_idx; /* next entry we expect */ - u32 sbq_clean_idx; /* beginning of new descs */ - u32 sbq_free_cnt; /* free buffer desc cnt */ - - /* Misc. handler elements. */ - u32 type; /* Type of queue, tx, rx. */ - u32 irq; /* Which vector this ring is assigned. */ - u32 cpu; /* Which CPU this should run on. */ - char name[IFNAMSIZ + 5]; - struct napi_struct napi; - u8 reserved; - struct ql_adapter *qdev; - u64 rx_packets; - u64 rx_multicast; - u64 rx_bytes; - u64 rx_dropped; - u64 rx_errors; -}; - -/* - * RSS Initialization Control Block - */ -struct hash_id { - u8 value[4]; -}; - -struct nic_stats { - /* - * These stats come from offset 200h to 278h - * in the XGMAC register. - */ - u64 tx_pkts; - u64 tx_bytes; - u64 tx_mcast_pkts; - u64 tx_bcast_pkts; - u64 tx_ucast_pkts; - u64 tx_ctl_pkts; - u64 tx_pause_pkts; - u64 tx_64_pkt; - u64 tx_65_to_127_pkt; - u64 tx_128_to_255_pkt; - u64 tx_256_511_pkt; - u64 tx_512_to_1023_pkt; - u64 tx_1024_to_1518_pkt; - u64 tx_1519_to_max_pkt; - u64 tx_undersize_pkt; - u64 tx_oversize_pkt; - - /* - * These stats come from offset 300h to 3C8h - * in the XGMAC register. 
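Priming a completion queue is a matter of filling struct cqicb and handing its DMA address to the chip: the len field carries both the entry count and a count-per-page code, and pkt_delay/irq_delay are the coalescing knobs behind DFLT_COALESCE_WAIT. A condensed sketch (lbq/sbq fields elided; modeled on, but not copied from, the ring-start code):

static void ql_fill_cqicb_example(struct rx_ring *rx_ring)
{
        struct cqicb *cqicb = &rx_ring->cqicb;

        cqicb->msix_vect = rx_ring->irq;
        /* load the cq, the valid bit, and the interrupt delays */
        cqicb->flags = FLAGS_LC | FLAGS_LV | FLAGS_LI;
        cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT);
        cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
        cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
        cqicb->pkt_delay = cpu_to_le16(10);     /* interframe wait, usec */
        cqicb->irq_delay = cpu_to_le16(100);    /* absolute wait, usec */
}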
- */ - u64 rx_bytes; - u64 rx_bytes_ok; - u64 rx_pkts; - u64 rx_pkts_ok; - u64 rx_bcast_pkts; - u64 rx_mcast_pkts; - u64 rx_ucast_pkts; - u64 rx_undersize_pkts; - u64 rx_oversize_pkts; - u64 rx_jabber_pkts; - u64 rx_undersize_fcerr_pkts; - u64 rx_drop_events; - u64 rx_fcerr_pkts; - u64 rx_align_err; - u64 rx_symbol_err; - u64 rx_mac_err; - u64 rx_ctl_pkts; - u64 rx_pause_pkts; - u64 rx_64_pkts; - u64 rx_65_to_127_pkts; - u64 rx_128_255_pkts; - u64 rx_256_511_pkts; - u64 rx_512_to_1023_pkts; - u64 rx_1024_to_1518_pkts; - u64 rx_1519_to_max_pkts; - u64 rx_len_err_pkts; - /* - * These stats come from offset 500h to 5C8h - * in the XGMAC register. - */ - u64 tx_cbfc_pause_frames0; - u64 tx_cbfc_pause_frames1; - u64 tx_cbfc_pause_frames2; - u64 tx_cbfc_pause_frames3; - u64 tx_cbfc_pause_frames4; - u64 tx_cbfc_pause_frames5; - u64 tx_cbfc_pause_frames6; - u64 tx_cbfc_pause_frames7; - u64 rx_cbfc_pause_frames0; - u64 rx_cbfc_pause_frames1; - u64 rx_cbfc_pause_frames2; - u64 rx_cbfc_pause_frames3; - u64 rx_cbfc_pause_frames4; - u64 rx_cbfc_pause_frames5; - u64 rx_cbfc_pause_frames6; - u64 rx_cbfc_pause_frames7; - u64 rx_nic_fifo_drop; -}; - -/* Firmware coredump internal register address/length pairs. */ -enum { - MPI_CORE_REGS_ADDR = 0x00030000, - MPI_CORE_REGS_CNT = 127, - MPI_CORE_SH_REGS_CNT = 16, - TEST_REGS_ADDR = 0x00001000, - TEST_REGS_CNT = 23, - RMII_REGS_ADDR = 0x00001040, - RMII_REGS_CNT = 64, - FCMAC1_REGS_ADDR = 0x00001080, - FCMAC2_REGS_ADDR = 0x000010c0, - FCMAC_REGS_CNT = 64, - FC1_MBX_REGS_ADDR = 0x00001100, - FC2_MBX_REGS_ADDR = 0x00001240, - FC_MBX_REGS_CNT = 64, - IDE_REGS_ADDR = 0x00001140, - IDE_REGS_CNT = 64, - NIC1_MBX_REGS_ADDR = 0x00001180, - NIC2_MBX_REGS_ADDR = 0x00001280, - NIC_MBX_REGS_CNT = 64, - SMBUS_REGS_ADDR = 0x00001200, - SMBUS_REGS_CNT = 64, - I2C_REGS_ADDR = 0x00001fc0, - I2C_REGS_CNT = 64, - MEMC_REGS_ADDR = 0x00003000, - MEMC_REGS_CNT = 256, - PBUS_REGS_ADDR = 0x00007c00, - PBUS_REGS_CNT = 256, - MDE_REGS_ADDR = 0x00010000, - MDE_REGS_CNT = 6, - CODE_RAM_ADDR = 0x00020000, - CODE_RAM_CNT = 0x2000, - MEMC_RAM_ADDR = 0x00100000, - MEMC_RAM_CNT = 0x2000, -}; - -#define MPI_COREDUMP_COOKIE 0x5555aaaa -struct mpi_coredump_global_header { - u32 cookie; - u8 idString[16]; - u32 timeLo; - u32 timeHi; - u32 imageSize; - u32 headerSize; - u8 info[220]; -}; - -struct mpi_coredump_segment_header { - u32 cookie; - u32 segNum; - u32 segSize; - u32 extra; - u8 description[16]; -}; - -/* Firmware coredump header segment numbers. 
*/ -enum { - CORE_SEG_NUM = 1, - TEST_LOGIC_SEG_NUM = 2, - RMII_SEG_NUM = 3, - FCMAC1_SEG_NUM = 4, - FCMAC2_SEG_NUM = 5, - FC1_MBOX_SEG_NUM = 6, - IDE_SEG_NUM = 7, - NIC1_MBOX_SEG_NUM = 8, - SMBUS_SEG_NUM = 9, - FC2_MBOX_SEG_NUM = 10, - NIC2_MBOX_SEG_NUM = 11, - I2C_SEG_NUM = 12, - MEMC_SEG_NUM = 13, - PBUS_SEG_NUM = 14, - MDE_SEG_NUM = 15, - NIC1_CONTROL_SEG_NUM = 16, - NIC2_CONTROL_SEG_NUM = 17, - NIC1_XGMAC_SEG_NUM = 18, - NIC2_XGMAC_SEG_NUM = 19, - WCS_RAM_SEG_NUM = 20, - MEMC_RAM_SEG_NUM = 21, - XAUI_AN_SEG_NUM = 22, - XAUI_HSS_PCS_SEG_NUM = 23, - XFI_AN_SEG_NUM = 24, - XFI_TRAIN_SEG_NUM = 25, - XFI_HSS_PCS_SEG_NUM = 26, - XFI_HSS_TX_SEG_NUM = 27, - XFI_HSS_RX_SEG_NUM = 28, - XFI_HSS_PLL_SEG_NUM = 29, - MISC_NIC_INFO_SEG_NUM = 30, - INTR_STATES_SEG_NUM = 31, - CAM_ENTRIES_SEG_NUM = 32, - ROUTING_WORDS_SEG_NUM = 33, - ETS_SEG_NUM = 34, - PROBE_DUMP_SEG_NUM = 35, - ROUTING_INDEX_SEG_NUM = 36, - MAC_PROTOCOL_SEG_NUM = 37, - XAUI2_AN_SEG_NUM = 38, - XAUI2_HSS_PCS_SEG_NUM = 39, - XFI2_AN_SEG_NUM = 40, - XFI2_TRAIN_SEG_NUM = 41, - XFI2_HSS_PCS_SEG_NUM = 42, - XFI2_HSS_TX_SEG_NUM = 43, - XFI2_HSS_RX_SEG_NUM = 44, - XFI2_HSS_PLL_SEG_NUM = 45, - SEM_REGS_SEG_NUM = 50 - -}; - -/* There are 64 generic NIC registers. */ -#define NIC_REGS_DUMP_WORD_COUNT 64 -/* XGMAC word count. */ -#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4) -/* Word counts for the SERDES blocks. */ -#define XG_SERDES_XAUI_AN_COUNT 14 -#define XG_SERDES_XAUI_HSS_PCS_COUNT 33 -#define XG_SERDES_XFI_AN_COUNT 14 -#define XG_SERDES_XFI_TRAIN_COUNT 12 -#define XG_SERDES_XFI_HSS_PCS_COUNT 15 -#define XG_SERDES_XFI_HSS_TX_COUNT 32 -#define XG_SERDES_XFI_HSS_RX_COUNT 32 -#define XG_SERDES_XFI_HSS_PLL_COUNT 32 - -/* There are 2 CNA ETS and 8 NIC ETS registers. */ -#define ETS_REGS_DUMP_WORD_COUNT 10 - -/* Each probe mux entry stores the probe type plus 64 entries - * that are each each 64-bits in length. There are a total of - * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes. - */ -#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2)) -#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \ - PRB_MX_ADDR_VALID_TOTAL) -/* Each routing entry consists of 4 32-bit words. - * They are route type, index, index word, and result. - * There are 2 route blocks with 8 entries each and - * 2 NIC blocks with 16 entries each. - * The totol entries is 48 with 4 words each. - */ -#define RT_IDX_DUMP_ENTRIES 48 -#define RT_IDX_DUMP_WORDS_PER_ENTRY 4 -#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \ - RT_IDX_DUMP_WORDS_PER_ENTRY) -/* There are 10 address blocks in filter, each with - * different entry counts and different word-count-per-entry. 
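The probe and routing dump sizes above follow from simple arithmetic; a self-contained check:

#include <stdio.h>

#define PRB_MX_ADDR_MAX_MUX 64
#define PRB_MX_ADDR_VALID_TOTAL 34
#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
                               PRB_MX_ADDR_VALID_TOTAL)
#define RT_IDX_DUMP_ENTRIES 48
#define RT_IDX_DUMP_WORDS_PER_ENTRY 4

int main(void)
{
        /* (1 + 128) words per probe x 34 valid probes = 4386 words */
        printf("probe dump words: %d\n", PRB_MX_DUMP_TOT_COUNT);
        /* 2 route blocks of 8 + 2 NIC blocks of 16 = 48 entries, 4
         * words each = 192 words */
        printf("routing dump words: %d\n",
               RT_IDX_DUMP_ENTRIES * RT_IDX_DUMP_WORDS_PER_ENTRY);
        return 0;
}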
- */ -#define MAC_ADDR_DUMP_ENTRIES \ - ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \ - (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \ - (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \ - (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \ - (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \ - (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \ - (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \ - (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \ - (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \ - (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT)) -#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2 -#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \ - MAC_ADDR_DUMP_WORDS_PER_ENTRY) -/* Maximum of 4 functions whose semaphore registeres are - * in the coredump. - */ -#define MAX_SEMAPHORE_FUNCTIONS 4 -/* Defines for access the MPI shadow registers. */ -#define RISC_124 0x0003007c -#define RISC_127 0x0003007f -#define SHADOW_OFFSET 0xb0000000 -#define SHADOW_REG_SHIFT 20 - -struct ql_nic_misc { - u32 rx_ring_count; - u32 tx_ring_count; - u32 intr_count; - u32 function; -}; - -struct ql_reg_dump { - - /* segment 0 */ - struct mpi_coredump_global_header mpi_global_header; - - /* segment 16 */ - struct mpi_coredump_segment_header nic_regs_seg_hdr; - u32 nic_regs[64]; - - /* segment 30 */ - struct mpi_coredump_segment_header misc_nic_seg_hdr; - struct ql_nic_misc misc_nic_info; - - /* segment 31 */ - /* one interrupt state for each CQ */ - struct mpi_coredump_segment_header intr_states_seg_hdr; - u32 intr_states[MAX_CPUS]; - - /* segment 32 */ - /* 3 cam words each for 16 unicast, - * 2 cam words for each of 32 multicast. 
- */ - struct mpi_coredump_segment_header cam_entries_seg_hdr; - u32 cam_entries[(16 * 3) + (32 * 3)]; - - /* segment 33 */ - struct mpi_coredump_segment_header nic_routing_words_seg_hdr; - u32 nic_routing_words[16]; - - /* segment 34 */ - struct mpi_coredump_segment_header ets_seg_hdr; - u32 ets[8+2]; -}; - -struct ql_mpi_coredump { - /* segment 0 */ - struct mpi_coredump_global_header mpi_global_header; - - /* segment 1 */ - struct mpi_coredump_segment_header core_regs_seg_hdr; - u32 mpi_core_regs[MPI_CORE_REGS_CNT]; - u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT]; - - /* segment 2 */ - struct mpi_coredump_segment_header test_logic_regs_seg_hdr; - u32 test_logic_regs[TEST_REGS_CNT]; - - /* segment 3 */ - struct mpi_coredump_segment_header rmii_regs_seg_hdr; - u32 rmii_regs[RMII_REGS_CNT]; - - /* segment 4 */ - struct mpi_coredump_segment_header fcmac1_regs_seg_hdr; - u32 fcmac1_regs[FCMAC_REGS_CNT]; - - /* segment 5 */ - struct mpi_coredump_segment_header fcmac2_regs_seg_hdr; - u32 fcmac2_regs[FCMAC_REGS_CNT]; - - /* segment 6 */ - struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr; - u32 fc1_mbx_regs[FC_MBX_REGS_CNT]; - - /* segment 7 */ - struct mpi_coredump_segment_header ide_regs_seg_hdr; - u32 ide_regs[IDE_REGS_CNT]; - - /* segment 8 */ - struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr; - u32 nic1_mbx_regs[NIC_MBX_REGS_CNT]; - - /* segment 9 */ - struct mpi_coredump_segment_header smbus_regs_seg_hdr; - u32 smbus_regs[SMBUS_REGS_CNT]; - - /* segment 10 */ - struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr; - u32 fc2_mbx_regs[FC_MBX_REGS_CNT]; - - /* segment 11 */ - struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr; - u32 nic2_mbx_regs[NIC_MBX_REGS_CNT]; - - /* segment 12 */ - struct mpi_coredump_segment_header i2c_regs_seg_hdr; - u32 i2c_regs[I2C_REGS_CNT]; - /* segment 13 */ - struct mpi_coredump_segment_header memc_regs_seg_hdr; - u32 memc_regs[MEMC_REGS_CNT]; - - /* segment 14 */ - struct mpi_coredump_segment_header pbus_regs_seg_hdr; - u32 pbus_regs[PBUS_REGS_CNT]; - - /* segment 15 */ - struct mpi_coredump_segment_header mde_regs_seg_hdr; - u32 mde_regs[MDE_REGS_CNT]; - - /* segment 16 */ - struct mpi_coredump_segment_header nic_regs_seg_hdr; - u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT]; - - /* segment 17 */ - struct mpi_coredump_segment_header nic2_regs_seg_hdr; - u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT]; - - /* segment 18 */ - struct mpi_coredump_segment_header xgmac1_seg_hdr; - u32 xgmac1[XGMAC_DUMP_WORD_COUNT]; - - /* segment 19 */ - struct mpi_coredump_segment_header xgmac2_seg_hdr; - u32 xgmac2[XGMAC_DUMP_WORD_COUNT]; - - /* segment 20 */ - struct mpi_coredump_segment_header code_ram_seg_hdr; - u32 code_ram[CODE_RAM_CNT]; - - /* segment 21 */ - struct mpi_coredump_segment_header memc_ram_seg_hdr; - u32 memc_ram[MEMC_RAM_CNT]; - - /* segment 22 */ - struct mpi_coredump_segment_header xaui_an_hdr; - u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT]; - - /* segment 23 */ - struct mpi_coredump_segment_header xaui_hss_pcs_hdr; - u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT]; - - /* segment 24 */ - struct mpi_coredump_segment_header xfi_an_hdr; - u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT]; - - /* segment 25 */ - struct mpi_coredump_segment_header xfi_train_hdr; - u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT]; - - /* segment 26 */ - struct mpi_coredump_segment_header xfi_hss_pcs_hdr; - u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT]; - - /* segment 27 */ - struct mpi_coredump_segment_header xfi_hss_tx_hdr; - u32 
serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
-
-	/* segment 28 */
-	struct mpi_coredump_segment_header xfi_hss_rx_hdr;
-	u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
-
-	/* segment 29 */
-	struct mpi_coredump_segment_header xfi_hss_pll_hdr;
-	u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
-
-	/* segment 30 */
-	struct mpi_coredump_segment_header misc_nic_seg_hdr;
-	struct ql_nic_misc misc_nic_info;
-
-	/* segment 31 */
-	/* one interrupt state for each CQ */
-	struct mpi_coredump_segment_header intr_states_seg_hdr;
-	u32 intr_states[MAX_RX_RINGS];
-
-	/* segment 32 */
-	/* 3 cam words each for 16 unicast,
-	 * 2 cam words for each of 32 multicast.
-	 */
-	struct mpi_coredump_segment_header cam_entries_seg_hdr;
-	u32 cam_entries[(16 * 3) + (32 * 3)];
-
-	/* segment 33 */
-	struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
-	u32 nic_routing_words[16];
-
-	/* segment 34 */
-	struct mpi_coredump_segment_header ets_seg_hdr;
-	u32 ets[ETS_REGS_DUMP_WORD_COUNT];
-
-	/* segment 35 */
-	struct mpi_coredump_segment_header probe_dump_seg_hdr;
-	u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
-
-	/* segment 36 */
-	struct mpi_coredump_segment_header routing_reg_seg_hdr;
-	u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
-
-	/* segment 37 */
-	struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
-	u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
-
-	/* segment 38 */
-	struct mpi_coredump_segment_header xaui2_an_hdr;
-	u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
-
-	/* segment 39 */
-	struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
-	u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
-
-	/* segment 40 */
-	struct mpi_coredump_segment_header xfi2_an_hdr;
-	u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
-
-	/* segment 41 */
-	struct mpi_coredump_segment_header xfi2_train_hdr;
-	u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
-
-	/* segment 42 */
-	struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
-	u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
-
-	/* segment 43 */
-	struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
-	u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
-
-	/* segment 44 */
-	struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
-	u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
-
-	/* segment 45 */
-	struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
-	u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
-
-	/* segment 50 */
-	/* semaphore register for all 4 functions */
-	struct mpi_coredump_segment_header sem_regs_seg_hdr;
-	u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
-};
-
-/*
- * intr_context structure is used during initialization
- * to hook the interrupts.  It is also used in a single
- * irq environment as a context to the ISR.
- */
-struct intr_context {
-	struct ql_adapter *qdev;
-	u32 intr;
-	u32 irq_mask;		/* Mask of which rings the vector services. */
-	u32 hooked;
-	u32 intr_en_mask;	/* value/mask used to enable this intr */
-	u32 intr_dis_mask;	/* value/mask used to disable this intr */
-	u32 intr_read_mask;	/* value/mask used to read this intr */
-	char name[IFNAMSIZ * 2];
-	atomic_t irq_cnt;	/* irq_cnt is used in single vector
-				 * environment.  It's incremented for each
-				 * irq handler that is scheduled.  When each
-				 * handler finishes it decrements irq_cnt and
-				 * enables interrupts if it's zero. */
-	irq_handler_t handler;
-};
-
-/* adapter flags definitions. */
-enum {
-	QL_ADAPTER_UP = 0,	/* Adapter has been brought up.
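-				 * All of these flags live in qdev->flags
-				 * and are tested/changed atomically with
-				 * the set_bit()/test_bit() family, e.g.
-				 * test_bit(QL_ADAPTER_UP, &qdev->flags).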
-				 */
-	QL_LEGACY_ENABLED = 1,
-	QL_MSI_ENABLED = 2,
-	QL_MSIX_ENABLED = 3,
-	QL_DMA64 = 4,
-	QL_PROMISCUOUS = 5,
-	QL_ALLMULTI = 6,
-	QL_PORT_CFG = 7,
-	QL_CAM_RT_SET = 8,
-	QL_SELFTEST = 9,
-	QL_LB_LINK_UP = 10,
-	QL_FRC_COREDUMP = 11,
-	QL_EEH_FATAL = 12,
-	QL_ASIC_RECOVERY = 14,	/* We are in asic recovery. */
-};
-
-/* link_status bit definitions */
-enum {
-	STS_LOOPBACK_MASK = 0x00000700,
-	STS_LOOPBACK_PCS = 0x00000100,
-	STS_LOOPBACK_HSS = 0x00000200,
-	STS_LOOPBACK_EXT = 0x00000300,
-	STS_PAUSE_MASK = 0x000000c0,
-	STS_PAUSE_STD = 0x00000040,
-	STS_PAUSE_PRI = 0x00000080,
-	STS_SPEED_MASK = 0x00000038,
-	STS_SPEED_100Mb = 0x00000000,
-	STS_SPEED_1Gb = 0x00000008,
-	STS_SPEED_10Gb = 0x00000010,
-	STS_LINK_TYPE_MASK = 0x00000007,
-	STS_LINK_TYPE_XFI = 0x00000001,
-	STS_LINK_TYPE_XAUI = 0x00000002,
-	STS_LINK_TYPE_XFI_BP = 0x00000003,
-	STS_LINK_TYPE_XAUI_BP = 0x00000004,
-	STS_LINK_TYPE_10GBASET = 0x00000005,
-};
-
-/* link_config bit definitions */
-enum {
-	CFG_JUMBO_FRAME_SIZE = 0x00010000,
-	CFG_PAUSE_MASK = 0x00000060,
-	CFG_PAUSE_STD = 0x00000020,
-	CFG_PAUSE_PRI = 0x00000040,
-	CFG_DCBX = 0x00000010,
-	CFG_LOOPBACK_MASK = 0x00000007,
-	CFG_LOOPBACK_PCS = 0x00000002,
-	CFG_LOOPBACK_HSS = 0x00000004,
-	CFG_LOOPBACK_EXT = 0x00000006,
-	CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
-};
-
-struct nic_operations {
-
-	int (*get_flash) (struct ql_adapter *);
-	int (*port_initialize) (struct ql_adapter *);
-};
-
-/*
- * The main Adapter structure definition.
- * This structure has all fields relevant to the hardware.
- */
-struct ql_adapter {
-	struct ricb ricb;
-	unsigned long flags;
-	u32 wol;
-
-	struct nic_stats nic_stats;
-
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-
-	/* PCI Configuration information for this device */
-	struct pci_dev *pdev;
-	struct net_device *ndev;	/* Parent NET device */
-
-	/* Hardware information */
-	u32 chip_rev_id;
-	u32 fw_rev_id;
-	u32 func;		/* PCI function for this adapter */
-	u32 alt_func;		/* PCI function for alternate adapter */
-	u32 port;		/* Port number for this adapter */
-
-	spinlock_t adapter_lock;
-	spinlock_t hw_lock;
-	spinlock_t stats_lock;
-
-	/* PCI Bus Relative Register Addresses */
-	void __iomem *reg_base;
-	void __iomem *doorbell_area;
-	u32 doorbell_area_size;
-
-	u32 msg_enable;
-
-	/* Page for Shadow Registers */
-	void *rx_ring_shadow_reg_area;
-	dma_addr_t rx_ring_shadow_reg_dma;
-	void *tx_ring_shadow_reg_area;
-	dma_addr_t tx_ring_shadow_reg_dma;
-
-	u32 mailbox_in;
-	u32 mailbox_out;
-	struct mbox_params idc_mbc;
-	struct mutex mpi_mutex;
-
-	int tx_ring_size;
-	int rx_ring_size;
-	u32 intr_count;
-	struct msix_entry *msi_x_entry;
-	struct intr_context intr_context[MAX_RX_RINGS];
-
-	int tx_ring_count;	/* One per online CPU. */
-	u32 rss_ring_count;	/* One per irq vector.
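-				 * e.g. four MSI-X vectors give four RSS
-				 * rings; these combine with the per-CPU
-				 * outbound rings per the formula below.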
*/ - /* - * rx_ring_count = - * (CPU count * outbound completion rx_ring) + - * (irq_vector_cnt * inbound (RSS) completion rx_ring) - */ - int rx_ring_count; - int ring_mem_size; - void *ring_mem; - - struct rx_ring rx_ring[MAX_RX_RINGS]; - struct tx_ring tx_ring[MAX_TX_RINGS]; - unsigned int lbq_buf_order; - - int rx_csum; - u32 default_rx_queue; - - u16 rx_coalesce_usecs; /* cqicb->int_delay */ - u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */ - u16 tx_coalesce_usecs; /* cqicb->int_delay */ - u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */ - - u32 xg_sem_mask; - u32 port_link_up; - u32 port_init; - u32 link_status; - struct ql_mpi_coredump *mpi_coredump; - u32 core_is_dumped; - u32 link_config; - u32 led_config; - u32 max_frame_size; - - union flash_params flash; - - struct workqueue_struct *workqueue; - struct delayed_work asic_reset_work; - struct delayed_work mpi_reset_work; - struct delayed_work mpi_work; - struct delayed_work mpi_port_cfg_work; - struct delayed_work mpi_idc_work; - struct delayed_work mpi_core_to_log; - struct completion ide_completion; - const struct nic_operations *nic_ops; - u16 device_id; - struct timer_list timer; - atomic_t lb_count; - /* Keep local copy of current mac address. */ - char current_mac_addr[6]; -}; - -/* - * Typical Register accessor for memory mapped device. - */ -static inline u32 ql_read32(const struct ql_adapter *qdev, int reg) -{ - return readl(qdev->reg_base + reg); -} - -/* - * Typical Register accessor for memory mapped device. - */ -static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val) -{ - writel(val, qdev->reg_base + reg); -} - -/* - * Doorbell Registers: - * Doorbell registers are virtual registers in the PCI memory space. - * The space is allocated by the chip during PCI initialization. The - * device driver finds the doorbell address in BAR 3 in PCI config space. - * The registers are used to control outbound and inbound queues. For - * example, the producer index for an outbound queue. Each queue uses - * 1 4k chunk of memory. The lower half of the space is for outbound - * queues. The upper half is for inbound queues. - */ -static inline void ql_write_db_reg(u32 val, void __iomem *addr) -{ - writel(val, addr); - mmiowb(); -} - -/* - * Shadow Registers: - * Outbound queues have a consumer index that is maintained by the chip. - * Inbound queues have a producer index that is maintained by the chip. - * For lower overhead, these registers are "shadowed" to host memory - * which allows the device driver to track the queue progress without - * PCI reads. When an entry is placed on an inbound queue, the chip will - * update the relevant index register and then copy the value to the - * shadow register in host memory. 
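- * As an illustrative sketch (not from the original source), a
- * completion-queue service loop can then poll the shadow copy instead
- * of issuing a PCI read on every pass:
- *
- *	while (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
- *	       rx_ring->cnsmr_idx)
- *		(service one response entry and advance cnsmr_idx)
- *
- * assuming the ring also tracks a software consumer index (cnsmr_idx)
- * alongside the shadowed producer index.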
- */ -static inline u32 ql_read_sh_reg(__le32 *addr) -{ - u32 reg; - reg = le32_to_cpu(*addr); - rmb(); - return reg; -} - -extern char qlge_driver_name[]; -extern const char qlge_driver_version[]; -extern const struct ethtool_ops qlge_ethtool_ops; - -extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask); -extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask); -extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data); -extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, - u32 *value); -extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value); -extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, - u16 q_id); -void ql_queue_fw_error(struct ql_adapter *qdev); -void ql_mpi_work(struct work_struct *work); -void ql_mpi_reset_work(struct work_struct *work); -void ql_mpi_core_to_log(struct work_struct *work); -int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); -void ql_queue_asic_error(struct ql_adapter *qdev); -u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); -void ql_set_ethtool_ops(struct net_device *ndev); -int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); -void ql_mpi_idc_work(struct work_struct *work); -void ql_mpi_port_cfg_work(struct work_struct *work); -int ql_mb_get_fw_state(struct ql_adapter *qdev); -int ql_cam_route_initialize(struct ql_adapter *qdev); -int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); -int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data); -int ql_unpause_mpi_risc(struct ql_adapter *qdev); -int ql_pause_mpi_risc(struct ql_adapter *qdev); -int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); -int ql_soft_reset_mpi_risc(struct ql_adapter *qdev); -int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, - u32 ram_addr, int word_count); -int ql_core_dump(struct ql_adapter *qdev, - struct ql_mpi_coredump *mpi_coredump); -int ql_mb_about_fw(struct ql_adapter *qdev); -int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); -int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol); -int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config); -int ql_mb_get_led_cfg(struct ql_adapter *qdev); -void ql_link_on(struct ql_adapter *qdev); -void ql_link_off(struct ql_adapter *qdev); -int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); -int ql_mb_get_port_cfg(struct ql_adapter *qdev); -int ql_mb_set_port_cfg(struct ql_adapter *qdev); -int ql_wait_fifo_empty(struct ql_adapter *qdev); -void ql_get_dump(struct ql_adapter *qdev, void *buff); -void ql_gen_reg_dump(struct ql_adapter *qdev, - struct ql_reg_dump *mpi_coredump); -netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); -void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); -int ql_own_firmware(struct ql_adapter *qdev); -int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); - -/* #define QL_ALL_DUMP */ -/* #define QL_REG_DUMP */ -/* #define QL_DEV_DUMP */ -/* #define QL_CB_DUMP */ -/* #define QL_IB_DUMP */ -/* #define QL_OB_DUMP */ - -#ifdef QL_REG_DUMP -extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev); -extern void ql_dump_routing_entries(struct ql_adapter *qdev); -extern void ql_dump_regs(struct ql_adapter *qdev); -#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev) -#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev) -#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev) -#else -#define QL_DUMP_REGS(qdev) -#define 
QL_DUMP_ROUTE(qdev)
-#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
-#endif
-
-#ifdef QL_STAT_DUMP
-extern void ql_dump_stat(struct ql_adapter *qdev);
-#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
-#else
-#define QL_DUMP_STAT(qdev)
-#endif
-
-#ifdef QL_DEV_DUMP
-extern void ql_dump_qdev(struct ql_adapter *qdev);
-#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
-#else
-#define QL_DUMP_QDEV(qdev)
-#endif
-
-#ifdef QL_CB_DUMP
-extern void ql_dump_wqicb(struct wqicb *wqicb);
-extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
-extern void ql_dump_ricb(struct ricb *ricb);
-extern void ql_dump_cqicb(struct cqicb *cqicb);
-extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
-extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
-#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
-#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
-#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
-#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
-#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
-#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
-		ql_dump_hw_cb(qdev, size, bit, q_id)
-#else
-#define QL_DUMP_RICB(ricb)
-#define QL_DUMP_WQICB(wqicb)
-#define QL_DUMP_TX_RING(tx_ring)
-#define QL_DUMP_CQICB(cqicb)
-#define QL_DUMP_RX_RING(rx_ring)
-#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
-#endif
-
-#ifdef QL_OB_DUMP
-extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
-#else
-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
-#endif
-
-#ifdef QL_IB_DUMP
-extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
-#else
-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
-#endif
-
-#ifdef QL_ALL_DUMP
-extern void ql_dump_all(struct ql_adapter *qdev);
-#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
-#else
-#define QL_DUMP_ALL(qdev)
-#endif
-
-#endif /* _QLGE_H_ */
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
deleted file mode 100644
index fca804f36d61..000000000000
--- a/drivers/net/qlge/qlge_dbg.c
+++ /dev/null
@@ -1,2044 +0,0 @@
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-
-#include "qlge.h"
-
-/* Read a NIC register from the alternate function. */
-static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
-						u32 reg)
-{
-	u32 register_to_read;
-	u32 reg_val;
-	unsigned int status = 0;
-
-	register_to_read = MPI_NIC_REG_BLOCK
-				| MPI_NIC_READ
-				| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
-				| reg;
-	status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
-	if (status != 0)
-		return 0xffffffff;
-
-	return reg_val;
-}
-
-/* Write a NIC register to the alternate function.
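- * The write path proxies through the same MPI address word as the
- * read above (MPI_NIC_REG_BLOCK | function | register), which is why
- * the helper below builds the identical "register_to_read" value.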
*/ -static int ql_write_other_func_reg(struct ql_adapter *qdev, - u32 reg, u32 reg_val) -{ - u32 register_to_read; - int status = 0; - - register_to_read = MPI_NIC_REG_BLOCK - | MPI_NIC_READ - | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) - | reg; - status = ql_write_mpi_reg(qdev, register_to_read, reg_val); - - return status; -} - -static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg, - u32 bit, u32 err_bit) -{ - u32 temp; - int count = 10; - - while (count) { - temp = ql_read_other_func_reg(qdev, reg); - - /* check for errors */ - if (temp & err_bit) - return -1; - else if (temp & bit) - return 0; - mdelay(10); - count--; - } - return -1; -} - -static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg, - u32 *data) -{ - int status; - - /* wait for reg to come ready */ - status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, - XG_SERDES_ADDR_RDY, 0); - if (status) - goto exit; - - /* set up for reg read */ - ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R); - - /* wait for reg to come ready */ - status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, - XG_SERDES_ADDR_RDY, 0); - if (status) - goto exit; - - /* get the data */ - *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4)); -exit: - return status; -} - -/* Read out the SERDES registers */ -static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data) -{ - int status; - - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); - if (status) - goto exit; - - /* set up for reg read */ - ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R); - - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); - if (status) - goto exit; - - /* get the data */ - *data = ql_read32(qdev, XG_SERDES_DATA); -exit: - return status; -} - -static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr, - u32 *direct_ptr, u32 *indirect_ptr, - unsigned int direct_valid, unsigned int indirect_valid) -{ - unsigned int status; - - status = 1; - if (direct_valid) - status = ql_read_serdes_reg(qdev, addr, direct_ptr); - /* Dead fill any failures or invalids. */ - if (status) - *direct_ptr = 0xDEADBEEF; - - status = 1; - if (indirect_valid) - status = ql_read_other_func_serdes_reg( - qdev, addr, indirect_ptr); - /* Dead fill any failures or invalids. 
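- * Both reads above share this dead-fill convention: it keeps the
- * dump layout fixed, so a parser can tell a powered-down or
- * unreadable lane (0xDEADBEEF) from real register data.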
-						 */
-	if (status)
-		*indirect_ptr = 0xDEADBEEF;
-}
-
-static int ql_get_serdes_regs(struct ql_adapter *qdev,
-				struct ql_mpi_coredump *mpi_coredump)
-{
-	int status;
-	unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
-	unsigned int xaui_indirect_valid, i;
-	u32 *direct_ptr, temp;
-	u32 *indirect_ptr;
-
-	xfi_direct_valid = xfi_indirect_valid = 0;
-	xaui_direct_valid = xaui_indirect_valid = 1;
-
-	/* The XAUI needs to be read out per port */
-	if (qdev->func & 1) {
-		/* We are NIC 2 */
-		status = ql_read_other_func_serdes_reg(qdev,
-				XG_SERDES_XAUI_HSS_PCS_START, &temp);
-		if (status)
-			temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
-		if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
-					XG_SERDES_ADDR_XAUI_PWR_DOWN)
-			xaui_indirect_valid = 0;
-
-		status = ql_read_serdes_reg(qdev,
-				XG_SERDES_XAUI_HSS_PCS_START, &temp);
-		if (status)
-			temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
-
-		if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
-					XG_SERDES_ADDR_XAUI_PWR_DOWN)
-			xaui_direct_valid = 0;
-	} else {
-		/* We are NIC 1 */
-		status = ql_read_other_func_serdes_reg(qdev,
-				XG_SERDES_XAUI_HSS_PCS_START, &temp);
-		if (status)
-			temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
-		if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
-					XG_SERDES_ADDR_XAUI_PWR_DOWN)
-			xaui_indirect_valid = 0;
-
-		status = ql_read_serdes_reg(qdev,
-				XG_SERDES_XAUI_HSS_PCS_START, &temp);
-		if (status)
-			temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
-		if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
-					XG_SERDES_ADDR_XAUI_PWR_DOWN)
-			xaui_direct_valid = 0;
-	}
-
-	/*
-	 * The XFI register is shared, so we only need to read one
-	 * function and then check the bits.
-	 */
-	status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
-	if (status)
-		temp = 0;
-
-	if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
-					XG_SERDES_ADDR_XFI1_PWR_UP) {
-		/* now see if i'm NIC 1 or NIC 2 */
-		if (qdev->func & 1)
-			/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
-			xfi_indirect_valid = 1;
-		else
-			xfi_direct_valid = 1;
-	}
-	if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
-					XG_SERDES_ADDR_XFI2_PWR_UP) {
-		/* now see if i'm NIC 1 or NIC 2 */
-		if (qdev->func & 1)
-			/* I'm NIC 2, so my own (direct) xfi is up. */
-			xfi_direct_valid = 1;
-		else
-			xfi_indirect_valid = 1;
-	}
-
-	/* Get XAUI_AN register block. */
-	if (qdev->func & 1) {
-		/* Function 2 is direct */
-		direct_ptr = mpi_coredump->serdes2_xaui_an;
-		indirect_ptr = mpi_coredump->serdes_xaui_an;
-	} else {
-		/* Function 1 is direct */
-		direct_ptr = mpi_coredump->serdes_xaui_an;
-		indirect_ptr = mpi_coredump->serdes2_xaui_an;
-	}
-
-	for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
-		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
-					xaui_direct_valid, xaui_indirect_valid);
-
-	/* Get XAUI_HSS_PCS register block. */
-	if (qdev->func & 1) {
-		direct_ptr =
-			mpi_coredump->serdes2_xaui_hss_pcs;
-		indirect_ptr =
-			mpi_coredump->serdes_xaui_hss_pcs;
-	} else {
-		direct_ptr =
-			mpi_coredump->serdes_xaui_hss_pcs;
-		indirect_ptr =
-			mpi_coredump->serdes2_xaui_hss_pcs;
-	}
-
-	for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
-		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
-					xaui_direct_valid, xaui_indirect_valid);
-
-	/* Get XAUI_XFI_AN register block.
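-	 * As with every serdes block gathered below, "direct" is this
-	 * function's copy and "indirect" is the alternate function's,
-	 * selected by the low bit of qdev->func.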
*/ - if (qdev->func & 1) { - direct_ptr = mpi_coredump->serdes2_xfi_an; - indirect_ptr = mpi_coredump->serdes_xfi_an; - } else { - direct_ptr = mpi_coredump->serdes_xfi_an; - indirect_ptr = mpi_coredump->serdes2_xfi_an; - } - - for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - /* Get XAUI_XFI_TRAIN register block. */ - if (qdev->func & 1) { - direct_ptr = mpi_coredump->serdes2_xfi_train; - indirect_ptr = - mpi_coredump->serdes_xfi_train; - } else { - direct_ptr = mpi_coredump->serdes_xfi_train; - indirect_ptr = - mpi_coredump->serdes2_xfi_train; - } - - for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - /* Get XAUI_XFI_HSS_PCS register block. */ - if (qdev->func & 1) { - direct_ptr = - mpi_coredump->serdes2_xfi_hss_pcs; - indirect_ptr = - mpi_coredump->serdes_xfi_hss_pcs; - } else { - direct_ptr = - mpi_coredump->serdes_xfi_hss_pcs; - indirect_ptr = - mpi_coredump->serdes2_xfi_hss_pcs; - } - - for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - /* Get XAUI_XFI_HSS_TX register block. */ - if (qdev->func & 1) { - direct_ptr = - mpi_coredump->serdes2_xfi_hss_tx; - indirect_ptr = - mpi_coredump->serdes_xfi_hss_tx; - } else { - direct_ptr = mpi_coredump->serdes_xfi_hss_tx; - indirect_ptr = - mpi_coredump->serdes2_xfi_hss_tx; - } - for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - /* Get XAUI_XFI_HSS_RX register block. */ - if (qdev->func & 1) { - direct_ptr = - mpi_coredump->serdes2_xfi_hss_rx; - indirect_ptr = - mpi_coredump->serdes_xfi_hss_rx; - } else { - direct_ptr = mpi_coredump->serdes_xfi_hss_rx; - indirect_ptr = - mpi_coredump->serdes2_xfi_hss_rx; - } - - for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - - /* Get XAUI_XFI_HSS_PLL register block. */ - if (qdev->func & 1) { - direct_ptr = - mpi_coredump->serdes2_xfi_hss_pll; - indirect_ptr = - mpi_coredump->serdes_xfi_hss_pll; - } else { - direct_ptr = - mpi_coredump->serdes_xfi_hss_pll; - indirect_ptr = - mpi_coredump->serdes2_xfi_hss_pll; - } - for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - return 0; -} - -static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg, - u32 *data) -{ - int status = 0; - - /* wait for reg to come ready */ - status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, - XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - goto exit; - - /* set up for reg read */ - ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R); - - /* wait for reg to come ready */ - status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, - XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - goto exit; - - /* get the data */ - *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4); -exit: - return status; -} - -/* Read the 400 xgmac control/statistics registers - * skipping unused locations. 
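- * The skipped offsets are holes in the XGMAC register map that do
- * not respond to reads; any read that still fails is dead-filled
- * with 0xdeadbeef so the dump stays aligned.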
- */
-static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
-					unsigned int other_function)
-{
-	int status = 0;
-	int i;
-
-	for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
-		/* We're reading 400 xgmac registers, but we filter out
-		 * several locations that are non-responsive to reads.
-		 */
-		if ((i == 0x00000114) ||
-			(i == 0x00000118) ||
-			(i == 0x0000013c) ||
-			(i == 0x00000140) ||
-			(i > 0x00000150 && i < 0x000001fc) ||
-			(i > 0x00000278 && i < 0x000002a0) ||
-			(i > 0x000002c0 && i < 0x000002cf) ||
-			(i > 0x000002dc && i < 0x000002f0) ||
-			(i > 0x000003c8 && i < 0x00000400) ||
-			(i > 0x00000400 && i < 0x00000410) ||
-			(i > 0x00000410 && i < 0x00000420) ||
-			(i > 0x00000420 && i < 0x00000430) ||
-			(i > 0x00000430 && i < 0x00000440) ||
-			(i > 0x00000440 && i < 0x00000450) ||
-			(i > 0x00000450 && i < 0x00000500) ||
-			(i > 0x0000054c && i < 0x00000568) ||
-			(i > 0x000005c8 && i < 0x00000600))
-			continue;
-
-		if (other_function)
-			status = ql_read_other_func_xgmac_reg(qdev, i, buf);
-		else
-			status = ql_read_xgmac_reg(qdev, i, buf);
-
-		if (status) {
-			*buf = 0xdeadbeef;
-			break;
-		}
-	}
-	return status;
-}
-
-static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
-{
-	int status = 0;
-	int i;
-
-	for (i = 0; i < 8; i++, buf++) {
-		ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
-		*buf = ql_read32(qdev, NIC_ETS);
-	}
-
-	for (i = 0; i < 2; i++, buf++) {
-		ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
-		*buf = ql_read32(qdev, CNA_ETS);
-	}
-
-	return status;
-}
-
-static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
-{
-	int i;
-
-	for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
-		ql_write32(qdev, INTR_EN,
-			qdev->intr_context[i].intr_read_mask);
-		*buf = ql_read32(qdev, INTR_EN);
-	}
-}
-
-static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
-{
-	int i, status;
-	u32 value[3];
-
-	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
-	if (status)
-		return status;
-
-	for (i = 0; i < 16; i++) {
-		status = ql_get_mac_addr_reg(qdev,
-					MAC_ADDR_TYPE_CAM_MAC, i, value);
-		if (status) {
-			netif_err(qdev, drv, qdev->ndev,
-				  "Failed read of mac index register\n");
-			goto err;
-		}
-		*buf++ = value[0];	/* lower MAC address */
-		*buf++ = value[1];	/* upper MAC address */
-		*buf++ = value[2];	/* output */
-	}
-	for (i = 0; i < 32; i++) {
-		status = ql_get_mac_addr_reg(qdev,
-					MAC_ADDR_TYPE_MULTI_MAC, i, value);
-		if (status) {
-			netif_err(qdev, drv, qdev->ndev,
-				  "Failed read of mac index register\n");
-			goto err;
-		}
-		*buf++ = value[0];	/* lower Mcast address */
-		*buf++ = value[1];	/* upper Mcast address */
-	}
-err:
-	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-	return status;
-}
-
-static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
-{
-	int status;
-	u32 value, i;
-
-	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
-	if (status)
-		return status;
-
-	for (i = 0; i < 16; i++) {
-		status = ql_get_routing_reg(qdev, i, &value);
-		if (status) {
-			netif_err(qdev, drv, qdev->ndev,
-				  "Failed read of routing index register\n");
-			goto err;
-		} else {
-			*buf++ = value;
-		}
-	}
-err:
-	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
-	return status;
-}
-
-/* Read the MPI Processor shadow registers */
-static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
-{
-	u32 i;
-	int status;
-
-	for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
-		status = ql_write_mpi_reg(qdev, RISC_124,
-				(SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
-		if (status)
-			goto end;
-		status = ql_read_mpi_reg(qdev, RISC_127, buf);
-		if (status)
-			goto end;
-	}
-end:
-
return status; -} - -/* Read the MPI Processor core registers */ -static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf, - u32 offset, u32 count) -{ - int i, status = 0; - for (i = 0; i < count; i++, buf++) { - status = ql_read_mpi_reg(qdev, offset + i, buf); - if (status) - return status; - } - return status; -} - -/* Read the ASIC probe dump */ -static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock, - u32 valid, u32 *buf) -{ - u32 module, mux_sel, probe, lo_val, hi_val; - - for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) { - if (!((valid >> module) & 1)) - continue; - for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) { - probe = clock - | PRB_MX_ADDR_ARE - | mux_sel - | (module << PRB_MX_ADDR_MOD_SEL_SHIFT); - ql_write32(qdev, PRB_MX_ADDR, probe); - lo_val = ql_read32(qdev, PRB_MX_DATA); - if (mux_sel == 0) { - *buf = probe; - buf++; - } - probe |= PRB_MX_ADDR_UP; - ql_write32(qdev, PRB_MX_ADDR, probe); - hi_val = ql_read32(qdev, PRB_MX_DATA); - *buf = lo_val; - buf++; - *buf = hi_val; - buf++; - } - } - return buf; -} - -static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf) -{ - /* First we have to enable the probe mux */ - ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN); - buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK, - PRB_MX_ADDR_VALID_SYS_MOD, buf); - buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK, - PRB_MX_ADDR_VALID_PCI_MOD, buf); - buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK, - PRB_MX_ADDR_VALID_XGM_MOD, buf); - buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK, - PRB_MX_ADDR_VALID_FC_MOD, buf); - return 0; - -} - -/* Read out the routing index registers */ -static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf) -{ - int status; - u32 type, index, index_max; - u32 result_index; - u32 result_data; - u32 val; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return status; - - for (type = 0; type < 4; type++) { - if (type < 2) - index_max = 8; - else - index_max = 16; - for (index = 0; index < index_max; index++) { - val = RT_IDX_RS - | (type << RT_IDX_TYPE_SHIFT) - | (index << RT_IDX_IDX_SHIFT); - ql_write32(qdev, RT_IDX, val); - result_index = 0; - while ((result_index & RT_IDX_MR) == 0) - result_index = ql_read32(qdev, RT_IDX); - result_data = ql_read32(qdev, RT_DATA); - *buf = type; - buf++; - *buf = index; - buf++; - *buf = result_index; - buf++; - *buf = result_data; - buf++; - } - } - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); - return status; -} - -/* Read out the MAC protocol registers */ -static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) -{ - u32 result_index, result_data; - u32 type; - u32 index; - u32 offset; - u32 val; - u32 initial_val = MAC_ADDR_RS; - u32 max_index; - u32 max_offset; - - for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) { - switch (type) { - - case 0: /* CAM */ - initial_val |= MAC_ADDR_ADR; - max_index = MAC_ADDR_MAX_CAM_ENTRIES; - max_offset = MAC_ADDR_MAX_CAM_WCOUNT; - break; - case 1: /* Multicast MAC Address */ - max_index = MAC_ADDR_MAX_CAM_WCOUNT; - max_offset = MAC_ADDR_MAX_CAM_WCOUNT; - break; - case 2: /* VLAN filter mask */ - case 3: /* MC filter mask */ - max_index = MAC_ADDR_MAX_CAM_WCOUNT; - max_offset = MAC_ADDR_MAX_CAM_WCOUNT; - break; - case 4: /* FC MAC addresses */ - max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES; - max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT; - break; - case 5: /* Mgmt MAC addresses */ - max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES; - max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT; - 
break;
-		case 6:	/* Mgmt VLAN addresses */
-			max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
-			max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
-			break;
-		case 7:	/* Mgmt IPv4 address */
-			max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
-			max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
-			break;
-		case 8:	/* Mgmt IPv6 address */
-			max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
-			max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
-			break;
-		case 9:	/* Mgmt TCP/UDP Dest port */
-			max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
-			max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
-			break;
-		default:
-			pr_err("Bad type!!! 0x%08x\n", type);
-			max_index = 0;
-			max_offset = 0;
-			break;
-		}
-		for (index = 0; index < max_index; index++) {
-			for (offset = 0; offset < max_offset; offset++) {
-				val = initial_val
-					| (type << MAC_ADDR_TYPE_SHIFT)
-					| (index << MAC_ADDR_IDX_SHIFT)
-					| (offset);
-				ql_write32(qdev, MAC_ADDR_IDX, val);
-				result_index = 0;
-				while ((result_index & MAC_ADDR_MR) == 0) {
-					result_index = ql_read32(qdev,
-								MAC_ADDR_IDX);
-				}
-				result_data = ql_read32(qdev, MAC_ADDR_DATA);
-				*buf = result_index;
-				buf++;
-				*buf = result_data;
-				buf++;
-			}
-		}
-	}
-}
-
-static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
-{
-	u32 func_num, reg, reg_val;
-	int status;
-
-	for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS; func_num++) {
-		reg = MPI_NIC_REG_BLOCK
-			| (func_num << MPI_NIC_FUNCTION_SHIFT)
-			| (SEM / 4);
-		status = ql_read_mpi_reg(qdev, reg, &reg_val);
-		*buf = reg_val;
-		/* if the read failed then dead fill the element. */
-		if (status)
-			*buf = 0xdeadbeef;
-		buf++;
-	}
-}
-
-/* Create a coredump segment header */
-static void ql_build_coredump_seg_header(
-		struct mpi_coredump_segment_header *seg_hdr,
-		u32 seg_number, u32 seg_size, u8 *desc)
-{
-	memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
-	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
-	seg_hdr->segNum = seg_number;
-	seg_hdr->segSize = seg_size;
-	memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
-}
-
-/*
- * This function should be called when a coredump / probedump
- * is to be extracted from the HBA. It is assumed there is a
- * qdev structure that contains the base address of the register
- * space for this function as well as a coredump structure that
- * will contain the dump.
- */
-int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
-{
-	int status;
-	int i;
-
-	if (!mpi_coredump) {
-		netif_err(qdev, drv, qdev->ndev, "No memory available\n");
-		return -ENOMEM;
-	}
-
-	/* Try to get the spinlock, but don't worry if
-	 * it isn't available.  If the firmware died it
-	 * might be holding the sem.
-	 */
-	ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
-
-	status = ql_pause_mpi_risc(qdev);
-	if (status) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Failed RISC pause.
Status = 0x%.08x\n", status); - goto err; - } - - /* Insert the global header */ - memset(&(mpi_coredump->mpi_global_header), 0, - sizeof(struct mpi_coredump_global_header)); - mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; - mpi_coredump->mpi_global_header.headerSize = - sizeof(struct mpi_coredump_global_header); - mpi_coredump->mpi_global_header.imageSize = - sizeof(struct ql_mpi_coredump); - memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", - sizeof(mpi_coredump->mpi_global_header.idString)); - - /* Get generic NIC reg dump */ - ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, - NIC1_CONTROL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->nic_regs), "NIC1 Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr, - NIC2_CONTROL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->nic2_regs), "NIC2 Registers"); - - /* Get XGMac registers. (Segment 18, Rev C. step 21) */ - ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr, - NIC1_XGMAC_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr, - NIC2_XGMAC_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers"); - - if (qdev->func & 1) { - /* Odd means our function is NIC 2 */ - for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) - mpi_coredump->nic2_regs[i] = - ql_read32(qdev, i * sizeof(u32)); - - for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) - mpi_coredump->nic_regs[i] = - ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); - - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0); - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1); - } else { - /* Even means our function is NIC 1 */ - for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) - mpi_coredump->nic_regs[i] = - ql_read32(qdev, i * sizeof(u32)); - for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) - mpi_coredump->nic2_regs[i] = - ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); - - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0); - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1); - } - - /* Rev C. Step 20a */ - ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr, - XAUI_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xaui_an), - "XAUI AN Registers"); - - /* Rev C. 
Step 20b */ - ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr, - XAUI_HSS_PCS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xaui_hss_pcs), - "XAUI HSS PCS Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_an), - "XFI AN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr, - XFI_TRAIN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_train), - "XFI TRAIN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr, - XFI_HSS_PCS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_hss_pcs), - "XFI HSS PCS Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr, - XFI_HSS_TX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_hss_tx), - "XFI HSS TX Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr, - XFI_HSS_RX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_hss_rx), - "XFI HSS RX Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr, - XFI_HSS_PLL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_hss_pll), - "XFI HSS PLL Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr, - XAUI2_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xaui_an), - "XAUI2 AN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr, - XAUI2_HSS_PCS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xaui_hss_pcs), - "XAUI2 HSS PCS Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr, - XFI2_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_an), - "XFI2 AN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr, - XFI2_TRAIN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_train), - "XFI2 TRAIN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr, - XFI2_HSS_PCS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_hss_pcs), - "XFI2 HSS PCS Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr, - XFI2_HSS_TX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_hss_tx), - "XFI2 HSS TX Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr, - XFI2_HSS_RX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_hss_rx), - "XFI2 HSS RX Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr, - XFI2_HSS_PLL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_hss_pll), - "XFI2 HSS PLL Registers"); - - status = ql_get_serdes_regs(qdev, mpi_coredump); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed Dump of Serdes Registers. 
Status = 0x%.08x\n", - status); - goto err; - } - - ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr, - CORE_SEG_NUM, - sizeof(mpi_coredump->core_regs_seg_hdr) + - sizeof(mpi_coredump->mpi_core_regs) + - sizeof(mpi_coredump->mpi_core_sh_regs), - "Core Registers"); - - /* Get the MPI Core Registers */ - status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0], - MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT); - if (status) - goto err; - /* Get the 16 MPI shadow registers */ - status = ql_get_mpi_shadow_regs(qdev, - &mpi_coredump->mpi_core_sh_regs[0]); - if (status) - goto err; - - /* Get the Test Logic Registers */ - ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr, - TEST_LOGIC_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->test_logic_regs), - "Test Logic Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0], - TEST_REGS_ADDR, TEST_REGS_CNT); - if (status) - goto err; - - /* Get the RMII Registers */ - ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr, - RMII_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->rmii_regs), - "RMII Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0], - RMII_REGS_ADDR, RMII_REGS_CNT); - if (status) - goto err; - - /* Get the FCMAC1 Registers */ - ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr, - FCMAC1_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->fcmac1_regs), - "FCMAC1 Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0], - FCMAC1_REGS_ADDR, FCMAC_REGS_CNT); - if (status) - goto err; - - /* Get the FCMAC2 Registers */ - - ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr, - FCMAC2_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->fcmac2_regs), - "FCMAC2 Registers"); - - status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0], - FCMAC2_REGS_ADDR, FCMAC_REGS_CNT); - if (status) - goto err; - - /* Get the FC1 MBX Registers */ - ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr, - FC1_MBOX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->fc1_mbx_regs), - "FC1 MBox Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0], - FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT); - if (status) - goto err; - - /* Get the IDE Registers */ - ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr, - IDE_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->ide_regs), - "IDE Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0], - IDE_REGS_ADDR, IDE_REGS_CNT); - if (status) - goto err; - - /* Get the NIC1 MBX Registers */ - ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr, - NIC1_MBOX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic1_mbx_regs), - "NIC1 MBox Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0], - NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); - if (status) - goto err; - - /* Get the SMBus Registers */ - ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr, - SMBUS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->smbus_regs), - "SMBus Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0], - SMBUS_REGS_ADDR, SMBUS_REGS_CNT); - if (status) - goto err; - - /* Get the FC2 MBX Registers */ - ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr, - FC2_MBOX_SEG_NUM, - 
sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->fc2_mbx_regs), - "FC2 MBox Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0], - FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT); - if (status) - goto err; - - /* Get the NIC2 MBX Registers */ - ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr, - NIC2_MBOX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic2_mbx_regs), - "NIC2 MBox Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0], - NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); - if (status) - goto err; - - /* Get the I2C Registers */ - ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr, - I2C_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->i2c_regs), - "I2C Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0], - I2C_REGS_ADDR, I2C_REGS_CNT); - if (status) - goto err; - - /* Get the MEMC Registers */ - ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr, - MEMC_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->memc_regs), - "MEMC Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0], - MEMC_REGS_ADDR, MEMC_REGS_CNT); - if (status) - goto err; - - /* Get the PBus Registers */ - ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr, - PBUS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->pbus_regs), - "PBUS Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0], - PBUS_REGS_ADDR, PBUS_REGS_CNT); - if (status) - goto err; - - /* Get the MDE Registers */ - ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr, - MDE_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->mde_regs), - "MDE Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0], - MDE_REGS_ADDR, MDE_REGS_CNT); - if (status) - goto err; - - ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, - MISC_NIC_INFO_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->misc_nic_info), - "MISC NIC INFO"); - mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; - mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; - mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; - mpi_coredump->misc_nic_info.function = qdev->func; - - /* Segment 31 */ - /* Get indexed register values. */ - ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, - INTR_STATES_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->intr_states), - "INTR States"); - ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); - - ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, - CAM_ENTRIES_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->cam_entries), - "CAM Entries"); - status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); - if (status) - goto err; - - ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, - ROUTING_WORDS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic_routing_words), - "Routing Words"); - status = ql_get_routing_entries(qdev, - &mpi_coredump->nic_routing_words[0]); - if (status) - goto err; - - /* Segment 34 (Rev C. 
step 23) */
-	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
-				ETS_SEG_NUM,
-				sizeof(struct mpi_coredump_segment_header)
-				+ sizeof(mpi_coredump->ets),
-				"ETS Registers");
-	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
-	if (status)
-		goto err;
-
-	ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
-				PROBE_DUMP_SEG_NUM,
-				sizeof(struct mpi_coredump_segment_header)
-				+ sizeof(mpi_coredump->probe_dump),
-				"Probe Dump");
-	ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
-
-	ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
-				ROUTING_INDEX_SEG_NUM,
-				sizeof(struct mpi_coredump_segment_header)
-				+ sizeof(mpi_coredump->routing_regs),
-				"Routing Regs");
-	status = ql_get_routing_index_registers(qdev,
-					&mpi_coredump->routing_regs[0]);
-	if (status)
-		goto err;
-
-	ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
-				MAC_PROTOCOL_SEG_NUM,
-				sizeof(struct mpi_coredump_segment_header)
-				+ sizeof(mpi_coredump->mac_prot_regs),
-				"MAC Prot Regs");
-	ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
-
-	/* Get the semaphore registers for all 4 functions */
-	ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
-			SEM_REGS_SEG_NUM,
-			sizeof(struct mpi_coredump_segment_header) +
-			sizeof(mpi_coredump->sem_regs),	"Sem Registers");
-
-	ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
-
-	/* Prevent the mpi restarting while we dump the memory. */
-	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
-
-	/* clear the pause */
-	status = ql_unpause_mpi_risc(qdev);
-	if (status) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Failed RISC unpause. Status = 0x%.08x\n", status);
-		goto err;
-	}
-
-	/* Reset the RISC so we can dump RAM */
-	status = ql_hard_reset_mpi_risc(qdev);
-	if (status) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Failed RISC reset. Status = 0x%.08x\n", status);
-		goto err;
-	}
-
-	ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
-				WCS_RAM_SEG_NUM,
-				sizeof(struct mpi_coredump_segment_header)
-				+ sizeof(mpi_coredump->code_ram),
-				"WCS RAM");
-	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
-					CODE_RAM_ADDR, CODE_RAM_CNT);
-	if (status) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Failed Dump of CODE RAM. Status = 0x%.08x\n",
-			  status);
-		goto err;
-	}
-
-	/* Insert the segment header */
-	ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
-				MEMC_RAM_SEG_NUM,
-				sizeof(struct mpi_coredump_segment_header)
-				+ sizeof(mpi_coredump->memc_ram),
-				"MEMC RAM");
-	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
-					MEMC_RAM_ADDR, MEMC_RAM_CNT);
-	if (status) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Failed Dump of MEMC RAM.
Status = 0x%.08x\n",
-			  status);
-		goto err;
-	}
-err:
-	ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
-	return status;
-
-}
-
-static void ql_get_core_dump(struct ql_adapter *qdev)
-{
-	if (!ql_own_firmware(qdev)) {
-		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
-		return;
-	}
-
-	if (!netif_running(qdev->ndev)) {
-		netif_err(qdev, ifup, qdev->ndev,
-			  "Force Coredump can only be done from an interface that is up\n");
-		return;
-	}
-	ql_queue_fw_error(qdev);
-}
-
-void ql_gen_reg_dump(struct ql_adapter *qdev,
-			struct ql_reg_dump *mpi_coredump)
-{
-	int i, status;
-
-	memset(&(mpi_coredump->mpi_global_header), 0,
-		sizeof(struct mpi_coredump_global_header));
-	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
-	mpi_coredump->mpi_global_header.headerSize =
-		sizeof(struct mpi_coredump_global_header);
-	mpi_coredump->mpi_global_header.imageSize =
-		sizeof(struct ql_reg_dump);
-	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
-		sizeof(mpi_coredump->mpi_global_header.idString));
-
-	/* segment 30 */
-	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
-				MISC_NIC_INFO_SEG_NUM,
-				sizeof(struct mpi_coredump_segment_header)
-				+ sizeof(mpi_coredump->misc_nic_info),
-				"MISC NIC INFO");
-	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
-	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
-	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
-	mpi_coredump->misc_nic_info.function = qdev->func;
-
-	/* Segment 16, Rev C. Step 18 */
-	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
-				NIC1_CONTROL_SEG_NUM,
-				sizeof(struct mpi_coredump_segment_header)
-				+ sizeof(mpi_coredump->nic_regs),
-				"NIC Registers");
-	/* Get generic reg dump */
-	for (i = 0; i < 64; i++)
-		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
-
-	/* Segment 31 */
-	/* Get indexed register values. */
-	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
-				INTR_STATES_SEG_NUM,
-				sizeof(struct mpi_coredump_segment_header)
-				+ sizeof(mpi_coredump->intr_states),
-				"INTR States");
-	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
-
-	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
-				CAM_ENTRIES_SEG_NUM,
-				sizeof(struct mpi_coredump_segment_header)
-				+ sizeof(mpi_coredump->cam_entries),
-				"CAM Entries");
-	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
-	if (status)
-		return;
-
-	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
-				ROUTING_WORDS_SEG_NUM,
-				sizeof(struct mpi_coredump_segment_header)
-				+ sizeof(mpi_coredump->nic_routing_words),
-				"Routing Words");
-	status = ql_get_routing_entries(qdev,
-			&mpi_coredump->nic_routing_words[0]);
-	if (status)
-		return;
-
-	/* Segment 34 (Rev C. step 23) */
-	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
-				ETS_SEG_NUM,
-				sizeof(struct mpi_coredump_segment_header)
-				+ sizeof(mpi_coredump->ets),
-				"ETS Registers");
-	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
-	if (status)
-		return;
-}
-
-void ql_get_dump(struct ql_adapter *qdev, void *buff)
-{
-	/*
-	 * If force coredump is set, the core has already been dumped into
-	 * our internal buffer: start the spool to the log file and take a
-	 * snapshot of the general registers into the user's buffer.
-	 * Otherwise take a complete dump straight into the user's buffer.
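-	 * Concretely: with QL_FRC_COREDUMP clear, ql_core_dump() fills the
-	 * caller's buffer and the RISC is then soft reset; with it set,
-	 * ql_gen_reg_dump() snapshots the registers and ql_get_core_dump()
-	 * queues the firmware-error worker, which later spools the stored
-	 * core to the log.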
- */ - - if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) { - if (!ql_core_dump(qdev, buff)) - ql_soft_reset_mpi_risc(qdev); - else - netif_err(qdev, drv, qdev->ndev, "coredump failed!\n"); - } else { - ql_gen_reg_dump(qdev, buff); - ql_get_core_dump(qdev); - } -} - -/* Coredump to messages log file using separate worker thread */ -void ql_mpi_core_to_log(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, mpi_core_to_log.work); - u32 *tmp, count; - int i; - - count = sizeof(struct ql_mpi_coredump) / sizeof(u32); - tmp = (u32 *)qdev->mpi_coredump; - netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, - "Core is dumping to log file!\n"); - - for (i = 0; i < count; i += 8) { - pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x " - "%.08x %.08x %.08x\n", i, - tmp[i + 0], - tmp[i + 1], - tmp[i + 2], - tmp[i + 3], - tmp[i + 4], - tmp[i + 5], - tmp[i + 6], - tmp[i + 7]); - msleep(5); - } -} - -#ifdef QL_REG_DUMP -static void ql_dump_intr_states(struct ql_adapter *qdev) -{ - int i; - u32 value; - for (i = 0; i < qdev->intr_count; i++) { - ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); - value = ql_read32(qdev, INTR_EN); - pr_err("%s: Interrupt %d is %s\n", - qdev->ndev->name, i, - (value & INTR_EN_EN ? "enabled" : "disabled")); - } -} - -#define DUMP_XGMAC(qdev, reg) \ -do { \ - u32 data; \ - ql_read_xgmac_reg(qdev, reg, &data); \ - pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \ -} while (0) - -void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) -{ - if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { - pr_err("%s: Couldn't get xgmac sem\n", __func__); - return; - } - DUMP_XGMAC(qdev, PAUSE_SRC_LO); - DUMP_XGMAC(qdev, PAUSE_SRC_HI); - DUMP_XGMAC(qdev, GLOBAL_CFG); - DUMP_XGMAC(qdev, TX_CFG); - DUMP_XGMAC(qdev, RX_CFG); - DUMP_XGMAC(qdev, FLOW_CTL); - DUMP_XGMAC(qdev, PAUSE_OPCODE); - DUMP_XGMAC(qdev, PAUSE_TIMER); - DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO); - DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI); - DUMP_XGMAC(qdev, MAC_TX_PARAMS); - DUMP_XGMAC(qdev, MAC_RX_PARAMS); - DUMP_XGMAC(qdev, MAC_SYS_INT); - DUMP_XGMAC(qdev, MAC_SYS_INT_MASK); - DUMP_XGMAC(qdev, MAC_MGMT_INT); - DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK); - DUMP_XGMAC(qdev, EXT_ARB_MODE); - ql_sem_unlock(qdev, qdev->xg_sem_mask); -} - -static void ql_dump_ets_regs(struct ql_adapter *qdev) -{ -} - -static void ql_dump_cam_entries(struct ql_adapter *qdev) -{ - int i; - u32 value[3]; - - i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (i) - return; - for (i = 0; i < 4; i++) { - if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { - pr_err("%s: Failed read of mac index register\n", - __func__); - return; - } else { - if (value[0]) - pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n", - qdev->ndev->name, i, value[1], value[0], - value[2]); - } - } - for (i = 0; i < 32; i++) { - if (ql_get_mac_addr_reg - (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { - pr_err("%s: Failed read of mac index register\n", - __func__); - return; - } else { - if (value[0]) - pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n", - qdev->ndev->name, i, value[1], value[0]); - } - } - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); -} - -void ql_dump_routing_entries(struct ql_adapter *qdev) -{ - int i; - u32 value; - i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (i) - return; - for (i = 0; i < 16; i++) { - value = 0; - if (ql_get_routing_reg(qdev, i, &value)) { - pr_err("%s: Failed read of routing index register\n", - __func__); - return; - } else { - if (value) - 
pr_err("%s: Routing Mask %d = 0x%.08x\n", - qdev->ndev->name, i, value); - } - } - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); -} - -#define DUMP_REG(qdev, reg) \ - pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg)) - -void ql_dump_regs(struct ql_adapter *qdev) -{ - pr_err("reg dump for function #%d\n", qdev->func); - DUMP_REG(qdev, SYS); - DUMP_REG(qdev, RST_FO); - DUMP_REG(qdev, FSC); - DUMP_REG(qdev, CSR); - DUMP_REG(qdev, ICB_RID); - DUMP_REG(qdev, ICB_L); - DUMP_REG(qdev, ICB_H); - DUMP_REG(qdev, CFG); - DUMP_REG(qdev, BIOS_ADDR); - DUMP_REG(qdev, STS); - DUMP_REG(qdev, INTR_EN); - DUMP_REG(qdev, INTR_MASK); - DUMP_REG(qdev, ISR1); - DUMP_REG(qdev, ISR2); - DUMP_REG(qdev, ISR3); - DUMP_REG(qdev, ISR4); - DUMP_REG(qdev, REV_ID); - DUMP_REG(qdev, FRC_ECC_ERR); - DUMP_REG(qdev, ERR_STS); - DUMP_REG(qdev, RAM_DBG_ADDR); - DUMP_REG(qdev, RAM_DBG_DATA); - DUMP_REG(qdev, ECC_ERR_CNT); - DUMP_REG(qdev, SEM); - DUMP_REG(qdev, GPIO_1); - DUMP_REG(qdev, GPIO_2); - DUMP_REG(qdev, GPIO_3); - DUMP_REG(qdev, XGMAC_ADDR); - DUMP_REG(qdev, XGMAC_DATA); - DUMP_REG(qdev, NIC_ETS); - DUMP_REG(qdev, CNA_ETS); - DUMP_REG(qdev, FLASH_ADDR); - DUMP_REG(qdev, FLASH_DATA); - DUMP_REG(qdev, CQ_STOP); - DUMP_REG(qdev, PAGE_TBL_RID); - DUMP_REG(qdev, WQ_PAGE_TBL_LO); - DUMP_REG(qdev, WQ_PAGE_TBL_HI); - DUMP_REG(qdev, CQ_PAGE_TBL_LO); - DUMP_REG(qdev, CQ_PAGE_TBL_HI); - DUMP_REG(qdev, COS_DFLT_CQ1); - DUMP_REG(qdev, COS_DFLT_CQ2); - DUMP_REG(qdev, SPLT_HDR); - DUMP_REG(qdev, FC_PAUSE_THRES); - DUMP_REG(qdev, NIC_PAUSE_THRES); - DUMP_REG(qdev, FC_ETHERTYPE); - DUMP_REG(qdev, FC_RCV_CFG); - DUMP_REG(qdev, NIC_RCV_CFG); - DUMP_REG(qdev, FC_COS_TAGS); - DUMP_REG(qdev, NIC_COS_TAGS); - DUMP_REG(qdev, MGMT_RCV_CFG); - DUMP_REG(qdev, XG_SERDES_ADDR); - DUMP_REG(qdev, XG_SERDES_DATA); - DUMP_REG(qdev, PRB_MX_ADDR); - DUMP_REG(qdev, PRB_MX_DATA); - ql_dump_intr_states(qdev); - ql_dump_xgmac_control_regs(qdev); - ql_dump_ets_regs(qdev); - ql_dump_cam_entries(qdev); - ql_dump_routing_entries(qdev); -} -#endif - -#ifdef QL_STAT_DUMP - -#define DUMP_STAT(qdev, stat) \ - pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat) - -void ql_dump_stat(struct ql_adapter *qdev) -{ - pr_err("%s: Enter\n", __func__); - DUMP_STAT(qdev, tx_pkts); - DUMP_STAT(qdev, tx_bytes); - DUMP_STAT(qdev, tx_mcast_pkts); - DUMP_STAT(qdev, tx_bcast_pkts); - DUMP_STAT(qdev, tx_ucast_pkts); - DUMP_STAT(qdev, tx_ctl_pkts); - DUMP_STAT(qdev, tx_pause_pkts); - DUMP_STAT(qdev, tx_64_pkt); - DUMP_STAT(qdev, tx_65_to_127_pkt); - DUMP_STAT(qdev, tx_128_to_255_pkt); - DUMP_STAT(qdev, tx_256_511_pkt); - DUMP_STAT(qdev, tx_512_to_1023_pkt); - DUMP_STAT(qdev, tx_1024_to_1518_pkt); - DUMP_STAT(qdev, tx_1519_to_max_pkt); - DUMP_STAT(qdev, tx_undersize_pkt); - DUMP_STAT(qdev, tx_oversize_pkt); - DUMP_STAT(qdev, rx_bytes); - DUMP_STAT(qdev, rx_bytes_ok); - DUMP_STAT(qdev, rx_pkts); - DUMP_STAT(qdev, rx_pkts_ok); - DUMP_STAT(qdev, rx_bcast_pkts); - DUMP_STAT(qdev, rx_mcast_pkts); - DUMP_STAT(qdev, rx_ucast_pkts); - DUMP_STAT(qdev, rx_undersize_pkts); - DUMP_STAT(qdev, rx_oversize_pkts); - DUMP_STAT(qdev, rx_jabber_pkts); - DUMP_STAT(qdev, rx_undersize_fcerr_pkts); - DUMP_STAT(qdev, rx_drop_events); - DUMP_STAT(qdev, rx_fcerr_pkts); - DUMP_STAT(qdev, rx_align_err); - DUMP_STAT(qdev, rx_symbol_err); - DUMP_STAT(qdev, rx_mac_err); - DUMP_STAT(qdev, rx_ctl_pkts); - DUMP_STAT(qdev, rx_pause_pkts); - DUMP_STAT(qdev, rx_64_pkts); - DUMP_STAT(qdev, rx_65_to_127_pkts); - DUMP_STAT(qdev, rx_128_255_pkts); - DUMP_STAT(qdev, rx_256_511_pkts); - DUMP_STAT(qdev, 
rx_512_to_1023_pkts); - DUMP_STAT(qdev, rx_1024_to_1518_pkts); - DUMP_STAT(qdev, rx_1519_to_max_pkts); - DUMP_STAT(qdev, rx_len_err_pkts); -}; -#endif - -#ifdef QL_DEV_DUMP - -#define DUMP_QDEV_FIELD(qdev, type, field) \ - pr_err("qdev->%-24s = " type "\n", #field, qdev->field) -#define DUMP_QDEV_DMA_FIELD(qdev, field) \ - pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field) -#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \ - pr_err("%s[%d].%s = " type "\n", \ - #array, index, #field, qdev->array[index].field); -void ql_dump_qdev(struct ql_adapter *qdev) -{ - int i; - DUMP_QDEV_FIELD(qdev, "%lx", flags); - DUMP_QDEV_FIELD(qdev, "%p", vlgrp); - DUMP_QDEV_FIELD(qdev, "%p", pdev); - DUMP_QDEV_FIELD(qdev, "%p", ndev); - DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id); - DUMP_QDEV_FIELD(qdev, "%p", reg_base); - DUMP_QDEV_FIELD(qdev, "%p", doorbell_area); - DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size); - DUMP_QDEV_FIELD(qdev, "%x", msg_enable); - DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area); - DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma); - DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area); - DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma); - DUMP_QDEV_FIELD(qdev, "%d", intr_count); - if (qdev->msi_x_entry) - for (i = 0; i < qdev->intr_count; i++) { - DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector); - DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry); - } - for (i = 0; i < qdev->intr_count; i++) { - DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev); - DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr); - DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked); - DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask); - DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask); - DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask); - } - DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count); - DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count); - DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size); - DUMP_QDEV_FIELD(qdev, "%p", ring_mem); - DUMP_QDEV_FIELD(qdev, "%d", intr_count); - DUMP_QDEV_FIELD(qdev, "%p", tx_ring); - DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count); - DUMP_QDEV_FIELD(qdev, "%p", rx_ring); - DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue); - DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask); - DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up); - DUMP_QDEV_FIELD(qdev, "0x%08x", port_init); -} -#endif - -#ifdef QL_CB_DUMP -void ql_dump_wqicb(struct wqicb *wqicb) -{ - pr_err("Dumping wqicb stuff...\n"); - pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len)); - pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags)); - pr_err("wqicb->cq_id_rss = %d\n", - le16_to_cpu(wqicb->cq_id_rss)); - pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid)); - pr_err("wqicb->wq_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(wqicb->addr)); - pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr)); -} - -void ql_dump_tx_ring(struct tx_ring *tx_ring) -{ - if (tx_ring == NULL) - return; - pr_err("===================== Dumping tx_ring %d ===============\n", - tx_ring->wq_id); - pr_err("tx_ring->base = %p\n", tx_ring->wq_base); - pr_err("tx_ring->base_dma = 0x%llx\n", - (unsigned long long) tx_ring->wq_base_dma); - pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n", - tx_ring->cnsmr_idx_sh_reg, - tx_ring->cnsmr_idx_sh_reg - ? 
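[Editor's sketch] The DUMP_QDEV_FIELD/DUMP_QDEV_ARRAY macros above lean on preprocessor stringification (#field) so the printed label can never drift from the member actually being read. A self-contained sketch of the idiom; the struct and field names here are made up for illustration:

    #include <stdio.h>

    struct adapter {
            int intr_count;
            void *ring_mem;
    };

    /* '#field' turns the member name into its label at compile time. */
    #define DUMP_FIELD(ptr, fmt, field) \
            printf("%-24s = " fmt "\n", #field, (ptr)->field)

    int main(void)
    {
            struct adapter a = { .intr_count = 4, .ring_mem = NULL };

            DUMP_FIELD(&a, "%d", intr_count);
            DUMP_FIELD(&a, "%p", ring_mem);
            return 0;
    }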
ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0); - pr_err("tx_ring->size = %d\n", tx_ring->wq_size); - pr_err("tx_ring->len = %d\n", tx_ring->wq_len); - pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg); - pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg); - pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx); - pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id); - pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id); - pr_err("tx_ring->q = %p\n", tx_ring->q); - pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count)); -} - -void ql_dump_ricb(struct ricb *ricb) -{ - int i; - pr_err("===================== Dumping ricb ===============\n"); - pr_err("Dumping ricb stuff...\n"); - - pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f); - pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n", - ricb->base_cq & RSS_L4K ? "RSS_L4K " : "", - ricb->flags & RSS_L6K ? "RSS_L6K " : "", - ricb->flags & RSS_LI ? "RSS_LI " : "", - ricb->flags & RSS_LB ? "RSS_LB " : "", - ricb->flags & RSS_LM ? "RSS_LM " : "", - ricb->flags & RSS_RI4 ? "RSS_RI4 " : "", - ricb->flags & RSS_RT4 ? "RSS_RT4 " : "", - ricb->flags & RSS_RI6 ? "RSS_RI6 " : "", - ricb->flags & RSS_RT6 ? "RSS_RT6 " : ""); - pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask)); - for (i = 0; i < 16; i++) - pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i, - le32_to_cpu(ricb->hash_cq_id[i])); - for (i = 0; i < 10; i++) - pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i, - le32_to_cpu(ricb->ipv6_hash_key[i])); - for (i = 0; i < 4; i++) - pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i, - le32_to_cpu(ricb->ipv4_hash_key[i])); -} - -void ql_dump_cqicb(struct cqicb *cqicb) -{ - pr_err("Dumping cqicb stuff...\n"); - - pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect); - pr_err("cqicb->flags = %x\n", cqicb->flags); - pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len)); - pr_err("cqicb->addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->addr)); - pr_err("cqicb->prod_idx_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr)); - pr_err("cqicb->pkt_delay = 0x%.04x\n", - le16_to_cpu(cqicb->pkt_delay)); - pr_err("cqicb->irq_delay = 0x%.04x\n", - le16_to_cpu(cqicb->irq_delay)); - pr_err("cqicb->lbq_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->lbq_addr)); - pr_err("cqicb->lbq_buf_size = 0x%.04x\n", - le16_to_cpu(cqicb->lbq_buf_size)); - pr_err("cqicb->lbq_len = 0x%.04x\n", - le16_to_cpu(cqicb->lbq_len)); - pr_err("cqicb->sbq_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->sbq_addr)); - pr_err("cqicb->sbq_buf_size = 0x%.04x\n", - le16_to_cpu(cqicb->sbq_buf_size)); - pr_err("cqicb->sbq_len = 0x%.04x\n", - le16_to_cpu(cqicb->sbq_len)); -} - -void ql_dump_rx_ring(struct rx_ring *rx_ring) -{ - if (rx_ring == NULL) - return; - pr_err("===================== Dumping rx_ring %d ===============\n", - rx_ring->cq_id); - pr_err("Dumping rx_ring %d, type = %s%s%s\n", - rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", - rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", - rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : ""); - pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb); - pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base); - pr_err("rx_ring->cq_base_dma = %llx\n", - (unsigned long long) rx_ring->cq_base_dma); - pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size); - pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len); - pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n", - rx_ring->prod_idx_sh_reg, - rx_ring->prod_idx_sh_reg - ? 
ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); - pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n", - (unsigned long long) rx_ring->prod_idx_sh_reg_dma); - pr_err("rx_ring->cnsmr_idx_db_reg = %p\n", - rx_ring->cnsmr_idx_db_reg); - pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx); - pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry); - pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg); - - pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base); - pr_err("rx_ring->lbq_base_dma = %llx\n", - (unsigned long long) rx_ring->lbq_base_dma); - pr_err("rx_ring->lbq_base_indirect = %p\n", - rx_ring->lbq_base_indirect); - pr_err("rx_ring->lbq_base_indirect_dma = %llx\n", - (unsigned long long) rx_ring->lbq_base_indirect_dma); - pr_err("rx_ring->lbq = %p\n", rx_ring->lbq); - pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len); - pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size); - pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n", - rx_ring->lbq_prod_idx_db_reg); - pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx); - pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx); - pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); - pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); - pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size); - - pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base); - pr_err("rx_ring->sbq_base_dma = %llx\n", - (unsigned long long) rx_ring->sbq_base_dma); - pr_err("rx_ring->sbq_base_indirect = %p\n", - rx_ring->sbq_base_indirect); - pr_err("rx_ring->sbq_base_indirect_dma = %llx\n", - (unsigned long long) rx_ring->sbq_base_indirect_dma); - pr_err("rx_ring->sbq = %p\n", rx_ring->sbq); - pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len); - pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size); - pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n", - rx_ring->sbq_prod_idx_db_reg); - pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx); - pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx); - pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx); - pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt); - pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size); - pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); - pr_err("rx_ring->irq = %d\n", rx_ring->irq); - pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); - pr_err("rx_ring->qdev = %p\n", rx_ring->qdev); -} - -void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) -{ - void *ptr; - - pr_err("%s: Enter\n", __func__); - - ptr = kmalloc(size, GFP_ATOMIC); - if (ptr == NULL) { - pr_err("%s: Couldn't allocate a buffer\n", __func__); - return; - } - - if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { - pr_err("%s: Failed to upload control block!\n", __func__); - goto fail_it; - } - switch (bit) { - case CFG_DRQ: - ql_dump_wqicb((struct wqicb *)ptr); - break; - case CFG_DCQ: - ql_dump_cqicb((struct cqicb *)ptr); - break; - case CFG_DR: - ql_dump_ricb((struct ricb *)ptr); - break; - default: - pr_err("%s: Invalid bit value = %x\n", __func__, bit); - break; - } -fail_it: - kfree(ptr); -} -#endif - -#ifdef QL_OB_DUMP -void ql_dump_tx_desc(struct tx_buf_desc *tbd) -{ - pr_err("tbd->addr = 0x%llx\n", - le64_to_cpu((u64) tbd->addr)); - pr_err("tbd->len = %d\n", - le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); - pr_err("tbd->flags = %s %s\n", - tbd->len & TX_DESC_C ? "C" : ".", - tbd->len & TX_DESC_E ? 
"E" : "."); - tbd++; - pr_err("tbd->addr = 0x%llx\n", - le64_to_cpu((u64) tbd->addr)); - pr_err("tbd->len = %d\n", - le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); - pr_err("tbd->flags = %s %s\n", - tbd->len & TX_DESC_C ? "C" : ".", - tbd->len & TX_DESC_E ? "E" : "."); - tbd++; - pr_err("tbd->addr = 0x%llx\n", - le64_to_cpu((u64) tbd->addr)); - pr_err("tbd->len = %d\n", - le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); - pr_err("tbd->flags = %s %s\n", - tbd->len & TX_DESC_C ? "C" : ".", - tbd->len & TX_DESC_E ? "E" : "."); - -} - -void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) -{ - struct ob_mac_tso_iocb_req *ob_mac_tso_iocb = - (struct ob_mac_tso_iocb_req *)ob_mac_iocb; - struct tx_buf_desc *tbd; - u16 frame_len; - - pr_err("%s\n", __func__); - pr_err("opcode = %s\n", - (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO"); - pr_err("flags1 = %s %s %s %s %s\n", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : ""); - pr_err("flags2 = %s %s %s\n", - ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", - ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", - ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); - pr_err("flags3 = %s %s %s\n", - ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", - ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", - ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); - pr_err("tid = %x\n", ob_mac_iocb->tid); - pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx); - pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci); - if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) { - pr_err("frame_len = %d\n", - le32_to_cpu(ob_mac_tso_iocb->frame_len)); - pr_err("mss = %d\n", - le16_to_cpu(ob_mac_tso_iocb->mss)); - pr_err("prot_hdr_len = %d\n", - le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len)); - pr_err("hdr_offset = 0x%.04x\n", - le16_to_cpu(ob_mac_tso_iocb->net_trans_offset)); - frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len); - } else { - pr_err("frame_len = %d\n", - le16_to_cpu(ob_mac_iocb->frame_len)); - frame_len = le16_to_cpu(ob_mac_iocb->frame_len); - } - tbd = &ob_mac_iocb->tbd[0]; - ql_dump_tx_desc(tbd); -} - -void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) -{ - pr_err("%s\n", __func__); - pr_err("opcode = %d\n", ob_mac_rsp->opcode); - pr_err("flags = %s %s %s %s %s %s %s\n", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".", - ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : "."); - pr_err("tid = %x\n", ob_mac_rsp->tid); -} -#endif - -#ifdef QL_IB_DUMP -void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) -{ - pr_err("%s\n", __func__); - pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode); - pr_err("flags1 = %s%s%s%s%s%s\n", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? 
"B " : ""); - - if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) - pr_err("%s%s%s Multicast\n", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); - - pr_err("flags2 = %s%s%s%s%s\n", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : ""); - - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) - pr_err("%s%s%s%s%s error\n", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : ""); - - pr_err("flags3 = %s%s\n", - ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "", - ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : ""); - - if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) - pr_err("RSS flags = %s%s%s%s\n", - ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == - IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "", - ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == - IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "", - ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == - IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "", - ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == - IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : ""); - - pr_err("data_len = %d\n", - le32_to_cpu(ib_mac_rsp->data_len)); - pr_err("data_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr)); - if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) - pr_err("rss = %x\n", - le32_to_cpu(ib_mac_rsp->rss)); - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) - pr_err("vlan_id = %x\n", - le16_to_cpu(ib_mac_rsp->vlan_id)); - - pr_err("flags4 = %s%s%s\n", - ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "", - ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "", - ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? 
"HL " : ""); - - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { - pr_err("hdr length = %d\n", - le32_to_cpu(ib_mac_rsp->hdr_len)); - pr_err("hdr addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr)); - } -} -#endif - -#ifdef QL_ALL_DUMP -void ql_dump_all(struct ql_adapter *qdev) -{ - int i; - - QL_DUMP_REGS(qdev); - QL_DUMP_QDEV(qdev); - for (i = 0; i < qdev->tx_ring_count; i++) { - QL_DUMP_TX_RING(&qdev->tx_ring[i]); - QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]); - } - for (i = 0; i < qdev->rx_ring_count; i++) { - QL_DUMP_RX_RING(&qdev->rx_ring[i]); - QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]); - } -} -#endif diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c deleted file mode 100644 index 9b67bfea035f..000000000000 --- a/drivers/net/qlge/qlge_ethtool.c +++ /dev/null @@ -1,688 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -#include "qlge.h" - -static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { - "Loopback test (offline)" -}; -#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) - -static int ql_update_ring_coalescing(struct ql_adapter *qdev) -{ - int i, status = 0; - struct rx_ring *rx_ring; - struct cqicb *cqicb; - - if (!netif_running(qdev->ndev)) - return status; - - /* Skip the default queue, and update the outbound handler - * queues if they changed. - */ - cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count]; - if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs || - le16_to_cpu(cqicb->pkt_delay) != - qdev->tx_max_coalesced_frames) { - for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - cqicb = (struct cqicb *)rx_ring; - cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); - cqicb->pkt_delay = - cpu_to_le16(qdev->tx_max_coalesced_frames); - cqicb->flags = FLAGS_LI; - status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), - CFG_LCQ, rx_ring->cq_id); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to load CQICB.\n"); - goto exit; - } - } - } - - /* Update the inbound (RSS) handler queues if they changed. */ - cqicb = (struct cqicb *)&qdev->rx_ring[0]; - if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs || - le16_to_cpu(cqicb->pkt_delay) != - qdev->rx_max_coalesced_frames) { - for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { - rx_ring = &qdev->rx_ring[i]; - cqicb = (struct cqicb *)rx_ring; - cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); - cqicb->pkt_delay = - cpu_to_le16(qdev->rx_max_coalesced_frames); - cqicb->flags = FLAGS_LI; - status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), - CFG_LCQ, rx_ring->cq_id); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to load CQICB.\n"); - goto exit; - } - } - } -exit: - return status; -} - -static void ql_update_stats(struct ql_adapter *qdev) -{ - u32 i; - u64 data; - u64 *iter = &qdev->nic_stats.tx_pkts; - - spin_lock(&qdev->stats_lock); - if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get xgmac sem.\n"); - goto quit; - } - /* - * Get TX statistics. 
- */ - for (i = 0x200; i < 0x280; i += 8) { - if (ql_read_xgmac_reg64(qdev, i, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", - i); - goto end; - } else - *iter = data; - iter++; - } - - /* - * Get RX statistics. - */ - for (i = 0x300; i < 0x3d0; i += 8) { - if (ql_read_xgmac_reg64(qdev, i, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", - i); - goto end; - } else - *iter = data; - iter++; - } - - /* - * Get Per-priority TX pause frame counter statistics. - */ - for (i = 0x500; i < 0x540; i += 8) { - if (ql_read_xgmac_reg64(qdev, i, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", - i); - goto end; - } else - *iter = data; - iter++; - } - - /* - * Get Per-priority RX pause frame counter statistics. - */ - for (i = 0x568; i < 0x5a8; i += 8) { - if (ql_read_xgmac_reg64(qdev, i, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", - i); - goto end; - } else - *iter = data; - iter++; - } - - /* - * Get RX NIC FIFO DROP statistics. - */ - if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", i); - goto end; - } else - *iter = data; -end: - ql_sem_unlock(qdev, qdev->xg_sem_mask); -quit: - spin_unlock(&qdev->stats_lock); - - QL_DUMP_STAT(qdev); -} - -static char ql_stats_str_arr[][ETH_GSTRING_LEN] = { - {"tx_pkts"}, - {"tx_bytes"}, - {"tx_mcast_pkts"}, - {"tx_bcast_pkts"}, - {"tx_ucast_pkts"}, - {"tx_ctl_pkts"}, - {"tx_pause_pkts"}, - {"tx_64_pkts"}, - {"tx_65_to_127_pkts"}, - {"tx_128_to_255_pkts"}, - {"tx_256_511_pkts"}, - {"tx_512_to_1023_pkts"}, - {"tx_1024_to_1518_pkts"}, - {"tx_1519_to_max_pkts"}, - {"tx_undersize_pkts"}, - {"tx_oversize_pkts"}, - {"rx_bytes"}, - {"rx_bytes_ok"}, - {"rx_pkts"}, - {"rx_pkts_ok"}, - {"rx_bcast_pkts"}, - {"rx_mcast_pkts"}, - {"rx_ucast_pkts"}, - {"rx_undersize_pkts"}, - {"rx_oversize_pkts"}, - {"rx_jabber_pkts"}, - {"rx_undersize_fcerr_pkts"}, - {"rx_drop_events"}, - {"rx_fcerr_pkts"}, - {"rx_align_err"}, - {"rx_symbol_err"}, - {"rx_mac_err"}, - {"rx_ctl_pkts"}, - {"rx_pause_pkts"}, - {"rx_64_pkts"}, - {"rx_65_to_127_pkts"}, - {"rx_128_255_pkts"}, - {"rx_256_511_pkts"}, - {"rx_512_to_1023_pkts"}, - {"rx_1024_to_1518_pkts"}, - {"rx_1519_to_max_pkts"}, - {"rx_len_err_pkts"}, - {"tx_cbfc_pause_frames0"}, - {"tx_cbfc_pause_frames1"}, - {"tx_cbfc_pause_frames2"}, - {"tx_cbfc_pause_frames3"}, - {"tx_cbfc_pause_frames4"}, - {"tx_cbfc_pause_frames5"}, - {"tx_cbfc_pause_frames6"}, - {"tx_cbfc_pause_frames7"}, - {"rx_cbfc_pause_frames0"}, - {"rx_cbfc_pause_frames1"}, - {"rx_cbfc_pause_frames2"}, - {"rx_cbfc_pause_frames3"}, - {"rx_cbfc_pause_frames4"}, - {"rx_cbfc_pause_frames5"}, - {"rx_cbfc_pause_frames6"}, - {"rx_cbfc_pause_frames7"}, - {"rx_nic_fifo_drop"}, -}; - -static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) -{ - switch (stringset) { - case ETH_SS_STATS: - memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr)); - break; - } -} - -static int ql_get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_TEST: - return QLGE_TEST_LEN; - case ETH_SS_STATS: - return ARRAY_SIZE(ql_stats_str_arr); - default: - return -EOPNOTSUPP; - } -} - -static void -ql_get_ethtool_stats(struct net_device *ndev, - struct ethtool_stats *stats, u64 *data) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - struct nic_stats *s = &qdev->nic_stats; - - ql_update_stats(qdev); - - *data++ = s->tx_pkts; - 
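[Editor's sketch] Every *data++ assignment in ql_get_ethtool_stats() must stay in the exact order of ql_stats_str_arr[], or ethtool reports values under the wrong names. A hedged sketch of the table-driven alternative (name plus offsetof in one array) that removes this coupling; the struct and names are illustrative, not the driver's:

    #include <stddef.h>
    #include <stdint.h>

    struct nic_stats {
            uint64_t tx_pkts;
            uint64_t tx_bytes;
            uint64_t rx_pkts;
    };

    /* Pairing each name with its offset keeps the string list and the
     * value copy from drifting apart.
     */
    static const struct {
            const char *name;
            size_t off;
    } stat_tbl[] = {
            { "tx_pkts",  offsetof(struct nic_stats, tx_pkts)  },
            { "tx_bytes", offsetof(struct nic_stats, tx_bytes) },
            { "rx_pkts",  offsetof(struct nic_stats, rx_pkts)  },
    };

    static void fill_stats(const struct nic_stats *s, uint64_t *data)
    {
            size_t i;

            for (i = 0; i < sizeof(stat_tbl) / sizeof(stat_tbl[0]); i++)
                    data[i] = *(const uint64_t *)((const char *)s + stat_tbl[i].off);
    }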
*data++ = s->tx_bytes; - *data++ = s->tx_mcast_pkts; - *data++ = s->tx_bcast_pkts; - *data++ = s->tx_ucast_pkts; - *data++ = s->tx_ctl_pkts; - *data++ = s->tx_pause_pkts; - *data++ = s->tx_64_pkt; - *data++ = s->tx_65_to_127_pkt; - *data++ = s->tx_128_to_255_pkt; - *data++ = s->tx_256_511_pkt; - *data++ = s->tx_512_to_1023_pkt; - *data++ = s->tx_1024_to_1518_pkt; - *data++ = s->tx_1519_to_max_pkt; - *data++ = s->tx_undersize_pkt; - *data++ = s->tx_oversize_pkt; - *data++ = s->rx_bytes; - *data++ = s->rx_bytes_ok; - *data++ = s->rx_pkts; - *data++ = s->rx_pkts_ok; - *data++ = s->rx_bcast_pkts; - *data++ = s->rx_mcast_pkts; - *data++ = s->rx_ucast_pkts; - *data++ = s->rx_undersize_pkts; - *data++ = s->rx_oversize_pkts; - *data++ = s->rx_jabber_pkts; - *data++ = s->rx_undersize_fcerr_pkts; - *data++ = s->rx_drop_events; - *data++ = s->rx_fcerr_pkts; - *data++ = s->rx_align_err; - *data++ = s->rx_symbol_err; - *data++ = s->rx_mac_err; - *data++ = s->rx_ctl_pkts; - *data++ = s->rx_pause_pkts; - *data++ = s->rx_64_pkts; - *data++ = s->rx_65_to_127_pkts; - *data++ = s->rx_128_255_pkts; - *data++ = s->rx_256_511_pkts; - *data++ = s->rx_512_to_1023_pkts; - *data++ = s->rx_1024_to_1518_pkts; - *data++ = s->rx_1519_to_max_pkts; - *data++ = s->rx_len_err_pkts; - *data++ = s->tx_cbfc_pause_frames0; - *data++ = s->tx_cbfc_pause_frames1; - *data++ = s->tx_cbfc_pause_frames2; - *data++ = s->tx_cbfc_pause_frames3; - *data++ = s->tx_cbfc_pause_frames4; - *data++ = s->tx_cbfc_pause_frames5; - *data++ = s->tx_cbfc_pause_frames6; - *data++ = s->tx_cbfc_pause_frames7; - *data++ = s->rx_cbfc_pause_frames0; - *data++ = s->rx_cbfc_pause_frames1; - *data++ = s->rx_cbfc_pause_frames2; - *data++ = s->rx_cbfc_pause_frames3; - *data++ = s->rx_cbfc_pause_frames4; - *data++ = s->rx_cbfc_pause_frames5; - *data++ = s->rx_cbfc_pause_frames6; - *data++ = s->rx_cbfc_pause_frames7; - *data++ = s->rx_nic_fifo_drop; -} - -static int ql_get_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; - ecmd->autoneg = AUTONEG_ENABLE; - ecmd->transceiver = XCVR_EXTERNAL; - if ((qdev->link_status & STS_LINK_TYPE_MASK) == - STS_LINK_TYPE_10GBASET) { - ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); - ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; - } else { - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - } - - ethtool_cmd_speed_set(ecmd, SPEED_10000); - ecmd->duplex = DUPLEX_FULL; - - return 0; -} - -static void ql_get_drvinfo(struct net_device *ndev, - struct ethtool_drvinfo *drvinfo) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - strncpy(drvinfo->driver, qlge_driver_name, 32); - strncpy(drvinfo->version, qlge_driver_version, 32); - snprintf(drvinfo->fw_version, 32, "v%d.%d.%d", - (qdev->fw_rev_id & 0x00ff0000) >> 16, - (qdev->fw_rev_id & 0x0000ff00) >> 8, - (qdev->fw_rev_id & 0x000000ff)); - strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); - drvinfo->n_stats = 0; - drvinfo->testinfo_len = 0; - if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) - drvinfo->regdump_len = sizeof(struct ql_mpi_coredump); - else - drvinfo->regdump_len = sizeof(struct ql_reg_dump); - drvinfo->eedump_len = 0; -} - -static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - /* What we support. 
*/ - wol->supported = WAKE_MAGIC; - /* What we've currently got set. */ - wol->wolopts = qdev->wol; -} - -static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int status; - - if (wol->wolopts & ~WAKE_MAGIC) - return -EINVAL; - qdev->wol = wol->wolopts; - - netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol); - if (!qdev->wol) { - u32 wol = 0; - status = ql_mb_wol_mode(qdev, wol); - netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n", - status == 0 ? "cleared successfully" : "clear failed", - wol); - } - - return 0; -} - -static int ql_set_phys_id(struct net_device *ndev, - enum ethtool_phys_id_state state) - -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - switch (state) { - case ETHTOOL_ID_ACTIVE: - /* Save the current LED settings */ - if (ql_mb_get_led_cfg(qdev)) - return -EIO; - - /* Start blinking */ - ql_mb_set_led_cfg(qdev, QL_LED_BLINK); - return 0; - - case ETHTOOL_ID_INACTIVE: - /* Restore LED settings */ - if (ql_mb_set_led_cfg(qdev, qdev->led_config)) - return -EIO; - return 0; - - default: - return -EINVAL; - } -} - -static int ql_start_loopback(struct ql_adapter *qdev) -{ - if (netif_carrier_ok(qdev->ndev)) { - set_bit(QL_LB_LINK_UP, &qdev->flags); - netif_carrier_off(qdev->ndev); - } else - clear_bit(QL_LB_LINK_UP, &qdev->flags); - qdev->link_config |= CFG_LOOPBACK_PCS; - return ql_mb_set_port_cfg(qdev); -} - -static void ql_stop_loopback(struct ql_adapter *qdev) -{ - qdev->link_config &= ~CFG_LOOPBACK_PCS; - ql_mb_set_port_cfg(qdev); - if (test_bit(QL_LB_LINK_UP, &qdev->flags)) { - netif_carrier_on(qdev->ndev); - clear_bit(QL_LB_LINK_UP, &qdev->flags); - } -} - -static void ql_create_lb_frame(struct sk_buff *skb, - unsigned int frame_size) -{ - memset(skb->data, 0xFF, frame_size); - frame_size &= ~1; - memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); - memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); -} - -void ql_check_lb_frame(struct ql_adapter *qdev, - struct sk_buff *skb) -{ - unsigned int frame_size = skb->len; - - if ((*(skb->data + 3) == 0xFF) && - (*(skb->data + frame_size / 2 + 10) == 0xBE) && - (*(skb->data + frame_size / 2 + 12) == 0xAF)) { - atomic_dec(&qdev->lb_count); - return; - } -} - -static int ql_run_loopback_test(struct ql_adapter *qdev) -{ - int i; - netdev_tx_t rc; - struct sk_buff *skb; - unsigned int size = SMALL_BUF_MAP_SIZE; - - for (i = 0; i < 64; i++) { - skb = netdev_alloc_skb(qdev->ndev, size); - if (!skb) - return -ENOMEM; - - skb->queue_mapping = 0; - skb_put(skb, size); - ql_create_lb_frame(skb, size); - rc = ql_lb_send(skb, qdev->ndev); - if (rc != NETDEV_TX_OK) - return -EPIPE; - atomic_inc(&qdev->lb_count); - } - /* Give queue time to settle before testing results. */ - msleep(2); - ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128); - return atomic_read(&qdev->lb_count) ? 
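[Editor's sketch] ql_create_lb_frame()/ql_check_lb_frame() above stamp a recognizable pattern into each test frame (0xFF fill, 0xAA back half, two marker bytes) so the receive path can confirm a frame made the loop intact. The same logic, condensed into a standalone pair:

    #include <string.h>

    static void create_lb_frame(unsigned char *data, unsigned int size)
    {
            memset(data, 0xFF, size);
            size &= ~1U;
            memset(&data[size / 2], 0xAA, size / 2 - 1);
            data[size / 2 + 10] = 0xBE;    /* marker bytes checked on receive */
            data[size / 2 + 12] = 0xAF;
    }

    static int check_lb_frame(const unsigned char *data, unsigned int size)
    {
            return data[3] == 0xFF &&
                   data[size / 2 + 10] == 0xBE &&
                   data[size / 2 + 12] == 0xAF;
    }

Each verified frame decrements the in-flight counter (lb_count), so a nonzero count after the drain marks the loopback test as failed.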
-EIO : 0; -} - -static int ql_loopback_test(struct ql_adapter *qdev, u64 *data) -{ - *data = ql_start_loopback(qdev); - if (*data) - goto out; - *data = ql_run_loopback_test(qdev); -out: - ql_stop_loopback(qdev); - return *data; -} - -static void ql_self_test(struct net_device *ndev, - struct ethtool_test *eth_test, u64 *data) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - if (netif_running(ndev)) { - set_bit(QL_SELFTEST, &qdev->flags); - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - /* Offline tests */ - if (ql_loopback_test(qdev, &data[0])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - } else { - /* Online tests */ - data[0] = 0; - } - clear_bit(QL_SELFTEST, &qdev->flags); - /* Give link time to come up after - * port configuration changes. - */ - msleep_interruptible(4 * 1000); - } else { - netif_err(qdev, drv, qdev->ndev, - "is down, Loopback test will fail.\n"); - eth_test->flags |= ETH_TEST_FL_FAILED; - } -} - -static int ql_get_regs_len(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) - return sizeof(struct ql_mpi_coredump); - else - return sizeof(struct ql_reg_dump); -} - -static void ql_get_regs(struct net_device *ndev, - struct ethtool_regs *regs, void *p) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - ql_get_dump(qdev, p); - qdev->core_is_dumped = 0; - if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) - regs->len = sizeof(struct ql_mpi_coredump); - else - regs->len = sizeof(struct ql_reg_dump); -} - -static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) -{ - struct ql_adapter *qdev = netdev_priv(dev); - - c->rx_coalesce_usecs = qdev->rx_coalesce_usecs; - c->tx_coalesce_usecs = qdev->tx_coalesce_usecs; - - /* This chip coalesces as follows: - * If a packet arrives, hold off interrupts until - * cqicb->int_delay expires, but if no other packets arrive don't - * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a - * timer to coalesce on a frame basis. So, we have to take ethtool's - * max_coalesced_frames value and convert it to a delay in microseconds. - * We do this by using a basic thoughput of 1,000,000 frames per - * second @ (1024 bytes). This means one frame per usec. So it's a - * simple one to one ratio. - */ - c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames; - c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames; - - return 0; -} - -static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - /* Validate user parameters. */ - if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2) - return -EINVAL; - /* Don't wait more than 10 usec. */ - if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) - return -EINVAL; - if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2) - return -EINVAL; - if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) - return -EINVAL; - - /* Verify a change took place before updating the hardware. 
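[Editor's note] A quick sanity check of the ratio the coalescing comment relies on: at 1024 bytes (8192 bits) per frame, a 10 Gb/s link moves about 10^10 / 8192, roughly 1.2 million frames per second, i.e. close to one frame per microsecond. Treating max_coalesced_frames and microseconds as a one-to-one conversion is therefore a fair approximation for this hardware.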
*/ - if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs && - qdev->tx_coalesce_usecs == c->tx_coalesce_usecs && - qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames && - qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames) - return 0; - - qdev->rx_coalesce_usecs = c->rx_coalesce_usecs; - qdev->tx_coalesce_usecs = c->tx_coalesce_usecs; - qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames; - qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames; - - return ql_update_ring_coalescing(qdev); -} - -static void ql_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct ql_adapter *qdev = netdev_priv(netdev); - - ql_mb_get_port_cfg(qdev); - if (qdev->link_config & CFG_PAUSE_STD) { - pause->rx_pause = 1; - pause->tx_pause = 1; - } -} - -static int ql_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct ql_adapter *qdev = netdev_priv(netdev); - int status = 0; - - if ((pause->rx_pause) && (pause->tx_pause)) - qdev->link_config |= CFG_PAUSE_STD; - else if (!pause->rx_pause && !pause->tx_pause) - qdev->link_config &= ~CFG_PAUSE_STD; - else - return -EINVAL; - - status = ql_mb_set_port_cfg(qdev); - return status; -} - -static u32 ql_get_msglevel(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - return qdev->msg_enable; -} - -static void ql_set_msglevel(struct net_device *ndev, u32 value) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - qdev->msg_enable = value; -} - -const struct ethtool_ops qlge_ethtool_ops = { - .get_settings = ql_get_settings, - .get_drvinfo = ql_get_drvinfo, - .get_wol = ql_get_wol, - .set_wol = ql_set_wol, - .get_regs_len = ql_get_regs_len, - .get_regs = ql_get_regs, - .get_msglevel = ql_get_msglevel, - .set_msglevel = ql_set_msglevel, - .get_link = ethtool_op_get_link, - .set_phys_id = ql_set_phys_id, - .self_test = ql_self_test, - .get_pauseparam = ql_get_pauseparam, - .set_pauseparam = ql_set_pauseparam, - .get_coalesce = ql_get_coalesce, - .set_coalesce = ql_set_coalesce, - .get_sset_count = ql_get_sset_count, - .get_strings = ql_get_strings, - .get_ethtool_stats = ql_get_ethtool_stats, -}; - diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c deleted file mode 100644 index f07e96ec8843..000000000000 --- a/drivers/net/qlge/qlge_main.c +++ /dev/null @@ -1,4987 +0,0 @@ -/* - * QLogic qlge NIC HBA Driver - * Copyright (c) 2003-2008 QLogic Corporation - * See LICENSE.qlge for copyright and licensing details. 
- * Author: Linux qlge network device driver by - * Ron Mercer - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "qlge.h" - -char qlge_driver_name[] = DRV_NAME; -const char qlge_driver_version[] = DRV_VERSION; - -MODULE_AUTHOR("Ron Mercer "); -MODULE_DESCRIPTION(DRV_STRING " "); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -static const u32 default_msg = - NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | -/* NETIF_MSG_TIMER | */ - NETIF_MSG_IFDOWN | - NETIF_MSG_IFUP | - NETIF_MSG_RX_ERR | - NETIF_MSG_TX_ERR | -/* NETIF_MSG_TX_QUEUED | */ -/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */ -/* NETIF_MSG_PKTDATA | */ - NETIF_MSG_HW | NETIF_MSG_WOL | 0; - -static int debug = -1; /* defaults above */ -module_param(debug, int, 0664); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); - -#define MSIX_IRQ 0 -#define MSI_IRQ 1 -#define LEG_IRQ 2 -static int qlge_irq_type = MSIX_IRQ; -module_param(qlge_irq_type, int, 0664); -MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); - -static int qlge_mpi_coredump; -module_param(qlge_mpi_coredump, int, 0); -MODULE_PARM_DESC(qlge_mpi_coredump, - "Option to enable MPI firmware dump. " - "Default is OFF - Do Not allocate memory. "); - -static int qlge_force_coredump; -module_param(qlge_force_coredump, int, 0); -MODULE_PARM_DESC(qlge_force_coredump, - "Option to allow force of firmware core dump. " - "Default is OFF - Do not allow."); - -static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = { - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)}, - /* required last entry */ - {0,} -}; - -MODULE_DEVICE_TABLE(pci, qlge_pci_tbl); - -static int ql_wol(struct ql_adapter *qdev); -static void qlge_set_multicast_list(struct net_device *ndev); - -/* This hardware semaphore causes exclusive access to - * resources shared between the NIC driver, MPI firmware, - * FCOE firmware and the FC driver. 
- */ -static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) -{ - u32 sem_bits = 0; - - switch (sem_mask) { - case SEM_XGMAC0_MASK: - sem_bits = SEM_SET << SEM_XGMAC0_SHIFT; - break; - case SEM_XGMAC1_MASK: - sem_bits = SEM_SET << SEM_XGMAC1_SHIFT; - break; - case SEM_ICB_MASK: - sem_bits = SEM_SET << SEM_ICB_SHIFT; - break; - case SEM_MAC_ADDR_MASK: - sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT; - break; - case SEM_FLASH_MASK: - sem_bits = SEM_SET << SEM_FLASH_SHIFT; - break; - case SEM_PROBE_MASK: - sem_bits = SEM_SET << SEM_PROBE_SHIFT; - break; - case SEM_RT_IDX_MASK: - sem_bits = SEM_SET << SEM_RT_IDX_SHIFT; - break; - case SEM_PROC_REG_MASK: - sem_bits = SEM_SET << SEM_PROC_REG_SHIFT; - break; - default: - netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n"); - return -EINVAL; - } - - ql_write32(qdev, SEM, sem_bits | sem_mask); - return !(ql_read32(qdev, SEM) & sem_bits); -} - -int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) -{ - unsigned int wait_count = 30; - do { - if (!ql_sem_trylock(qdev, sem_mask)) - return 0; - udelay(100); - } while (--wait_count); - return -ETIMEDOUT; -} - -void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) -{ - ql_write32(qdev, SEM, sem_mask); - ql_read32(qdev, SEM); /* flush */ -} - -/* This function waits for a specific bit to come ready - * in a given register. It is used mostly by the initialize - * process, but is also used in kernel thread API such as - * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid. - */ -int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) -{ - u32 temp; - int count = UDELAY_COUNT; - - while (count) { - temp = ql_read32(qdev, reg); - - /* check for errors */ - if (temp & err_bit) { - netif_alert(qdev, probe, qdev->ndev, - "register 0x%.08x access error, value = 0x%.08x!.\n", - reg, temp); - return -EIO; - } else if (temp & bit) - return 0; - udelay(UDELAY_DELAY); - count--; - } - netif_alert(qdev, probe, qdev->ndev, - "Timed out waiting for reg %x to come ready.\n", reg); - return -ETIMEDOUT; -} - -/* The CFG register is used to download TX and RX control blocks - * to the chip. This function waits for an operation to complete. - */ -static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit) -{ - int count = UDELAY_COUNT; - u32 temp; - - while (count) { - temp = ql_read32(qdev, CFG); - if (temp & CFG_LE) - return -EIO; - if (!(temp & bit)) - return 0; - udelay(UDELAY_DELAY); - count--; - } - return -ETIMEDOUT; -} - - -/* Used to issue init control blocks to hw. Maps control block, - * sets address, triggers download, waits for completion. - */ -int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, - u16 q_id) -{ - u64 map; - int status = 0; - int direction; - u32 mask; - u32 value; - - direction = - (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? 
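[Editor's sketch] ql_sem_trylock() above claims a hardware semaphore by writing the SET bits and reading SEM back to confirm ownership; ql_sem_spinlock() retries that with a fixed budget (30 tries at 100 us, about 3 ms total). A user-space sketch of the same bounded-retry shape, with a C11 atomic standing in for the SEM register and delay_us() as a stub:

    #include <stdatomic.h>
    #include <stdbool.h>

    static void delay_us(unsigned int us) { (void)us; /* platform-specific */ }

    static bool sem_trylock(atomic_flag *sem)
    {
            /* Succeeds only if the flag was clear, like reading SEM back. */
            return !atomic_flag_test_and_set(sem);
    }

    static int sem_spinlock(atomic_flag *sem)
    {
            unsigned int wait_count = 30;

            do {
                    if (sem_trylock(sem))
                            return 0;
                    delay_us(100);     /* the driver uses udelay(100) */
            } while (--wait_count);
            return -1;                 /* the driver returns -ETIMEDOUT */
    }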
PCI_DMA_TODEVICE : - PCI_DMA_FROMDEVICE; - - map = pci_map_single(qdev->pdev, ptr, size, direction); - if (pci_dma_mapping_error(qdev->pdev, map)) { - netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n"); - return -ENOMEM; - } - - status = ql_sem_spinlock(qdev, SEM_ICB_MASK); - if (status) - return status; - - status = ql_wait_cfg(qdev, bit); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Timed out waiting for CFG to come ready.\n"); - goto exit; - } - - ql_write32(qdev, ICB_L, (u32) map); - ql_write32(qdev, ICB_H, (u32) (map >> 32)); - - mask = CFG_Q_MASK | (bit << 16); - value = bit | (q_id << CFG_Q_SHIFT); - ql_write32(qdev, CFG, (mask | value)); - - /* - * Wait for the bit to clear after signaling hw. - */ - status = ql_wait_cfg(qdev, bit); -exit: - ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ - pci_unmap_single(qdev->pdev, map, size, direction); - return status; -} - -/* Get a specific MAC address from the CAM. Used for debug and reg dump. */ -int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, - u32 *value) -{ - u32 offset = 0; - int status; - - switch (type) { - case MAC_ADDR_TYPE_MULTI_MAC: - case MAC_ADDR_TYPE_CAM_MAC: - { - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MR, 0); - if (status) - goto exit; - *value++ = ql_read32(qdev, MAC_ADDR_DATA); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MR, 0); - if (status) - goto exit; - *value++ = ql_read32(qdev, MAC_ADDR_DATA); - if (type == MAC_ADDR_TYPE_CAM_MAC) { - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ - status = - ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, - MAC_ADDR_MR, 0); - if (status) - goto exit; - *value++ = ql_read32(qdev, MAC_ADDR_DATA); - } - break; - } - case MAC_ADDR_TYPE_VLAN: - case MAC_ADDR_TYPE_MULTI_FLTR: - default: - netif_crit(qdev, ifup, qdev->ndev, - "Address type %d not yet supported.\n", type); - status = -EPERM; - } -exit: - return status; -} - -/* Set up a MAC, multicast or VLAN address for the - * inbound frame matching. 
- */ -static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, - u16 index) -{ - u32 offset = 0; - int status = 0; - - switch (type) { - case MAC_ADDR_TYPE_MULTI_MAC: - { - u32 upper = (addr[0] << 8) | addr[1]; - u32 lower = (addr[2] << 24) | (addr[3] << 16) | - (addr[4] << 8) | (addr[5]); - - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | - (index << MAC_ADDR_IDX_SHIFT) | - type | MAC_ADDR_E); - ql_write32(qdev, MAC_ADDR_DATA, lower); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | - (index << MAC_ADDR_IDX_SHIFT) | - type | MAC_ADDR_E); - - ql_write32(qdev, MAC_ADDR_DATA, upper); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - break; - } - case MAC_ADDR_TYPE_CAM_MAC: - { - u32 cam_output; - u32 upper = (addr[0] << 8) | addr[1]; - u32 lower = - (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | - (addr[5]); - - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Adding %s address %pM at index %d in the CAM.\n", - type == MAC_ADDR_TYPE_MULTI_MAC ? - "MULTICAST" : "UNICAST", - addr, index); - - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - type); /* type */ - ql_write32(qdev, MAC_ADDR_DATA, lower); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - type); /* type */ - ql_write32(qdev, MAC_ADDR_DATA, upper); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - type); /* type */ - /* This field should also include the queue id - and possibly the function id. Right now we hardcode - the route field to NIC core. - */ - cam_output = (CAM_OUT_ROUTE_NIC | - (qdev-> - func << CAM_OUT_FUNC_SHIFT) | - (0 << CAM_OUT_CQ_ID_SHIFT)); - if (qdev->ndev->features & NETIF_F_HW_VLAN_RX) - cam_output |= CAM_OUT_RV; - /* route to NIC core */ - ql_write32(qdev, MAC_ADDR_DATA, cam_output); - break; - } - case MAC_ADDR_TYPE_VLAN: - { - u32 enable_bit = *((u32 *) &addr[0]); - /* For VLAN, the addr actually holds a bit that - * either enables or disables the vlan id we are - * addressing. It's either MAC_ADDR_E on or off. - * That's bit-27 we're talking about. - */ - netif_info(qdev, ifup, qdev->ndev, - "%s VLAN ID %d %s the CAM.\n", - enable_bit ? "Adding" : "Removing", - index, - enable_bit ? "to" : "from"); - - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - type | /* type */ - enable_bit); /* enable/disable */ - break; - } - case MAC_ADDR_TYPE_MULTI_FLTR: - default: - netif_crit(qdev, ifup, qdev->ndev, - "Address type %d not yet supported.\n", type); - status = -EPERM; - } -exit: - return status; -} - -/* Set or clear MAC address in hardware. We sometimes - * have to clear it to prevent wrong frame routing - * especially in a bonding environment. 
- */ -static int ql_set_mac_addr(struct ql_adapter *qdev, int set) -{ - int status; - char zero_mac_addr[ETH_ALEN]; - char *addr; - - if (set) { - addr = &qdev->current_mac_addr[0]; - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Set Mac addr %pM\n", addr); - } else { - memset(zero_mac_addr, 0, ETH_ALEN); - addr = &zero_mac_addr[0]; - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Clearing MAC address\n"); - } - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return status; - status = ql_set_mac_addr_reg(qdev, (u8 *) addr, - MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - netif_err(qdev, ifup, qdev->ndev, - "Failed to init mac address.\n"); - return status; -} - -void ql_link_on(struct ql_adapter *qdev) -{ - netif_err(qdev, link, qdev->ndev, "Link is up.\n"); - netif_carrier_on(qdev->ndev); - ql_set_mac_addr(qdev, 1); -} - -void ql_link_off(struct ql_adapter *qdev) -{ - netif_err(qdev, link, qdev->ndev, "Link is down.\n"); - netif_carrier_off(qdev->ndev); - ql_set_mac_addr(qdev, 0); -} - -/* Get a specific frame routing value from the CAM. - * Used for debug and reg dump. - */ -int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value) -{ - int status = 0; - - status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); - if (status) - goto exit; - - ql_write32(qdev, RT_IDX, - RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT)); - status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0); - if (status) - goto exit; - *value = ql_read32(qdev, RT_DATA); -exit: - return status; -} - -/* The NIC function for this chip has 16 routing indexes. Each one can be used - * to route different frame types to various inbound queues. We send broadcast/ - * multicast/error frames to the default queue for slow handling, - * and CAM hit/RSS frames to the fast handling queues. - */ -static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, - int enable) -{ - int status = -EINVAL; /* Return error if no mask match. */ - u32 value = 0; - - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "%s %s mask %s the routing reg.\n", - enable ? "Adding" : "Removing", - index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" : - index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" : - index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" : - index == RT_IDX_BCAST_SLOT ? "BROADCAST" : - index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" : - index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" : - index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" : - index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" : - index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" : - index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" : - index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" : - index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" : - index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" : - index == RT_IDX_UNUSED013 ? "UNUSED13" : - index == RT_IDX_UNUSED014 ? "UNUSED14" : - index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" : - "(Bad index != RT_IDX)", - enable ? "to" : "from"); - - switch (mask) { - case RT_IDX_CAM_HIT: - { - value = RT_IDX_DST_CAM_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_VALID: /* Promiscuous Mode frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. 
*/ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_IP_CSUM_ERR_SLOT << - RT_IDX_IDX_SHIFT); /* index */ - break; - } - case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_TCP_UDP_CSUM_ERR_SLOT << - RT_IDX_IDX_SHIFT); /* index */ - break; - } - case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_MCAST: /* Pass up All Multicast frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */ - { - value = RT_IDX_DST_RSS | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case 0: /* Clear the E-bit on an entry. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (index << RT_IDX_IDX_SHIFT);/* index */ - break; - } - default: - netif_err(qdev, ifup, qdev->ndev, - "Mask type %d not yet supported.\n", mask); - status = -EPERM; - goto exit; - } - - if (value) { - status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); - if (status) - goto exit; - value |= (enable ? RT_IDX_E : 0); - ql_write32(qdev, RT_IDX, value); - ql_write32(qdev, RT_DATA, enable ? mask : 0); - } -exit: - return status; -} - -static void ql_enable_interrupts(struct ql_adapter *qdev) -{ - ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI); -} - -static void ql_disable_interrupts(struct ql_adapter *qdev) -{ - ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); -} - -/* If we're running with multiple MSI-X vectors then we enable on the fly. - * Otherwise, we may have multiple outstanding workers and don't want to - * enable until the last one finishes. In this case, the irq_cnt gets - * incremented every time we queue a worker and decremented every time - * a worker finishes. Once it hits zero we enable the interrupt. - */ -u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) -{ - u32 var = 0; - unsigned long hw_flags = 0; - struct intr_context *ctx = qdev->intr_context + intr; - - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) { - /* Always enable if we're MSIX multi interrupts and - * it's not the default (zeroeth) interrupt. - */ - ql_write32(qdev, INTR_EN, - ctx->intr_en_mask); - var = ql_read32(qdev, STS); - return var; - } - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (atomic_dec_and_test(&ctx->irq_cnt)) { - ql_write32(qdev, INTR_EN, - ctx->intr_en_mask); - var = ql_read32(qdev, STS); - } - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return var; -} - -static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) -{ - u32 var = 0; - struct intr_context *ctx; - - /* HW disables for us if we're MSIX multi interrupts and - * it's not the default (zeroeth) interrupt. 
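[Editor's sketch] The irq_cnt scheme described above is a reference count: every queued worker bumps it, every completed worker drops it, and the completion interrupt is re-enabled only when the count falls back to zero (it is precharged to 1 so the initial enable fires). A sketch with C11 atomics standing in for the kernel's atomic_t and the hardware write stubbed out:

    #include <stdatomic.h>

    static atomic_int irq_cnt = 1;    /* precharged, as in the driver */

    static void hw_enable_irq(void) { /* would write INTR_EN */ }

    static void worker_queued(void)
    {
            atomic_fetch_add(&irq_cnt, 1);
    }

    static void worker_done(void)
    {
            /* fetch_sub returns the old value; 1 means we just hit zero. */
            if (atomic_fetch_sub(&irq_cnt, 1) == 1)
                    hw_enable_irq();
    }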
- */ - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) - return 0; - - ctx = qdev->intr_context + intr; - spin_lock(&qdev->hw_lock); - if (!atomic_read(&ctx->irq_cnt)) { - ql_write32(qdev, INTR_EN, - ctx->intr_dis_mask); - var = ql_read32(qdev, STS); - } - atomic_inc(&ctx->irq_cnt); - spin_unlock(&qdev->hw_lock); - return var; -} - -static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) -{ - int i; - for (i = 0; i < qdev->intr_count; i++) { - /* The enable call does a atomic_dec_and_test - * and enables only if the result is zero. - * So we precharge it here. - */ - if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) || - i == 0)) - atomic_set(&qdev->intr_context[i].irq_cnt, 1); - ql_enable_completion_interrupt(qdev, i); - } - -} - -static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str) -{ - int status, i; - u16 csum = 0; - __le16 *flash = (__le16 *)&qdev->flash; - - status = strncmp((char *)&qdev->flash, str, 4); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n"); - return status; - } - - for (i = 0; i < size; i++) - csum += le16_to_cpu(*flash++); - - if (csum) - netif_err(qdev, ifup, qdev->ndev, - "Invalid flash checksum, csum = 0x%.04x.\n", csum); - - return csum; -} - -static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data) -{ - int status = 0; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); - if (status) - goto exit; - /* set up for reg read */ - ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset); - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); - if (status) - goto exit; - /* This data is stored on flash as an array of - * __le32. Since ql_read32() returns cpu endian - * we need to swap it back. - */ - *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA)); -exit: - return status; -} - -static int ql_get_8000_flash_params(struct ql_adapter *qdev) -{ - u32 i, size; - int status; - __le32 *p = (__le32 *)&qdev->flash; - u32 offset; - u8 mac_addr[6]; - - /* Get flash offset for function and adjust - * for dword access. - */ - if (!qdev->port) - offset = FUNC0_FLASH_OFFSET / sizeof(u32); - else - offset = FUNC1_FLASH_OFFSET / sizeof(u32); - - if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) - return -ETIMEDOUT; - - size = sizeof(struct flash_params_8000) / sizeof(u32); - for (i = 0; i < size; i++, p++) { - status = ql_read_flash_word(qdev, i+offset, p); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Error reading flash.\n"); - goto exit; - } - } - - status = ql_validate_flash(qdev, - sizeof(struct flash_params_8000) / sizeof(u16), - "8000"); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); - status = -EINVAL; - goto exit; - } - - /* Extract either manufacturer or BOFM modified - * MAC address. 
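[Editor's sketch] ql_validate_flash() above treats the parameter block as little-endian 16-bit words whose sum must wrap to zero; the checksum word stored in flash is chosen so that a clean image totals 0 mod 2^16. A standalone equivalent over a raw byte buffer:

    #include <stdint.h>
    #include <stddef.h>

    static int flash_csum_ok(const uint8_t *blk, size_t nwords)
    {
            uint16_t csum = 0;
            size_t i;

            for (i = 0; i < nwords; i++)
                    csum += (uint16_t)(blk[2 * i] | (blk[2 * i + 1] << 8));

            return csum == 0;    /* any corruption leaves a nonzero residue */
    }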
- */ - if (qdev->flash.flash_params_8000.data_type1 == 2) - memcpy(mac_addr, - qdev->flash.flash_params_8000.mac_addr1, - qdev->ndev->addr_len); - else - memcpy(mac_addr, - qdev->flash.flash_params_8000.mac_addr, - qdev->ndev->addr_len); - - if (!is_valid_ether_addr(mac_addr)) { - netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n"); - status = -EINVAL; - goto exit; - } - - memcpy(qdev->ndev->dev_addr, - mac_addr, - qdev->ndev->addr_len); - -exit: - ql_sem_unlock(qdev, SEM_FLASH_MASK); - return status; -} - -static int ql_get_8012_flash_params(struct ql_adapter *qdev) -{ - int i; - int status; - __le32 *p = (__le32 *)&qdev->flash; - u32 offset = 0; - u32 size = sizeof(struct flash_params_8012) / sizeof(u32); - - /* Second function's parameters follow the first - * function's. - */ - if (qdev->port) - offset = size; - - if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) - return -ETIMEDOUT; - - for (i = 0; i < size; i++, p++) { - status = ql_read_flash_word(qdev, i+offset, p); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Error reading flash.\n"); - goto exit; - } - - } - - status = ql_validate_flash(qdev, - sizeof(struct flash_params_8012) / sizeof(u16), - "8012"); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); - status = -EINVAL; - goto exit; - } - - if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) { - status = -EINVAL; - goto exit; - } - - memcpy(qdev->ndev->dev_addr, - qdev->flash.flash_params_8012.mac_addr, - qdev->ndev->addr_len); - -exit: - ql_sem_unlock(qdev, SEM_FLASH_MASK); - return status; -} - -/* xgmac register are located behind the xgmac_addr and xgmac_data - * register pair. Each read/write requires us to wait for the ready - * bit before reading/writing the data. - */ -static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data) -{ - int status; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - return status; - /* write the data to the data reg */ - ql_write32(qdev, XGMAC_DATA, data); - /* trigger the write */ - ql_write32(qdev, XGMAC_ADDR, reg); - return status; -} - -/* xgmac register are located behind the xgmac_addr and xgmac_data - * register pair. Each read/write requires us to wait for the ready - * bit before reading/writing the data. - */ -int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data) -{ - int status = 0; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - goto exit; - /* set up for reg read */ - ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R); - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - goto exit; - /* get the data */ - *data = ql_read32(qdev, XGMAC_DATA); -exit: - return status; -} - -/* This is used for reading the 64-bit statistics regs. */ -int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data) -{ - int status = 0; - u32 hi = 0; - u32 lo = 0; - - status = ql_read_xgmac_reg(qdev, reg, &lo); - if (status) - goto exit; - - status = ql_read_xgmac_reg(qdev, reg + 4, &hi); - if (status) - goto exit; - - *data = (u64) lo | ((u64) hi << 32); - -exit: - return status; -} - -static int ql_8000_port_initialize(struct ql_adapter *qdev) -{ - int status; - /* - * Get MPI firmware version for driver banner - * and ethool info. 
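[Editor's sketch] ql_read_xgmac_reg64() above assembles a 64-bit statistics counter from two consecutive 32-bit registers, low word at the base offset and high word four bytes later. The shape of that helper, with read_reg32() as a hypothetical stand-in for ql_read_xgmac_reg():

    #include <stdint.h>

    static int read_reg32(uint32_t reg, uint32_t *val)
    {
            (void)reg;
            *val = 0;    /* stub; the real helper polls a ready bit first */
            return 0;
    }

    static int read_reg64(uint32_t reg, uint64_t *val)
    {
            uint32_t lo, hi;

            if (read_reg32(reg, &lo) || read_reg32(reg + 4, &hi))
                    return -1;

            *val = (uint64_t)lo | ((uint64_t)hi << 32);
            return 0;
    }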
-     */
-    status = ql_mb_about_fw(qdev);
-    if (status)
-        goto exit;
-    status = ql_mb_get_fw_state(qdev);
-    if (status)
-        goto exit;
-    /* Wake up a worker to get/set the TX/RX frame sizes. */
-    queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
-exit:
-    return status;
-}
-
-/* Take the MAC Core out of reset.
- * Enable statistics counting.
- * Take the transmitter/receiver out of reset.
- * This functionality may be done in the MPI firmware at a
- * later date.
- */
-static int ql_8012_port_initialize(struct ql_adapter *qdev)
-{
-    int status = 0;
-    u32 data;
-
-    if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
-        /* Another function has the semaphore, so
-         * wait for the port init bit to come ready.
-         */
-        netif_info(qdev, link, qdev->ndev,
-                   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
-        status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
-        if (status) {
-            netif_crit(qdev, link, qdev->ndev,
-                       "Port initialize timed out.\n");
-        }
-        return status;
-    }
-
-    netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
-    /* Set the core reset. */
-    status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
-    if (status)
-        goto end;
-    data |= GLOBAL_CFG_RESET;
-    status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
-    if (status)
-        goto end;
-
-    /* Clear the core reset and turn on jumbo for receiver. */
-    data &= ~GLOBAL_CFG_RESET;  /* Clear core reset. */
-    data |= GLOBAL_CFG_JUMBO;   /* Turn on jumbo. */
-    data |= GLOBAL_CFG_TX_STAT_EN;
-    data |= GLOBAL_CFG_RX_STAT_EN;
-    status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
-    if (status)
-        goto end;
-
-    /* Enable transmitter, and clear its reset. */
-    status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
-    if (status)
-        goto end;
-    data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
-    data |= TX_CFG_EN;      /* Enable the transmitter. */
-    status = ql_write_xgmac_reg(qdev, TX_CFG, data);
-    if (status)
-        goto end;
-
-    /* Enable receiver and clear its reset. */
-    status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
-    if (status)
-        goto end;
-    data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
-    data |= RX_CFG_EN;      /* Enable the receiver. */
-    status = ql_write_xgmac_reg(qdev, RX_CFG, data);
-    if (status)
-        goto end;
-
-    /* Turn on jumbo. */
-    status =
-        ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
-    if (status)
-        goto end;
-    status =
-        ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
-    if (status)
-        goto end;
-
-    /* Signal to the world that the port is enabled. */
-    ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
-end:
-    ql_sem_unlock(qdev, qdev->xg_sem_mask);
-    return status;
-}
-
-static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
-{
-    return PAGE_SIZE << qdev->lbq_buf_order;
-}
-
-/* Get the next large buffer. */
-static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
-{
-    struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
-    rx_ring->lbq_curr_idx++;
-    if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
-        rx_ring->lbq_curr_idx = 0;
-    rx_ring->lbq_free_cnt++;
-    return lbq_desc;
-}
-
-static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
-                                          struct rx_ring *rx_ring)
-{
-    struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
-
-    pci_dma_sync_single_for_cpu(qdev->pdev,
-                                dma_unmap_addr(lbq_desc, mapaddr),
-                                rx_ring->lbq_buf_size,
-                                PCI_DMA_FROMDEVICE);
-
-    /* If it's the last chunk of our master page then
-     * we unmap it.
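-     * The master page was mapped just once, so unmapping the final
-     * chunk releases the DMA mapping for every chunk carved from it.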
- */ - if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) - == ql_lbq_block_size(qdev)) - pci_unmap_page(qdev->pdev, - lbq_desc->p.pg_chunk.map, - ql_lbq_block_size(qdev), - PCI_DMA_FROMDEVICE); - return lbq_desc; -} - -/* Get the next small buffer. */ -static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) -{ - struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx]; - rx_ring->sbq_curr_idx++; - if (rx_ring->sbq_curr_idx == rx_ring->sbq_len) - rx_ring->sbq_curr_idx = 0; - rx_ring->sbq_free_cnt++; - return sbq_desc; -} - -/* Update an rx ring index. */ -static void ql_update_cq(struct rx_ring *rx_ring) -{ - rx_ring->cnsmr_idx++; - rx_ring->curr_entry++; - if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) { - rx_ring->cnsmr_idx = 0; - rx_ring->curr_entry = rx_ring->cq_base; - } -} - -static void ql_write_cq_idx(struct rx_ring *rx_ring) -{ - ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); -} - -static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, - struct bq_desc *lbq_desc) -{ - if (!rx_ring->pg_chunk.page) { - u64 map; - rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP | - GFP_ATOMIC, - qdev->lbq_buf_order); - if (unlikely(!rx_ring->pg_chunk.page)) { - netif_err(qdev, drv, qdev->ndev, - "page allocation failed.\n"); - return -ENOMEM; - } - rx_ring->pg_chunk.offset = 0; - map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, - 0, ql_lbq_block_size(qdev), - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(qdev->pdev, map)) { - __free_pages(rx_ring->pg_chunk.page, - qdev->lbq_buf_order); - netif_err(qdev, drv, qdev->ndev, - "PCI mapping failed.\n"); - return -ENOMEM; - } - rx_ring->pg_chunk.map = map; - rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page); - } - - /* Copy the current master pg_chunk info - * to the current descriptor. - */ - lbq_desc->p.pg_chunk = rx_ring->pg_chunk; - - /* Adjust the master page chunk for next - * buffer get. - */ - rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; - if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { - rx_ring->pg_chunk.page = NULL; - lbq_desc->p.pg_chunk.last_flag = 1; - } else { - rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; - get_page(rx_ring->pg_chunk.page); - lbq_desc->p.pg_chunk.last_flag = 0; - } - return 0; -} -/* Process (refill) a large buffer queue. 
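- * Refill is batched: once more than 32 buffers have been consumed,
- * page chunks are posted 16 at a time and the producer index
- * doorbell is written once after the loop.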
*/ -static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - u32 clean_idx = rx_ring->lbq_clean_idx; - u32 start_idx = clean_idx; - struct bq_desc *lbq_desc; - u64 map; - int i; - - while (rx_ring->lbq_free_cnt > 32) { - for (i = 0; i < 16; i++) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "lbq: try cleaning clean_idx = %d.\n", - clean_idx); - lbq_desc = &rx_ring->lbq[clean_idx]; - if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { - netif_err(qdev, ifup, qdev->ndev, - "Could not get a page chunk.\n"); - return; - } - - map = lbq_desc->p.pg_chunk.map + - lbq_desc->p.pg_chunk.offset; - dma_unmap_addr_set(lbq_desc, mapaddr, map); - dma_unmap_len_set(lbq_desc, maplen, - rx_ring->lbq_buf_size); - *lbq_desc->addr = cpu_to_le64(map); - - pci_dma_sync_single_for_device(qdev->pdev, map, - rx_ring->lbq_buf_size, - PCI_DMA_FROMDEVICE); - clean_idx++; - if (clean_idx == rx_ring->lbq_len) - clean_idx = 0; - } - - rx_ring->lbq_clean_idx = clean_idx; - rx_ring->lbq_prod_idx += 16; - if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) - rx_ring->lbq_prod_idx = 0; - rx_ring->lbq_free_cnt -= 16; - } - - if (start_idx != clean_idx) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "lbq: updating prod idx = %d.\n", - rx_ring->lbq_prod_idx); - ql_write_db_reg(rx_ring->lbq_prod_idx, - rx_ring->lbq_prod_idx_db_reg); - } -} - -/* Process (refill) a small buffer queue. */ -static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - u32 clean_idx = rx_ring->sbq_clean_idx; - u32 start_idx = clean_idx; - struct bq_desc *sbq_desc; - u64 map; - int i; - - while (rx_ring->sbq_free_cnt > 16) { - for (i = 0; i < 16; i++) { - sbq_desc = &rx_ring->sbq[clean_idx]; - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "sbq: try cleaning clean_idx = %d.\n", - clean_idx); - if (sbq_desc->p.skb == NULL) { - netif_printk(qdev, rx_status, KERN_DEBUG, - qdev->ndev, - "sbq: getting new skb for index %d.\n", - sbq_desc->index); - sbq_desc->p.skb = - netdev_alloc_skb(qdev->ndev, - SMALL_BUFFER_SIZE); - if (sbq_desc->p.skb == NULL) { - netif_err(qdev, probe, qdev->ndev, - "Couldn't get an skb.\n"); - rx_ring->sbq_clean_idx = clean_idx; - return; - } - skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); - map = pci_map_single(qdev->pdev, - sbq_desc->p.skb->data, - rx_ring->sbq_buf_size, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(qdev->pdev, map)) { - netif_err(qdev, ifup, qdev->ndev, - "PCI mapping failed.\n"); - rx_ring->sbq_clean_idx = clean_idx; - dev_kfree_skb_any(sbq_desc->p.skb); - sbq_desc->p.skb = NULL; - return; - } - dma_unmap_addr_set(sbq_desc, mapaddr, map); - dma_unmap_len_set(sbq_desc, maplen, - rx_ring->sbq_buf_size); - *sbq_desc->addr = cpu_to_le64(map); - } - - clean_idx++; - if (clean_idx == rx_ring->sbq_len) - clean_idx = 0; - } - rx_ring->sbq_clean_idx = clean_idx; - rx_ring->sbq_prod_idx += 16; - if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) - rx_ring->sbq_prod_idx = 0; - rx_ring->sbq_free_cnt -= 16; - } - - if (start_idx != clean_idx) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "sbq: updating prod idx = %d.\n", - rx_ring->sbq_prod_idx); - ql_write_db_reg(rx_ring->sbq_prod_idx, - rx_ring->sbq_prod_idx_db_reg); - } -} - -static void ql_update_buffer_queues(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - ql_update_sbq(qdev, rx_ring); - ql_update_lbq(qdev, rx_ring); -} - -/* Unmaps tx buffers. Can be called from send() if a pci mapping - * fails at some stage, or from the interrupt when a tx completes. 
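- * Mapping 0 is always the skb->data area, mappings 1 through 6 are
- * page fragments, and mapping 7 (when more than seven mappings were
- * needed) is the externally mapped OAL list.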
- */
-static void ql_unmap_send(struct ql_adapter *qdev,
-                          struct tx_ring_desc *tx_ring_desc, int mapped)
-{
-    int i;
-    for (i = 0; i < mapped; i++) {
-        if (i == 0 || (i == 7 && mapped > 7)) {
-            /*
-             * Unmap the skb->data area, or the
-             * external sglist (AKA the Outbound
-             * Address List (OAL)).
-             * If it's the zeroth element, then it's
-             * the skb->data area. If it's the 7th
-             * element and there are more than 6 frags,
-             * then it's an OAL.
-             */
-            if (i == 7) {
-                netif_printk(qdev, tx_done, KERN_DEBUG,
-                             qdev->ndev,
-                             "unmapping OAL area.\n");
-            }
-            pci_unmap_single(qdev->pdev,
-                             dma_unmap_addr(&tx_ring_desc->map[i],
-                                            mapaddr),
-                             dma_unmap_len(&tx_ring_desc->map[i],
-                                           maplen),
-                             PCI_DMA_TODEVICE);
-        } else {
-            netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
-                         "unmapping frag %d.\n", i);
-            pci_unmap_page(qdev->pdev,
-                           dma_unmap_addr(&tx_ring_desc->map[i],
-                                          mapaddr),
-                           dma_unmap_len(&tx_ring_desc->map[i],
-                                         maplen), PCI_DMA_TODEVICE);
-        }
-    }
-
-}
-
-/* Map the buffers for this transmit. This will return
- * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
- */
-static int ql_map_send(struct ql_adapter *qdev,
-                       struct ob_mac_iocb_req *mac_iocb_ptr,
-                       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
-{
-    int len = skb_headlen(skb);
-    dma_addr_t map;
-    int frag_idx, err, map_idx = 0;
-    struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
-    int frag_cnt = skb_shinfo(skb)->nr_frags;
-
-    if (frag_cnt) {
-        netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
-                     "frag_cnt = %d.\n", frag_cnt);
-    }
-    /*
-     * Map the skb buffer first.
-     */
-    map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
-
-    err = pci_dma_mapping_error(qdev->pdev, map);
-    if (err) {
-        netif_err(qdev, tx_queued, qdev->ndev,
-                  "PCI mapping failed with error: %d\n", err);
-
-        return NETDEV_TX_BUSY;
-    }
-
-    tbd->len = cpu_to_le32(len);
-    tbd->addr = cpu_to_le64(map);
-    dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
-    dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
-    map_idx++;
-
-    /*
-     * This loop fills the remainder of the 8 address descriptors
-     * in the IOCB. If there are more than 7 fragments, then the
-     * eighth address desc will point to an external list (OAL).
-     * When this happens, the remainder of the frags will be stored
-     * in this list.
-     */
-    for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
-        skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
-        tbd++;
-        if (frag_idx == 6 && frag_cnt > 7) {
-            /* Let's tack on an sglist.
-             * Our control block will now
-             * look like this:
-             * iocb->seg[0] = skb->data
-             * iocb->seg[1] = frag[0]
-             * iocb->seg[2] = frag[1]
-             * iocb->seg[3] = frag[2]
-             * iocb->seg[4] = frag[3]
-             * iocb->seg[5] = frag[4]
-             * iocb->seg[6] = frag[5]
-             * iocb->seg[7] = ptr to OAL (external sglist)
-             * oal->seg[0] = frag[6]
-             * oal->seg[1] = frag[7]
-             * oal->seg[2] = frag[8]
-             * oal->seg[3] = frag[9]
-             * oal->seg[4] = frag[10]
-             * etc...
-             */
-            /* Tack on the OAL in the eighth segment of IOCB. */
-            map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
-                                 sizeof(struct oal),
-                                 PCI_DMA_TODEVICE);
-            err = pci_dma_mapping_error(qdev->pdev, map);
-            if (err) {
-                netif_err(qdev, tx_queued, qdev->ndev,
-                          "PCI mapping outbound address list with error: %d\n",
-                          err);
-                goto map_error;
-            }
-
-            tbd->addr = cpu_to_le64(map);
-            /*
-             * The length is the number of fragments
-             * that remain to be mapped times the length
-             * of our sglist (OAL).
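-             * The TX_DESC_C bit is or'd into the length to mark the
-             * entry as a continuation descriptor; the last segment is
-             * terminated with TX_DESC_E instead.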
- */ - tbd->len = - cpu_to_le32((sizeof(struct tx_buf_desc) * - (frag_cnt - frag_idx)) | TX_DESC_C); - dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, - map); - dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, - sizeof(struct oal)); - tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; - map_idx++; - } - - map = - pci_map_page(qdev->pdev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netif_err(qdev, tx_queued, qdev->ndev, - "PCI mapping frags failed with error: %d.\n", - err); - goto map_error; - } - - tbd->addr = cpu_to_le64(map); - tbd->len = cpu_to_le32(frag->size); - dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); - dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, - frag->size); - - } - /* Save the number of segments we've mapped. */ - tx_ring_desc->map_cnt = map_idx; - /* Terminate the last segment. */ - tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E); - return NETDEV_TX_OK; - -map_error: - /* - * If the first frag mapping failed, then i will be zero. - * This causes the unmap of the skb->data area. Otherwise - * we pass in the number of frags that mapped successfully - * so they can be umapped. - */ - ql_unmap_send(qdev, tx_ring_desc, map_idx); - return NETDEV_TX_BUSY; -} - -/* Process an inbound completion from an rx ring. */ -static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp, - u32 length, - u16 vlan_id) -{ - struct sk_buff *skb; - struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - struct skb_frag_struct *rx_frag; - int nr_frags; - struct napi_struct *napi = &rx_ring->napi; - - napi->dev = qdev->ndev; - - skb = napi_get_frags(napi); - if (!skb) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get an skb, exiting.\n"); - rx_ring->rx_dropped++; - put_page(lbq_desc->p.pg_chunk.page); - return; - } - prefetch(lbq_desc->p.pg_chunk.va); - rx_frag = skb_shinfo(skb)->frags; - nr_frags = skb_shinfo(skb)->nr_frags; - rx_frag += nr_frags; - rx_frag->page = lbq_desc->p.pg_chunk.page; - rx_frag->page_offset = lbq_desc->p.pg_chunk.offset; - rx_frag->size = length; - - skb->len += length; - skb->data_len += length; - skb->truesize += length; - skb_shinfo(skb)->nr_frags++; - - rx_ring->rx_packets++; - rx_ring->rx_bytes += length; - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb_record_rx_queue(skb, rx_ring->cq_id); - if (vlan_id != 0xffff) - __vlan_hwaccel_put_tag(skb, vlan_id); - napi_gro_frags(napi); -} - -/* Process an inbound completion from an rx ring. */ -static void ql_process_mac_rx_page(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp, - u32 length, - u16 vlan_id) -{ - struct net_device *ndev = qdev->ndev; - struct sk_buff *skb = NULL; - void *addr; - struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - struct napi_struct *napi = &rx_ring->napi; - - skb = netdev_alloc_skb(ndev, length); - if (!skb) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get an skb, need to unwind!.\n"); - rx_ring->rx_dropped++; - put_page(lbq_desc->p.pg_chunk.page); - return; - } - - addr = lbq_desc->p.pg_chunk.va; - prefetch(addr); - - - /* Frame error, so drop the packet. 
*/ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - netif_info(qdev, drv, qdev->ndev, - "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); - rx_ring->rx_errors++; - goto err_out; - } - - /* The max framesize filter on this chip is set higher than - * MTU since FCoE uses 2k frames. - */ - if (skb->len > ndev->mtu + ETH_HLEN) { - netif_err(qdev, drv, qdev->ndev, - "Segment too small, dropping.\n"); - rx_ring->rx_dropped++; - goto err_out; - } - memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN); - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", - length); - skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset+ETH_HLEN, - length-ETH_HLEN); - skb->len += length-ETH_HLEN; - skb->data_len += length-ETH_HLEN; - skb->truesize += length-ETH_HLEN; - - rx_ring->rx_packets++; - rx_ring->rx_bytes += skb->len; - skb->protocol = eth_type_trans(skb, ndev); - skb_checksum_none_assert(skb); - - if ((ndev->features & NETIF_F_RXCSUM) && - !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { - /* TCP frame. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "TCP checksum done!\n"); - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && - (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { - /* Unfragmented ipv4 UDP frame. */ - struct iphdr *iph = (struct iphdr *) skb->data; - if (!(iph->frag_off & - cpu_to_be16(IP_MF|IP_OFFSET))) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - netif_printk(qdev, rx_status, KERN_DEBUG, - qdev->ndev, - "TCP checksum done!\n"); - } - } - } - - skb_record_rx_queue(skb, rx_ring->cq_id); - if (vlan_id != 0xffff) - __vlan_hwaccel_put_tag(skb, vlan_id); - if (skb->ip_summed == CHECKSUM_UNNECESSARY) - napi_gro_receive(napi, skb); - else - netif_receive_skb(skb); - return; -err_out: - dev_kfree_skb_any(skb); - put_page(lbq_desc->p.pg_chunk.page); -} - -/* Process an inbound completion from an rx ring. */ -static void ql_process_mac_rx_skb(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp, - u32 length, - u16 vlan_id) -{ - struct net_device *ndev = qdev->ndev; - struct sk_buff *skb = NULL; - struct sk_buff *new_skb = NULL; - struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring); - - skb = sbq_desc->p.skb; - /* Allocate new_skb and copy */ - new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); - if (new_skb == NULL) { - netif_err(qdev, probe, qdev->ndev, - "No skb available, drop the packet.\n"); - rx_ring->rx_dropped++; - return; - } - skb_reserve(new_skb, NET_IP_ALIGN); - memcpy(skb_put(new_skb, length), skb->data, length); - skb = new_skb; - - /* Frame error, so drop the packet. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - netif_info(qdev, drv, qdev->ndev, - "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); - dev_kfree_skb_any(skb); - rx_ring->rx_errors++; - return; - } - - /* loopback self test for ethtool */ - if (test_bit(QL_SELFTEST, &qdev->flags)) { - ql_check_lb_frame(qdev, skb); - dev_kfree_skb_any(skb); - return; - } - - /* The max framesize filter on this chip is set higher than - * MTU since FCoE uses 2k frames. 
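-     * Frames longer than MTU plus the ethernet header still get
-     * through the hardware filter, so they are dropped here instead.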
-     */
-    if (skb->len > ndev->mtu + ETH_HLEN) {
-        dev_kfree_skb_any(skb);
-        rx_ring->rx_dropped++;
-        return;
-    }
-
-    prefetch(skb->data);
-    skb->dev = ndev;
-    if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
-        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-                     "%s Multicast.\n",
-                     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
-                     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
-                     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
-                     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
-                     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
-                     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
-    }
-    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
-        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-                     "Promiscuous Packet.\n");
-
-    rx_ring->rx_packets++;
-    rx_ring->rx_bytes += skb->len;
-    skb->protocol = eth_type_trans(skb, ndev);
-    skb_checksum_none_assert(skb);
-
-    /* If rx checksum is on, and there are no
-     * csum or frame errors.
-     */
-    if ((ndev->features & NETIF_F_RXCSUM) &&
-        !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
-        /* TCP frame. */
-        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
-            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-                         "TCP checksum done!\n");
-            skb->ip_summed = CHECKSUM_UNNECESSARY;
-        } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
-                   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
-            /* Unfragmented ipv4 UDP frame. */
-            struct iphdr *iph = (struct iphdr *) skb->data;
-            if (!(iph->frag_off &
-                  ntohs(IP_MF|IP_OFFSET))) {
-                skb->ip_summed = CHECKSUM_UNNECESSARY;
-                netif_printk(qdev, rx_status, KERN_DEBUG,
-                             qdev->ndev,
-                             "TCP checksum done!\n");
-            }
-        }
-    }
-
-    skb_record_rx_queue(skb, rx_ring->cq_id);
-    if (vlan_id != 0xffff)
-        __vlan_hwaccel_put_tag(skb, vlan_id);
-    if (skb->ip_summed == CHECKSUM_UNNECESSARY)
-        napi_gro_receive(&rx_ring->napi, skb);
-    else
-        netif_receive_skb(skb);
-}
-
-static void ql_realign_skb(struct sk_buff *skb, int len)
-{
-    void *temp_addr = skb->data;
-
-    /* Undo the skb_reserve(skb,32) we did before
-     * giving to hardware, and realign data on
-     * a 2-byte boundary.
-     */
-    skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
-    skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
-    skb_copy_to_linear_data(skb, temp_addr,
-                            (unsigned int)len);
-}
-
-/*
- * This function builds an skb for the given inbound
- * completion. It will be rewritten for readability in the near
- * future, but for now it works well.
- */
-static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
-                                       struct rx_ring *rx_ring,
-                                       struct ib_mac_iocb_rsp *ib_mac_rsp)
-{
-    struct bq_desc *lbq_desc;
-    struct bq_desc *sbq_desc;
-    struct sk_buff *skb = NULL;
-    u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-    u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
-
-    /*
-     * Handle the header buffer if present.
-     */
-    if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
-        ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
-        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-                     "Header of %d bytes in small buffer.\n", hdr_len);
-        /*
-         * Headers fit nicely into a small buffer.
-         */
-        sbq_desc = ql_get_curr_sbuf(rx_ring);
-        pci_unmap_single(qdev->pdev,
-                         dma_unmap_addr(sbq_desc, mapaddr),
-                         dma_unmap_len(sbq_desc, maplen),
-                         PCI_DMA_FROMDEVICE);
-        skb = sbq_desc->p.skb;
-        ql_realign_skb(skb, hdr_len);
-        skb_put(skb, hdr_len);
-        sbq_desc->p.skb = NULL;
-    }
-
-    /*
-     * Handle the data buffer(s).
-     */
-    if (unlikely(!length)) {    /* Is there data too?
*/ - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "No Data buffer in this packet.\n"); - return skb; - } - - if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Headers in small, data of %d bytes in small, combine them.\n", - length); - /* - * Data is less than small buffer size so it's - * stuffed in a small buffer. - * For this case we append the data - * from the "data" small buffer to the "header" small - * buffer. - */ - sbq_desc = ql_get_curr_sbuf(rx_ring); - pci_dma_sync_single_for_cpu(qdev->pdev, - dma_unmap_addr - (sbq_desc, mapaddr), - dma_unmap_len - (sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - memcpy(skb_put(skb, length), - sbq_desc->p.skb->data, length); - pci_dma_sync_single_for_device(qdev->pdev, - dma_unmap_addr - (sbq_desc, - mapaddr), - dma_unmap_len - (sbq_desc, - maplen), - PCI_DMA_FROMDEVICE); - } else { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%d bytes in a single small buffer.\n", - length); - sbq_desc = ql_get_curr_sbuf(rx_ring); - skb = sbq_desc->p.skb; - ql_realign_skb(skb, length); - skb_put(skb, length); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, - mapaddr), - dma_unmap_len(sbq_desc, - maplen), - PCI_DMA_FROMDEVICE); - sbq_desc->p.skb = NULL; - } - } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Header in small, %d bytes in large. Chain large to small!\n", - length); - /* - * The data is in a single large buffer. We - * chain it to the header buffer's skb and let - * it rip. - */ - lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Chaining page at offset = %d, for %d bytes to skb.\n", - lbq_desc->p.pg_chunk.offset, length); - skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset, - length); - skb->len += length; - skb->data_len += length; - skb->truesize += length; - } else { - /* - * The headers and data are in a single large buffer. We - * copy it to a new skb and let it go. This can happen with - * jumbo mtu on a non-TCP/UDP frame. - */ - lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - skb = netdev_alloc_skb(qdev->ndev, length); - if (skb == NULL) { - netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev, - "No skb available, drop the packet.\n"); - return NULL; - } - pci_unmap_page(qdev->pdev, - dma_unmap_addr(lbq_desc, - mapaddr), - dma_unmap_len(lbq_desc, maplen), - PCI_DMA_FROMDEVICE); - skb_reserve(skb, NET_IP_ALIGN); - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", - length); - skb_fill_page_desc(skb, 0, - lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset, - length); - skb->len += length; - skb->data_len += length; - skb->truesize += length; - length -= length; - __pskb_pull_tail(skb, - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? - VLAN_ETH_HLEN : ETH_HLEN); - } - } else { - /* - * The data is in a chain of large buffers - * pointed to by a small buffer. We loop - * thru and chain them to the our small header - * buffer's skb. - * frags: There are 18 max frags and our small - * buffer will hold 32 of them. The thing is, - * we'll use 3 max for our 9000 byte jumbo - * frames. If the MTU goes up we could - * eventually be in trouble. 
- */ - int size, i = 0; - sbq_desc = ql_get_curr_sbuf(rx_ring); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { - /* - * This is an non TCP/UDP IP frame, so - * the headers aren't split into a small - * buffer. We have to use the small buffer - * that contains our sg list as our skb to - * send upstairs. Copy the sg list here to - * a local buffer and use it to find the - * pages to chain. - */ - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%d bytes of headers & data in chain of large.\n", - length); - skb = sbq_desc->p.skb; - sbq_desc->p.skb = NULL; - skb_reserve(skb, NET_IP_ALIGN); - } - while (length > 0) { - lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - size = (length < rx_ring->lbq_buf_size) ? length : - rx_ring->lbq_buf_size; - - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Adding page %d to skb for %d bytes.\n", - i, size); - skb_fill_page_desc(skb, i, - lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset, - size); - skb->len += size; - skb->data_len += size; - skb->truesize += size; - length -= size; - i++; - } - __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? - VLAN_ETH_HLEN : ETH_HLEN); - } - return skb; -} - -/* Process an inbound completion from an rx ring. */ -static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp, - u16 vlan_id) -{ - struct net_device *ndev = qdev->ndev; - struct sk_buff *skb = NULL; - - QL_DUMP_IB_MAC_RSP(ib_mac_rsp); - - skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); - if (unlikely(!skb)) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "No skb available, drop packet.\n"); - rx_ring->rx_dropped++; - return; - } - - /* Frame error, so drop the packet. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - netif_info(qdev, drv, qdev->ndev, - "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); - dev_kfree_skb_any(skb); - rx_ring->rx_errors++; - return; - } - - /* The max framesize filter on this chip is set higher than - * MTU since FCoE uses 2k frames. - */ - if (skb->len > ndev->mtu + ETH_HLEN) { - dev_kfree_skb_any(skb); - rx_ring->rx_dropped++; - return; - } - - /* loopback self test for ethtool */ - if (test_bit(QL_SELFTEST, &qdev->flags)) { - ql_check_lb_frame(qdev, skb); - dev_kfree_skb_any(skb); - return; - } - - prefetch(skb->data); - skb->dev = ndev; - if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_HASH ? "Hash" : - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_REG ? "Registered" : - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); - rx_ring->rx_multicast++; - } - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Promiscuous Packet.\n"); - } - - skb->protocol = eth_type_trans(skb, ndev); - skb_checksum_none_assert(skb); - - /* If rx checksum is on, and there are no - * csum or frame errors. - */ - if ((ndev->features & NETIF_F_RXCSUM) && - !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { - /* TCP frame. 
-         */
-        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
-            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-                         "TCP checksum done!\n");
-            skb->ip_summed = CHECKSUM_UNNECESSARY;
-        } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
-                   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
-            /* Unfragmented ipv4 UDP frame. */
-            struct iphdr *iph = (struct iphdr *) skb->data;
-            if (!(iph->frag_off &
-                  ntohs(IP_MF|IP_OFFSET))) {
-                skb->ip_summed = CHECKSUM_UNNECESSARY;
-                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-                             "TCP checksum done!\n");
-            }
-        }
-    }
-
-    rx_ring->rx_packets++;
-    rx_ring->rx_bytes += skb->len;
-    skb_record_rx_queue(skb, rx_ring->cq_id);
-    if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
-        __vlan_hwaccel_put_tag(skb, vlan_id);
-    if (skb->ip_summed == CHECKSUM_UNNECESSARY)
-        napi_gro_receive(&rx_ring->napi, skb);
-    else
-        netif_receive_skb(skb);
-}
-
-/* Process an inbound completion from an rx ring. */
-static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
-                                            struct rx_ring *rx_ring,
-                                            struct ib_mac_iocb_rsp *ib_mac_rsp)
-{
-    u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-    u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-                  ((le16_to_cpu(ib_mac_rsp->vlan_id) &
-                    IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
-
-    QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
-
-    if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
-        /* The data and headers are split into
-         * separate buffers.
-         */
-        ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
-                                     vlan_id);
-    } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
-        /* The data fit in a single small buffer.
-         * Allocate a new skb, copy the data and
-         * return the buffer to the free pool.
-         */
-        ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
-                              length, vlan_id);
-    } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
-               !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
-               (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
-        /* TCP packet in a page chunk that's been checksummed.
-         * Tack it on to our GRO skb and let it go.
-         */
-        ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
-                                   length, vlan_id);
-    } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
-        /* Non-TCP packet in a page chunk. Allocate an
-         * skb, tack it on frags, and send it up.
-         */
-        ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
-                               length, vlan_id);
-    } else {
-        /* Non-TCP/UDP large frames that span multiple buffers
-         * can be processed correctly by the split frame logic.
-         */
-        ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
-                                     vlan_id);
-    }
-
-    return (unsigned long)length;
-}
-
-/* Process an outbound completion from an rx ring.
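- * TX completions are reported on a completion (rx) ring; each one
- * lets us unmap and free the skb for a frame the chip has finished
- * sending.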
*/ -static void ql_process_mac_tx_intr(struct ql_adapter *qdev, - struct ob_mac_iocb_rsp *mac_rsp) -{ - struct tx_ring *tx_ring; - struct tx_ring_desc *tx_ring_desc; - - QL_DUMP_OB_MAC_RSP(mac_rsp); - tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; - tx_ring_desc = &tx_ring->q[mac_rsp->tid]; - ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); - tx_ring->tx_bytes += (tx_ring_desc->skb)->len; - tx_ring->tx_packets++; - dev_kfree_skb(tx_ring_desc->skb); - tx_ring_desc->skb = NULL; - - if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | - OB_MAC_IOCB_RSP_S | - OB_MAC_IOCB_RSP_L | - OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) { - if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) { - netif_warn(qdev, tx_done, qdev->ndev, - "Total descriptor length did not match transfer length.\n"); - } - if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) { - netif_warn(qdev, tx_done, qdev->ndev, - "Frame too short to be valid, not sent.\n"); - } - if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) { - netif_warn(qdev, tx_done, qdev->ndev, - "Frame too long, but sent anyway.\n"); - } - if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) { - netif_warn(qdev, tx_done, qdev->ndev, - "PCI backplane error. Frame not sent.\n"); - } - } - atomic_inc(&tx_ring->tx_count); -} - -/* Fire up a handler to reset the MPI processor. */ -void ql_queue_fw_error(struct ql_adapter *qdev) -{ - ql_link_off(qdev); - queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); -} - -void ql_queue_asic_error(struct ql_adapter *qdev) -{ - ql_link_off(qdev); - ql_disable_interrupts(qdev); - /* Clear adapter up bit to signal the recovery - * process that it shouldn't kill the reset worker - * thread - */ - clear_bit(QL_ADAPTER_UP, &qdev->flags); - /* Set asic recovery bit to indicate reset process that we are - * in fatal error recovery process rather than normal close - */ - set_bit(QL_ASIC_RECOVERY, &qdev->flags); - queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); -} - -static void ql_process_chip_ae_intr(struct ql_adapter *qdev, - struct ib_ae_iocb_rsp *ib_ae_rsp) -{ - switch (ib_ae_rsp->event) { - case MGMT_ERR_EVENT: - netif_err(qdev, rx_err, qdev->ndev, - "Management Processor Fatal Error.\n"); - ql_queue_fw_error(qdev); - return; - - case CAM_LOOKUP_ERR_EVENT: - netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n"); - netdev_err(qdev->ndev, "This event shouldn't occur.\n"); - ql_queue_asic_error(qdev); - return; - - case SOFT_ECC_ERROR_EVENT: - netdev_err(qdev->ndev, "Soft ECC error detected.\n"); - ql_queue_asic_error(qdev); - break; - - case PCI_ERR_ANON_BUF_RD: - netdev_err(qdev->ndev, "PCI error occurred when reading " - "anonymous buffers from rx_ring %d.\n", - ib_ae_rsp->q_id); - ql_queue_asic_error(qdev); - break; - - default: - netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n", - ib_ae_rsp->event); - ql_queue_asic_error(qdev); - break; - } -} - -static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) -{ - struct ql_adapter *qdev = rx_ring->qdev; - u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); - struct ob_mac_iocb_rsp *net_rsp = NULL; - int count = 0; - - struct tx_ring *tx_ring; - /* While there are entries in the completion queue. 
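-     * The chip advances the producer index in the shared-memory area;
-     * we consume entries until our consumer index catches up with it.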
*/ - while (prod != rx_ring->cnsmr_idx) { - - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d.\n.", - rx_ring->cq_id, prod, rx_ring->cnsmr_idx); - - net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; - rmb(); - switch (net_rsp->opcode) { - - case OPCODE_OB_MAC_TSO_IOCB: - case OPCODE_OB_MAC_IOCB: - ql_process_mac_tx_intr(qdev, net_rsp); - break; - default: - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Hit default case, not handled! dropping the packet, opcode = %x.\n", - net_rsp->opcode); - } - count++; - ql_update_cq(rx_ring); - prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); - } - if (!net_rsp) - return 0; - ql_write_cq_idx(rx_ring); - tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; - if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { - if (atomic_read(&tx_ring->queue_stopped) && - (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) - /* - * The queue got stopped because the tx_ring was full. - * Wake it up, because it's now at least 25% empty. - */ - netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); - } - - return count; -} - -static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) -{ - struct ql_adapter *qdev = rx_ring->qdev; - u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); - struct ql_net_rsp_iocb *net_rsp; - int count = 0; - - /* While there are entries in the completion queue. */ - while (prod != rx_ring->cnsmr_idx) { - - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d.\n.", - rx_ring->cq_id, prod, rx_ring->cnsmr_idx); - - net_rsp = rx_ring->curr_entry; - rmb(); - switch (net_rsp->opcode) { - case OPCODE_IB_MAC_IOCB: - ql_process_mac_rx_intr(qdev, rx_ring, - (struct ib_mac_iocb_rsp *) - net_rsp); - break; - - case OPCODE_IB_AE_IOCB: - ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) - net_rsp); - break; - default: - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Hit default case, not handled! dropping the packet, opcode = %x.\n", - net_rsp->opcode); - break; - } - count++; - ql_update_cq(rx_ring); - prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); - if (count == budget) - break; - } - ql_update_buffer_queues(qdev, rx_ring); - ql_write_cq_idx(rx_ring); - return count; -} - -static int ql_napi_poll_msix(struct napi_struct *napi, int budget) -{ - struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); - struct ql_adapter *qdev = rx_ring->qdev; - struct rx_ring *trx_ring; - int i, work_done = 0; - struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; - - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id); - - /* Service the TX rings first. They start - * right after the RSS rings. */ - for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { - trx_ring = &qdev->rx_ring[i]; - /* If this TX completion ring belongs to this vector and - * it's not empty then service it. - */ - if ((ctx->irq_mask & (1 << trx_ring->cq_id)) && - (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) != - trx_ring->cnsmr_idx)) { - netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, - "%s: Servicing TX completion ring %d.\n", - __func__, trx_ring->cq_id); - ql_clean_outbound_rx_ring(trx_ring); - } - } - - /* - * Now service the RSS ring if it's active. 
- */ - if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != - rx_ring->cnsmr_idx) { - netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, - "%s: Servicing RX completion ring %d.\n", - __func__, rx_ring->cq_id); - work_done = ql_clean_inbound_rx_ring(rx_ring, budget); - } - - if (work_done < budget) { - napi_complete(napi); - ql_enable_completion_interrupt(qdev, rx_ring->irq); - } - return work_done; -} - -static void qlge_vlan_mode(struct net_device *ndev, u32 features) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - if (features & NETIF_F_HW_VLAN_RX) { - netif_printk(qdev, ifup, KERN_DEBUG, ndev, - "Turning on VLAN in NIC_RCV_CFG.\n"); - ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | - NIC_RCV_CFG_VLAN_MATCH_AND_NON); - } else { - netif_printk(qdev, ifup, KERN_DEBUG, ndev, - "Turning off VLAN in NIC_RCV_CFG.\n"); - ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); - } -} - -static u32 qlge_fix_features(struct net_device *ndev, u32 features) -{ - /* - * Since there is no support for separate rx/tx vlan accel - * enable/disable make sure tx flag is always in same state as rx. - */ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; - else - features &= ~NETIF_F_HW_VLAN_TX; - - return features; -} - -static int qlge_set_features(struct net_device *ndev, u32 features) -{ - u32 changed = ndev->features ^ features; - - if (changed & NETIF_F_HW_VLAN_RX) - qlge_vlan_mode(ndev, features); - - return 0; -} - -static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) -{ - u32 enable_bit = MAC_ADDR_E; - - if (ql_set_mac_addr_reg - (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init vlan address.\n"); - } -} - -static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int status; - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return; - - __qlge_vlan_rx_add_vid(qdev, vid); - set_bit(vid, qdev->active_vlans); - - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); -} - -static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) -{ - u32 enable_bit = 0; - - if (ql_set_mac_addr_reg - (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to clear vlan address.\n"); - } -} - -static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int status; - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return; - - __qlge_vlan_rx_kill_vid(qdev, vid); - clear_bit(vid, qdev->active_vlans); - - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); -} - -static void qlge_restore_vlan(struct ql_adapter *qdev) -{ - int status; - u16 vid; - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return; - - for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID) - __qlge_vlan_rx_add_vid(qdev, vid); - - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); -} - -/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */ -static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) -{ - struct rx_ring *rx_ring = dev_id; - napi_schedule(&rx_ring->napi); - return IRQ_HANDLED; -} - -/* This handles a fatal error, MPI activity, and the default - * rx_ring in an MSI-X multiple vector environment. - * In MSI/Legacy environment it also process the rest of - * the rx_rings. 
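- * The handler first checks that the interrupt is actually ours, then
- * looks at the fatal-error and MPI status bits before scheduling NAPI
- * for the default ring.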
- */ -static irqreturn_t qlge_isr(int irq, void *dev_id) -{ - struct rx_ring *rx_ring = dev_id; - struct ql_adapter *qdev = rx_ring->qdev; - struct intr_context *intr_context = &qdev->intr_context[0]; - u32 var; - int work_done = 0; - - spin_lock(&qdev->hw_lock); - if (atomic_read(&qdev->intr_context[0].irq_cnt)) { - netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, - "Shared Interrupt, Not ours!\n"); - spin_unlock(&qdev->hw_lock); - return IRQ_NONE; - } - spin_unlock(&qdev->hw_lock); - - var = ql_disable_completion_interrupt(qdev, intr_context->intr); - - /* - * Check for fatal error. - */ - if (var & STS_FE) { - ql_queue_asic_error(qdev); - netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var); - var = ql_read32(qdev, ERR_STS); - netdev_err(qdev->ndev, "Resetting chip. " - "Error Status Register = 0x%x\n", var); - return IRQ_HANDLED; - } - - /* - * Check MPI processor activity. - */ - if ((var & STS_PI) && - (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { - /* - * We've got an async event or mailbox completion. - * Handle it and clear the source of the interrupt. - */ - netif_err(qdev, intr, qdev->ndev, - "Got MPI processor interrupt.\n"); - ql_disable_completion_interrupt(qdev, intr_context->intr); - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); - queue_delayed_work_on(smp_processor_id(), - qdev->workqueue, &qdev->mpi_work, 0); - work_done++; - } - - /* - * Get the bit-mask that shows the active queues for this - * pass. Compare it to the queues that this irq services - * and call napi if there's a match. - */ - var = ql_read32(qdev, ISR1); - if (var & intr_context->irq_mask) { - netif_info(qdev, intr, qdev->ndev, - "Waking handler for rx_ring[0].\n"); - ql_disable_completion_interrupt(qdev, intr_context->intr); - napi_schedule(&rx_ring->napi); - work_done++; - } - ql_enable_completion_interrupt(qdev, intr_context->intr); - return work_done ? 
                       IRQ_HANDLED : IRQ_NONE;
-}
-
-static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
-{
-
-    if (skb_is_gso(skb)) {
-        int err;
-        if (skb_header_cloned(skb)) {
-            err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-            if (err)
-                return err;
-        }
-
-        mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
-        mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
-        mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
-        mac_iocb_ptr->total_hdrs_len =
-            cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
-        mac_iocb_ptr->net_trans_offset =
-            cpu_to_le16(skb_network_offset(skb) |
-                        skb_transport_offset(skb)
-                        << OB_MAC_TRANSPORT_HDR_SHIFT);
-        mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
-        mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
-        if (likely(skb->protocol == htons(ETH_P_IP))) {
-            struct iphdr *iph = ip_hdr(skb);
-            iph->check = 0;
-            mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
-            tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                     iph->daddr, 0,
-                                                     IPPROTO_TCP,
-                                                     0);
-        } else if (skb->protocol == htons(ETH_P_IPV6)) {
-            mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
-            tcp_hdr(skb)->check =
-                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                 &ipv6_hdr(skb)->daddr,
-                                 0, IPPROTO_TCP, 0);
-        }
-        return 1;
-    }
-    return 0;
-}
-
-static void ql_hw_csum_setup(struct sk_buff *skb,
-                             struct ob_mac_tso_iocb_req *mac_iocb_ptr)
-{
-    int len;
-    struct iphdr *iph = ip_hdr(skb);
-    __sum16 *check;
-    mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
-    mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
-    mac_iocb_ptr->net_trans_offset =
-        cpu_to_le16(skb_network_offset(skb) |
-                    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
-
-    mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
-    len = (ntohs(iph->tot_len) - (iph->ihl << 2));
-    if (likely(iph->protocol == IPPROTO_TCP)) {
-        check = &(tcp_hdr(skb)->check);
-        mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
-        mac_iocb_ptr->total_hdrs_len =
-            cpu_to_le16(skb_transport_offset(skb) +
-                        (tcp_hdr(skb)->doff << 2));
-    } else {
-        check = &(udp_hdr(skb)->check);
-        mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
-        mac_iocb_ptr->total_hdrs_len =
-            cpu_to_le16(skb_transport_offset(skb) +
-                        sizeof(struct udphdr));
-    }
-    *check = ~csum_tcpudp_magic(iph->saddr,
-                                iph->daddr, len, iph->protocol, 0);
-}
-
-static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
-{
-    struct tx_ring_desc *tx_ring_desc;
-    struct ob_mac_iocb_req *mac_iocb_ptr;
-    struct ql_adapter *qdev = netdev_priv(ndev);
-    int tso;
-    struct tx_ring *tx_ring;
-    u32 tx_ring_idx = (u32) skb->queue_mapping;
-
-    tx_ring = &qdev->tx_ring[tx_ring_idx];
-
-    if (skb_padto(skb, ETH_ZLEN))
-        return NETDEV_TX_OK;
-
-    if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
-        netif_info(qdev, tx_queued, qdev->ndev,
-                   "%s: shutting down tx queue %d due to lack of resources.\n",
-                   __func__, tx_ring_idx);
-        netif_stop_subqueue(ndev, tx_ring->wq_id);
-        atomic_inc(&tx_ring->queue_stopped);
-        tx_ring->tx_errors++;
-        return NETDEV_TX_BUSY;
-    }
-    tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
-    mac_iocb_ptr = tx_ring_desc->queue_entry;
-    memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
-
-    mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
-    mac_iocb_ptr->tid = tx_ring_desc->index;
-    /* We use the upper 32-bits to store the tx queue for this IO.
-     * When we get the completion we can use it to establish the context.
- */ - mac_iocb_ptr->txq_idx = tx_ring_idx; - tx_ring_desc->skb = skb; - - mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); - - if (vlan_tx_tag_present(skb)) { - netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, - "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); - mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; - mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); - } - tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); - if (tso < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) { - ql_hw_csum_setup(skb, - (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); - } - if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != - NETDEV_TX_OK) { - netif_err(qdev, tx_queued, qdev->ndev, - "Could not map the segments.\n"); - tx_ring->tx_errors++; - return NETDEV_TX_BUSY; - } - QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); - tx_ring->prod_idx++; - if (tx_ring->prod_idx == tx_ring->wq_len) - tx_ring->prod_idx = 0; - wmb(); - - ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); - netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, - "tx queued, slot %d, len %d\n", - tx_ring->prod_idx, skb->len); - - atomic_dec(&tx_ring->tx_count); - return NETDEV_TX_OK; -} - - -static void ql_free_shadow_space(struct ql_adapter *qdev) -{ - if (qdev->rx_ring_shadow_reg_area) { - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->rx_ring_shadow_reg_area, - qdev->rx_ring_shadow_reg_dma); - qdev->rx_ring_shadow_reg_area = NULL; - } - if (qdev->tx_ring_shadow_reg_area) { - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->tx_ring_shadow_reg_area, - qdev->tx_ring_shadow_reg_dma); - qdev->tx_ring_shadow_reg_area = NULL; - } -} - -static int ql_alloc_shadow_space(struct ql_adapter *qdev) -{ - qdev->rx_ring_shadow_reg_area = - pci_alloc_consistent(qdev->pdev, - PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); - if (qdev->rx_ring_shadow_reg_area == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Allocation of RX shadow space failed.\n"); - return -ENOMEM; - } - memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE); - qdev->tx_ring_shadow_reg_area = - pci_alloc_consistent(qdev->pdev, PAGE_SIZE, - &qdev->tx_ring_shadow_reg_dma); - if (qdev->tx_ring_shadow_reg_area == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Allocation of TX shadow space failed.\n"); - goto err_wqp_sh_area; - } - memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE); - return 0; - -err_wqp_sh_area: - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->rx_ring_shadow_reg_area, - qdev->rx_ring_shadow_reg_dma); - return -ENOMEM; -} - -static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) -{ - struct tx_ring_desc *tx_ring_desc; - int i; - struct ob_mac_iocb_req *mac_iocb_ptr; - - mac_iocb_ptr = tx_ring->wq_base; - tx_ring_desc = tx_ring->q; - for (i = 0; i < tx_ring->wq_len; i++) { - tx_ring_desc->index = i; - tx_ring_desc->skb = NULL; - tx_ring_desc->queue_entry = mac_iocb_ptr; - mac_iocb_ptr++; - tx_ring_desc++; - } - atomic_set(&tx_ring->tx_count, tx_ring->wq_len); - atomic_set(&tx_ring->queue_stopped, 0); -} - -static void ql_free_tx_resources(struct ql_adapter *qdev, - struct tx_ring *tx_ring) -{ - if (tx_ring->wq_base) { - pci_free_consistent(qdev->pdev, tx_ring->wq_size, - tx_ring->wq_base, tx_ring->wq_base_dma); - tx_ring->wq_base = NULL; - } - kfree(tx_ring->q); - tx_ring->q = NULL; -} - -static int ql_alloc_tx_resources(struct ql_adapter *qdev, - struct tx_ring *tx_ring) -{ - tx_ring->wq_base = - pci_alloc_consistent(qdev->pdev, 
tx_ring->wq_size, - &tx_ring->wq_base_dma); - - if ((tx_ring->wq_base == NULL) || - tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { - netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); - return -ENOMEM; - } - tx_ring->q = - kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); - if (tx_ring->q == NULL) - goto err; - - return 0; -err: - pci_free_consistent(qdev->pdev, tx_ring->wq_size, - tx_ring->wq_base, tx_ring->wq_base_dma); - return -ENOMEM; -} - -static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - struct bq_desc *lbq_desc; - - uint32_t curr_idx, clean_idx; - - curr_idx = rx_ring->lbq_curr_idx; - clean_idx = rx_ring->lbq_clean_idx; - while (curr_idx != clean_idx) { - lbq_desc = &rx_ring->lbq[curr_idx]; - - if (lbq_desc->p.pg_chunk.last_flag) { - pci_unmap_page(qdev->pdev, - lbq_desc->p.pg_chunk.map, - ql_lbq_block_size(qdev), - PCI_DMA_FROMDEVICE); - lbq_desc->p.pg_chunk.last_flag = 0; - } - - put_page(lbq_desc->p.pg_chunk.page); - lbq_desc->p.pg_chunk.page = NULL; - - if (++curr_idx == rx_ring->lbq_len) - curr_idx = 0; - - } -} - -static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - int i; - struct bq_desc *sbq_desc; - - for (i = 0; i < rx_ring->sbq_len; i++) { - sbq_desc = &rx_ring->sbq[i]; - if (sbq_desc == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "sbq_desc %d is NULL.\n", i); - return; - } - if (sbq_desc->p.skb) { - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - dev_kfree_skb(sbq_desc->p.skb); - sbq_desc->p.skb = NULL; - } - } -} - -/* Free all large and small rx buffers associated - * with the completion queues for this device. - */ -static void ql_free_rx_buffers(struct ql_adapter *qdev) -{ - int i; - struct rx_ring *rx_ring; - - for (i = 0; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - if (rx_ring->lbq) - ql_free_lbq_buffers(qdev, rx_ring); - if (rx_ring->sbq) - ql_free_sbq_buffers(qdev, rx_ring); - } -} - -static void ql_alloc_rx_buffers(struct ql_adapter *qdev) -{ - struct rx_ring *rx_ring; - int i; - - for (i = 0; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - if (rx_ring->type != TX_Q) - ql_update_buffer_queues(qdev, rx_ring); - } -} - -static void ql_init_lbq_ring(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - int i; - struct bq_desc *lbq_desc; - __le64 *bq = rx_ring->lbq_base; - - memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc)); - for (i = 0; i < rx_ring->lbq_len; i++) { - lbq_desc = &rx_ring->lbq[i]; - memset(lbq_desc, 0, sizeof(*lbq_desc)); - lbq_desc->index = i; - lbq_desc->addr = bq; - bq++; - } -} - -static void ql_init_sbq_ring(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - int i; - struct bq_desc *sbq_desc; - __le64 *bq = rx_ring->sbq_base; - - memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc)); - for (i = 0; i < rx_ring->sbq_len; i++) { - sbq_desc = &rx_ring->sbq[i]; - memset(sbq_desc, 0, sizeof(*sbq_desc)); - sbq_desc->index = i; - sbq_desc->addr = bq; - bq++; - } -} - -static void ql_free_rx_resources(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - /* Free the small buffer queue. */ - if (rx_ring->sbq_base) { - pci_free_consistent(qdev->pdev, - rx_ring->sbq_size, - rx_ring->sbq_base, rx_ring->sbq_base_dma); - rx_ring->sbq_base = NULL; - } - - /* Free the small buffer queue control blocks. */ - kfree(rx_ring->sbq); - rx_ring->sbq = NULL; - - /* Free the large buffer queue. 
*/ - if (rx_ring->lbq_base) { - pci_free_consistent(qdev->pdev, - rx_ring->lbq_size, - rx_ring->lbq_base, rx_ring->lbq_base_dma); - rx_ring->lbq_base = NULL; - } - - /* Free the large buffer queue control blocks. */ - kfree(rx_ring->lbq); - rx_ring->lbq = NULL; - - /* Free the rx queue. */ - if (rx_ring->cq_base) { - pci_free_consistent(qdev->pdev, - rx_ring->cq_size, - rx_ring->cq_base, rx_ring->cq_base_dma); - rx_ring->cq_base = NULL; - } -} - -/* Allocate queues and buffers for this completions queue based - * on the values in the parameter structure. */ -static int ql_alloc_rx_resources(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - - /* - * Allocate the completion queue for this rx_ring. - */ - rx_ring->cq_base = - pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, - &rx_ring->cq_base_dma); - - if (rx_ring->cq_base == NULL) { - netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); - return -ENOMEM; - } - - if (rx_ring->sbq_len) { - /* - * Allocate small buffer queue. - */ - rx_ring->sbq_base = - pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, - &rx_ring->sbq_base_dma); - - if (rx_ring->sbq_base == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Small buffer queue allocation failed.\n"); - goto err_mem; - } - - /* - * Allocate small buffer queue control blocks. - */ - rx_ring->sbq = - kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc), - GFP_KERNEL); - if (rx_ring->sbq == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Small buffer queue control block allocation failed.\n"); - goto err_mem; - } - - ql_init_sbq_ring(qdev, rx_ring); - } - - if (rx_ring->lbq_len) { - /* - * Allocate large buffer queue. - */ - rx_ring->lbq_base = - pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, - &rx_ring->lbq_base_dma); - - if (rx_ring->lbq_base == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Large buffer queue allocation failed.\n"); - goto err_mem; - } - /* - * Allocate large buffer queue control blocks. - */ - rx_ring->lbq = - kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc), - GFP_KERNEL); - if (rx_ring->lbq == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Large buffer queue control block allocation failed.\n"); - goto err_mem; - } - - ql_init_lbq_ring(qdev, rx_ring); - } - - return 0; - -err_mem: - ql_free_rx_resources(qdev, rx_ring); - return -ENOMEM; -} - -static void ql_tx_ring_clean(struct ql_adapter *qdev) -{ - struct tx_ring *tx_ring; - struct tx_ring_desc *tx_ring_desc; - int i, j; - - /* - * Loop through all queues and free - * any resources. - */ - for (j = 0; j < qdev->tx_ring_count; j++) { - tx_ring = &qdev->tx_ring[j]; - for (i = 0; i < tx_ring->wq_len; i++) { - tx_ring_desc = &tx_ring->q[i]; - if (tx_ring_desc && tx_ring_desc->skb) { - netif_err(qdev, ifdown, qdev->ndev, - "Freeing lost SKB %p, from queue %d, index %d.\n", - tx_ring_desc->skb, j, - tx_ring_desc->index); - ql_unmap_send(qdev, tx_ring_desc, - tx_ring_desc->map_cnt); - dev_kfree_skb(tx_ring_desc->skb); - tx_ring_desc->skb = NULL; - } - } - } -} - -static void ql_free_mem_resources(struct ql_adapter *qdev) -{ - int i; - - for (i = 0; i < qdev->tx_ring_count; i++) - ql_free_tx_resources(qdev, &qdev->tx_ring[i]); - for (i = 0; i < qdev->rx_ring_count; i++) - ql_free_rx_resources(qdev, &qdev->rx_ring[i]); - ql_free_shadow_space(qdev); -} - -static int ql_alloc_mem_resources(struct ql_adapter *qdev) -{ - int i; - - /* Allocate space for our shadow registers and such. 
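-     * The shadow area holds the completion-queue producer indices the
-     * chip DMAs to host memory, plus the indirect page lists for the
-     * large and small buffer queues.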
*/ - if (ql_alloc_shadow_space(qdev)) - return -ENOMEM; - - for (i = 0; i < qdev->rx_ring_count; i++) { - if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { - netif_err(qdev, ifup, qdev->ndev, - "RX resource allocation failed.\n"); - goto err_mem; - } - } - /* Allocate tx queue resources */ - for (i = 0; i < qdev->tx_ring_count; i++) { - if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { - netif_err(qdev, ifup, qdev->ndev, - "TX resource allocation failed.\n"); - goto err_mem; - } - } - return 0; - -err_mem: - ql_free_mem_resources(qdev); - return -ENOMEM; -} - -/* Set up the rx ring control block and pass it to the chip. - * The control block is defined as - * "Completion Queue Initialization Control Block", or cqicb. - */ -static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - struct cqicb *cqicb = &rx_ring->cqicb; - void *shadow_reg = qdev->rx_ring_shadow_reg_area + - (rx_ring->cq_id * RX_RING_SHADOW_SPACE); - u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + - (rx_ring->cq_id * RX_RING_SHADOW_SPACE); - void __iomem *doorbell_area = - qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); - int err = 0; - u16 bq_len; - u64 tmp; - __le64 *base_indirect_ptr; - int page_entries; - - /* Set up the shadow registers for this ring. */ - rx_ring->prod_idx_sh_reg = shadow_reg; - rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; - *rx_ring->prod_idx_sh_reg = 0; - shadow_reg += sizeof(u64); - shadow_reg_dma += sizeof(u64); - rx_ring->lbq_base_indirect = shadow_reg; - rx_ring->lbq_base_indirect_dma = shadow_reg_dma; - shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); - shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); - rx_ring->sbq_base_indirect = shadow_reg; - rx_ring->sbq_base_indirect_dma = shadow_reg_dma; - - /* PCI doorbell mem area + 0x00 for consumer index register */ - rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area; - rx_ring->cnsmr_idx = 0; - rx_ring->curr_entry = rx_ring->cq_base; - - /* PCI doorbell mem area + 0x04 for valid register */ - rx_ring->valid_db_reg = doorbell_area + 0x04; - - /* PCI doorbell mem area + 0x18 for large buffer consumer */ - rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18); - - /* PCI doorbell mem area + 0x1c */ - rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c); - - memset((void *)cqicb, 0, sizeof(struct cqicb)); - cqicb->msix_vect = rx_ring->irq; - - bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; - cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); - - cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma); - - cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma); - - /* - * Set up the control block load flags. - */ - cqicb->flags = FLAGS_LC | /* Load queue base address */ - FLAGS_LV | /* Load MSI-X vector */ - FLAGS_LI; /* Load irq delay values */ - if (rx_ring->lbq_len) { - cqicb->flags |= FLAGS_LL; /* Load lbq values */ - tmp = (u64)rx_ring->lbq_base_dma; - base_indirect_ptr = rx_ring->lbq_base_indirect; - page_entries = 0; - do { - *base_indirect_ptr = cpu_to_le64(tmp); - tmp += DB_PAGE_SIZE; - base_indirect_ptr++; - page_entries++; - } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); - cqicb->lbq_addr = - cpu_to_le64(rx_ring->lbq_base_indirect_dma); - bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : - (u16) rx_ring->lbq_buf_size; - cqicb->lbq_buf_size = cpu_to_le16(bq_len); - bq_len = (rx_ring->lbq_len == 65536) ? 
0 : - (u16) rx_ring->lbq_len; - cqicb->lbq_len = cpu_to_le16(bq_len); - rx_ring->lbq_prod_idx = 0; - rx_ring->lbq_curr_idx = 0; - rx_ring->lbq_clean_idx = 0; - rx_ring->lbq_free_cnt = rx_ring->lbq_len; - } - if (rx_ring->sbq_len) { - cqicb->flags |= FLAGS_LS; /* Load sbq values */ - tmp = (u64)rx_ring->sbq_base_dma; - base_indirect_ptr = rx_ring->sbq_base_indirect; - page_entries = 0; - do { - *base_indirect_ptr = cpu_to_le64(tmp); - tmp += DB_PAGE_SIZE; - base_indirect_ptr++; - page_entries++; - } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len)); - cqicb->sbq_addr = - cpu_to_le64(rx_ring->sbq_base_indirect_dma); - cqicb->sbq_buf_size = - cpu_to_le16((u16)(rx_ring->sbq_buf_size)); - bq_len = (rx_ring->sbq_len == 65536) ? 0 : - (u16) rx_ring->sbq_len; - cqicb->sbq_len = cpu_to_le16(bq_len); - rx_ring->sbq_prod_idx = 0; - rx_ring->sbq_curr_idx = 0; - rx_ring->sbq_clean_idx = 0; - rx_ring->sbq_free_cnt = rx_ring->sbq_len; - } - switch (rx_ring->type) { - case TX_Q: - cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); - cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); - break; - case RX_Q: - /* Inbound completion handling rx_rings run in - * separate NAPI contexts. - */ - netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, - 64); - cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); - cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); - break; - default: - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Invalid rx_ring->type = %d.\n", rx_ring->type); - } - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Initializing rx work queue.\n"); - err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), - CFG_LCQ, rx_ring->cq_id); - if (err) { - netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n"); - return err; - } - return err; -} - -static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) -{ - struct wqicb *wqicb = (struct wqicb *)tx_ring; - void __iomem *doorbell_area = - qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); - void *shadow_reg = qdev->tx_ring_shadow_reg_area + - (tx_ring->wq_id * sizeof(u64)); - u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + - (tx_ring->wq_id * sizeof(u64)); - int err = 0; - - /* - * Assign doorbell registers for this tx_ring. - */ - /* TX PCI doorbell mem area for tx producer index */ - tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area; - tx_ring->prod_idx = 0; - /* TX PCI doorbell mem area + 0x04 */ - tx_ring->valid_db_reg = doorbell_area + 0x04; - - /* - * Assign shadow registers for this tx_ring. 
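The two fill loops above walk a buffer queue's DMA area in DB_PAGE_SIZE strides and record each page's base address in the indirection table the chip follows. A minimal userspace sketch of that bookkeeping, assuming a 4 KiB doorbell page and a 4096-entry queue of 64-bit addresses; the macro here is a simplified stand-in for the driver's own MAX_DB_PAGES_PER_BQ in qlge.h, not the real definition:

/* Illustrative only: mirrors the do/while indirection fill above. */
#include <stdio.h>
#include <stdint.h>

#define DB_PAGE_SIZE 4096
#define MAX_DB_PAGES_PER_BQ(x) (((x) * sizeof(uint64_t)) / DB_PAGE_SIZE)

int main(void)
{
	uint64_t base = 0x100000;	/* hypothetical DMA base address */
	uint64_t indirect[MAX_DB_PAGES_PER_BQ(4096)];
	unsigned int i, pages = MAX_DB_PAGES_PER_BQ(4096);

	for (i = 0; i < pages; i++) {
		indirect[i] = base;	/* the driver stores cpu_to_le64(base) */
		base += DB_PAGE_SIZE;
	}
	for (i = 0; i < pages; i++)
		printf("db page %u -> 0x%llx\n", i,
		       (unsigned long long)indirect[i]);
	return 0;
}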
- */ - tx_ring->cnsmr_idx_sh_reg = shadow_reg; - tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; - - wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); - wqicb->flags = cpu_to_le16(Q_FLAGS_LC | - Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); - wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); - wqicb->rid = 0; - wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma); - - wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); - - ql_init_tx_ring(qdev, tx_ring); - - err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, - (u16) tx_ring->wq_id); - if (err) { - netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n"); - return err; - } - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Successfully loaded WQICB.\n"); - return err; -} - -static void ql_disable_msix(struct ql_adapter *qdev) -{ - if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { - pci_disable_msix(qdev->pdev); - clear_bit(QL_MSIX_ENABLED, &qdev->flags); - kfree(qdev->msi_x_entry); - qdev->msi_x_entry = NULL; - } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) { - pci_disable_msi(qdev->pdev); - clear_bit(QL_MSI_ENABLED, &qdev->flags); - } -} - -/* We start by trying to get the number of vectors - * stored in qdev->intr_count. If we don't get that - * many then we reduce the count and try again. - */ -static void ql_enable_msix(struct ql_adapter *qdev) -{ - int i, err; - - /* Get the MSIX vectors. */ - if (qlge_irq_type == MSIX_IRQ) { - /* Try to alloc space for the msix struct, - * if it fails then go to MSI/legacy. - */ - qdev->msi_x_entry = kcalloc(qdev->intr_count, - sizeof(struct msix_entry), - GFP_KERNEL); - if (!qdev->msi_x_entry) { - qlge_irq_type = MSI_IRQ; - goto msi; - } - - for (i = 0; i < qdev->intr_count; i++) - qdev->msi_x_entry[i].entry = i; - - /* Loop to get our vectors. We start with - * what we want and settle for what we get. - */ - do { - err = pci_enable_msix(qdev->pdev, - qdev->msi_x_entry, qdev->intr_count); - if (err > 0) - qdev->intr_count = err; - } while (err > 0); - - if (err < 0) { - kfree(qdev->msi_x_entry); - qdev->msi_x_entry = NULL; - netif_warn(qdev, ifup, qdev->ndev, - "MSI-X Enable failed, trying MSI.\n"); - qdev->intr_count = 1; - qlge_irq_type = MSI_IRQ; - } else if (err == 0) { - set_bit(QL_MSIX_ENABLED, &qdev->flags); - netif_info(qdev, ifup, qdev->ndev, - "MSI-X Enabled, got %d vectors.\n", - qdev->intr_count); - return; - } - } -msi: - qdev->intr_count = 1; - if (qlge_irq_type == MSI_IRQ) { - if (!pci_enable_msi(qdev->pdev)) { - set_bit(QL_MSI_ENABLED, &qdev->flags); - netif_info(qdev, ifup, qdev->ndev, - "Running with MSI interrupts.\n"); - return; - } - } - qlge_irq_type = LEG_IRQ; - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Running with legacy interrupts.\n"); -} - -/* Each vector services 1 RSS ring and and 1 or more - * TX completion rings. This function loops through - * the TX completion rings and assigns the vector that - * will service it. An example would be if there are - * 2 vectors (so 2 RSS rings) and 8 TX completion rings. - * This would mean that vector 0 would service RSS ring 0 - * and TX completion rings 0,1,2 and 3. Vector 1 would - * service RSS ring 1 and TX completion rings 4,5,6 and 7. 
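ql_enable_msix() above relies on the old pci_enable_msix() contract, where a positive return value means "only this many vectors are available" and the caller retries with the smaller count. A hedged, standalone sketch of that ask-then-settle loop; fake_pci_enable_msix() and the counts are stand-ins, not driver or PCI-core code:

#include <stdio.h>

/* Pretend PCI core: can grant at most 4 vectors. */
static int fake_pci_enable_msix(int requested)
{
	int available = 4;
	return requested > available ? available : 0;
}

int main(void)
{
	int intr_count = 16;		/* start with what we want... */
	int err;

	do {
		err = fake_pci_enable_msix(intr_count);
		if (err > 0)
			intr_count = err;   /* ...settle for what we get */
	} while (err > 0);

	if (err == 0)
		printf("MSI-X enabled with %d vectors\n", intr_count);
	else
		printf("falling back to MSI/legacy (%d)\n", err);
	return 0;
}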
- */ -static void ql_set_tx_vect(struct ql_adapter *qdev) -{ - int i, j, vect; - u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; - - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { - /* Assign irq vectors to TX rx_rings.*/ - for (vect = 0, j = 0, i = qdev->rss_ring_count; - i < qdev->rx_ring_count; i++) { - if (j == tx_rings_per_vector) { - vect++; - j = 0; - } - qdev->rx_ring[i].irq = vect; - j++; - } - } else { - /* For single vector all rings have an irq - * of zero. - */ - for (i = 0; i < qdev->rx_ring_count; i++) - qdev->rx_ring[i].irq = 0; - } -} - -/* Set the interrupt mask for this vector. Each vector - * will service 1 RSS ring and 1 or more TX completion - * rings. This function sets up a bit mask per vector - * that indicates which rings it services. - */ -static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx) -{ - int j, vect = ctx->intr; - u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; - - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { - /* Add the RSS ring serviced by this vector - * to the mask. - */ - ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id); - /* Add the TX ring(s) serviced by this vector - * to the mask. */ - for (j = 0; j < tx_rings_per_vector; j++) { - ctx->irq_mask |= - (1 << qdev->rx_ring[qdev->rss_ring_count + - (vect * tx_rings_per_vector) + j].cq_id); - } - } else { - /* For single vector we just shift each queue's - * ID into the mask. - */ - for (j = 0; j < qdev->rx_ring_count; j++) - ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id); - } -} - -/* - * Here we build the intr_context structures based on - * our rx_ring count and intr vector count. - * The intr_context structure is used to hook each vector - * to possibly different handlers. - */ -static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) -{ - int i = 0; - struct intr_context *intr_context = &qdev->intr_context[0]; - - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { - /* Each rx_ring has it's - * own intr_context since we have separate - * vectors for each queue. - */ - for (i = 0; i < qdev->intr_count; i++, intr_context++) { - qdev->rx_ring[i].irq = i; - intr_context->intr = i; - intr_context->qdev = qdev; - /* Set up this vector's bit-mask that indicates - * which queues it services. - */ - ql_set_irq_mask(qdev, intr_context); - /* - * We set up each vectors enable/disable/read bits so - * there's no bit/mask calculations in the critical path. - */ - intr_context->intr_en_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | - INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD - | i; - intr_context->intr_dis_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | - INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | - INTR_EN_IHD | i; - intr_context->intr_read_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | - INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD | - i; - if (i == 0) { - /* The first vector/queue handles - * broadcast/multicast, fatal errors, - * and firmware events. This in addition - * to normal inbound NAPI processing. - */ - intr_context->handler = qlge_isr; - sprintf(intr_context->name, "%s-rx-%d", - qdev->ndev->name, i); - } else { - /* - * Inbound queues handle unicast frames only. - */ - intr_context->handler = qlge_msix_rx_isr; - sprintf(intr_context->name, "%s-rx-%d", - qdev->ndev->name, i); - } - } - } else { - /* - * All rx_rings use the same intr_context since - * there is only one vector. 
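ql_set_tx_vect() and ql_set_irq_mask() above share one piece of arithmetic: vector v owns RSS ring v (whose cq_id equals v) plus a contiguous slice of tx_rings_per_vector TX completion rings starting at cq_id rss_ring_count + v * tx_rings_per_vector, and the per-vector irq_mask gets one bit per serviced cq_id. A small compilable demo of that mapping, using the made-up counts from the comment above (2 vectors, 2 RSS rings, 8 TX rings):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int intr_count = 2, rss_ring_count = 2, tx_ring_count = 8;
	int tx_rings_per_vector = tx_ring_count / intr_count;
	int vect, j;

	for (vect = 0; vect < intr_count; vect++) {
		uint32_t irq_mask = 1u << vect;	/* the RSS ring's cq_id */

		for (j = 0; j < tx_rings_per_vector; j++) {
			int cq_id = rss_ring_count +
				    vect * tx_rings_per_vector + j;
			irq_mask |= 1u << cq_id;
		}
		printf("vector %d: irq_mask = 0x%08x\n", vect, irq_mask);
	}
	return 0;
}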
- */ - intr_context->intr = 0; - intr_context->qdev = qdev; - /* - * We set up each vectors enable/disable/read bits so - * there's no bit/mask calculations in the critical path. - */ - intr_context->intr_en_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE; - intr_context->intr_dis_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | - INTR_EN_TYPE_DISABLE; - intr_context->intr_read_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; - /* - * Single interrupt means one handler for all rings. - */ - intr_context->handler = qlge_isr; - sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name); - /* Set up this vector's bit-mask that indicates - * which queues it services. In this case there is - * a single vector so it will service all RSS and - * TX completion rings. - */ - ql_set_irq_mask(qdev, intr_context); - } - /* Tell the TX completion rings which MSIx vector - * they will be using. - */ - ql_set_tx_vect(qdev); -} - -static void ql_free_irq(struct ql_adapter *qdev) -{ - int i; - struct intr_context *intr_context = &qdev->intr_context[0]; - - for (i = 0; i < qdev->intr_count; i++, intr_context++) { - if (intr_context->hooked) { - if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { - free_irq(qdev->msi_x_entry[i].vector, - &qdev->rx_ring[i]); - netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev, - "freeing msix interrupt %d.\n", i); - } else { - free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); - netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev, - "freeing msi interrupt %d.\n", i); - } - } - } - ql_disable_msix(qdev); -} - -static int ql_request_irq(struct ql_adapter *qdev) -{ - int i; - int status = 0; - struct pci_dev *pdev = qdev->pdev; - struct intr_context *intr_context = &qdev->intr_context[0]; - - ql_resolve_queues_to_irqs(qdev); - - for (i = 0; i < qdev->intr_count; i++, intr_context++) { - atomic_set(&intr_context->irq_cnt, 0); - if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { - status = request_irq(qdev->msi_x_entry[i].vector, - intr_context->handler, - 0, - intr_context->name, - &qdev->rx_ring[i]); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed request for MSIX interrupt %d.\n", - i); - goto err_irq; - } else { - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Hooked intr %d, queue type %s, with name %s.\n", - i, - qdev->rx_ring[i].type == DEFAULT_Q ? - "DEFAULT_Q" : - qdev->rx_ring[i].type == TX_Q ? - "TX_Q" : - qdev->rx_ring[i].type == RX_Q ? - "RX_Q" : "", - intr_context->name); - } - } else { - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "trying msi or legacy interrupts.\n"); - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "%s: irq = %d.\n", __func__, pdev->irq); - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "%s: context->name = %s.\n", __func__, - intr_context->name); - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "%s: dev_id = 0x%p.\n", __func__, - &qdev->rx_ring[0]); - status = - request_irq(pdev->irq, qlge_isr, - test_bit(QL_MSI_ENABLED, - &qdev-> - flags) ? 0 : IRQF_SHARED, - intr_context->name, &qdev->rx_ring[0]); - if (status) - goto err_irq; - - netif_err(qdev, ifup, qdev->ndev, - "Hooked intr %d, queue type %s, with name %s.\n", - i, - qdev->rx_ring[0].type == DEFAULT_Q ? - "DEFAULT_Q" : - qdev->rx_ring[0].type == TX_Q ? "TX_Q" : - qdev->rx_ring[0].type == RX_Q ? 
"RX_Q" : "", - intr_context->name); - } - intr_context->hooked = 1; - } - return status; -err_irq: - netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n"); - ql_free_irq(qdev); - return status; -} - -static int ql_start_rss(struct ql_adapter *qdev) -{ - static const u8 init_hash_seed[] = { - 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, - 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, - 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, - 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, - 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa - }; - struct ricb *ricb = &qdev->ricb; - int status = 0; - int i; - u8 *hash_id = (u8 *) ricb->hash_cq_id; - - memset((void *)ricb, 0, sizeof(*ricb)); - - ricb->base_cq = RSS_L4K; - ricb->flags = - (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6); - ricb->mask = cpu_to_le16((u16)(0x3ff)); - - /* - * Fill out the Indirection Table. - */ - for (i = 0; i < 1024; i++) - hash_id[i] = (i & (qdev->rss_ring_count - 1)); - - memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); - memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); - - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n"); - - status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); - return status; - } - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Successfully loaded RICB.\n"); - return status; -} - -static int ql_clear_routing_entries(struct ql_adapter *qdev) -{ - int i, status = 0; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return status; - /* Clear all the entries in the routing table. */ - for (i = 0; i < 16; i++) { - status = ql_set_routing_reg(qdev, i, 0, 0); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init routing register for CAM packets.\n"); - break; - } - } - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); - return status; -} - -/* Initialize the frame-to-queue routing. */ -static int ql_route_initialize(struct ql_adapter *qdev) -{ - int status = 0; - - /* Clear all the entries in the routing table. */ - status = ql_clear_routing_entries(qdev); - if (status) - return status; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return status; - - status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT, - RT_IDX_IP_CSUM_ERR, 1); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init routing register " - "for IP CSUM error packets.\n"); - goto exit; - } - status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT, - RT_IDX_TU_CSUM_ERR, 1); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init routing register " - "for TCP/UDP CSUM error packets.\n"); - goto exit; - } - status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init routing register for broadcast packets.\n"); - goto exit; - } - /* If we have more than one inbound queue, then turn on RSS in the - * routing block. 
- */ - if (qdev->rss_ring_count > 1) { - status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, - RT_IDX_RSS_MATCH, 1); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init routing register for MATCH RSS packets.\n"); - goto exit; - } - } - - status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, - RT_IDX_CAM_HIT, 1); - if (status) - netif_err(qdev, ifup, qdev->ndev, - "Failed to init routing register for CAM packets.\n"); -exit: - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); - return status; -} - -int ql_cam_route_initialize(struct ql_adapter *qdev) -{ - int status, set; - - /* If check if the link is up and use to - * determine if we are setting or clearing - * the MAC address in the CAM. - */ - set = ql_read32(qdev, STS); - set &= qdev->port_link_up; - status = ql_set_mac_addr(qdev, set); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n"); - return status; - } - - status = ql_route_initialize(qdev); - if (status) - netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n"); - - return status; -} - -static int ql_adapter_initialize(struct ql_adapter *qdev) -{ - u32 value, mask; - int i; - int status = 0; - - /* - * Set up the System register to halt on errors. - */ - value = SYS_EFE | SYS_FAE; - mask = value << 16; - ql_write32(qdev, SYS, mask | value); - - /* Set the default queue, and VLAN behavior. */ - value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV; - mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16); - ql_write32(qdev, NIC_RCV_CFG, (mask | value)); - - /* Set the MPI interrupt to enabled. */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); - - /* Enable the function, set pagesize, enable error checking. */ - value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | - FSC_EC | FSC_VM_PAGE_4K; - value |= SPLT_SETTING; - - /* Set/clear header splitting. */ - mask = FSC_VM_PAGESIZE_MASK | - FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); - ql_write32(qdev, FSC, mask | value); - - ql_write32(qdev, SPLT_HDR, SPLT_LEN); - - /* Set RX packet routing to use port/pci function on which the - * packet arrived on in addition to usual frame routing. - * This is helpful on bonding where both interfaces can have - * the same MAC address. - */ - ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); - /* Reroute all packets to our Interface. - * They may have been routed to MPI firmware - * due to WOL. - */ - value = ql_read32(qdev, MGMT_RCV_CFG); - value &= ~MGMT_RCV_CFG_RM; - mask = 0xffff0000; - - /* Sticky reg needs clearing due to WOL. */ - ql_write32(qdev, MGMT_RCV_CFG, mask); - ql_write32(qdev, MGMT_RCV_CFG, mask | value); - - /* Default WOL is enable on Mezz cards */ - if (qdev->pdev->subsystem_device == 0x0068 || - qdev->pdev->subsystem_device == 0x0180) - qdev->wol = WAKE_MAGIC; - - /* Start up the rx queues. */ - for (i = 0; i < qdev->rx_ring_count; i++) { - status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to start rx ring[%d].\n", i); - return status; - } - } - - /* If there is more than one inbound completion queue - * then download a RICB to configure RSS. - */ - if (qdev->rss_ring_count > 1) { - status = ql_start_rss(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n"); - return status; - } - } - - /* Start up the tx queues. 
*/ - for (i = 0; i < qdev->tx_ring_count; i++) { - status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to start tx ring[%d].\n", i); - return status; - } - } - - /* Initialize the port and set the max framesize. */ - status = qdev->nic_ops->port_initialize(qdev); - if (status) - netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n"); - - /* Set up the MAC address and frame routing filter. */ - status = ql_cam_route_initialize(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init CAM/Routing tables.\n"); - return status; - } - - /* Start NAPI for the RSS queues. */ - for (i = 0; i < qdev->rss_ring_count; i++) { - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Enabling NAPI for rx_ring[%d].\n", i); - napi_enable(&qdev->rx_ring[i].napi); - } - - return status; -} - -/* Issue soft reset to chip. */ -static int ql_adapter_reset(struct ql_adapter *qdev) -{ - u32 value; - int status = 0; - unsigned long end_jiffies; - - /* Clear all the entries in the routing table. */ - status = ql_clear_routing_entries(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n"); - return status; - } - - end_jiffies = jiffies + - max((unsigned long)1, usecs_to_jiffies(30)); - - /* If the recovery bit is set, skip the mailbox command and - * clear the bit; else we are in the normal reset process. - */ - if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) { - /* Stop management traffic. */ - ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); - - /* Wait for the NIC and MGMNT FIFOs to empty. */ - ql_wait_fifo_empty(qdev); - } else - clear_bit(QL_ASIC_RECOVERY, &qdev->flags); - - ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); - - do { - value = ql_read32(qdev, RST_FO); - if ((value & RST_FO_FR) == 0) - break; - cpu_relax(); - } while (time_before(jiffies, end_jiffies)); - - if (value & RST_FO_FR) { - netif_err(qdev, ifdown, qdev->ndev, - "ETIMEDOUT!!! errored out of resetting the chip!\n"); - status = -ETIMEDOUT; - } - - /* Resume management traffic. */ - ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); - return status; -} - -static void ql_display_dev_info(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - netif_info(qdev, probe, qdev->ndev, - "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " - "XG Roll = %d, XG Rev = %d.\n", - qdev->func, - qdev->port, - qdev->chip_rev_id & 0x0000000f, - qdev->chip_rev_id >> 4 & 0x0000000f, - qdev->chip_rev_id >> 8 & 0x0000000f, - qdev->chip_rev_id >> 12 & 0x0000000f); - netif_info(qdev, probe, qdev->ndev, - "MAC address %pM\n", ndev->dev_addr); -} - -static int ql_wol(struct ql_adapter *qdev) -{ - int status = 0; - u32 wol = MB_WOL_DISABLE; - - /* The CAM is still intact after a reset, but if we - * are doing WOL, then we may need to program the - * routing regs. We would also need to issue the mailbox - * commands to instruct the MPI what to do per the ethtool - * settings. - */ - - if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | - WAKE_MCAST | WAKE_BCAST)) { - netif_err(qdev, ifdown, qdev->ndev, - "Unsupported WOL parameter.
qdev->wol = 0x%x.\n", - qdev->wol); - return -EINVAL; - } - - if (qdev->wol & WAKE_MAGIC) { - status = ql_mb_wol_set_magic(qdev, 1); - if (status) { - netif_err(qdev, ifdown, qdev->ndev, - "Failed to set magic packet on %s.\n", - qdev->ndev->name); - return status; - } else - netif_info(qdev, drv, qdev->ndev, - "Enabled magic packet successfully on %s.\n", - qdev->ndev->name); - - wol |= MB_WOL_MAGIC_PKT; - } - - if (qdev->wol) { - wol |= MB_WOL_MODE_ON; - status = ql_mb_wol_mode(qdev, wol); - netif_err(qdev, drv, qdev->ndev, - "WOL %s (wol code 0x%x) on %s\n", - (status == 0) ? "Successfully set" : "Failed", - wol, qdev->ndev->name); - } - - return status; -} - -static void ql_cancel_all_work_sync(struct ql_adapter *qdev) -{ - - /* Don't kill the reset worker thread if we - * are in the process of recovery. - */ - if (test_bit(QL_ADAPTER_UP, &qdev->flags)) - cancel_delayed_work_sync(&qdev->asic_reset_work); - cancel_delayed_work_sync(&qdev->mpi_reset_work); - cancel_delayed_work_sync(&qdev->mpi_work); - cancel_delayed_work_sync(&qdev->mpi_idc_work); - cancel_delayed_work_sync(&qdev->mpi_core_to_log); - cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); -} - -static int ql_adapter_down(struct ql_adapter *qdev) -{ - int i, status = 0; - - ql_link_off(qdev); - - ql_cancel_all_work_sync(qdev); - - for (i = 0; i < qdev->rss_ring_count; i++) - napi_disable(&qdev->rx_ring[i].napi); - - clear_bit(QL_ADAPTER_UP, &qdev->flags); - - ql_disable_interrupts(qdev); - - ql_tx_ring_clean(qdev); - - /* Call netif_napi_del() from common point. - */ - for (i = 0; i < qdev->rss_ring_count; i++) - netif_napi_del(&qdev->rx_ring[i].napi); - - status = ql_adapter_reset(qdev); - if (status) - netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", - qdev->func); - ql_free_rx_buffers(qdev); - - return status; -} - -static int ql_adapter_up(struct ql_adapter *qdev) -{ - int err = 0; - - err = ql_adapter_initialize(qdev); - if (err) { - netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); - goto err_init; - } - set_bit(QL_ADAPTER_UP, &qdev->flags); - ql_alloc_rx_buffers(qdev); - /* If the port is initialized and the - * link is up the turn on the carrier. - */ - if ((ql_read32(qdev, STS) & qdev->port_init) && - (ql_read32(qdev, STS) & qdev->port_link_up)) - ql_link_on(qdev); - /* Restore rx mode. */ - clear_bit(QL_ALLMULTI, &qdev->flags); - clear_bit(QL_PROMISCUOUS, &qdev->flags); - qlge_set_multicast_list(qdev->ndev); - - /* Restore vlan setting. */ - qlge_restore_vlan(qdev); - - ql_enable_interrupts(qdev); - ql_enable_all_completion_interrupts(qdev); - netif_tx_start_all_queues(qdev->ndev); - - return 0; -err_init: - ql_adapter_reset(qdev); - return err; -} - -static void ql_release_adapter_resources(struct ql_adapter *qdev) -{ - ql_free_mem_resources(qdev); - ql_free_irq(qdev); -} - -static int ql_get_adapter_resources(struct ql_adapter *qdev) -{ - int status = 0; - - if (ql_alloc_mem_resources(qdev)) { - netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); - return -ENOMEM; - } - status = ql_request_irq(qdev); - return status; -} - -static int qlge_close(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - /* If we hit pci_channel_io_perm_failure - * failure condition, then we already - * brought the adapter down. - */ - if (test_bit(QL_EEH_FATAL, &qdev->flags)) { - netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n"); - clear_bit(QL_EEH_FATAL, &qdev->flags); - return 0; - } - - /* - * Wait for device to recover from a reset. 
- * (Rarely happens, but possible.) - */ - while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) - msleep(1); - ql_adapter_down(qdev); - ql_release_adapter_resources(qdev); - return 0; -} - -static int ql_configure_rings(struct ql_adapter *qdev) -{ - int i; - struct rx_ring *rx_ring; - struct tx_ring *tx_ring; - int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); - unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? - LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; - - qdev->lbq_buf_order = get_order(lbq_buf_len); - - /* In a perfect world we have one RSS ring for each CPU - * and each has it's own vector. To do that we ask for - * cpu_cnt vectors. ql_enable_msix() will adjust the - * vector count to what we actually get. We then - * allocate an RSS ring for each. - * Essentially, we are doing min(cpu_count, msix_vector_count). - */ - qdev->intr_count = cpu_cnt; - ql_enable_msix(qdev); - /* Adjust the RSS ring count to the actual vector count. */ - qdev->rss_ring_count = qdev->intr_count; - qdev->tx_ring_count = cpu_cnt; - qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count; - - for (i = 0; i < qdev->tx_ring_count; i++) { - tx_ring = &qdev->tx_ring[i]; - memset((void *)tx_ring, 0, sizeof(*tx_ring)); - tx_ring->qdev = qdev; - tx_ring->wq_id = i; - tx_ring->wq_len = qdev->tx_ring_size; - tx_ring->wq_size = - tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); - - /* - * The completion queue ID for the tx rings start - * immediately after the rss rings. - */ - tx_ring->cq_id = qdev->rss_ring_count + i; - } - - for (i = 0; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - memset((void *)rx_ring, 0, sizeof(*rx_ring)); - rx_ring->qdev = qdev; - rx_ring->cq_id = i; - rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ - if (i < qdev->rss_ring_count) { - /* - * Inbound (RSS) queues. - */ - rx_ring->cq_len = qdev->rx_ring_size; - rx_ring->cq_size = - rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); - rx_ring->lbq_len = NUM_LARGE_BUFFERS; - rx_ring->lbq_size = - rx_ring->lbq_len * sizeof(__le64); - rx_ring->lbq_buf_size = (u16)lbq_buf_len; - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "lbq_buf_size %d, order = %d\n", - rx_ring->lbq_buf_size, - qdev->lbq_buf_order); - rx_ring->sbq_len = NUM_SMALL_BUFFERS; - rx_ring->sbq_size = - rx_ring->sbq_len * sizeof(__le64); - rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; - rx_ring->type = RX_Q; - } else { - /* - * Outbound queue handles outbound completions only. - */ - /* outbound cq is same size as tx_ring it services. */ - rx_ring->cq_len = qdev->tx_ring_size; - rx_ring->cq_size = - rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); - rx_ring->lbq_len = 0; - rx_ring->lbq_size = 0; - rx_ring->lbq_buf_size = 0; - rx_ring->sbq_len = 0; - rx_ring->sbq_size = 0; - rx_ring->sbq_buf_size = 0; - rx_ring->type = TX_Q; - } - } - return 0; -} - -static int qlge_open(struct net_device *ndev) -{ - int err = 0; - struct ql_adapter *qdev = netdev_priv(ndev); - - err = ql_adapter_reset(qdev); - if (err) - return err; - - err = ql_configure_rings(qdev); - if (err) - return err; - - err = ql_get_adapter_resources(qdev); - if (err) - goto error_up; - - err = ql_adapter_up(qdev); - if (err) - goto error_up; - - return err; - -error_up: - ql_release_adapter_resources(qdev); - return err; -} - -static int ql_change_rx_buffers(struct ql_adapter *qdev) -{ - struct rx_ring *rx_ring; - int i, status; - u32 lbq_buf_len; - - /* Wait for an outstanding reset to complete. 
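ql_configure_rings() above fixes the completion-queue numbering that the rest of the driver relies on: inbound RSS queues take cq_ids 0 through rss_ring_count - 1, and the outbound completion queue serving tx_ring i is cq_id rss_ring_count + i. The sketch below prints that layout for hypothetical CPU and vector counts:

#include <stdio.h>

int main(void)
{
	int cpu_cnt = 4;		/* pretend num_online_cpus() */
	int intr_count = 2;		/* what MSI-X actually granted */
	int rss_ring_count = intr_count;
	int tx_ring_count = cpu_cnt;
	int rx_ring_count = tx_ring_count + rss_ring_count;
	int i;

	for (i = 0; i < rx_ring_count; i++)
		printf("cq_id %d: %s\n", i,
		       i < rss_ring_count ? "RX_Q (inbound RSS)"
					  : "TX_Q (outbound completion)");
	for (i = 0; i < tx_ring_count; i++)
		printf("tx_ring %d -> cq_id %d\n", i, rss_ring_count + i);
	return 0;
}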
*/ - if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { - int i = 3; - while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { - netif_err(qdev, ifup, qdev->ndev, - "Waiting for adapter UP...\n"); - ssleep(1); - } - - if (!i) { - netif_err(qdev, ifup, qdev->ndev, - "Timed out waiting for adapter UP\n"); - return -ETIMEDOUT; - } - } - - status = ql_adapter_down(qdev); - if (status) - goto error; - - /* Get the new rx buffer size. */ - lbq_buf_len = (qdev->ndev->mtu > 1500) ? - LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; - qdev->lbq_buf_order = get_order(lbq_buf_len); - - for (i = 0; i < qdev->rss_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - /* Set the new size. */ - rx_ring->lbq_buf_size = lbq_buf_len; - } - - status = ql_adapter_up(qdev); - if (status) - goto error; - - return status; -error: - netif_alert(qdev, ifup, qdev->ndev, - "Driver up/down cycle failed, closing device.\n"); - set_bit(QL_ADAPTER_UP, &qdev->flags); - dev_close(qdev->ndev); - return status; -} - -static int qlge_change_mtu(struct net_device *ndev, int new_mtu) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int status; - - if (ndev->mtu == 1500 && new_mtu == 9000) { - netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n"); - } else if (ndev->mtu == 9000 && new_mtu == 1500) { - netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n"); - } else - return -EINVAL; - - queue_delayed_work(qdev->workqueue, - &qdev->mpi_port_cfg_work, 3*HZ); - - ndev->mtu = new_mtu; - - if (!netif_running(qdev->ndev)) { - return 0; - } - - status = ql_change_rx_buffers(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Changing MTU failed.\n"); - } - - return status; -} - -static struct net_device_stats *qlge_get_stats(struct net_device - *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - struct rx_ring *rx_ring = &qdev->rx_ring[0]; - struct tx_ring *tx_ring = &qdev->tx_ring[0]; - unsigned long pkts, mcast, dropped, errors, bytes; - int i; - - /* Get RX stats. */ - pkts = mcast = dropped = errors = bytes = 0; - for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { - pkts += rx_ring->rx_packets; - bytes += rx_ring->rx_bytes; - dropped += rx_ring->rx_dropped; - errors += rx_ring->rx_errors; - mcast += rx_ring->rx_multicast; - } - ndev->stats.rx_packets = pkts; - ndev->stats.rx_bytes = bytes; - ndev->stats.rx_dropped = dropped; - ndev->stats.rx_errors = errors; - ndev->stats.multicast = mcast; - - /* Get TX stats. */ - pkts = errors = bytes = 0; - for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { - pkts += tx_ring->tx_packets; - bytes += tx_ring->tx_bytes; - errors += tx_ring->tx_errors; - } - ndev->stats.tx_packets = pkts; - ndev->stats.tx_bytes = bytes; - ndev->stats.tx_errors = errors; - return &ndev->stats; -} - -static void qlge_set_multicast_list(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - struct netdev_hw_addr *ha; - int i, status; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return; - /* - * Set or clear promiscuous mode if a - * transition is taking place. 
- */ - if (ndev->flags & IFF_PROMISC) { - if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { - if (ql_set_routing_reg - (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to set promiscuous mode.\n"); - } else { - set_bit(QL_PROMISCUOUS, &qdev->flags); - } - } - } else { - if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { - if (ql_set_routing_reg - (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to clear promiscuous mode.\n"); - } else { - clear_bit(QL_PROMISCUOUS, &qdev->flags); - } - } - } - - /* - * Set or clear all multicast mode if a - * transition is taking place. - */ - if ((ndev->flags & IFF_ALLMULTI) || - (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) { - if (!test_bit(QL_ALLMULTI, &qdev->flags)) { - if (ql_set_routing_reg - (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to set all-multi mode.\n"); - } else { - set_bit(QL_ALLMULTI, &qdev->flags); - } - } - } else { - if (test_bit(QL_ALLMULTI, &qdev->flags)) { - if (ql_set_routing_reg - (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to clear all-multi mode.\n"); - } else { - clear_bit(QL_ALLMULTI, &qdev->flags); - } - } - } - - if (!netdev_mc_empty(ndev)) { - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - goto exit; - i = 0; - netdev_for_each_mc_addr(ha, ndev) { - if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr, - MAC_ADDR_TYPE_MULTI_MAC, i)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to load multicast address.\n"); - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - goto exit; - } - i++; - } - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - if (ql_set_routing_reg - (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to set multicast match mode.\n"); - } else { - set_bit(QL_ALLMULTI, &qdev->flags); - } - } -exit: - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); -} - -static int qlge_set_mac_address(struct net_device *ndev, void *p) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - struct sockaddr *addr = p; - int status; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); - /* Update local copy of current mac address. */ - memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return status; - status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, - MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); - if (status) - netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n"); - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - return status; -} - -static void qlge_tx_timeout(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - ql_queue_asic_error(qdev); -} - -static void ql_asic_reset_work(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, asic_reset_work.work); - int status; - rtnl_lock(); - status = ql_adapter_down(qdev); - if (status) - goto error; - - status = ql_adapter_up(qdev); - if (status) - goto error; - - /* Restore rx mode.
*/ - clear_bit(QL_ALLMULTI, &qdev->flags); - clear_bit(QL_PROMISCUOUS, &qdev->flags); - qlge_set_multicast_list(qdev->ndev); - - rtnl_unlock(); - return; -error: - netif_alert(qdev, ifup, qdev->ndev, - "Driver up/down cycle failed, closing device\n"); - - set_bit(QL_ADAPTER_UP, &qdev->flags); - dev_close(qdev->ndev); - rtnl_unlock(); -} - -static const struct nic_operations qla8012_nic_ops = { - .get_flash = ql_get_8012_flash_params, - .port_initialize = ql_8012_port_initialize, -}; - -static const struct nic_operations qla8000_nic_ops = { - .get_flash = ql_get_8000_flash_params, - .port_initialize = ql_8000_port_initialize, -}; - -/* Find the pcie function number for the other NIC - * on this chip. Since both NIC functions share a - * common firmware we have the lowest enabled function - * do any common work. Examples would be resetting - * after a fatal firmware error, or doing a firmware - * coredump. - */ -static int ql_get_alt_pcie_func(struct ql_adapter *qdev) -{ - int status = 0; - u32 temp; - u32 nic_func1, nic_func2; - - status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, - &temp); - if (status) - return status; - - nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) & - MPI_TEST_NIC_FUNC_MASK); - nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) & - MPI_TEST_NIC_FUNC_MASK); - - if (qdev->func == nic_func1) - qdev->alt_func = nic_func2; - else if (qdev->func == nic_func2) - qdev->alt_func = nic_func1; - else - status = -EIO; - - return status; -} - -static int ql_get_board_info(struct ql_adapter *qdev) -{ - int status; - qdev->func = - (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; - if (qdev->func > 3) - return -EIO; - - status = ql_get_alt_pcie_func(qdev); - if (status) - return status; - - qdev->port = (qdev->func < qdev->alt_func) ? 
0 : 1; - if (qdev->port) { - qdev->xg_sem_mask = SEM_XGMAC1_MASK; - qdev->port_link_up = STS_PL1; - qdev->port_init = STS_PI1; - qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI; - qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO; - } else { - qdev->xg_sem_mask = SEM_XGMAC0_MASK; - qdev->port_link_up = STS_PL0; - qdev->port_init = STS_PI0; - qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; - qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; - } - qdev->chip_rev_id = ql_read32(qdev, REV_ID); - qdev->device_id = qdev->pdev->device; - if (qdev->device_id == QLGE_DEVICE_ID_8012) - qdev->nic_ops = &qla8012_nic_ops; - else if (qdev->device_id == QLGE_DEVICE_ID_8000) - qdev->nic_ops = &qla8000_nic_ops; - return status; -} - -static void ql_release_all(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - - if (qdev->workqueue) { - destroy_workqueue(qdev->workqueue); - qdev->workqueue = NULL; - } - - if (qdev->reg_base) - iounmap(qdev->reg_base); - if (qdev->doorbell_area) - iounmap(qdev->doorbell_area); - vfree(qdev->mpi_coredump); - pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); -} - -static int __devinit ql_init_device(struct pci_dev *pdev, - struct net_device *ndev, int cards_found) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int err = 0; - - memset((void *)qdev, 0, sizeof(*qdev)); - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "PCI device enable failed.\n"); - return err; - } - - qdev->ndev = ndev; - qdev->pdev = pdev; - pci_set_drvdata(pdev, ndev); - - /* Set PCIe read request size */ - err = pcie_set_readrq(pdev, 4096); - if (err) { - dev_err(&pdev->dev, "Set readrq failed.\n"); - goto err_out1; - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) { - dev_err(&pdev->dev, "PCI region request failed.\n"); - return err; - } - - pci_set_master(pdev); - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - set_bit(QL_DMA64, &qdev->flags); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - } - - if (err) { - dev_err(&pdev->dev, "No usable DMA configuration.\n"); - goto err_out2; - } - - /* Set PCIe reset type for EEH to fundamental. 
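The DMA setup in ql_init_device() above is the common try-64-then-32 fallback, with the outcome remembered in QL_DMA64 so NETIF_F_HIGHDMA can be advertised later during probe. A hedged userspace rendition; fake_set_dma_mask() merely pretends to be a 32-bit-only device and is not a real PCI call, though the DMA_BIT_MASK macro follows the usual kernel shape:

#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Stand-in for pci_set_dma_mask(): pretend the device is 32-bit-only. */
static int fake_set_dma_mask(uint64_t mask)
{
	const uint64_t highest = DMA_BIT_MASK(32);
	return mask <= highest ? 0 : -5;	/* -EIO on failure */
}

int main(void)
{
	int dma64 = 0;

	if (!fake_set_dma_mask(DMA_BIT_MASK(64)))
		dma64 = 1;			/* driver sets QL_DMA64 here */
	else if (fake_set_dma_mask(DMA_BIT_MASK(32))) {
		printf("No usable DMA configuration.\n");
		return 1;
	}
	printf("using %s-bit DMA mask\n", dma64 ? "64" : "32");
	return 0;
}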
*/ - pdev->needs_freset = 1; - pci_save_state(pdev); - qdev->reg_base = - ioremap_nocache(pci_resource_start(pdev, 1), - pci_resource_len(pdev, 1)); - if (!qdev->reg_base) { - dev_err(&pdev->dev, "Register mapping failed.\n"); - err = -ENOMEM; - goto err_out2; - } - - qdev->doorbell_area_size = pci_resource_len(pdev, 3); - qdev->doorbell_area = - ioremap_nocache(pci_resource_start(pdev, 3), - pci_resource_len(pdev, 3)); - if (!qdev->doorbell_area) { - dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); - err = -ENOMEM; - goto err_out2; - } - - err = ql_get_board_info(qdev); - if (err) { - dev_err(&pdev->dev, "Register access failed.\n"); - err = -EIO; - goto err_out2; - } - qdev->msg_enable = netif_msg_init(debug, default_msg); - spin_lock_init(&qdev->hw_lock); - spin_lock_init(&qdev->stats_lock); - - if (qlge_mpi_coredump) { - qdev->mpi_coredump = - vmalloc(sizeof(struct ql_mpi_coredump)); - if (qdev->mpi_coredump == NULL) { - dev_err(&pdev->dev, "Coredump alloc failed.\n"); - err = -ENOMEM; - goto err_out2; - } - if (qlge_force_coredump) - set_bit(QL_FRC_COREDUMP, &qdev->flags); - } - /* make sure the EEPROM is good */ - err = qdev->nic_ops->get_flash(qdev); - if (err) { - dev_err(&pdev->dev, "Invalid FLASH.\n"); - goto err_out2; - } - - memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); - /* Keep local copy of current mac address. */ - memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); - - /* Set up the default ring sizes. */ - qdev->tx_ring_size = NUM_TX_RING_ENTRIES; - qdev->rx_ring_size = NUM_RX_RING_ENTRIES; - - /* Set up the coalescing parameters. */ - qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; - qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; - qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; - qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; - - /* - * Set up the operating parameters. 
- */ - qdev->workqueue = create_singlethread_workqueue(ndev->name); - INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); - INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); - INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); - INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); - INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); - INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); - init_completion(&qdev->ide_completion); - mutex_init(&qdev->mpi_mutex); - - if (!cards_found) { - dev_info(&pdev->dev, "%s\n", DRV_STRING); - dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n", - DRV_NAME, DRV_VERSION); - } - return 0; -err_out2: - ql_release_all(pdev); -err_out1: - pci_disable_device(pdev); - return err; -} - -static const struct net_device_ops qlge_netdev_ops = { - .ndo_open = qlge_open, - .ndo_stop = qlge_close, - .ndo_start_xmit = qlge_send, - .ndo_change_mtu = qlge_change_mtu, - .ndo_get_stats = qlge_get_stats, - .ndo_set_multicast_list = qlge_set_multicast_list, - .ndo_set_mac_address = qlge_set_mac_address, - .ndo_validate_addr = eth_validate_addr, - .ndo_tx_timeout = qlge_tx_timeout, - .ndo_fix_features = qlge_fix_features, - .ndo_set_features = qlge_set_features, - .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, -}; - -static void ql_timer(unsigned long data) -{ - struct ql_adapter *qdev = (struct ql_adapter *)data; - u32 var = 0; - - var = ql_read32(qdev, STS); - if (pci_channel_offline(qdev->pdev)) { - netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); - return; - } - - mod_timer(&qdev->timer, jiffies + (5*HZ)); -} - -static int __devinit qlge_probe(struct pci_dev *pdev, - const struct pci_device_id *pci_entry) -{ - struct net_device *ndev = NULL; - struct ql_adapter *qdev = NULL; - static int cards_found = 0; - int err = 0; - - ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), - min(MAX_CPUS, (int)num_online_cpus())); - if (!ndev) - return -ENOMEM; - - err = ql_init_device(pdev, ndev, cards_found); - if (err < 0) { - free_netdev(ndev); - return err; - } - - qdev = netdev_priv(ndev); - SET_NETDEV_DEV(ndev, &pdev->dev); - ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | - NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM; - ndev->features = ndev->hw_features | - NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; - - if (test_bit(QL_DMA64, &qdev->flags)) - ndev->features |= NETIF_F_HIGHDMA; - - /* - * Set up net_device structure. 
- */ - ndev->tx_queue_len = qdev->tx_ring_size; - ndev->irq = pdev->irq; - - ndev->netdev_ops = &qlge_netdev_ops; - SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); - ndev->watchdog_timeo = 10 * HZ; - - err = register_netdev(ndev); - if (err) { - dev_err(&pdev->dev, "net device registration failed.\n"); - ql_release_all(pdev); - pci_disable_device(pdev); - return err; - } - /* Start up the timer to trigger EEH if - * the bus goes dead - */ - init_timer_deferrable(&qdev->timer); - qdev->timer.data = (unsigned long)qdev; - qdev->timer.function = ql_timer; - qdev->timer.expires = jiffies + (5*HZ); - add_timer(&qdev->timer); - ql_link_off(qdev); - ql_display_dev_info(ndev); - atomic_set(&qdev->lb_count, 0); - cards_found++; - return 0; -} - -netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev) -{ - return qlge_send(skb, ndev); -} - -int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget) -{ - return ql_clean_inbound_rx_ring(rx_ring, budget); -} - -static void __devexit qlge_remove(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - del_timer_sync(&qdev->timer); - ql_cancel_all_work_sync(qdev); - unregister_netdev(ndev); - ql_release_all(pdev); - pci_disable_device(pdev); - free_netdev(ndev); -} - -/* Clean up resources without touching hardware. */ -static void ql_eeh_close(struct net_device *ndev) -{ - int i; - struct ql_adapter *qdev = netdev_priv(ndev); - - if (netif_carrier_ok(ndev)) { - netif_carrier_off(ndev); - netif_stop_queue(ndev); - } - - /* Disabling the timer */ - del_timer_sync(&qdev->timer); - ql_cancel_all_work_sync(qdev); - - for (i = 0; i < qdev->rss_ring_count; i++) - netif_napi_del(&qdev->rx_ring[i].napi); - - clear_bit(QL_ADAPTER_UP, &qdev->flags); - ql_tx_ring_clean(qdev); - ql_free_rx_buffers(qdev); - ql_release_adapter_resources(qdev); -} - -/* - * This callback is called by the PCI subsystem whenever - * a PCI bus error is detected. - */ -static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - - switch (state) { - case pci_channel_io_normal: - return PCI_ERS_RESULT_CAN_RECOVER; - case pci_channel_io_frozen: - netif_device_detach(ndev); - if (netif_running(ndev)) - ql_eeh_close(ndev); - pci_disable_device(pdev); - return PCI_ERS_RESULT_NEED_RESET; - case pci_channel_io_perm_failure: - dev_err(&pdev->dev, - "%s: pci_channel_io_perm_failure.\n", __func__); - ql_eeh_close(ndev); - set_bit(QL_EEH_FATAL, &qdev->flags); - return PCI_ERS_RESULT_DISCONNECT; - } - - /* Request a slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; -} - -/* - * This callback is called after the PCI bus has been reset. - * Basically, this tries to restart the card from scratch. - * This is a shortened version of the device probe/discovery code, - * it resembles the first half of the qlge_probe() routine.
- */ -static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - - pdev->error_state = pci_channel_io_normal; - - pci_restore_state(pdev); - if (pci_enable_device(pdev)) { - netif_err(qdev, ifup, qdev->ndev, - "Cannot re-enable PCI device after reset.\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - pci_set_master(pdev); - - if (ql_adapter_reset(qdev)) { - netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n"); - set_bit(QL_EEH_FATAL, &qdev->flags); - return PCI_ERS_RESULT_DISCONNECT; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -static void qlge_io_resume(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - int err = 0; - - if (netif_running(ndev)) { - err = qlge_open(ndev); - if (err) { - netif_err(qdev, ifup, qdev->ndev, - "Device initialization failed after reset.\n"); - return; - } - } else { - netif_err(qdev, ifup, qdev->ndev, - "Device was not running prior to EEH.\n"); - } - mod_timer(&qdev->timer, jiffies + (5*HZ)); - netif_device_attach(ndev); -} - -static struct pci_error_handlers qlge_err_handler = { - .error_detected = qlge_io_error_detected, - .slot_reset = qlge_io_slot_reset, - .resume = qlge_io_resume, -}; - -static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - int err; - - netif_device_detach(ndev); - del_timer_sync(&qdev->timer); - - if (netif_running(ndev)) { - err = ql_adapter_down(qdev); - if (!err) - return err; - } - - ql_wol(qdev); - err = pci_save_state(pdev); - if (err) - return err; - - pci_disable_device(pdev); - - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -#ifdef CONFIG_PM -static int qlge_resume(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - int err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - err = pci_enable_device(pdev); - if (err) { - netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n"); - return err; - } - pci_set_master(pdev); - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - if (netif_running(ndev)) { - err = ql_adapter_up(qdev); - if (err) - return err; - } - - mod_timer(&qdev->timer, jiffies + (5*HZ)); - netif_device_attach(ndev); - - return 0; -} -#endif /* CONFIG_PM */ - -static void qlge_shutdown(struct pci_dev *pdev) -{ - qlge_suspend(pdev, PMSG_SUSPEND); -} - -static struct pci_driver qlge_driver = { - .name = DRV_NAME, - .id_table = qlge_pci_tbl, - .probe = qlge_probe, - .remove = __devexit_p(qlge_remove), -#ifdef CONFIG_PM - .suspend = qlge_suspend, - .resume = qlge_resume, -#endif - .shutdown = qlge_shutdown, - .err_handler = &qlge_err_handler -}; - -static int __init qlge_init_module(void) -{ - return pci_register_driver(&qlge_driver); -} - -static void __exit qlge_exit(void) -{ - pci_unregister_driver(&qlge_driver); -} - -module_init(qlge_init_module); -module_exit(qlge_exit); diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c deleted file mode 100644 index ff2bf8a4e247..000000000000 --- a/drivers/net/qlge/qlge_mpi.c +++ /dev/null @@ -1,1284 +0,0 @@ -#include "qlge.h" - -int ql_unpause_mpi_risc(struct ql_adapter *qdev) -{ - u32 tmp; - - /* Un-pause the RISC */ - tmp = ql_read32(qdev, CSR); - if (!(tmp & CSR_RP)) - return -EIO; - - 
ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE); - return 0; -} - -int ql_pause_mpi_risc(struct ql_adapter *qdev) -{ - u32 tmp; - int count = UDELAY_COUNT; - - /* Pause the RISC */ - ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE); - do { - tmp = ql_read32(qdev, CSR); - if (tmp & CSR_RP) - break; - mdelay(UDELAY_DELAY); - count--; - } while (count); - return (count == 0) ? -ETIMEDOUT : 0; -} - -int ql_hard_reset_mpi_risc(struct ql_adapter *qdev) -{ - u32 tmp; - int count = UDELAY_COUNT; - - /* Reset the RISC */ - ql_write32(qdev, CSR, CSR_CMD_SET_RST); - do { - tmp = ql_read32(qdev, CSR); - if (tmp & CSR_RR) { - ql_write32(qdev, CSR, CSR_CMD_CLR_RST); - break; - } - mdelay(UDELAY_DELAY); - count--; - } while (count); - return (count == 0) ? -ETIMEDOUT : 0; -} - -int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data) -{ - int status; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); - if (status) - goto exit; - /* set up for reg read */ - ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R); - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); - if (status) - goto exit; - /* get the data */ - *data = ql_read32(qdev, PROC_DATA); -exit: - return status; -} - -int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data) -{ - int status = 0; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); - if (status) - goto exit; - /* write the data to the data reg */ - ql_write32(qdev, PROC_DATA, data); - /* trigger the write */ - ql_write32(qdev, PROC_ADDR, reg); - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); - if (status) - goto exit; -exit: - return status; -} - -int ql_soft_reset_mpi_risc(struct ql_adapter *qdev) -{ - int status; - status = ql_write_mpi_reg(qdev, 0x00001010, 1); - return status; -} - -/* Determine if we are in charge of the firmware. We are if - * we are the lower of the 2 NIC pcie functions, or if - * we are the higher function and the lower function - * is not enabled. - */ -int ql_own_firmware(struct ql_adapter *qdev) -{ - u32 temp; - - /* If we are the lower of the 2 NIC functions - * on the chip then we are responsible for - * core dump and firmware reset after an error. - */ - if (qdev->func < qdev->alt_func) - return 1; - - /* If we are the higher of the 2 NIC functions - * on the chip and the lower function is not - * enabled, then we are responsible for - * core dump and firmware reset after an error. - */ - temp = ql_read32(qdev, STS); - if (!(temp & (1 << (8 + qdev->alt_func)))) - return 1; - - return 0; - -} - -static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int i, status; - - status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); - if (status) - return -EBUSY; - for (i = 0; i < mbcp->out_count; i++) { - status = - ql_read_mpi_reg(qdev, qdev->mailbox_out + i, - &mbcp->mbox_out[i]); - if (status) { - netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n"); - break; - } - } - ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ - return status; -} - -/* Wait for a single mailbox command to complete. - * Returns zero on success.
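ql_read_mpi_reg() and ql_write_mpi_reg() above implement an indirect register window: wait for PROC_ADDR to report ready, post the target address (with the read bit set for reads), wait for ready again, then move the data through PROC_DATA. A self-contained sketch of the read side; the bit positions and the always-ready fake register file are assumptions for illustration, not the hardware's real layout:

#include <stdio.h>
#include <stdint.h>

#define PROC_ADDR_RDY (1u << 31)	/* illustrative bit positions */
#define PROC_ADDR_R   (1u << 30)

static uint32_t fake_proc_addr = PROC_ADDR_RDY; /* pretend always ready */
static uint32_t fake_proc_data;

static int wait_reg_rdy(void)
{
	int count = 100;		/* bounded poll, like the driver's */

	while (count--) {
		if (fake_proc_addr & PROC_ADDR_RDY)
			return 0;
	}
	return -110;			/* -ETIMEDOUT */
}

static int read_mpi_reg(uint32_t reg, uint32_t *data)
{
	int status = wait_reg_rdy();

	if (status)
		return status;
	/* Post the address with the read bit; hardware fills PROC_DATA. */
	fake_proc_addr = reg | PROC_ADDR_R | PROC_ADDR_RDY;
	fake_proc_data = 0xdeadbeef;	/* pretend firmware reply */
	status = wait_reg_rdy();
	if (status)
		return status;
	*data = fake_proc_data;
	return 0;
}

int main(void)
{
	uint32_t val;

	if (!read_mpi_reg(0x1234, &val))
		printf("reg 0x1234 = 0x%08x\n", val);
	return 0;
}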
-/* Wait for a single mailbox command to complete.
- * Returns zero on success.
- */
-static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
-{
-	int count = 100;
-	u32 value;
-
-	do {
-		value = ql_read32(qdev, STS);
-		if (value & STS_PI)
-			return 0;
-		mdelay(UDELAY_DELAY); /* 100ms */
-	} while (--count);
-	return -ETIMEDOUT;
-}
-
-/* Execute a single mailbox command.
- * Caller must hold PROC_ADDR semaphore.
- */
-static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
-	int i, status;
-
-	/*
-	 * Make sure there's nothing pending.
-	 * This shouldn't happen.
-	 */
-	if (ql_read32(qdev, CSR) & CSR_HRI)
-		return -EIO;
-
-	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
-	if (status)
-		return status;
-
-	/*
-	 * Fill the outbound mailboxes.
-	 */
-	for (i = 0; i < mbcp->in_count; i++) {
-		status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
-					  mbcp->mbox_in[i]);
-		if (status)
-			goto end;
-	}
-	/*
-	 * Wake up the MPI firmware.
-	 */
-	ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
-end:
-	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
-	return status;
-}
-
-/* We are being asked by firmware to accept
- * a change to the port. This is only
- * a change to max frame sizes (Tx/Rx), pause
- * parameters, or loopback mode. We wake up a worker
- * to handle processing this since a mailbox command
- * will need to be sent to ACK the request.
- */
-static int ql_idc_req_aen(struct ql_adapter *qdev)
-{
-	int status;
-	struct mbox_params *mbcp = &qdev->idc_mbc;
-
-	netif_err(qdev, drv, qdev->ndev, "Enter!\n");
-	/* Get the status data and start up a thread to
-	 * handle the request.
-	 */
-	mbcp = &qdev->idc_mbc;
-	mbcp->out_count = 4;
-	status = ql_get_mb_sts(qdev, mbcp);
-	if (status) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Could not read MPI, resetting ASIC!\n");
-		ql_queue_asic_error(qdev);
-	} else {
-		/* Begin polled mode early so
-		 * we don't get another interrupt
-		 * when we leave mpi_worker.
-		 */
-		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
-		queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
-	}
-	return status;
-}
-
-/* Process an inter-device event completion.
- * If good, signal the caller's completion.
- */
-static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
-{
-	int status;
-	struct mbox_params *mbcp = &qdev->idc_mbc;
-	mbcp->out_count = 4;
-	status = ql_get_mb_sts(qdev, mbcp);
-	if (status) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Could not read MPI, resetting RISC!\n");
-		ql_queue_fw_error(qdev);
-	} else
-		/* Wake up the sleeping mpi_idc_work thread that is
-		 * waiting for this event.
-		 */
-		complete(&qdev->ide_completion);
-
-	return status;
-}
-
-static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
-	int status;
-	mbcp->out_count = 2;
-
-	status = ql_get_mb_sts(qdev, mbcp);
-	if (status) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "%s: Could not get mailbox status.\n", __func__);
-		return;
-	}
-
-	qdev->link_status = mbcp->mbox_out[1];
-	netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
-
-	/* If we're coming back from an IDC event
-	 * then set up the CAM and frame routing.
-	 */
-	if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
-		status = ql_cam_route_initialize(qdev);
-		if (status) {
-			netif_err(qdev, ifup, qdev->ndev,
-				  "Failed to init CAM/Routing tables.\n");
-			return;
-		} else
-			clear_bit(QL_CAM_RT_SET, &qdev->flags);
-	}
-
-	/* Queue up a worker to check the frame
-	 * size information, and fix it if it's not
-	 * to our liking.
-	 */
-	if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
-		netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
-		set_bit(QL_PORT_CFG, &qdev->flags);
-		/* Begin polled mode early so
-		 * we don't get another interrupt
-		 * when we leave mpi_worker dpc.
-		 */
-		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
-		queue_delayed_work(qdev->workqueue,
-				   &qdev->mpi_port_cfg_work, 0);
-	}
-
-	ql_link_on(qdev);
-}
-
-static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
-	int status;
-
-	mbcp->out_count = 3;
-
-	status = ql_get_mb_sts(qdev, mbcp);
-	if (status)
-		netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
-
-	ql_link_off(qdev);
-}
-
-static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
-	int status;
-
-	mbcp->out_count = 5;
-
-	status = ql_get_mb_sts(qdev, mbcp);
-	if (status)
-		netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
-	else
-		netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
-
-	return status;
-}
-
-static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
-	int status;
-
-	mbcp->out_count = 1;
-
-	status = ql_get_mb_sts(qdev, mbcp);
-	if (status)
-		netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
-	else
-		netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
-
-	return status;
-}
-
-static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
-	int status;
-
-	mbcp->out_count = 6;
-
-	status = ql_get_mb_sts(qdev, mbcp);
-	if (status)
-		netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
-	else {
-		int i;
-		netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
-		for (i = 0; i < mbcp->out_count; i++)
-			netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
-				  i, mbcp->mbox_out[i]);
-
-	}
-
-	return status;
-}
-
-static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
-	int status;
-
-	mbcp->out_count = 2;
-
-	status = ql_get_mb_sts(qdev, mbcp);
-	if (status) {
-		netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
-	} else {
-		netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
-			  mbcp->mbox_out[1]);
-		qdev->fw_rev_id = mbcp->mbox_out[1];
-		status = ql_cam_route_initialize(qdev);
-		if (status)
-			netif_err(qdev, ifup, qdev->ndev,
-				  "Failed to init CAM/Routing tables.\n");
-	}
-}
-
-/* Process an async event and clear it unless it's an
- * error condition.
- * This can get called iteratively from the mpi_work thread
- * when events arrive via an interrupt.
- * It also gets called when a mailbox command is polling for
- * its completion. */
-static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
-	int status;
-	int orig_count = mbcp->out_count;
-
-	/* Just get mailbox zero for now. */
-	mbcp->out_count = 1;
-	status = ql_get_mb_sts(qdev, mbcp);
-	if (status) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Could not read MPI, resetting ASIC!\n");
-		ql_queue_asic_error(qdev);
-		goto end;
-	}
-
-	switch (mbcp->mbox_out[0]) {
-
-	/* This case is only active when we arrive here
-	 * as a result of issuing a mailbox command to
-	 * the firmware.
-	 */
-	case MB_CMD_STS_INTRMDT:
-	case MB_CMD_STS_GOOD:
-	case MB_CMD_STS_INVLD_CMD:
-	case MB_CMD_STS_XFC_ERR:
-	case MB_CMD_STS_CSUM_ERR:
-	case MB_CMD_STS_ERR:
-	case MB_CMD_STS_PARAM_ERR:
-		/* We can only get mailbox status if we're polling from an
-		 * unfinished command. Get the rest of the status data and
-		 * return back to the caller.
-		 * We only end up here when we're polling for a mailbox
-		 * command completion.
-		 */
-		mbcp->out_count = orig_count;
-		status = ql_get_mb_sts(qdev, mbcp);
-		return status;
-
-	/* We are being asked by firmware to accept
-	 * a change to the port. This is only
-	 * a change to max frame sizes (Tx/Rx), pause
-	 * parameters, or loopback mode.
-	 */
-	case AEN_IDC_REQ:
-		status = ql_idc_req_aen(qdev);
-		break;
-
-	/* Process an inbound IDC event.
-	 * This will happen when we're trying to
-	 * change tx/rx max frame size, change pause
-	 * parameters or loopback mode.
-	 */
-	case AEN_IDC_CMPLT:
-	case AEN_IDC_EXT:
-		status = ql_idc_cmplt_aen(qdev);
-		break;
-
-	case AEN_LINK_UP:
-		ql_link_up(qdev, mbcp);
-		break;
-
-	case AEN_LINK_DOWN:
-		ql_link_down(qdev, mbcp);
-		break;
-
-	case AEN_FW_INIT_DONE:
-		/* If we're in the process of executing the firmware,
-		 * then convert the status to normal mailbox status.
-		 */
-		if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
-			mbcp->out_count = orig_count;
-			status = ql_get_mb_sts(qdev, mbcp);
-			mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
-			return status;
-		}
-		ql_init_fw_done(qdev, mbcp);
-		break;
-
-	case AEN_AEN_SFP_IN:
-		ql_sfp_in(qdev, mbcp);
-		break;
-
-	case AEN_AEN_SFP_OUT:
-		ql_sfp_out(qdev, mbcp);
-		break;
-
-	/* This event can arrive at boot time or after an
-	 * MPI reset if the firmware failed to initialize.
-	 */
-	case AEN_FW_INIT_FAIL:
-		/* If we're in the process of executing the firmware,
-		 * then convert the status to normal mailbox status.
-		 */
-		if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
-			mbcp->out_count = orig_count;
-			status = ql_get_mb_sts(qdev, mbcp);
-			mbcp->mbox_out[0] = MB_CMD_STS_ERR;
-			return status;
-		}
-		netif_err(qdev, drv, qdev->ndev,
-			  "Firmware initialization failed.\n");
-		status = -EIO;
-		ql_queue_fw_error(qdev);
-		break;
-
-	case AEN_SYS_ERR:
-		netif_err(qdev, drv, qdev->ndev, "System Error.\n");
-		ql_queue_fw_error(qdev);
-		status = -EIO;
-		break;
-
-	case AEN_AEN_LOST:
-		ql_aen_lost(qdev, mbcp);
-		break;
-
-	case AEN_DCBX_CHG:
-		/* Need to support AEN 8110 */
-		break;
-	default:
-		netif_err(qdev, drv, qdev->ndev,
-			  "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
-		/* Clear the MPI firmware status. */
-	}
-end:
-	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
-	/* Restore the original mailbox count to
-	 * what the caller asked for. This can get
-	 * changed when a mailbox command is waiting
-	 * for a response and an AEN arrives and
-	 * is handled.
-	 * */
-	mbcp->out_count = orig_count;
-	return status;
-}
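One detail worth flagging before ql_mailbox_command() below: a word read back in mailbox 0 is classified purely by its high status nibble (mbox_out[0] & 0x0000f000). MB_CMD_STS_GOOD or MB_CMD_STS_INTRMDT means our own command completed; anything else is treated as an AEN and goes back through ql_mpi_handler() above. A runnable toy version of that test, using placeholder status values rather than the real qlge.h constants:

	/* Toy classification of a mailbox-0 word; the STS_* values are
	 * placeholders, not the real qlge.h definitions. */
	#include <stdint.h>
	#include <stdio.h>

	#define STS_MASK	0x0000f000u
	#define STS_GOOD	0x00004000u	/* assumed MB_CMD_STS_GOOD */
	#define STS_INTRMDT	0x00001000u	/* assumed MB_CMD_STS_INTRMDT */

	static int is_command_completion(uint32_t mbox0)
	{
		uint32_t sts = mbox0 & STS_MASK;
		return sts == STS_GOOD || sts == STS_INTRMDT;
	}

	int main(void)
	{
		/* Anything else in the status nibble is handled as an AEN. */
		printf("%d\n", is_command_completion(0x00004001u)); /* 1 */
		printf("%d\n", is_command_completion(0x00008110u)); /* 0 */
		return 0;
	}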
-
-/* Execute a single mailbox command.
- * mbcp is a pointer to an array of u32. Each
- * element in the array contains the value for its
- * respective mailbox register.
- */
-static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
-	int status;
-	unsigned long count;
-
-	mutex_lock(&qdev->mpi_mutex);
-
-	/* Begin polled mode for MPI */
-	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
-
-	/* Load the mailbox registers and wake up MPI RISC. */
-	status = ql_exec_mb_cmd(qdev, mbcp);
-	if (status)
-		goto end;
-
-
-	/* If we're generating a system error, then there's nothing
-	 * to wait for.
-	 */
-	if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
-		goto end;
-
-	/* Wait for the command to complete. We loop
-	 * here because some AEN might arrive while
-	 * we're waiting for the mailbox command to
-	 * complete. If more than 5 seconds expire we can
-	 * assume something is wrong. */
-	count = jiffies + HZ * MAILBOX_TIMEOUT;
-	do {
-		/* Wait for the interrupt to come in. */
-		status = ql_wait_mbx_cmd_cmplt(qdev);
-		if (status)
-			continue;
-
-		/* Process the event. If it's an AEN, it
-		 * will be handled in-line or a worker
-		 * will be spawned. If it's our completion
-		 * we will catch it below.
-		 */
-		status = ql_mpi_handler(qdev, mbcp);
-		if (status)
-			goto end;
-
-		/* It's either the completion for our mailbox
-		 * command complete or an AEN. If it's our
-		 * completion then get out.
-		 */
-		if (((mbcp->mbox_out[0] & 0x0000f000) ==
-					MB_CMD_STS_GOOD) ||
-			((mbcp->mbox_out[0] & 0x0000f000) ==
-					MB_CMD_STS_INTRMDT))
-			goto done;
-	} while (time_before(jiffies, count));
-
-	netif_err(qdev, drv, qdev->ndev,
-		  "Timed out waiting for mailbox complete.\n");
-	status = -ETIMEDOUT;
-	goto end;
-
-done:
-
-	/* Now we can clear the interrupt condition
-	 * and look at our status.
-	 */
-	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
-
-	if (((mbcp->mbox_out[0] & 0x0000f000) !=
-					MB_CMD_STS_GOOD) &&
-		((mbcp->mbox_out[0] & 0x0000f000) !=
-					MB_CMD_STS_INTRMDT)) {
-		status = -EIO;
-	}
-end:
-	/* End polled mode for MPI */
-	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
-	mutex_unlock(&qdev->mpi_mutex);
-	return status;
-}
-
-/* Get MPI firmware version. This will be used for
- * driver banner and for ethtool info.
- * Returns zero on success.
- */
-int ql_mb_about_fw(struct ql_adapter *qdev)
-{
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int status = 0;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-
-	mbcp->in_count = 1;
-	mbcp->out_count = 3;
-
-	mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
-
-	status = ql_mailbox_command(qdev, mbcp);
-	if (status)
-		return status;
-
-	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Failed about firmware command\n");
-		status = -EIO;
-	}
-
-	/* Store the firmware version */
-	qdev->fw_rev_id = mbcp->mbox_out[1];
-
-	return status;
-}
-
-/* Get functional state for MPI firmware.
- * Returns zero on success.
- */
-int ql_mb_get_fw_state(struct ql_adapter *qdev)
-{
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int status = 0;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-
-	mbcp->in_count = 1;
-	mbcp->out_count = 2;
-
-	mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
-
-	status = ql_mailbox_command(qdev, mbcp);
-	if (status)
-		return status;
-
-	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Failed Get Firmware State.\n");
-		status = -EIO;
-	}
-
-	/* If bit zero is set in mbx 1 then the firmware is
-	 * running, but not initialized. This should never
-	 * happen.
-	 */
-	if (mbcp->mbox_out[1] & 1) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Firmware waiting for initialization.\n");
-		status = -EIO;
-	}
-
-	return status;
-}
-
-/* Send an ACK mailbox command to the firmware to
- * let it continue with the change.
- */
-static int ql_mb_idc_ack(struct ql_adapter *qdev)
-{
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int status = 0;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-
-	mbcp->in_count = 5;
-	mbcp->out_count = 1;
-
-	mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
-	mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
-	mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
-	mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
-	mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
-
-	status = ql_mailbox_command(qdev, mbcp);
-	if (status)
-		return status;
-
-	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
-		netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
-		status = -EIO;
-	}
-	return status;
-}
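All of the ql_mb_*() wrappers in this file share one calling convention: zero a struct mbox_params, fill mbox_in[] and in_count, declare how many words to read back in out_count, run the command, then validate mbox_out[0]. A standalone model of that convention follows; the types, the stub transport, and the opcode are invented for illustration, not taken from the driver.

	/* Standalone model of the mailbox calling convention. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define MBOX_WORDS	16
	#define STS_GOOD	0x4000u	/* placeholder "good" status */

	struct mbox {
		uint32_t in_count, out_count;	/* words sent / words read back */
		uint32_t mbox_in[MBOX_WORDS];
		uint32_t mbox_out[MBOX_WORDS];
	};

	/* Stub transport: a real one would fill the mailbox registers and
	 * poll for completion, as ql_mailbox_command() does above. */
	static int mailbox_command(struct mbox *m)
	{
		m->mbox_out[0] = STS_GOOD;
		m->mbox_out[1] = 0;	/* pretend: firmware state word */
		return 0;
	}

	static int mb_get_fw_state(uint32_t *state)
	{
		struct mbox m;

		memset(&m, 0, sizeof(m));
		m.in_count = 1;		/* one request word */
		m.out_count = 2;	/* status word + state word */
		m.mbox_in[0] = 0x69;	/* hypothetical opcode */

		if (mailbox_command(&m))
			return -1;
		if (m.mbox_out[0] != STS_GOOD)
			return -1;
		*state = m.mbox_out[1];
		return 0;
	}

	int main(void)
	{
		uint32_t state;
		printf("%d\n", mb_get_fw_state(&state));	/* 0 on success */
		return 0;
	}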
-
-/* Set link settings and maximum frame size settings
- * for the current port.
- * Most likely will block.
- */
-int ql_mb_set_port_cfg(struct ql_adapter *qdev)
-{
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int status = 0;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-
-	mbcp->in_count = 3;
-	mbcp->out_count = 1;
-
-	mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
-	mbcp->mbox_in[1] = qdev->link_config;
-	mbcp->mbox_in[2] = qdev->max_frame_size;
-
-
-	status = ql_mailbox_command(qdev, mbcp);
-	if (status)
-		return status;
-
-	if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Port Config sent, wait for IDC.\n");
-	} else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Failed Set Port Configuration.\n");
-		status = -EIO;
-	}
-	return status;
-}
-
-static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
-	u32 size)
-{
-	int status = 0;
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-
-	mbcp->in_count = 9;
-	mbcp->out_count = 1;
-
-	mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
-	mbcp->mbox_in[1] = LSW(addr);
-	mbcp->mbox_in[2] = MSW(req_dma);
-	mbcp->mbox_in[3] = LSW(req_dma);
-	mbcp->mbox_in[4] = MSW(size);
-	mbcp->mbox_in[5] = LSW(size);
-	mbcp->mbox_in[6] = MSW(MSD(req_dma));
-	mbcp->mbox_in[7] = LSW(MSD(req_dma));
-	mbcp->mbox_in[8] = MSW(addr);
-
-
-	status = ql_mailbox_command(qdev, mbcp);
-	if (status)
-		return status;
-
-	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
-		netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
-		status = -EIO;
-	}
-	return status;
-}
-
-/* Issue a mailbox command to dump RISC RAM. */
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
-		u32 ram_addr, int word_count)
-{
-	int status;
-	char *my_buf;
-	dma_addr_t buf_dma;
-
-	my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
-					&buf_dma);
-	if (!my_buf)
-		return -EIO;
-
-	status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
-	if (!status)
-		memcpy(buf, my_buf, word_count * sizeof(u32));
-
-	pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
-				buf_dma);
-	return status;
-}
-
-/* Get link settings and maximum frame size settings
- * for the current port.
- * Most likely will block.
- */
-int ql_mb_get_port_cfg(struct ql_adapter *qdev)
-{
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int status = 0;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-
-	mbcp->in_count = 1;
-	mbcp->out_count = 3;
-
-	mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
-
-	status = ql_mailbox_command(qdev, mbcp);
-	if (status)
-		return status;
-
-	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Failed Get Port Configuration.\n");
-		status = -EIO;
-	} else {
-		netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
-			     "Passed Get Port Configuration.\n");
-		qdev->link_config = mbcp->mbox_out[1];
-		qdev->max_frame_size = mbcp->mbox_out[2];
-	}
-	return status;
-}
-
-int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
-{
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int status;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-
-	mbcp->in_count = 2;
-	mbcp->out_count = 1;
-
-	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
-	mbcp->mbox_in[1] = wol;
-
-
-	status = ql_mailbox_command(qdev, mbcp);
-	if (status)
-		return status;
-
-	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
-		netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
-		status = -EIO;
-	}
-	return status;
-}
-
-int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
-{
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int status;
-	u8 *addr = qdev->ndev->dev_addr;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-
-	mbcp->in_count = 8;
-	mbcp->out_count = 1;
-
-	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
-	if (enable_wol) {
-		mbcp->mbox_in[1] = (u32)addr[0];
-		mbcp->mbox_in[2] = (u32)addr[1];
-		mbcp->mbox_in[3] = (u32)addr[2];
-		mbcp->mbox_in[4] = (u32)addr[3];
-		mbcp->mbox_in[5] = (u32)addr[4];
-		mbcp->mbox_in[6] = (u32)addr[5];
-		mbcp->mbox_in[7] = 0;
-	} else {
-		mbcp->mbox_in[1] = 0;
-		mbcp->mbox_in[2] = 1;
-		mbcp->mbox_in[3] = 1;
-		mbcp->mbox_in[4] = 1;
-		mbcp->mbox_in[5] = 1;
-		mbcp->mbox_in[6] = 1;
-		mbcp->mbox_in[7] = 0;
-	}
-
-	status = ql_mailbox_command(qdev, mbcp);
-	if (status)
-		return status;
-
-	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
-		netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
-		status = -EIO;
-	}
-	return status;
-}
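ql_mb_wol_set_magic() above spreads the station MAC across mailbox words, one byte per 32-bit word, with a fixed pattern written instead when the filter is being disabled. A tiny demonstration of the enable-path packing, using a made-up MAC address:

	/* Demonstrates the byte-per-word packing used by the magic-packet
	 * setup above; the MAC address here is made up. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
		uint32_t mbox_in[8] = { 0 };

		/* mbox_in[0] would carry the opcode; words 1-6 carry the MAC,
		 * one byte per 32-bit word; word 7 is left at zero. */
		for (int i = 0; i < 6; i++)
			mbox_in[1 + i] = (uint32_t)addr[i];

		for (int i = 1; i <= 6; i++)
			printf("mbox_in[%d] = 0x%02x\n", i, (unsigned)mbox_in[i]);
		return 0;
	}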
-
-/* IDC - Inter Device Communication...
- * Some firmware commands require consent of adjacent FCOE
- * function. This function waits for the OK, or a
- * counter-request for a little more time.
- * The firmware will complete the request if the other
- * function doesn't respond.
- */
-static int ql_idc_wait(struct ql_adapter *qdev)
-{
-	int status = -ETIMEDOUT;
-	long wait_time = 1 * HZ;
-	struct mbox_params *mbcp = &qdev->idc_mbc;
-	do {
-		/* Wait here for the command to complete
-		 * via the IDC process.
-		 */
-		wait_time =
-			wait_for_completion_timeout(&qdev->ide_completion,
-							wait_time);
-		if (!wait_time) {
-			netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
-			break;
-		}
-		/* Now examine the response from the IDC process.
-		 * We might have a good completion or a request for
-		 * more wait time.
-		 */
-		if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
-			netif_err(qdev, drv, qdev->ndev,
-				  "IDC Time Extension from function.\n");
-			wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
-		} else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
-			netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
-			status = 0;
-			break;
-		} else {
-			netif_err(qdev, drv, qdev->ndev,
-				  "IDC: Invalid State 0x%.04x.\n",
-				  mbcp->mbox_out[0]);
-			status = -EIO;
-			break;
-		}
-	} while (wait_time);
-
-	return status;
-}
-
-int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
-{
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int status;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-
-	mbcp->in_count = 2;
-	mbcp->out_count = 1;
-
-	mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
-	mbcp->mbox_in[1] = led_config;
-
-
-	status = ql_mailbox_command(qdev, mbcp);
-	if (status)
-		return status;
-
-	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Failed to set LED Configuration.\n");
-		status = -EIO;
-	}
-
-	return status;
-}
-
-int ql_mb_get_led_cfg(struct ql_adapter *qdev)
-{
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int status;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-
-	mbcp->in_count = 1;
-	mbcp->out_count = 2;
-
-	mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
-
-	status = ql_mailbox_command(qdev, mbcp);
-	if (status)
-		return status;
-
-	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Failed to get LED Configuration.\n");
-		status = -EIO;
-	} else
-		qdev->led_config = mbcp->mbox_out[1];
-
-	return status;
-}
-
-int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
-{
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int status;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-
-	mbcp->in_count = 1;
-	mbcp->out_count = 2;
-
-	mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
-	mbcp->mbox_in[1] = control;
-
-	status = ql_mailbox_command(qdev, mbcp);
-	if (status)
-		return status;
-
-	if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
-		return status;
-
-	if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Command not supported by firmware.\n");
-		status = -EINVAL;
-	} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
-		/* This indicates that the firmware is
-		 * already in the state we are trying to
-		 * change it to.
-		 */
-		netif_err(qdev, drv, qdev->ndev,
-			  "Command parameters make no change.\n");
-	}
-	return status;
-}
-
-/* Returns a negative error code or the mailbox command status.
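ql_idc_wait() above is a small state machine: sleep on a completion, let the peer function extend the deadline (AEN_IDC_EXT), succeed on AEN_IDC_CMPLT, and fail on timeout or any other state. An abstract, runnable model of that loop follows; wait_event() here is an invented stand-in for wait_for_completion_timeout() plus the mailbox inspection, driven by a canned event script.

	/* Abstract model of the ql_idc_wait() loop; events are canned. */
	#include <errno.h>
	#include <stdio.h>

	enum evt { EVT_TIMEOUT, EVT_EXTENSION, EVT_COMPLETE, EVT_OTHER };

	/* Stand-in for "sleep on the completion, then look at mbox_out[0]". */
	static enum evt wait_event(void)
	{
		static const enum evt script[] = { EVT_EXTENSION, EVT_COMPLETE };
		static unsigned i;
		return i < 2 ? script[i++] : EVT_TIMEOUT;
	}

	static int idc_wait(int budget)
	{
		int remaining = budget;

		while (remaining > 0) {
			switch (wait_event()) {
			case EVT_COMPLETE:
				return 0;		/* peer acked the request */
			case EVT_EXTENSION:
				remaining += budget;	/* peer asked for more time */
				break;
			case EVT_TIMEOUT:
				return -ETIMEDOUT;
			default:
				return -EIO;		/* unexpected state */
			}
			remaining--;			/* one wait slice consumed */
		}
		return -ETIMEDOUT;
	}

	int main(void)
	{
		printf("idc_wait: %d\n", idc_wait(1));	/* 0: extension, then done */
		return 0;
	}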
- */
-static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
-{
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int status;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-	*control = 0;
-
-	mbcp->in_count = 1;
-	mbcp->out_count = 1;
-
-	mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
-
-	status = ql_mailbox_command(qdev, mbcp);
-	if (status)
-		return status;
-
-	if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
-		*control = mbcp->mbox_in[1];
-		return status;
-	}
-
-	if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Command not supported by firmware.\n");
-		status = -EINVAL;
-	} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Failed to get MPI traffic control.\n");
-		status = -EIO;
-	}
-	return status;
-}
-
-int ql_wait_fifo_empty(struct ql_adapter *qdev)
-{
-	int count = 5;
-	u32 mgmnt_fifo_empty;
-	u32 nic_fifo_empty;
-
-	do {
-		nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
-		ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
-		mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
-		if (nic_fifo_empty && mgmnt_fifo_empty)
-			return 0;
-		msleep(100);
-	} while (count-- > 0);
-	return -ETIMEDOUT;
-}
-
-/* API called in work thread context to set new TX/RX
- * maximum frame size values to match MTU.
- */
-static int ql_set_port_cfg(struct ql_adapter *qdev)
-{
-	int status;
-	status = ql_mb_set_port_cfg(qdev);
-	if (status)
-		return status;
-	status = ql_idc_wait(qdev);
-	return status;
-}
-
-/* The following routines are worker threads that process
- * events that may sleep waiting for completion.
- */
-
-/* This thread gets the maximum TX and RX frame size values
- * from the firmware and, if necessary, changes them to match
- * the MTU setting.
- */
-void ql_mpi_port_cfg_work(struct work_struct *work)
-{
-	struct ql_adapter *qdev =
-	    container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
-	int status;
-
-	status = ql_mb_get_port_cfg(qdev);
-	if (status) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Bug: Failed to get port config data.\n");
-		goto err;
-	}
-
-	if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
-			qdev->max_frame_size ==
-			CFG_DEFAULT_MAX_FRAME_SIZE)
-		goto end;
-
-	qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
-	qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
-	status = ql_set_port_cfg(qdev);
-	if (status) {
-		netif_err(qdev, drv, qdev->ndev,
-			  "Bug: Failed to set port config data.\n");
-		goto err;
-	}
-end:
-	clear_bit(QL_PORT_CFG, &qdev->flags);
-	return;
-err:
-	ql_queue_fw_error(qdev);
-	goto end;
-}
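ql_mpi_port_cfg_work() above and ql_mpi_idc_work() below follow the stock deferred-work pattern: interrupt-time code masks the MPI interrupt and queues a delayed_work item, and the handler then runs in process context where the sleeping mailbox calls are legal. A schematic of that wiring, as a kernel-style sketch with invented names rather than the driver's declarations:

	/* Schematic of the deferred-work pattern used by these handlers. */
	#include <linux/errno.h>
	#include <linux/workqueue.h>

	struct my_adapter {
		struct workqueue_struct *wq;
		struct delayed_work cfg_work;
	};

	static void cfg_work_fn(struct work_struct *work)
	{
		struct my_adapter *a =
			container_of(work, struct my_adapter, cfg_work.work);

		/* Process context: mailbox commands that sleep are safe here. */
		(void)a;
	}

	static int my_adapter_init_work(struct my_adapter *a)
	{
		a->wq = create_singlethread_workqueue("my_mpi_wq");
		if (!a->wq)
			return -ENOMEM;
		INIT_DELAYED_WORK(&a->cfg_work, cfg_work_fn);
		return 0;
	}

	static void my_adapter_isr_kick(struct my_adapter *a)
	{
		/* From the ISR: mask the source, then defer the heavy lifting. */
		queue_delayed_work(a->wq, &a->cfg_work, 0);
	}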
-
-/* Process an inter-device request. This is issued by
- * the firmware in response to another function requesting
- * a change to the port. We set a flag to indicate a change
- * has been made and then send a mailbox command ACKing
- * the change request.
- */
-void ql_mpi_idc_work(struct work_struct *work)
-{
-	struct ql_adapter *qdev =
-	    container_of(work, struct ql_adapter, mpi_idc_work.work);
-	int status;
-	struct mbox_params *mbcp = &qdev->idc_mbc;
-	u32 aen;
-	int timeout;
-
-	aen = mbcp->mbox_out[1] >> 16;
-	timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
-
-	switch (aen) {
-	default:
-		netif_err(qdev, drv, qdev->ndev,
-			  "Bug: Unhandled IDC action.\n");
-		break;
-	case MB_CMD_PORT_RESET:
-	case MB_CMD_STOP_FW:
-		ql_link_off(qdev);
-	case MB_CMD_SET_PORT_CFG:
-		/* Signal the resulting link up AEN
-		 * that the frame routing and mac addr
-		 * needs to be set.
-		 * */
-		set_bit(QL_CAM_RT_SET, &qdev->flags);
-		/* Do ACK if required */
-		if (timeout) {
-			status = ql_mb_idc_ack(qdev);
-			if (status)
-				netif_err(qdev, drv, qdev->ndev,
-					  "Bug: No pending IDC!\n");
-		} else {
-			netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
-				     "IDC ACK not required\n");
-			status = 0; /* success */
-		}
-		break;
-
-	/* These sub-commands issued by another (FCoE)
-	 * function are requesting to do an operation
-	 * on the shared resource (MPI environment).
-	 * We currently don't issue these so we just
-	 * ACK the request.
-	 */
-	case MB_CMD_IOP_RESTART_MPI:
-	case MB_CMD_IOP_PREP_LINK_DOWN:
-		/* Drop the link, reload the routing
-		 * table when link comes up.
-		 */
-		ql_link_off(qdev);
-		set_bit(QL_CAM_RT_SET, &qdev->flags);
-		/* Fall through. */
-	case MB_CMD_IOP_DVR_START:
-	case MB_CMD_IOP_FLASH_ACC:
-	case MB_CMD_IOP_CORE_DUMP_MPI:
-	case MB_CMD_IOP_PREP_UPDATE_MPI:
-	case MB_CMD_IOP_COMP_UPDATE_MPI:
-	case MB_CMD_IOP_NONE:	/* an IDC without params */
-		/* Do ACK if required */
-		if (timeout) {
-			status = ql_mb_idc_ack(qdev);
-			if (status)
-				netif_err(qdev, drv, qdev->ndev,
-					  "Bug: No pending IDC!\n");
-		} else {
-			netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
-				     "IDC ACK not required\n");
-			status = 0; /* success */
-		}
-		break;
-	}
-}
-
-void ql_mpi_work(struct work_struct *work)
-{
-	struct ql_adapter *qdev =
-	    container_of(work, struct ql_adapter, mpi_work.work);
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int err = 0;
-
-	mutex_lock(&qdev->mpi_mutex);
-	/* Begin polled mode for MPI */
-	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
-
-	while (ql_read32(qdev, STS) & STS_PI) {
-		memset(mbcp, 0, sizeof(struct mbox_params));
-		mbcp->out_count = 1;
-		/* Don't continue if an async event
-		 * did not complete properly.
-		 */
-		err = ql_mpi_handler(qdev, mbcp);
-		if (err)
-			break;
-	}
-
-	/* End polled mode for MPI */
-	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
-	mutex_unlock(&qdev->mpi_mutex);
-	ql_enable_completion_interrupt(qdev, 0);
-}
-
-void ql_mpi_reset_work(struct work_struct *work)
-{
-	struct ql_adapter *qdev =
-	    container_of(work, struct ql_adapter, mpi_reset_work.work);
-	cancel_delayed_work_sync(&qdev->mpi_work);
-	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
-	cancel_delayed_work_sync(&qdev->mpi_idc_work);
-	/* If we're not the dominant NIC function,
-	 * then there is nothing to do.
-	 */
-	if (!ql_own_firmware(qdev)) {
-		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
-		return;
-	}
-
-	if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
-		netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
-		qdev->core_is_dumped = 1;
-		queue_delayed_work(qdev->workqueue,
-			&qdev->mpi_core_to_log, 5 * HZ);
-	}
-	ql_soft_reset_mpi_risc(qdev);
-}
--
cgit v1.2.3-59-g8ed1b