Diffstat (limited to 'drivers/net/vxge')
 drivers/net/vxge/vxge-config.c  |  10
 drivers/net/vxge/vxge-config.h  |  17
 drivers/net/vxge/vxge-main.c    | 173
 drivers/net/vxge/vxge-main.h    |   8
 drivers/net/vxge/vxge-reg.h     |   9
 drivers/net/vxge/vxge-traffic.c |  12
 drivers/net/vxge/vxge-traffic.h |   4
 drivers/net/vxge/vxge-version.h |   4
 8 files changed, 125 insertions(+), 112 deletions(-)
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 58d2551c78ed..9e94c4b0fb18 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -313,14 +313,6 @@ __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
break;
- case 2:
- hldev->kdfc = (u8 __iomem *)(hldev->bar1 +
- VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
- break;
- case 4:
- hldev->kdfc = (u8 __iomem *)(hldev->bar2 +
- VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
- break;
default:
break;
}
@@ -831,8 +823,6 @@ vxge_hw_device_initialize(
sizeof(struct vxge_hw_device_config));
hldev->bar0 = attr->bar0;
- hldev->bar1 = attr->bar1;
- hldev->bar2 = attr->bar2;
hldev->pdev = attr->pdev;
hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index afbdf6f4d224..62779a520ca1 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -682,8 +682,6 @@ struct __vxge_hw_vpath_handle{
* @major_revision: PCI Device major revision
* @minor_revision: PCI Device minor revision
* @bar0: BAR0 virtual address.
- * @bar1: BAR1 virtual address.
- * @bar2: BAR2 virtual address.
* @pdev: Physical device handle
* @config: Configuration passed by the LL driver at initialization
* @link_state: Link state
@@ -698,8 +696,6 @@ struct __vxge_hw_device {
u8 major_revision;
u8 minor_revision;
void __iomem *bar0;
- void __iomem *bar1;
- void __iomem *bar2;
struct pci_dev *pdev;
struct net_device *ndev;
struct vxge_hw_device_config config;
@@ -788,17 +784,13 @@ struct vxge_hw_device_hw_info {
/**
* struct vxge_hw_device_attr - Device memory spaces.
* @bar0: BAR0 virtual address.
- * @bar1: BAR1 virtual address.
- * @bar2: BAR2 virtual address.
* @pdev: PCI device object.
*
- * Device memory spaces. Includes configuration, BAR0, BAR1, etc. per device
+ * Device memory spaces. Includes configuration, BAR0, etc. per device
* mapped memories. Also, includes a pointer to OS-specific PCI device object.
*/
struct vxge_hw_device_attr {
void __iomem *bar0;
- void __iomem *bar1;
- void __iomem *bar2;
struct pci_dev *pdev;
struct vxge_hw_uld_cbs uld_callbacks;
};
@@ -986,7 +978,9 @@ struct __vxge_hw_fifo {
void *txdlh,
enum vxge_hw_fifo_tcode t_code,
void *userdata,
- void **skb_ptr);
+ struct sk_buff ***skb_ptr,
+ int nr_skb,
+ int *more);
void (*txdl_term)(
void *txdlh,
@@ -1787,7 +1781,8 @@ struct vxge_hw_fifo_attr {
void *txdlh,
enum vxge_hw_fifo_tcode t_code,
void *userdata,
- void **skb_ptr);
+ struct sk_buff ***skb_ptr,
+ int nr_skb, int *more);
void (*txdl_term)(
void *txdlh,
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 6034497536a4..b378037a29b5 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -87,22 +87,25 @@ static inline int is_vxge_card_up(struct vxgedev *vdev)
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
unsigned long flags = 0;
- struct sk_buff *skb_ptr = NULL;
- struct sk_buff **temp, *head, *skb;
-
- if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
- vxge_hw_vpath_poll_tx(fifo->handle, (void **)&skb_ptr);
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
- }
- /* free SKBs */
- head = skb_ptr;
- while (head) {
- skb = head;
- temp = (struct sk_buff **)&skb->cb;
- head = *temp;
- *temp = NULL;
- dev_kfree_skb_irq(skb);
- }
+ struct sk_buff **skb_ptr = NULL;
+ struct sk_buff **temp;
+#define NR_SKB_COMPLETED 128
+ struct sk_buff *completed[NR_SKB_COMPLETED];
+ int more;
+
+ do {
+ more = 0;
+ skb_ptr = completed;
+
+ if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
+ vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
+ NR_SKB_COMPLETED, &more);
+ spin_unlock_irqrestore(&fifo->tx_lock, flags);
+ }
+ /* free SKBs */
+ for (temp = completed; temp != skb_ptr; temp++)
+ dev_kfree_skb_irq(*temp);
+ } while (more);
}
static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
@@ -283,6 +286,7 @@ vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
rx_priv->skb = skb;
+ rx_priv->skb_data = NULL;
rx_priv->data_size = skb_size;
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
@@ -302,7 +306,8 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
ring->ndev->name, __func__, __LINE__);
rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
- dma_addr = pci_map_single(ring->pdev, rx_priv->skb->data,
+ rx_priv->skb_data = rx_priv->skb->data;
+ dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
rx_priv->data_size, PCI_DMA_FROMDEVICE);
if (dma_addr == 0) {
@@ -374,10 +379,10 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
if (ring->vlgrp && ext_info->vlan &&
(ring->vlan_tag_strip ==
VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
- vlan_gro_receive(&ring->napi, ring->vlgrp,
+ vlan_gro_receive(ring->napi_p, ring->vlgrp,
ext_info->vlan, skb);
else
- napi_gro_receive(&ring->napi, skb);
+ napi_gro_receive(ring->napi_p, skb);
} else {
if (ring->vlgrp && vlan &&
(ring->vlan_tag_strip ==
@@ -442,10 +447,12 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
vxge_hw_ring_replenish(ringh, 0);
do {
+ prefetch((char *)dtr + L1_CACHE_BYTES);
rx_priv = vxge_hw_ring_rxd_private_get(dtr);
skb = rx_priv->skb;
data_size = rx_priv->data_size;
data_dma = rx_priv->data_dma;
+ prefetch(rx_priv->skb_data);
vxge_debug_rx(VXGE_TRACE,
"%s: %s:%d skb = 0x%p",
@@ -454,6 +461,8 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
pkt_length = dma_sizes;
+ pkt_length -= ETH_FCS_LEN;
+
vxge_debug_rx(VXGE_TRACE,
"%s: %s:%d Packet Length = %d",
ring->ndev->name, __func__, __LINE__, pkt_length);
@@ -579,8 +588,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
if (first_dtr)
vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
- dev->last_rx = jiffies;
-
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...",
__func__, __LINE__);
@@ -598,11 +605,10 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
enum vxge_hw_fifo_tcode t_code, void *userdata,
- void **skb_ptr)
+ struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
- struct sk_buff *skb, *head = NULL;
- struct sk_buff **temp;
+ struct sk_buff *skb, **done_skb = *skb_ptr;
int pkt_cnt = 0;
vxge_debug_entryexit(VXGE_TRACE,
@@ -655,9 +661,12 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
fifo->stats.tx_frms++;
fifo->stats.tx_bytes += skb->len;
- temp = (struct sk_buff **)&skb->cb;
- *temp = head;
- head = skb;
+ *done_skb++ = skb;
+
+ if (--nr_skb <= 0) {
+ *more = 1;
+ break;
+ }
pkt_cnt++;
if (pkt_cnt > fifo->indicate_max_pkts)
@@ -666,11 +675,9 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
&dtr, &t_code) == VXGE_HW_OK);
+ *skb_ptr = done_skb;
vxge_wake_tx_queue(fifo, skb);
- if (skb_ptr)
- *skb_ptr = (void *) head;
-
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d Exiting...",
fifo->ndev->name, __func__, __LINE__);
@@ -803,7 +810,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
* NOTE: when the device can't queue the pkt, just the trans_start variable will
* not be updated.
*/
-static int
+static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vxge_fifo *fifo = NULL;
@@ -817,7 +824,6 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
u64 dma_pointer;
struct vxge_tx_priv *txdl_priv = NULL;
struct __vxge_hw_fifo *fifo_hw;
- u32 max_mss = 0x0;
int offload_type;
unsigned long flags = 0;
int vpath_no = 0;
@@ -894,6 +900,12 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
goto _exit2;
}
+ /* Last TXD? Stop tx queue to avoid dropping packets. TX
+ * completion will resume the queue.
+ */
+ if (avail == 1)
+ vxge_stop_tx_queue(fifo);
+
status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
if (unlikely(status != VXGE_HW_OK)) {
vxge_debug_tx(VXGE_ERR,
@@ -969,10 +981,6 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
int mss = vxge_tcp_mss(skb);
if (mss) {
- max_mss = dev->mtu + ETH_HLEN -
- VXGE_HW_TCPIP_HEADER_MAX_SIZE;
- if (mss > max_mss)
- mss = max_mss;
vxge_debug_tx(VXGE_TRACE,
"%s: %s:%d mss = %d",
dev->name, __func__, __LINE__, mss);
@@ -1000,7 +1008,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
VXGE_COMPLETE_VPATH_TX(fifo);
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
dev->name, __func__, __LINE__);
- return 0;
+ return NETDEV_TX_OK;
_exit0:
vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
@@ -1024,7 +1032,7 @@ _exit2:
spin_unlock_irqrestore(&fifo->tx_lock, flags);
VXGE_COMPLETE_VPATH_TX(fifo);
- return 0;
+ return NETDEV_TX_OK;
}
/*
@@ -1049,6 +1057,7 @@ vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
rx_priv->data_size, PCI_DMA_FROMDEVICE);
dev_kfree_skb(rx_priv->skb);
+ rx_priv->skb_data = NULL;
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d Exiting...",
@@ -2137,16 +2146,16 @@ int vxge_open_vpaths(struct vxgedev *vdev)
*/
static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
{
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)dev_id;
- struct vxgedev *vdev;
struct net_device *dev;
+ struct __vxge_hw_device *hldev;
u64 reason;
enum vxge_hw_status status;
+ struct vxgedev *vdev = (struct vxgedev *)dev_id;
vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- dev = hldev->ndev;
- vdev = netdev_priv(dev);
+ dev = vdev->ndev;
+ hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
if (pci_channel_offline(vdev->pdev))
return IRQ_NONE;
@@ -2417,15 +2426,13 @@ static void vxge_rem_isr(struct vxgedev *vdev)
#endif
if (vdev->config.intr_type == INTA) {
synchronize_irq(vdev->pdev->irq);
- free_irq(vdev->pdev->irq, hldev);
+ free_irq(vdev->pdev->irq, vdev);
}
}
static int vxge_add_isr(struct vxgedev *vdev)
{
int ret = 0;
- struct __vxge_hw_device *hldev =
- (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
#ifdef CONFIG_PCI_MSI
int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
u64 function_mode = vdev->config.device_hw_info.function_mode;
@@ -2579,7 +2586,7 @@ INTA_MODE:
if (vdev->config.intr_type == INTA) {
ret = request_irq((int) vdev->pdev->irq,
vxge_isr_napi,
- IRQF_SHARED, vdev->desc[0], hldev);
+ IRQF_SHARED, vdev->desc[0], vdev);
if (ret) {
vxge_debug_init(VXGE_ERR,
"%s %s-%d: ISR registration failed",
@@ -2712,11 +2719,15 @@ vxge_open(struct net_device *dev)
netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
vdev->config.napi_weight);
napi_enable(&vdev->napi);
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ vdev->vpaths[i].ring.napi_p = &vdev->napi;
} else {
for (i = 0; i < vdev->no_of_vpath; i++) {
netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
vxge_poll_msix, vdev->config.napi_weight);
napi_enable(&vdev->vpaths[i].ring.napi);
+ vdev->vpaths[i].ring.napi_p =
+ &vdev->vpaths[i].ring.napi;
}
}
@@ -2890,6 +2901,9 @@ int do_vxge_close(struct net_device *dev, int do_io)
vdev = (struct vxgedev *)netdev_priv(dev);
hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ if (unlikely(!is_vxge_card_up(vdev)))
+ return 0;
+
/* If vxge_handle_crit_err task is executing,
* wait till it completes. */
while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
@@ -3954,6 +3968,9 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev)) {
/* Bring down the card, while avoiding PCI I/O */
do_vxge_close(netdev, 0);
@@ -4152,18 +4169,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
attr.bar0,
(unsigned long long)pci_resource_start(pdev, 0));
- attr.bar1 = pci_ioremap_bar(pdev, 2);
- if (!attr.bar1) {
- vxge_debug_init(VXGE_ERR,
- "%s : cannot remap io memory bar2", __func__);
- ret = -ENODEV;
- goto _exit3;
- }
- vxge_debug_ll_config(VXGE_TRACE,
- "pci ioremap bar1: %p:0x%llx",
- attr.bar1,
- (unsigned long long)pci_resource_start(pdev, 2));
-
status = vxge_hw_device_hw_info_get(attr.bar0,
&ll_config.device_hw_info);
if (status != VXGE_HW_OK) {
@@ -4171,17 +4176,17 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
"%s: Reading of hardware info failed."
"Please try upgrading the firmware.", VXGE_DRIVER_NAME);
ret = -EINVAL;
- goto _exit4;
+ goto _exit3;
}
if (ll_config.device_hw_info.fw_version.major !=
- VXGE_DRIVER_VERSION_MAJOR) {
+ VXGE_DRIVER_FW_VERSION_MAJOR) {
vxge_debug_init(VXGE_ERR,
- "FW Ver.(maj): %d not driver's expected version: %d",
- ll_config.device_hw_info.fw_version.major,
- VXGE_DRIVER_VERSION_MAJOR);
+ "%s: Incorrect firmware version."
+ "Please upgrade the firmware to version 1.x.x",
+ VXGE_DRIVER_NAME);
ret = -EINVAL;
- goto _exit4;
+ goto _exit3;
}
vpath_mask = ll_config.device_hw_info.vpath_mask;
@@ -4189,7 +4194,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vxge_debug_ll_config(VXGE_TRACE,
"%s: No vpaths available in device", VXGE_DRIVER_NAME);
ret = -EINVAL;
- goto _exit4;
+ goto _exit3;
}
vxge_debug_ll_config(VXGE_TRACE,
@@ -4222,7 +4227,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vxge_debug_ll_config(VXGE_ERR,
"%s: No more vpaths to configure", VXGE_DRIVER_NAME);
ret = 0;
- goto _exit4;
+ goto _exit3;
}
/* Setting driver callbacks */
@@ -4235,7 +4240,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vxge_debug_init(VXGE_ERR,
"Failed to initialize device (%d)", status);
ret = -EINVAL;
- goto _exit4;
+ goto _exit3;
}
vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
@@ -4260,7 +4265,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath,
&vdev)) {
ret = -EINVAL;
- goto _exit5;
+ goto _exit4;
}
vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
@@ -4271,7 +4276,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
hldev->ndev = vdev->ndev;
vdev->mtu = VXGE_HW_DEFAULT_MTU;
vdev->bar0 = attr.bar0;
- vdev->bar1 = attr.bar1;
vdev->max_vpath_supported = max_vpath_supported;
vdev->no_of_vpath = no_of_vpath;
@@ -4336,6 +4340,27 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
ll_config.device_hw_info.fw_version.version,
ll_config.device_hw_info.fw_date.date);
+ if (new_device) {
+ switch (ll_config.device_hw_info.function_mode) {
+ case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Single Function Mode Enabled", vdev->ndev->name);
+ break;
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Multi Function Mode Enabled", vdev->ndev->name);
+ break;
+ case VXGE_HW_FUNCTION_MODE_SRIOV:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
+ break;
+ case VXGE_HW_FUNCTION_MODE_MRIOV:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
+ break;
+ }
+ }
+
vxge_print_parm(vdev, vpath_mask);
/* Store the fw version for ethtool option */
@@ -4353,7 +4378,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
"%s: mac_addr_list : memory allocation failed",
vdev->ndev->name);
ret = -EPERM;
- goto _exit6;
+ goto _exit5;
}
macaddr = (u8 *)&entry->macaddr;
memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4361,6 +4386,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vdev->vpaths[i].mac_addr_cnt = 1;
}
+ kfree(device_config);
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
vdev->ndev->name, __func__, __LINE__);
@@ -4370,16 +4396,14 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
return 0;
-_exit6:
+_exit5:
for (i = 0; i < vdev->no_of_vpath; i++)
vxge_free_mac_add_list(&vdev->vpaths[i]);
vxge_device_unregister(hldev);
-_exit5:
+_exit4:
pci_disable_sriov(pdev);
vxge_hw_device_terminate(hldev);
-_exit4:
- iounmap(attr.bar1);
_exit3:
iounmap(attr.bar0);
_exit2:
@@ -4438,7 +4462,6 @@ vxge_remove(struct pci_dev *pdev)
kfree(vdev->vpaths);
iounmap(vdev->bar0);
- iounmap(vdev->bar1);
pci_disable_sriov(pdev);
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 9704b2bd4320..9c36b3a9a63d 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -21,7 +21,7 @@
#define VXGE_DRIVER_NAME "vxge"
#define VXGE_DRIVER_VENDOR "Neterion, Inc"
-#define VXGE_DRIVER_VERSION_MAJOR 0
+#define VXGE_DRIVER_FW_VERSION_MAJOR 1
#define DRV_VERSION VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\
VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\
@@ -260,6 +260,7 @@ struct vxge_ring {
int gro_enable;
struct napi_struct napi;
+ struct napi_struct *napi_p;
#define VXGE_MAX_MAC_ADDR_COUNT 30
@@ -363,7 +364,6 @@ struct vxgedev {
struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS];
void __iomem *bar0;
- void __iomem *bar1;
struct vxge_sw_stats stats;
int mtu;
/* Below variables are used for vpath selection to transmit a packet */
@@ -378,6 +378,7 @@ struct vxgedev {
struct vxge_rx_priv {
struct sk_buff *skb;
+ unsigned char *skb_data;
dma_addr_t data_dma;
dma_addr_t data_size;
};
@@ -428,7 +429,8 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
- enum vxge_hw_fifo_tcode t_code, void *userdata, void **skb_ptr);
+ enum vxge_hw_fifo_tcode t_code, void *userdata,
+ struct sk_buff ***skb_ptr, int nr_skbs, int *more);
int vxge_close(struct net_device *dev);
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 10f4da32929f..9a3b823e08d4 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -1784,7 +1784,7 @@ struct vxge_hw_mrpcim_reg {
#define VXGE_HW_XMAC_GEN_ERR_REG_XMACJ_XMAC_FSM_ERR vxge_mBIT(63)
/*0x01e18*/ u64 xmac_gen_err_mask;
/*0x01e20*/ u64 xmac_gen_err_alarm;
-/*0x01e28*/ u64 xmac_link_err_port_reg[2];
+/*0x01e28*/ u64 xmac_link_err_port0_reg;
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_DOWN vxge_mBIT(3)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_UP vxge_mBIT(7)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_DOWN vxge_mBIT(11)
@@ -1798,8 +1798,11 @@ struct vxge_hw_mrpcim_reg {
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_LASI_INV vxge_mBIT(39)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMDIO_MDIO_MGR_ACCESS_COMPLETE \
vxge_mBIT(47)
-/*0x01e30*/ u64 xmac_link_err_port_mask[2];
-/*0x01e38*/ u64 xmac_link_err_port_alarm[2];
+/*0x01e30*/ u64 xmac_link_err_port0_mask;
+/*0x01e38*/ u64 xmac_link_err_port0_alarm;
+/*0x01e40*/ u64 xmac_link_err_port1_reg;
+/*0x01e48*/ u64 xmac_link_err_port1_mask;
+/*0x01e50*/ u64 xmac_link_err_port1_alarm;
/*0x01e58*/ u64 xgxs_gen_err_reg;
#define VXGE_HW_XGXS_GEN_ERR_REG_XGXS_XGXS_FSM_ERR vxge_mBIT(63)
/*0x01e60*/ u64 xgxs_gen_err_mask;
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 370f55cbbad7..fe3ae518c69c 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -731,6 +731,7 @@ vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
vxge_assert(channel->compl_index < channel->length);
*dtrh = channel->work_arr[channel->compl_index];
+ prefetch(*dtrh);
}
/*
@@ -1070,11 +1071,11 @@ static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
&fifo->nofl_db->control_0);
- wmb();
+ mmiowb();
writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
- wmb();
+ mmiowb();
}
/**
@@ -2508,7 +2509,8 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
* See also: vxge_hw_vpath_poll_tx().
*/
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
- void **skb_ptr)
+ struct sk_buff ***skb_ptr, int nr_skb,
+ int *more)
{
enum vxge_hw_fifo_tcode t_code;
void *first_txdlh;
@@ -2520,8 +2522,8 @@ enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
status = vxge_hw_fifo_txdl_next_completed(fifo,
&first_txdlh, &t_code);
if (status == VXGE_HW_OK)
- if (fifo->callback(fifo, first_txdlh,
- t_code, channel->userdata, skb_ptr) != VXGE_HW_OK)
+ if (fifo->callback(fifo, first_txdlh, t_code,
+ channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
status = VXGE_HW_COMPLETIONS_REMAIN;
return status;
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 7567a1140d07..461742b4442b 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -35,8 +35,6 @@
VXGE_HW_HEADER_VLAN_SIZE + \
VXGE_HW_HEADER_SNAP_SIZE)
-#define VXGE_HW_TCPIP_HEADER_MAX_SIZE (64 + 64)
-
/* 32bit alignments */
#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 2
#define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2
@@ -2328,7 +2326,7 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(
enum vxge_hw_status vxge_hw_vpath_poll_tx(
struct __vxge_hw_fifo *fifoh,
- void **skb_ptr);
+ struct sk_buff ***skb_ptr, int nr_skb, int *more);
enum vxge_hw_status vxge_hw_vpath_alarm_process(
struct __vxge_hw_vpath_handle *vpath_handle,
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 82786ffb7dd9..8fbce7552035 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
#define VXGE_VERSION_MAJOR "2"
#define VXGE_VERSION_MINOR "0"
-#define VXGE_VERSION_FIX "4"
-#define VXGE_VERSION_BUILD "17795"
+#define VXGE_VERSION_FIX "5"
+#define VXGE_VERSION_BUILD "18053"
#define VXGE_VERSION_FOR "k"
#endif
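
Note on the new TX-completion interface: the central change in this patch replaces the old skb->cb chaining in vxge_xmit_compl()/vxge_hw_vpath_poll_tx() with a caller-provided array that is filled with up to nr_skb completed buffers per call, plus a *more flag telling the caller to poll again. The following is a minimal, standalone sketch of that pattern in plain C, outside the kernel; poll_tx, fake_skb and BATCH_SIZE are illustrative stand-ins, not the driver's real symbols, and the real driver frees the buffers with dev_kfree_skb_irq() under fifo->tx_lock.

/* Standalone sketch of the batched-completion pattern (not kernel code). */
#include <stdio.h>

#define BATCH_SIZE 4              /* stands in for NR_SKB_COMPLETED (128) */

struct fake_skb { int id; };

/* Pretend queue of completed TX descriptors. */
static struct fake_skb pending[10];
static int next_done, total_done = 10;

/* Mirrors the new callback contract: fill the caller-provided array via
 * the write cursor *done, honour the nr limit, and flag remaining work. */
static void poll_tx(struct fake_skb ***done, int nr, int *more)
{
	while (next_done < total_done) {
		*(*done)++ = &pending[next_done++];
		if (--nr <= 0) {
			*more = (next_done < total_done);
			return;
		}
	}
	*more = 0;
}

int main(void)
{
	struct fake_skb *completed[BATCH_SIZE], **tail;
	int more, i;

	for (i = 0; i < total_done; i++)
		pending[i].id = i;

	do {                      /* same shape as VXGE_COMPLETE_VPATH_TX() */
		struct fake_skb **fill = completed;

		more = 0;
		poll_tx(&fill, BATCH_SIZE, &more);

		/* Free everything handed back in this batch; the driver
		 * calls dev_kfree_skb_irq() here. */
		for (tail = completed; tail != fill; tail++)
			printf("freeing skb %d\n", (*tail)->id);
	} while (more);

	return 0;
}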