Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cache.c                       7
-rw-r--r--  drivers/infiniband/core/cm.c                          4
-rw-r--r--  drivers/infiniband/core/multicast.c                   2
-rw-r--r--  drivers/infiniband/core/uverbs_ioctl_merge.c         21
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c                 55
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c             94
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h              3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c           61
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h            3
-rw-r--r--  drivers/infiniband/hw/hfi1/pcie.c                    24
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c                     2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c           4
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_hsi_rdma.h          139
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c                    4
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c                     4
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c     21
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h   2
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c     2
-rw-r--r--  drivers/infiniband/ulp/srpt/Kconfig                   2
19 files changed, 263 insertions, 191 deletions
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 2bdfc4b4a15c..71a34bee453d 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -517,7 +517,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
return -EINVAL;
if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
- return -EAGAIN;
+ return -EINVAL;
memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
if (attr) {
@@ -1153,8 +1153,9 @@ static void ib_cache_update(struct ib_device *device,
goto err;
}
- pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
- sizeof *pkey_cache->table, GFP_KERNEL);
+ pkey_cache = kmalloc(struct_size(pkey_cache, table,
+ tprops->pkey_tbl_len),
+ GFP_KERNEL);
if (!pkey_cache)
goto err;
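
Several files in this series (cache.c, cm.c, multicast.c, uverbs_ioctl_merge.c, plus mthca and rdmavt below) replace open-coded "sizeof(*p) + n * sizeof(*p->member)" allocations with struct_size() from <linux/overflow.h>. A minimal sketch of the pattern, using a hypothetical structure rather than one from this series; struct_size() saturates to SIZE_MAX if the multiplication or addition overflows, so kmalloc() fails cleanly instead of returning an undersized buffer:

#include <linux/overflow.h>
#include <linux/slab.h>

struct item_cache {            /* hypothetical example structure */
	size_t len;
	u16    table[];        /* flexible array member */
};

static struct item_cache *item_cache_alloc(size_t n)
{
	struct item_cache *c;

	/* Equivalent to kmalloc(sizeof(*c) + n * sizeof(c->table[0]), ...),
	 * but saturating on overflow instead of wrapping around.
	 */
	c = kmalloc(struct_size(c, table, n), GFP_KERNEL);
	if (c)
		c->len = n;
	return c;
}
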
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 724f123c037f..27a7b0a2e27a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -4338,8 +4338,8 @@ static void cm_add_one(struct ib_device *ib_device)
int count = 0;
u8 i;
- cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
- ib_device->phys_port_cnt, GFP_KERNEL);
+ cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
+ GFP_KERNEL);
if (!cm_dev)
return;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 4eb72ff539fc..6c48f4193dda 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -813,7 +813,7 @@ static void mcast_add_one(struct ib_device *device)
int i;
int count = 0;
- dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
+ dev = kmalloc(struct_size(dev, port, device->phys_port_cnt),
GFP_KERNEL);
if (!dev)
return;
diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c
index 0f88a1919d51..6ceb672c4d46 100644
--- a/drivers/infiniband/core/uverbs_ioctl_merge.c
+++ b/drivers/infiniband/core/uverbs_ioctl_merge.c
@@ -297,8 +297,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
if (max_attr_buckets >= 0)
num_attr_buckets = max_attr_buckets + 1;
- method = kzalloc(sizeof(*method) +
- num_attr_buckets * sizeof(*method->attr_buckets),
+ method = kzalloc(struct_size(method, attr_buckets, num_attr_buckets),
GFP_KERNEL);
if (!method)
return ERR_PTR(-ENOMEM);
@@ -446,9 +445,9 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
if (max_method_buckets >= 0)
num_method_buckets = max_method_buckets + 1;
- object = kzalloc(sizeof(*object) +
- num_method_buckets *
- sizeof(*object->method_buckets), GFP_KERNEL);
+ object = kzalloc(struct_size(object, method_buckets,
+ num_method_buckets),
+ GFP_KERNEL);
if (!object)
return ERR_PTR(-ENOMEM);
@@ -469,8 +468,8 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
if (methods_max_bucket < 0)
continue;
- hash = kzalloc(sizeof(*hash) +
- sizeof(*hash->methods) * (methods_max_bucket + 1),
+ hash = kzalloc(struct_size(hash, methods,
+ methods_max_bucket + 1),
GFP_KERNEL);
if (!hash) {
res = -ENOMEM;
@@ -579,8 +578,8 @@ struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
if (max_object_buckets >= 0)
num_objects_buckets = max_object_buckets + 1;
- root_spec = kzalloc(sizeof(*root_spec) +
- num_objects_buckets * sizeof(*root_spec->object_buckets),
+ root_spec = kzalloc(struct_size(root_spec, object_buckets,
+ num_objects_buckets),
GFP_KERNEL);
if (!root_spec)
return ERR_PTR(-ENOMEM);
@@ -603,8 +602,8 @@ struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
if (objects_max_bucket < 0)
continue;
- hash = kzalloc(sizeof(*hash) +
- sizeof(*hash->objects) * (objects_max_bucket + 1),
+ hash = kzalloc(struct_size(hash, objects,
+ objects_max_bucket + 1),
GFP_KERNEL);
if (!hash) {
res = -ENOMEM;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index f6c739ec8b62..20b9f31052bf 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -185,12 +185,65 @@ static void bnxt_re_shutdown(void *p)
bnxt_re_ib_unreg(rdev, false);
}
+static void bnxt_re_stop_irq(void *handle)
+{
+ struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+ struct bnxt_qplib_nq *nq;
+ int indx;
+
+ for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
+ nq = &rdev->nq[indx - 1];
+ bnxt_qplib_nq_stop_irq(nq, false);
+ }
+
+ bnxt_qplib_rcfw_stop_irq(rcfw, false);
+}
+
+static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+{
+ struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+ struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+ struct bnxt_qplib_nq *nq;
+ int indx, rc;
+
+ if (!ent) {
+ /* Not setting the f/w timeout bit in rcfw.
+ * During driver unload, the first command to the f/w
+ * will time out and that will set the timeout bit.
+ */
+ dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
+ return;
+ }
+
+ /* Vectors may change after restart, so update with new vectors
+ * in the device structure.
+ */
+ for (indx = 0; indx < rdev->num_msix; indx++)
+ rdev->msix_entries[indx].vector = ent[indx].vector;
+
+ bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
+ false);
+ for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
+ nq = &rdev->nq[indx - 1];
+ rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
+ msix_ent[indx].vector, false);
+ if (rc)
+ dev_warn(rdev_to_dev(rdev),
+ "Failed to reinit NQ index %d\n", indx - 1);
+ }
+}
+
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
.ulp_async_notifier = NULL,
.ulp_stop = bnxt_re_stop,
.ulp_start = bnxt_re_start,
.ulp_sriov_config = bnxt_re_sriov_config,
- .ulp_shutdown = bnxt_re_shutdown
+ .ulp_shutdown = bnxt_re_shutdown,
+ .ulp_irq_stop = bnxt_re_stop_irq,
+ .ulp_irq_restart = bnxt_re_start_irq
};
/* RoCE -> Net driver */
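
The two new hooks give the bnxt_en L2 driver a way to quiesce RoCE interrupt handling before it reshuffles MSI-X vectors, then hand back the updated vector table. A hedged sketch of the expected call order on the L2 side (the helper name is hypothetical; only the ops fields come from this patch):

static void example_rebalance_msix(struct bnxt_ulp_ops *ops, void *handle,
				   struct bnxt_msix_entry *new_ent)
{
	ops->ulp_irq_stop(handle);             /* RoCE frees its IRQs */
	/* ... L2 driver re-allocates / re-routes MSI-X vectors ... */
	ops->ulp_irq_restart(handle, new_ent); /* re-request with the
						* new vector assignments */
}
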
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 3a78faba8d91..50d8f1fc98d5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -336,22 +336,32 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
+{
+ tasklet_disable(&nq->worker);
+ /* Mask h/w interrupt */
+ NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+ /* Sync with last running IRQ handler */
+ synchronize_irq(nq->vector);
+ if (kill)
+ tasklet_kill(&nq->worker);
+ if (nq->requested) {
+ irq_set_affinity_hint(nq->vector, NULL);
+ free_irq(nq->vector, nq);
+ nq->requested = false;
+ }
+}
+
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
if (nq->cqn_wq) {
destroy_workqueue(nq->cqn_wq);
nq->cqn_wq = NULL;
}
+
/* Make sure the HW is stopped! */
- synchronize_irq(nq->vector);
- tasklet_disable(&nq->worker);
- tasklet_kill(&nq->worker);
+ bnxt_qplib_nq_stop_irq(nq, true);
- if (nq->requested) {
- irq_set_affinity_hint(nq->vector, NULL);
- free_irq(nq->vector, nq);
- nq->requested = false;
- }
if (nq->bar_reg_iomem)
iounmap(nq->bar_reg_iomem);
nq->bar_reg_iomem = NULL;
@@ -361,6 +371,40 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
nq->vector = 0;
}
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ int msix_vector, bool need_init)
+{
+ int rc;
+
+ if (nq->requested)
+ return -EFAULT;
+
+ nq->vector = msix_vector;
+ if (need_init)
+ tasklet_init(&nq->worker, bnxt_qplib_service_nq,
+ (unsigned long)nq);
+ else
+ tasklet_enable(&nq->worker);
+
+ snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
+ rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
+ if (rc)
+ return rc;
+
+ cpumask_clear(&nq->mask);
+ cpumask_set_cpu(nq_indx, &nq->mask);
+ rc = irq_set_affinity_hint(nq->vector, &nq->mask);
+ if (rc) {
+ dev_warn(&nq->pdev->dev,
+ "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+ nq->vector, nq_indx);
+ }
+ nq->requested = true;
+ NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+
+ return rc;
+}
+
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int nq_idx, int msix_vector, int bar_reg_offset,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
@@ -372,41 +416,17 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
resource_size_t nq_base;
int rc = -1;
- nq->pdev = pdev;
- nq->vector = msix_vector;
if (cqn_handler)
nq->cqn_handler = cqn_handler;
if (srqn_handler)
nq->srqn_handler = srqn_handler;
- tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
-
/* Have a task to schedule CQ notifiers in post send case */
nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
if (!nq->cqn_wq)
- goto fail;
-
- nq->requested = false;
- memset(nq->name, 0, 32);
- sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
- rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
- if (rc) {
- dev_err(&nq->pdev->dev,
- "Failed to request IRQ for NQ: %#x", rc);
- goto fail;
- }
-
- cpumask_clear(&nq->mask);
- cpumask_set_cpu(nq_idx, &nq->mask);
- rc = irq_set_affinity_hint(nq->vector, &nq->mask);
- if (rc) {
- dev_warn(&nq->pdev->dev,
- "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
- nq->vector, nq_idx);
- }
+ return -ENOMEM;
- nq->requested = true;
nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
nq->bar_reg_off = bar_reg_offset;
nq_base = pci_resource_start(pdev, nq->bar_reg);
@@ -419,7 +439,13 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
rc = -ENOMEM;
goto fail;
}
- NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+
+ rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
+ if (rc) {
+ dev_err(&nq->pdev->dev,
+ "QPLIB: Failed to request irq for nq-idx %d", nq_idx);
+ goto fail;
+ }
return 0;
fail:
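
The ordering inside bnxt_qplib_nq_stop_irq() is what makes the quiesce safe: the tasklet is disabled first, the doorbell write masks the hardware interrupt, and only then does synchronize_irq() wait out a handler that may already be running; tasklet_kill() is reserved for full teardown so a later tasklet_enable() can re-arm the same tasklet. A generic sketch of the same ordering with a hypothetical device (nothing bnxt_re specific):

#include <linux/interrupt.h>
#include <linux/io.h>

struct example_dev {                     /* hypothetical device state */
	struct tasklet_struct worker;
	void __iomem *irq_mask_reg;
	int  vector;
	bool requested;
};

static void example_stop_irq(struct example_dev *ed, bool kill)
{
	tasklet_disable(&ed->worker);      /* no new tasklet runs       */
	writel(0, ed->irq_mask_reg);       /* mask the h/w interrupt    */
	synchronize_irq(ed->vector);       /* drain a running handler   */
	if (kill)                          /* teardown only; restart    */
		tasklet_kill(&ed->worker); /* paths keep the tasklet    */
	if (ed->requested) {
		free_irq(ed->vector, ed);
		ed->requested = false;
	}
}
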
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ade9f13c0fd1..72352ca80ace 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -467,7 +467,10 @@ struct bnxt_qplib_nq_work {
struct bnxt_qplib_cq *cq;
};
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int nq_idx, int msix_vector, int bar_reg_offset,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 80027a494730..2852d350ada1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -582,19 +582,29 @@ fail:
return -ENOMEM;
}
-void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
- unsigned long indx;
-
- /* Make sure the HW channel is stopped! */
- synchronize_irq(rcfw->vector);
tasklet_disable(&rcfw->worker);
- tasklet_kill(&rcfw->worker);
+ /* Mask h/w interrupts */
+ CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+ rcfw->creq.max_elements);
+ /* Sync with last running IRQ-handler */
+ synchronize_irq(rcfw->vector);
+ if (kill)
+ tasklet_kill(&rcfw->worker);
if (rcfw->requested) {
free_irq(rcfw->vector, rcfw);
rcfw->requested = false;
}
+}
+
+void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+{
+ unsigned long indx;
+
+ bnxt_qplib_rcfw_stop_irq(rcfw, true);
+
if (rcfw->cmdq_bar_reg_iomem)
iounmap(rcfw->cmdq_bar_reg_iomem);
rcfw->cmdq_bar_reg_iomem = NULL;
@@ -614,6 +624,31 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
rcfw->vector = 0;
}
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ bool need_init)
+{
+ int rc;
+
+ if (rcfw->requested)
+ return -EFAULT;
+
+ rcfw->vector = msix_vector;
+ if (need_init)
+ tasklet_init(&rcfw->worker,
+ bnxt_qplib_service_creq, (unsigned long)rcfw);
+ else
+ tasklet_enable(&rcfw->worker);
+ rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
+ "bnxt_qplib_creq", rcfw);
+ if (rc)
+ return rc;
+ rcfw->requested = true;
+ CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+ rcfw->creq.max_elements);
+
+ return 0;
+}
+
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
@@ -675,27 +710,17 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
rcfw->creq_qp_event_processed = 0;
rcfw->creq_func_event_processed = 0;
- rcfw->vector = msix_vector;
if (aeq_handler)
rcfw->aeq_handler = aeq_handler;
+ init_waitqueue_head(&rcfw->waitq);
- tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
- (unsigned long)rcfw);
-
- rcfw->requested = false;
- rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
- "bnxt_qplib_creq", rcfw);
+ rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
if (rc) {
dev_err(&rcfw->pdev->dev,
"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
bnxt_qplib_disable_rcfw_channel(rcfw);
return rc;
}
- rcfw->requested = true;
-
- init_waitqueue_head(&rcfw->waitq);
-
- CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
init.cmdq_size_cmdq_lvl = cpu_to_le16(
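
Both start_irq helpers above share the need_init convention: tasklet_init() only on first-time setup, tasklet_enable() on a restart so it balances the tasklet_disable() done in the stop path (disable/enable nest as a count). A hedged sketch reusing the hypothetical example_dev from earlier, with made-up handler names:

static void example_work(unsigned long data)
{
	/* ... process completions, then re-arm the doorbell ... */
}

static irqreturn_t example_irq(int irq, void *dev)
{
	struct example_dev *ed = dev;

	tasklet_schedule(&ed->worker);
	return IRQ_HANDLED;
}

static int example_start_irq(struct example_dev *ed, int vector,
			     bool need_init)
{
	int rc;

	if (ed->requested)
		return -EFAULT;                /* already armed */

	ed->vector = vector;
	if (need_init)                         /* first-time setup */
		tasklet_init(&ed->worker, example_work, (unsigned long)ed);
	else                                   /* balance tasklet_disable() */
		tasklet_enable(&ed->worker);

	rc = request_irq(ed->vector, example_irq, 0, "example", ed);
	if (rc)
		return rc;
	ed->requested = true;
	return 0;
}
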
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index c7cce2e4185e..46416dfe8830 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -195,7 +195,10 @@ struct bnxt_qplib_rcfw {
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ bool need_init);
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 87bd6b60cb53..4d4371bf2c7c 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -56,11 +56,6 @@
#include "chip_registers.h"
#include "aspm.h"
-/* link speed vector for Gen3 speed - not in Linux headers */
-#define GEN1_SPEED_VECTOR 0x1
-#define GEN2_SPEED_VECTOR 0x2
-#define GEN3_SPEED_VECTOR 0x3
-
/*
* This file contains PCIe utility routines.
*/
@@ -270,7 +265,7 @@ static u32 extract_speed(u16 linkstat)
case PCI_EXP_LNKSTA_CLS_5_0GB:
speed = 5000; /* Gen 2, 5GHz */
break;
- case GEN3_SPEED_VECTOR:
+ case PCI_EXP_LNKSTA_CLS_8_0GB:
speed = 8000; /* Gen 3, 8GHz */
break;
}
@@ -325,7 +320,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
return ret;
}
- if ((linkcap & PCI_EXP_LNKCAP_SLS) != GEN3_SPEED_VECTOR) {
+ if ((linkcap & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_8_0GB) {
dd_dev_info(dd,
"This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
linkcap & PCI_EXP_LNKCAP_SLS);
@@ -702,9 +697,6 @@ const struct pci_error_handlers hfi1_pci_err_handler = {
/* gasket block secondary bus reset delay */
#define SBR_DELAY_US 200000 /* 200ms */
-/* mask for PCIe capability register lnkctl2 target link speed */
-#define LNKCTL2_TARGET_LINK_SPEED_MASK 0xf
-
static uint pcie_target = 3;
module_param(pcie_target, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_target, "PCIe target speed (0 skip, 1-3 Gen1-3)");
@@ -1053,13 +1045,13 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
return 0;
if (pcie_target == 1) { /* target Gen1 */
- target_vector = GEN1_SPEED_VECTOR;
+ target_vector = PCI_EXP_LNKCTL2_TLS_2_5GT;
target_speed = 2500;
} else if (pcie_target == 2) { /* target Gen2 */
- target_vector = GEN2_SPEED_VECTOR;
+ target_vector = PCI_EXP_LNKCTL2_TLS_5_0GT;
target_speed = 5000;
} else if (pcie_target == 3) { /* target Gen3 */
- target_vector = GEN3_SPEED_VECTOR;
+ target_vector = PCI_EXP_LNKCTL2_TLS_8_0GT;
target_speed = 8000;
} else {
/* off or invalid target - skip */
@@ -1298,8 +1290,8 @@ retry:
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
(u32)lnkctl2);
/* only write to parent if target is not as high as ours */
- if ((lnkctl2 & LNKCTL2_TARGET_LINK_SPEED_MASK) < target_vector) {
- lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
+ if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) < target_vector) {
+ lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
(u32)lnkctl2);
@@ -1324,7 +1316,7 @@ retry:
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
(u32)lnkctl2);
- lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
+ lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
(u32)lnkctl2);
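
With the private GEN*_SPEED_VECTOR and LNKCTL2_TARGET_LINK_SPEED_MASK constants gone, the code relies entirely on the PCI_EXP_LNKCTL2_TLS* definitions from <uapi/linux/pci_regs.h>. A minimal sketch of the same register update through the generic capability accessors (hypothetical helper, not code from this patch):

#include <linux/pci.h>

static int example_set_target_gen3(struct pci_dev *pdev)
{
	u16 lnkctl2;
	int ret;

	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &lnkctl2);
	if (ret)
		return ret;

	lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;        /* clear target speed */
	lnkctl2 |= PCI_EXP_LNKCTL2_TLS_8_0GT;   /* request 8 GT/s     */
	return pcie_capability_write_word(pdev, PCI_EXP_LNKCTL2, lnkctl2);
}
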
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3544150f3469..e52dd21519b4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -5054,7 +5054,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- return mlx5_get_vector_affinity(dev->mdev, comp_vector);
+ return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
}
/* The mlx5_ib_multiport_mutex should be held when calling this function */
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 2fe503e86c1d..7a31be3c3e73 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -367,7 +367,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
- table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
+ table = kmalloc(struct_size(table, icm, num_icm), GFP_KERNEL);
if (!table)
return NULL;
@@ -529,7 +529,7 @@ struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
return NULL;
npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
- db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
+ db_tab = kmalloc(struct_size(db_tab, page, npages), GFP_KERNEL);
if (!db_tab)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index b816c80df50b..7e1f7021396a 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -116,6 +116,7 @@ enum rdma_cqe_requester_status_enum {
RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
RDMA_CQE_REQ_STS_XRC_VOILATION_ERR,
+ RDMA_CQE_REQ_STS_SIG_ERR,
MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
};
@@ -152,12 +153,12 @@ struct rdma_rq_sge {
struct regpair addr;
__le32 length;
__le32 flags;
-#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF
-#define RDMA_RQ_SGE_L_KEY_SHIFT 0
+#define RDMA_RQ_SGE_L_KEY_LO_MASK 0x3FFFFFF
+#define RDMA_RQ_SGE_L_KEY_LO_SHIFT 0
#define RDMA_RQ_SGE_NUM_SGES_MASK 0x7
#define RDMA_RQ_SGE_NUM_SGES_SHIFT 26
-#define RDMA_RQ_SGE_RESERVED0_MASK 0x7
-#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
+#define RDMA_RQ_SGE_L_KEY_HI_MASK 0x7
+#define RDMA_RQ_SGE_L_KEY_HI_SHIFT 29
};
struct rdma_srq_sge {
@@ -241,18 +242,39 @@ enum rdma_dif_io_direction_flg {
MAX_RDMA_DIF_IO_DIRECTION_FLG
};
-/* RDMA DIF Runt Result Structure */
-struct rdma_dif_runt_result {
- __le16 guard_tag;
- __le16 reserved[3];
+struct rdma_dif_params {
+ __le32 base_ref_tag;
+ __le16 app_tag;
+ __le16 app_tag_mask;
+ __le16 runt_crc_value;
+ __le16 flags;
+#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_DIF_PARAMS_BLOCK_SIZE_MASK 0x1
+#define RDMA_DIF_PARAMS_BLOCK_SIZE_SHIFT 1
+#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_DIF_PARAMS_CRC_SEED_MASK 0x1
+#define RDMA_DIF_PARAMS_CRC_SEED_SHIFT 6
+#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_MASK 0x1
+#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_SHIFT 7
+#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_MASK 0x1
+#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_SHIFT 8
+#define RDMA_DIF_PARAMS_APP_ESCAPE_MASK 0x1
+#define RDMA_DIF_PARAMS_APP_ESCAPE_SHIFT 9
+#define RDMA_DIF_PARAMS_REF_ESCAPE_MASK 0x1
+#define RDMA_DIF_PARAMS_REF_ESCAPE_SHIFT 10
+#define RDMA_DIF_PARAMS_RESERVED4_MASK 0x1F
+#define RDMA_DIF_PARAMS_RESERVED4_SHIFT 11
+ __le32 reserved5;
};
-/* Memory window type enumeration */
-enum rdma_mw_type {
- RDMA_MW_TYPE_1,
- RDMA_MW_TYPE_2A,
- MAX_RDMA_MW_TYPE
-};
struct rdma_sq_atomic_wqe {
__le32 reserved1;
@@ -334,17 +356,17 @@ struct rdma_sq_bind_wqe {
#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1
#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4
-#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7
-#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5
+#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 6
u8 wqe_size;
u8 prev_wqe_size;
u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1
#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0
-#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1
-#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1
-#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F
-#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2
+#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x7F
+#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 1
u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1
#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0
@@ -363,6 +385,7 @@ struct rdma_sq_bind_wqe {
__le32 length_lo;
__le32 parent_l_key;
__le32 reserved4;
+ struct rdma_dif_params dif_params;
};
/* First element (16 bytes) of bind wqe */
@@ -392,10 +415,8 @@ struct rdma_sq_bind_wqe_2nd {
u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0
-#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1
-#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1
-#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F
-#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x7F
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 1
u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0
@@ -416,6 +437,11 @@ struct rdma_sq_bind_wqe_2nd {
__le32 reserved4;
};
+/* Third element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_3rd {
+ struct rdma_dif_params dif_params;
+};
+
/* Structure with only the SQ WQE common
* fields. Size is of one SQ element (16B)
*/
@@ -486,30 +512,6 @@ struct rdma_sq_fmr_wqe {
u8 length_hi;
__le32 length_lo;
struct regpair pbl_addr;
- __le32 dif_base_ref_tag;
- __le16 dif_app_tag;
- __le16 dif_app_tag_mask;
- __le16 dif_runt_crc_value;
- __le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
-#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7
-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
- __le32 reserved5;
};
/* First element (16 bytes) of fmr wqe */
@@ -566,33 +568,6 @@ struct rdma_sq_fmr_wqe_2nd {
struct regpair pbl_addr;
};
-/* Third element (16 bytes) of fmr wqe */
-struct rdma_sq_fmr_wqe_3rd {
- __le32 dif_base_ref_tag;
- __le16 dif_app_tag;
- __le16 dif_app_tag_mask;
- __le16 dif_runt_crc_value;
- __le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
- __le32 reserved5;
-};
struct rdma_sq_local_inv_wqe {
struct regpair reserved;
@@ -637,8 +612,8 @@ struct rdma_sq_rdma_wqe {
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6
-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7
+#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 7
u8 wqe_size;
u8 prev_wqe_size;
struct regpair remote_va;
@@ -646,13 +621,9 @@ struct rdma_sq_rdma_wqe {
u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0
-#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
-#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2
-#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F
-#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3
- u8 reserved2[3];
+#define RDMA_SQ_RDMA_WQE_RESERVED2_MASK 0x7F
+#define RDMA_SQ_RDMA_WQE_RESERVED2_SHIFT 1
+ u8 reserved3[3];
};
/* First element (16 bytes) of rdma wqe */
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 614a954d0757..710032f1fad7 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3276,7 +3276,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
wr->num_sge);
- SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
+ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
wr->sg_list[i].lkey);
RQ_SGE_SET(rqe, wr->sg_list[i].addr,
@@ -3295,7 +3295,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
/* First one must include the number
* of SGE in the list
*/
- SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
+ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
RQ_SGE_SET(rqe, 0, 0, flags);
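
The rename reflects that the SGE lkey is now split across the flags word: 26 low bits at L_KEY_LO and 3 high bits at L_KEY_HI (the bits that used to be RESERVED0). A hedged sketch of the MASK/SHIFT convention; the macro body paraphrases what SET_FIELD does rather than copying the qed definition, and the HI placement is illustrative since the verbs.c hunks above only populate the LO field:

#define EX_SET_FIELD(value, name, fld)                                       \
	do {                                                                 \
		(value) &= ~((name##_MASK) << (name##_SHIFT));               \
		(value) |= ((u32)(fld) & (name##_MASK)) << (name##_SHIFT);   \
	} while (0)

static u32 example_build_sge_flags(u32 lkey, u8 num_sges)
{
	u32 flags = 0;

	EX_SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, lkey);       /* bits 0-25  */
	EX_SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, num_sges);   /* bits 26-28 */
	EX_SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_HI, lkey >> 26); /* bits 29-31 */
	return flags;
}
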
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index cc429b567d0a..49c9541050d4 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -283,7 +283,7 @@ static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
/* Allocate struct plus pointers to first level page tables. */
m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
- mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
+ mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL);
if (!mr)
goto bail;
@@ -730,7 +730,7 @@ struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
/* Allocate struct plus pointers to first level page tables. */
m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
- fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
+ fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL);
if (!fmr)
goto bail;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
index 4be3aef40bd2..267da8215e08 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -443,17 +443,16 @@ static u8 opa_vnic_get_rc(struct __opa_veswport_info *info,
}
/* opa_vnic_calc_entropy - calculate the packet entropy */
-u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
+u8 opa_vnic_calc_entropy(struct sk_buff *skb)
{
- u16 hash16;
-
- /*
- * Get flow based 16-bit hash and then XOR the upper and lower bytes
- * to get the entropy.
- * __skb_tx_hash limits qcount to 16 bits. Hence, get 15-bit hash.
- */
- hash16 = __skb_tx_hash(adapter->netdev, skb, BIT(15));
- return (u8)((hash16 >> 8) ^ (hash16 & 0xff));
+ u32 hash = skb_get_hash(skb);
+
+ /* store XOR of all bytes in lower 8 bits */
+ hash ^= hash >> 8;
+ hash ^= hash >> 16;
+
+ /* return lower 8 bits as entropy */
+ return (u8)(hash & 0xFF);
}
/* opa_vnic_get_def_port - get default port based on entropy */
@@ -490,7 +489,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
hdr = skb_push(skb, OPA_VNIC_HDR_LEN);
- entropy = opa_vnic_calc_entropy(adapter, skb);
+ entropy = opa_vnic_calc_entropy(skb);
def_port = opa_vnic_get_def_port(adapter, entropy);
len = opa_vnic_wire_length(skb);
dlid = opa_vnic_get_dlid(adapter, skb, def_port);
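
The new entropy calculation replaces a queue-limited __skb_tx_hash() with a plain XOR fold of skb_get_hash(), which needs no netdev argument (hence dropping the adapter parameter) and folds all 32 bits of the flow hash into the entropy byte. The fold as a standalone sketch:

static u8 example_fold_hash(u32 hash)
{
	hash ^= hash >> 8;        /* byte 0 now holds b0^b1, etc.   */
	hash ^= hash >> 16;       /* byte 0 now holds b0^b1^b2^b3   */
	return (u8)(hash & 0xFF);
}
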
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index afd95f432262..43ac61ffef4a 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -299,7 +299,7 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter);
void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
-u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
+u8 opa_vnic_calc_entropy(struct sk_buff *skb);
void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter);
void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter);
void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter,
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index ce57e0f10289..0c8aec62a425 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -104,7 +104,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
/* pass entropy and vl as metadata in skb */
mdata = skb_push(skb, sizeof(*mdata));
- mdata->entropy = opa_vnic_calc_entropy(adapter, skb);
+ mdata->entropy = opa_vnic_calc_entropy(skb);
mdata->vl = opa_vnic_get_vl(adapter, skb);
rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
accel_priv, fallback);
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
index 25bf6955b6d0..fb8b7182f05e 100644
--- a/drivers/infiniband/ulp/srpt/Kconfig
+++ b/drivers/infiniband/ulp/srpt/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_SRPT
tristate "InfiniBand SCSI RDMA Protocol target support"
- depends on INFINIBAND_ADDR_TRANS && TARGET_CORE
+ depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
---help---
Support for the SCSI RDMA Protocol (SRP) Target driver. The