author    Linus Torvalds <torvalds@linux-foundation.org>  2019-03-09 15:53:03 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-03-09 15:53:03 -0800
commit    a50243b1ddcdd766d0d17fbfeeb1a22e62fdc461 (patch)
tree      3dbf847105558eaac3658a46c4934df503c866a2 /drivers/net/ethernet
parent    Merge tag 'pci-v5.1-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci (diff)
parent    net/mlx5: ODP support for XRC transport is not enabled by default in FW (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "This has been a slightly more active cycle than normal with ongoing
  core changes and quite a lot of collected driver updates.

   - Various driver fixes for bnxt_re, cxgb4, hns, mlx5, pvrdma, rxe

   - A new data transfer mode for HFI1 giving higher performance

   - Significant functional and bug fix update to the mlx5
     On-Demand-Paging MR feature

   - A chip hang reset recovery system for hns

   - Change mm->pinned_vm to an atomic64

   - Update bnxt_re to support a new 57500 chip

   - A sane netlink 'rdma link add' method for creating rxe devices
     and fixing the various unregistration race conditions in rxe's
     unregister flow

   - Allow looking up objects by an ID over netlink

   - Various reworking of the core to driver interface:
       - drivers should not assume umem SGLs are in PAGE_SIZE chunks
       - ucontext is accessed via udata not other means
       - start to make the core code responsible for object memory
         allocation
       - drivers should convert struct device to struct ib_device via
         a helper
       - drivers have more tools to avoid use after unregister
         problems"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (280 commits)
  net/mlx5: ODP support for XRC transport is not enabled by default in FW
  IB/hfi1: Close race condition on user context disable and close
  RDMA/umem: Revert broken 'off by one' fix
  RDMA/umem: minor bug fix in error handling path
  RDMA/hns: Use GFP_ATOMIC in hns_roce_v2_modify_qp
  cxgb4: kfree mhp after the debug print
  IB/rdmavt: Fix concurrency panics in QP post_send and modify to error
  IB/rdmavt: Fix loopback send with invalidate ordering
  IB/iser: Fix dma_nents type definition
  IB/mlx5: Set correct write permissions for implicit ODP MR
  bnxt_re: Clean cq for kernel consumers only
  RDMA/uverbs: Don't do double free of allocated PD
  RDMA: Handle ucontext allocations by IB/core
  RDMA/core: Fix a WARN() message
  bnxt_re: fix the regression due to changes in alloc_pbl
  IB/mlx4: Increase the timeout for CM cache
  IB/core: Abort page fault handler silently during owning process exit
  IB/mlx5: Validate correct PD before prefetch MR
  IB/mlx5: Protect against prefetch of invalid MR
  RDMA/uverbs: Store PR pointer before it is overwritten
  ...
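A note on the mm->pinned_vm change called out above: once the field is an
atomic64_t, pinned-page accounting no longer needs mmap_sem just to update
the counter. A minimal sketch of the post-conversion pattern (the helper
name and exact bounds handling are illustrative, not from this merge):

	/*
	 * Sketch, assuming mm->pinned_vm is an atomic64_t as noted in
	 * the pull summary; real call sites vary per driver.
	 */
	#include <linux/atomic.h>
	#include <linux/capability.h>
	#include <linux/mm.h>
	#include <linux/sched/signal.h>

	static int account_pinned_pages(struct mm_struct *mm,
					unsigned long npages)
	{
		unsigned long lock_limit =
			rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		u64 new_pinned;

		/* No mmap_sem is needed just to bump the counter. */
		new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
		if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
			atomic64_sub(npages, &mm->pinned_vm);
			return -ENOMEM;
		}
		return 0;
	}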
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c    3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h      8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h     12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  85
4 files changed, 74 insertions, 34 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index ea45a9b8179e..cf475873ce81 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -43,9 +43,6 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
 	if (ulp_id == BNXT_ROCE_ULP) {
 		unsigned int max_stat_ctxs;
 
-		if (bp->flags & BNXT_FLAG_CHIP_P5)
-			return -EOPNOTSUPP;
-
 		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
 		if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
 		    bp->cp_nr_rings == max_stat_ctxs)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index c62a0c830705..38dd41eb959e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -56,6 +56,7 @@ enum {
 	CPL_TX_DATA_ISO = 0x1F,
 	CPL_CLOSE_LISTSRV_RPL = 0x20,
+	CPL_GET_TCB_RPL = 0x22,
 	CPL_L2T_WRITE_RPL = 0x23,
 	CPL_PASS_OPEN_RPL = 0x24,
 	CPL_ACT_OPEN_RPL = 0x25,
@@ -688,6 +689,13 @@ struct cpl_get_tcb {
 #define NO_REPLY_V(x) ((x) << NO_REPLY_S)
 #define NO_REPLY_F NO_REPLY_V(1U)
 
+struct cpl_get_tcb_rpl {
+	union opcode_tid ot;
+	__u8 cookie;
+	__u8 status;
+	__be16 len;
+};
+
 struct cpl_set_tcb_field {
 	WR_HDR;
 	union opcode_tid ot;
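The new struct pairs with the CPL_GET_TCB_RPL opcode added above. A minimal
sketch of how a consumer might parse the reply (the handler name is an
assumption; GET_TID() is the existing t4_msg.h helper, and the raw TCB bytes
are assumed to follow the header):

	/* Sketch, not from this patch: parsing a CPL_GET_TCB_RPL reply. */
	static void handle_get_tcb_rpl(const struct cpl_get_tcb_rpl *rpl)
	{
		unsigned int tid = GET_TID(rpl);	/* from opcode_tid */
		const __be64 *tcb = (const __be64 *)(rpl + 1); /* TCB payload */

		if (rpl->status) {
			pr_warn("GET_TCB failed for tid %u\n", tid);
			return;
		}
		/* hand tid/tcb/be16_to_cpu(rpl->len) to the requesting ULP */
	}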
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 3297ce025e8b..1b9afb192f7f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -41,6 +41,14 @@
 #define TCB_SMAC_SEL_V(x)	((x) << TCB_SMAC_SEL_S)
 
 #define TCB_T_FLAGS_W		1
+#define TCB_T_FLAGS_S		0
+#define TCB_T_FLAGS_M		0xffffffffffffffffULL
+#define TCB_T_FLAGS_V(x)	((__u64)(x) << TCB_T_FLAGS_S)
+
+#define TCB_RQ_START_W		30
+#define TCB_RQ_START_S		0
+#define TCB_RQ_START_M		0x3ffffffULL
+#define TCB_RQ_START_V(x)	((x) << TCB_RQ_START_S)
 
 #define TF_CCTRL_ECE_S		60
 #define TF_CCTRL_CWR_S		61
@@ -66,4 +74,8 @@
 #define TCB_RX_FRAG3_LEN_RAW_W			29
 #define TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W	30
 #define TCB_PDU_HDR_LEN_W			31
+
+#define TF_RX_PDU_OUT_S		49
+#define TF_RX_PDU_OUT_V(x)	((__u64)(x) << TF_RX_PDU_OUT_S)
+
 #endif /* __T4_TCB_H */
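These additions follow t4_tcb.h's existing (word, shift, mask) convention for
addressing fields inside 64-bit TCB words. A hedged sketch of how they would
typically be combined into a CPL_SET_TCB_FIELD update; mk_set_tcb_field(),
adap, and tid are hypothetical stand-ins for a driver's real work-request
builder and its context:

	/* Sketch: clear the RX_PDU_OUT flag (bit 49 of TCB word 1). */
	mk_set_tcb_field(adap, tid, TCB_T_FLAGS_W,	/* TCB word 1 */
			 TF_RX_PDU_OUT_V(1ULL),		/* mask: select bit 49 */
			 TF_RX_PDU_OUT_V(0ULL));	/* value: clear it */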
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8391dde869a7..70cc906a102b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -465,20 +465,17 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
 	void *set_hca_cap;
 	void *set_ctx;
 	int set_sz;
+	bool do_set = false;
 	int err;
 
-	if (!MLX5_CAP_GEN(dev, pg))
+	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
+	    !MLX5_CAP_GEN(dev, pg))
 		return 0;
 
 	err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
 	if (err)
 		return err;
 
-	if (!(MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive) ||
-	      MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive) ||
-	      MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive)))
-		return 0;
-
 	set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
 	set_ctx = kzalloc(set_sz, GFP_KERNEL);
 	if (!set_ctx)
@@ -488,19 +485,30 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
 	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
 	       MLX5_ST_SZ_BYTES(odp_cap));
 
-	/* set ODP SRQ support for RC/UD and XRC transports */
-	MLX5_SET(odp_cap, set_hca_cap, ud_odp_caps.srq_receive,
-		 MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive));
-
-	MLX5_SET(odp_cap, set_hca_cap, rc_odp_caps.srq_receive,
-		 MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive));
-
-	MLX5_SET(odp_cap, set_hca_cap, xrc_odp_caps.srq_receive,
-		 MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive));
-
-	err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+#define ODP_CAP_SET_MAX(dev, field)                                    \
+	do {                                                           \
+		u32 _res = MLX5_CAP_ODP_MAX(dev, field);               \
+		if (_res) {                                            \
+			do_set = true;                                 \
+			MLX5_SET(odp_cap, set_hca_cap, field, _res);   \
+		}                                                      \
+	} while (0)
+
+	ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
+	ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
+	ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
+	ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
+	ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
+	ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
+	ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
+	ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
+
+	if (do_set)
+		err = set_caps(dev, set_ctx, set_sz,
+			       MLX5_SET_HCA_CAP_OP_MOD_ODP);
 
 	kfree(set_ctx);
+
 	return err;
 }
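For reference, a single invocation such as ODP_CAP_SET_MAX(dev,
ud_odp_caps.srq_receive) above expands to the equivalent of:

	do {
		u32 _res = MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive);

		if (_res) {
			/* at least one field changed, so set_caps() runs */
			do_set = true;
			MLX5_SET(odp_cap, set_hca_cap,
				 ud_odp_caps.srq_receive, _res);
		}
	} while (0);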
@@ -577,6 +585,33 @@ query_ex:
 	return err;
 }
 
+static int set_hca_cap(struct mlx5_core_dev *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int err;
+
+	err = handle_hca_cap(dev);
+	if (err) {
+		dev_err(&pdev->dev, "handle_hca_cap failed\n");
+		goto out;
+	}
+
+	err = handle_hca_cap_atomic(dev);
+	if (err) {
+		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
+		goto out;
+	}
+
+	err = handle_hca_cap_odp(dev);
+	if (err) {
+		dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
+		goto out;
+	}
+
+out:
+	return err;
+}
+
 static int set_hca_ctrl(struct mlx5_core_dev *dev)
 {
 	struct mlx5_reg_host_endianness he_in;
@@ -969,21 +1004,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 		goto reclaim_boot_pages;
 	}
 
-	err = handle_hca_cap(dev);
+	err = set_hca_cap(dev);
 	if (err) {
-		dev_err(&pdev->dev, "handle_hca_cap failed\n");
-		goto reclaim_boot_pages;
-	}
-
-	err = handle_hca_cap_atomic(dev);
-	if (err) {
-		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
-		goto reclaim_boot_pages;
-	}
-
-	err = handle_hca_cap_odp(dev);
-	if (err) {
-		dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
+		dev_err(&pdev->dev, "set_hca_cap failed\n");
 		goto reclaim_boot_pages;
 	}