Diffstat (limited to 'drivers/infiniband/hw/mlx4')
-rw-r--r--  drivers/infiniband/hw/mlx4/ah.c        |   8
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c        |  10
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c      |  23
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c       |   1
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h   |  19
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c        | 286
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c        |  52

7 files changed, 345 insertions(+), 54 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 538c46a73248..6dee4fdc5d67 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -92,12 +92,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
int ret;
memcpy(&in6, grh->dgid.raw, sizeof(in6));
- if (rdma_is_multicast_addr(&in6)) {
+ if (rdma_is_multicast_addr(&in6))
is_mcast = 1;
- rdma_get_mcast_mac(&in6, ah->av.eth.mac);
- } else {
- memcpy(ah->av.eth.mac, ah_attr->roce.dmac, ETH_ALEN);
- }
+
+ memcpy(ah->av.eth.mac, ah_attr->roce.dmac, ETH_ALEN);
ret = ib_get_cached_gid(pd->device, rdma_ah_get_port_num(ah_attr),
grh->sgid_index, &sgid, &gid_attr);
if (ret)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index cab796341697..bf4f14a1b4fc 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -140,14 +140,18 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
{
int err;
int cqe_size = dev->dev->caps.cqe_size;
+ int shift;
+ int n;
*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
- err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
- (*umem)->page_shift, &buf->mtt);
+ n = ib_umem_page_count(*umem);
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
+ err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
+
if (err)
goto err_buf;
@@ -768,11 +772,13 @@ repoll:
switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
case MLX4_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ /* fall through */
case MLX4_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX4_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ /* fall through */
case MLX4_OPCODE_SEND:
case MLX4_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
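
The /* fall through */ comments added here (and in mcg.c below) mark intentional case fall-through so that compilers which warn about unannotated fall-through, such as GCC with -Wimplicit-fallthrough, stay quiet while the behavior is unchanged. A minimal standalone sketch of the pattern, using made-up names rather than the driver's:

#include <stdio.h>

enum op { OP_WRITE_IMM, OP_WRITE, OP_SEND };

/* Decode an opcode the way the CQ poll loop does: the IMM case sets a
 * flag and then deliberately falls into the plain case. */
static const char *decode(enum op op, int *with_imm)
{
	*with_imm = 0;
	switch (op) {
	case OP_WRITE_IMM:
		*with_imm = 1;
		/* fall through */
	case OP_WRITE:
		return "rdma-write";
	default:
		return "send";
	}
}

int main(void)
{
	int imm;
	const char *kind = decode(OP_WRITE_IMM, &imm);

	printf("%s imm=%d\n", kind, imm); /* prints: rdma-write imm=1 */
	return 0;
}
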
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c636842c5be0..8c8a16791a3f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -563,6 +563,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_wq_type_rq = props->max_qp;
}
+ props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
+ props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
+
if (!mlx4_is_slave(dev->dev))
err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
@@ -581,6 +584,23 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
sizeof(struct mlx4_wqe_data_seg);
}
+ if (uhw->outlen >= resp.response_length + sizeof(resp.rss_caps)) {
+ resp.response_length += sizeof(resp.rss_caps);
+ if (props->rss_caps.supported_qpts) {
+ resp.rss_caps.rx_hash_function =
+ MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
+ resp.rss_caps.rx_hash_fields_mask =
+ MLX4_IB_RX_HASH_SRC_IPV4 |
+ MLX4_IB_RX_HASH_DST_IPV4 |
+ MLX4_IB_RX_HASH_SRC_IPV6 |
+ MLX4_IB_RX_HASH_DST_IPV6 |
+ MLX4_IB_RX_HASH_SRC_PORT_TCP |
+ MLX4_IB_RX_HASH_DST_PORT_TCP |
+ MLX4_IB_RX_HASH_SRC_PORT_UDP |
+ MLX4_IB_RX_HASH_DST_PORT_UDP;
+ }
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
if (err)
@@ -2733,6 +2753,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
+ ibdev->ib_dev.uverbs_ex_cmd_mask |=
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
+
if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
IB_LINK_LAYER_ETHERNET) ||
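
The rss_caps reporting above follows the usual pattern for growing an extended-verbs response: the new block is filled in and response_length bumped only when the caller's output buffer (uhw->outlen) is large enough to hold it, and ib_copy_to_udata() then copies only response_length bytes, so older userspace with a shorter buffer never sees the new field. A standalone sketch of that gating, with hypothetical names (ex_resp, new_caps) rather than the driver's structures:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical extended response; the fields before new_caps are all that
 * old userspace knows about. */
struct ex_resp {
	uint32_t comp_mask;
	uint32_t response_length;
	uint64_t base_caps;
	uint64_t new_caps;	/* newly added field */
};

/* Build the response and copy out response_length bytes, the way
 * ib_copy_to_udata(uhw, &resp, resp.response_length) would. */
static void query(void *out, size_t outlen)
{
	struct ex_resp resp = { 0 };

	resp.response_length = offsetof(struct ex_resp, new_caps);
	resp.base_caps = 0x1234;

	/* Only report the extension if the caller's buffer can hold it. */
	if (outlen >= resp.response_length + sizeof(resp.new_caps)) {
		resp.response_length += sizeof(resp.new_caps);
		resp.new_caps = 0xabcd;
	}

	memcpy(out, &resp, resp.response_length);
}

int main(void)
{
	struct ex_resp old_style = { 0 }, new_style = { 0 };

	query(&old_style, offsetof(struct ex_resp, new_caps)); /* short buffer */
	query(&new_style, sizeof(new_style));                  /* full buffer */
	printf("old userspace gets %u bytes, new userspace gets %u bytes\n",
	       old_style.response_length, new_style.response_length);
	return 0;
}
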
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 70eb9f917303..81ffc007e0a1 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -944,6 +944,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
switch (sa_mad->mad_hdr.method) {
case IB_MGMT_METHOD_SET:
may_create = 1;
+ /* fall through */
case IB_SA_METHOD_DELETE:
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 1fa19820355a..e14919c15b06 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -47,6 +47,7 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cq.h>
#define MLX4_IB_DRV_NAME "mlx4_ib"
@@ -644,12 +645,18 @@ enum query_device_resp_mask {
QUERY_DEVICE_RESP_MASK_TIMESTAMP = 1UL << 0,
};
+struct mlx4_ib_rss_caps {
+ __u64 rx_hash_fields_mask; /* enum mlx4_rx_hash_fields */
+ __u8 rx_hash_function; /* enum mlx4_rx_hash_function_flags */
+ __u8 reserved[7];
+};
+
struct mlx4_uverbs_ex_query_device_resp {
- __u32 comp_mask;
- __u32 response_length;
- __u64 hca_core_clock_offset;
- __u32 max_inl_recv_sz;
- __u32 reserved;
+ __u32 comp_mask;
+ __u32 response_length;
+ __u64 hca_core_clock_offset;
+ __u32 max_inl_recv_sz;
+ struct mlx4_ib_rss_caps rss_caps;
};
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
@@ -929,5 +936,7 @@ struct ib_rwq_ind_table
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
+ int *num_of_mtts);
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index e6f77f63da75..4975f3e6596e 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -87,50 +87,286 @@ err_free:
return ERR_PTR(err);
}
+enum {
+ MLX4_MAX_MTT_SHIFT = 31
+};
+
+static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
+ struct mlx4_mtt *mtt,
+ u64 mtt_size, u64 mtt_shift, u64 len,
+ u64 cur_start_addr, u64 *pages,
+ int *start_index, int *npages)
+{
+ u64 cur_end_addr = cur_start_addr + len;
+ u64 cur_end_addr_aligned = 0;
+ u64 mtt_entries;
+ int err = 0;
+ int k;
+
+ len += (cur_start_addr & (mtt_size - 1ULL));
+ cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
+ len += (cur_end_addr_aligned - cur_end_addr);
+ if (len & (mtt_size - 1ULL)) {
+ pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
+ len, mtt_size);
+ return -EINVAL;
+ }
+
+ mtt_entries = (len >> mtt_shift);
+
+ /*
+ * Align the MTT start address to the mtt_size.
+ * Required to handle cases when the MR starts in the middle of an MTT
+ * record. Was not required in old code since the physical addresses
+ * provided by the dma subsystem were page aligned, which was also the
+ * MTT size.
+ */
+ cur_start_addr = round_down(cur_start_addr, mtt_size);
+ /* A new block is started ... */
+ for (k = 0; k < mtt_entries; ++k) {
+ pages[*npages] = cur_start_addr + (mtt_size * k);
+ (*npages)++;
+ /*
+ * Be friendly to mlx4_write_mtt() and pass it chunks of
+ * appropriate size.
+ */
+ if (*npages == PAGE_SIZE / sizeof(u64)) {
+ err = mlx4_write_mtt(dev->dev, mtt, *start_index,
+ *npages, pages);
+ if (err)
+ return err;
+
+ (*start_index) += *npages;
+ *npages = 0;
+ }
+ }
+
+ return 0;
+}
+
+static inline u64 alignment_of(u64 ptr)
+{
+ return ilog2(ptr & (~(ptr - 1)));
+}
+
+static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
+ u64 current_block_end,
+ u64 block_shift)
+{
+ /* Check whether the start of the new block is aligned as well as the
+ * previous block was.
+ * A block address must have zeros in its low block_shift bits.
+ */
+ if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
+ /*
+ * It is not as well aligned as the previous block - reduce the
+ * mtt size accordingly. Here we take the lowest bit that is
+ * set.
+ */
+ block_shift = alignment_of(next_block_start);
+
+ /*
+ * Check whether the end of the previous block is aligned as well
+ * as the start of the new block.
+ */
+ if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
+ /*
+ * It is not as well aligned as the start of the block -
+ * reduce the mtt size accordingly.
+ */
+ block_shift = alignment_of(current_block_end);
+
+ return block_shift;
+}
+
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem)
{
u64 *pages;
- int i, k, entry;
- int n;
- int len;
+ u64 len = 0;
int err = 0;
+ u64 mtt_size;
+ u64 cur_start_addr = 0;
+ u64 mtt_shift;
+ int start_index = 0;
+ int npages = 0;
struct scatterlist *sg;
+ int i;
pages = (u64 *) __get_free_page(GFP_KERNEL);
if (!pages)
return -ENOMEM;
- i = n = 0;
+ mtt_shift = mtt->page_shift;
+ mtt_size = 1ULL << mtt_shift;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> mtt->page_shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = sg_dma_address(sg) +
- (k << umem->page_shift);
- /*
- * Be friendly to mlx4_write_mtt() and
- * pass it chunks of appropriate size.
- */
- if (i == PAGE_SIZE / sizeof (u64)) {
- err = mlx4_write_mtt(dev->dev, mtt, n,
- i, pages);
- if (err)
- goto out;
- n += i;
- i = 0;
- }
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+ if (cur_start_addr + len == sg_dma_address(sg)) {
+ /* still the same block */
+ len += sg_dma_len(sg);
+ continue;
}
+ /*
+ * A new block is started ...
+ * If len is misaligned, write an extra mtt entry to cover the
+ * misaligned area (round up the division)
+ */
+ err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
+ mtt_shift, len,
+ cur_start_addr,
+ pages, &start_index,
+ &npages);
+ if (err)
+ goto out;
+
+ cur_start_addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ }
+
+ /* Handle the last block */
+ if (len > 0) {
+ /*
+ * If len is misaligned, write an extra mtt entry to cover
+ * the misaligned area (round up the division)
+ */
+ err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
+ mtt_shift, len,
+ cur_start_addr, pages,
+ &start_index, &npages);
+ if (err)
+ goto out;
}
- if (i)
- err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);
+ if (npages)
+ err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);
out:
free_page((unsigned long) pages);
return err;
}
+/*
+ * Calculate optimal mtt size based on contiguous pages.
+ * The function also returns, through num_of_mtts, the number of MTT entries
+ * needed, including the pages that are not aligned to the calculated
+ * mtt_size: for that we check the first chunk length and the last chunk
+ * length, and if either is not aligned to mtt_size we add the misaligned
+ * remainder. All chunks in the middle are already handled as part of the
+ * mtt shift calculation for both their start and end addresses.
+ */
+int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
+ int *num_of_mtts)
+{
+ u64 block_shift = MLX4_MAX_MTT_SHIFT;
+ u64 min_shift = umem->page_shift;
+ u64 last_block_aligned_end = 0;
+ u64 current_block_start = 0;
+ u64 first_block_start = 0;
+ u64 current_block_len = 0;
+ u64 last_block_end = 0;
+ struct scatterlist *sg;
+ u64 current_block_end;
+ u64 misalignment_bits;
+ u64 next_block_start;
+ u64 total_len = 0;
+ int i;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+ /*
+ * Initialization - save the first chunk start as the
+ * current_block_start - block means contiguous pages.
+ */
+ if (current_block_len == 0 && current_block_start == 0) {
+ current_block_start = sg_dma_address(sg);
+ first_block_start = current_block_start;
+ /*
+ * Find the bits that are different between the physical
+ * address and the virtual address for the start of the
+ * MR.
+ * umem_get aligned the start_va to a page boundary.
+ * Therefore, we need to align the start va to the same
+ * boundary.
+ * misalignment_bits is needed to handle the case of a
+ * single memory region. In this case, the rest of the
+ * logic will not reduce the block size. If we use a
+ * block size which is bigger than the alignment of the
+ * misalignment bits, we might use the virtual page
+ * number instead of the physical page number, resulting
+ * in access to the wrong data.
+ */
+ misalignment_bits =
+ (start_va & (~(((u64)(BIT(umem->page_shift))) - 1ULL)))
+ ^ current_block_start;
+ block_shift = min(alignment_of(misalignment_bits),
+ block_shift);
+ }
+
+ /*
+ * Go over the scatter entries and check if they continue the
+ * previous scatter entry.
+ */
+ next_block_start = sg_dma_address(sg);
+ current_block_end = current_block_start + current_block_len;
+ /* If we have a split (non-contig.) between two blocks */
+ if (current_block_end != next_block_start) {
+ block_shift = mlx4_ib_umem_calc_block_mtt
+ (next_block_start,
+ current_block_end,
+ block_shift);
+
+ /*
+ * If we reached the minimum shift for a 4k page we stop
+ * the loop.
+ */
+ if (block_shift <= min_shift)
+ goto end;
+
+ /*
+ * Add the length of the block that just ended to the total
+ * length; the misalignment of the first block's start is
+ * accounted for after the loop.
+ */
+ total_len += current_block_len;
+
+ /* Start a new block */
+ current_block_start = next_block_start;
+ current_block_len = sg_dma_len(sg);
+ continue;
+ }
+ /* The scatter entry is another part of the current block,
+ * increase the block size.
+ * An entry in the scatterlist can be larger than 4k (page) because
+ * the dma mapping may merge some blocks together.
+ */
+ current_block_len += sg_dma_len(sg);
+ }
+
+ /* Account for the last block in the total len */
+ total_len += current_block_len;
+ /* Add to the first block the misalignment that it suffers from. */
+ total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
+ last_block_end = current_block_start + current_block_len;
+ last_block_aligned_end = round_up(last_block_end, 1 << block_shift);
+ total_len += (last_block_aligned_end - last_block_end);
+
+ if (total_len & ((1ULL << block_shift) - 1ULL))
+ pr_warn("misaligned total length detected (%llu, %llu)!",
+ total_len, block_shift);
+
+ *num_of_mtts = total_len >> block_shift;
+end:
+ if (block_shift < min_shift) {
+ /*
+ * If the shift is less than the min, print a warning and return
+ * the min shift.
+ */
+ pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift);
+
+ block_shift = min_shift;
+ }
+ return block_shift;
+}
+
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
@@ -155,7 +391,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
n = ib_umem_page_count(mr->umem);
- shift = mr->umem->page_shift;
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
convert_access(access_flags), n, shift, &mr->mmr);
@@ -406,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
goto err_free_mr;
mr->max_pages = max_num_sg;
-
err = mlx4_mr_enable(dev->dev, &mr->mmr);
if (err)
goto err_free_pl;
@@ -417,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
return &mr->ibmr;
err_free_pl:
+ mr->ibmr.device = pd->device;
mlx4_free_priv_pages(mr);
err_free_mr:
(void) mlx4_mr_free(dev->dev, &mr->mmr);
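
The heart of mlx4_ib_umem_calc_optimal_mtt_size() is the shift reduction done by mlx4_ib_umem_calc_block_mtt() through alignment_of(), which is just the index of the lowest set bit of an address. A standalone sketch of that arithmetic (userspace, with made-up DMA addresses, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the patch: ptr & ~(ptr - 1) isolates the lowest set
 * bit, and its index is the best alignment the address allows. */
static uint64_t alignment_of(uint64_t ptr)
{
	return (uint64_t)__builtin_ctzll(ptr & (~(ptr - 1)));
}

int main(void)
{
	uint64_t block_shift = 31;                /* MLX4_MAX_MTT_SHIFT */
	uint64_t next_block_start  = 0x12340000;  /* 256 KB aligned */
	uint64_t current_block_end = 0x12338000;  /* only 32 KB aligned */

	/* Mirror mlx4_ib_umem_calc_block_mtt(): reduce the shift whenever a
	 * block boundary is not aligned to the current candidate size. */
	if (next_block_start & ((1ULL << block_shift) - 1ULL))
		block_shift = alignment_of(next_block_start);
	if (current_block_end & ((1ULL << block_shift) - 1ULL))
		block_shift = alignment_of(current_block_end);

	/* The worst-aligned boundary wins: prints "block_shift = 15",
	 * i.e. 32 KB MTT pages. */
	printf("block_shift = %llu\n", (unsigned long long)block_shift);
	return 0;
}
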
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b6b33d99b0b4..caf490ab24c8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
+ if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
+ MLX4_IB_RX_HASH_DST_IPV4 |
+ MLX4_IB_RX_HASH_SRC_IPV6 |
+ MLX4_IB_RX_HASH_DST_IPV6 |
+ MLX4_IB_RX_HASH_SRC_PORT_TCP |
+ MLX4_IB_RX_HASH_DST_PORT_TCP |
+ MLX4_IB_RX_HASH_SRC_PORT_UDP |
+ MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+ pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
+ ucmd->rx_hash_fields_mask);
+ return (-EOPNOTSUPP);
+ }
+
if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
rss_ctx->flags = MLX4_RSS_IPV4;
@@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
- if (rss_ctx->flags & MLX4_RSS_IPV4) {
+ if (rss_ctx->flags & MLX4_RSS_IPV4)
rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
- } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+ if (rss_ctx->flags & MLX4_RSS_IPV6)
rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
- } else {
+ if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
return (-EOPNOTSUPP);
}
@@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
- if (rss_ctx->flags & MLX4_RSS_IPV4) {
+ if (rss_ctx->flags & MLX4_RSS_IPV4)
rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
- } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+ if (rss_ctx->flags & MLX4_RSS_IPV6)
rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
- } else {
+ if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
return (-EOPNOTSUPP);
}
-
} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
@@ -1038,6 +1050,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct mlx4_ib_create_wq wq;
} ucmd;
size_t copy_len;
+ int shift;
+ int n;
copy_len = (src == MLX4_IB_QP_SRC) ?
sizeof(struct mlx4_ib_create_qp) :
@@ -1100,8 +1114,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
}
- err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
- qp->umem->page_shift, &qp->mtt);
+ n = ib_umem_page_count(qp->umem);
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
+ err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+
if (err)
goto err_buf;
@@ -2182,11 +2198,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
(to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));
- if (rwq_ind_tbl) {
- fill_qp_rss_context(context, qp);
- context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
- }
-
if (!(attr_mask & IB_QP_PATH_MIG_STATE))
context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
else {
@@ -2216,7 +2227,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
context->mtu_msgmax = (IB_MTU_4096 << 5) |
ilog2(dev->dev->caps.max_gso_sz);
else
- context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
+ context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
} else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
pr_err("path MTU (%u) is invalid\n",
@@ -2387,6 +2398,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
context->pd = cpu_to_be32(pd->pdn);
if (!rwq_ind_tbl) {
+ context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
get_cqs(qp, src_type, &send_cq, &recv_cq);
} else { /* Set dummy CQs to be compatible with HV and PRM */
send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq);
@@ -2394,7 +2406,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
}
context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
- context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
/* Set "fast registration enabled" for all kernel QPs */
if (!ibuobject)
@@ -2513,7 +2524,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
MLX4_IB_LINK_TYPE_ETH;
if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
/* set QP to receive both tunneled & non-tunneled packets */
- if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+ if (!rwq_ind_tbl)
context->srqn = cpu_to_be32(7 << 28);
}
}
@@ -2562,6 +2573,13 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
}
}
+ if (rwq_ind_tbl &&
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT) {
+ fill_qp_rss_context(context, qp);
+ context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
+ }
+
err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
to_mlx4_state(new_state), context, optpar,
sqd_event, &qp->mqp);
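
One behavioral note on the set_qp_rss() change above: with the old else-if chains, a hash mask that asked for both IPv4 and IPv6 together with UDP (or TCP) ports only ever enabled the IPv4 variant; with independent ifs, the UDP/TCP hashing now follows every IP version that was requested. A standalone sketch with illustrative flag values (the real MLX4_RSS_* values live in the mlx4 headers):

#include <stdio.h>

/* Illustrative bit values, not the driver's actual enum. */
#define RSS_IPV4      0x1u
#define RSS_IPV6      0x2u
#define RSS_UDP_IPV4  0x4u
#define RSS_UDP_IPV6  0x8u

int main(void)
{
	unsigned int flags = RSS_IPV4 | RSS_IPV6;  /* both IP versions requested */

	/* New logic: independent ifs, so UDP hashing is enabled for every
	 * IP version present instead of only the first match. */
	if (flags & RSS_IPV4)
		flags |= RSS_UDP_IPV4;
	if (flags & RSS_IPV6)
		flags |= RSS_UDP_IPV6;

	printf("flags = 0x%x\n", flags);  /* 0xf: UDP hash on both IPv4 and IPv6 */
	return 0;
}
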