author      Linus Torvalds <torvalds@linux-foundation.org>  2019-03-09 15:53:03 -0800
committer   Linus Torvalds <torvalds@linux-foundation.org>  2019-03-09 15:53:03 -0800
commit      a50243b1ddcdd766d0d17fbfeeb1a22e62fdc461
tree        3dbf847105558eaac3658a46c4934df503c866a2 /drivers/infiniband/hw/bnxt_re/qplib_res.c
parent      Merge tag 'pci-v5.1-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
parent      net/mlx5: ODP support for XRC transport is not enabled by default in FW
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"This has been a slightly more active cycle than normal with ongoing
core changes and quite a lot of collected driver updates.
- Various driver fixes for bnxt_re, cxgb4, hns, mlx5, pvrdma, rxe
- A new data transfer mode for HFI1 giving higher performance
- Significant functional and bug fix update to the mlx5
On-Demand-Paging MR feature
- A chip hang reset recovery system for hns
- Change mm->pinned_vm to an atomic64
- Update bnxt_re to support a new 57500 chip
- A sane netlink 'rdma link add' method for creating rxe devices and
fixing the various unregistration race conditions in rxe's
unregister flow
- Allow looking up objects by an ID over netlink
- Various reworking of the core to driver interface:
   - drivers should not assume umem SGLs are in PAGE_SIZE chunks
   - ucontext is accessed via udata not other means
   - start to make the core code responsible for object memory
     allocation
   - drivers should convert struct device to struct ib_device via a
     helper
   - drivers have more tools to avoid use after unregister problems"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (280 commits)
net/mlx5: ODP support for XRC transport is not enabled by default in FW
IB/hfi1: Close race condition on user context disable and close
RDMA/umem: Revert broken 'off by one' fix
RDMA/umem: minor bug fix in error handling path
RDMA/hns: Use GFP_ATOMIC in hns_roce_v2_modify_qp
cxgb4: kfree mhp after the debug print
IB/rdmavt: Fix concurrency panics in QP post_send and modify to error
IB/rdmavt: Fix loopback send with invalidate ordering
IB/iser: Fix dma_nents type definition
IB/mlx5: Set correct write permissions for implicit ODP MR
bnxt_re: Clean cq for kernel consumers only
RDMA/uverbs: Don't do double free of allocated PD
RDMA: Handle ucontext allocations by IB/core
RDMA/core: Fix a WARN() message
bnxt_re: fix the regression due to changes in alloc_pbl
IB/mlx4: Increase the timeout for CM cache
IB/core: Abort page fault handler silently during owning process exit
IB/mlx5: Validate correct PD before prefetch MR
IB/mlx5: Protect against prefetch of invalid MR
RDMA/uverbs: Store PR pointer before it is overwritten
...
Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/qplib_res.c')
-rw-r--r--   drivers/infiniband/hw/bnxt_re/qplib_res.c   22
1 file changed, 12 insertions, 10 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 57d4951679cb..0bc24f934829 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -85,7 +85,7 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
 static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
 		       struct scatterlist *sghead, u32 pages, u32 pg_size)
 {
-	struct scatterlist *sg;
+	struct sg_dma_page_iter sg_iter;
 	bool is_umem = false;
 	int i;
 
@@ -116,13 +116,11 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
 	} else {
 		i = 0;
 		is_umem = true;
-		for_each_sg(sghead, sg, pages, i) {
-			pbl->pg_map_arr[i] = sg_dma_address(sg);
-			pbl->pg_arr[i] = sg_virt(sg);
-			if (!pbl->pg_arr[i])
-				goto fail;
-
+		for_each_sg_dma_page (sghead, &sg_iter, pages, 0) {
+			pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
+			pbl->pg_arr[i] = NULL;
 			pbl->pg_count++;
+			i++;
 		}
 	}
 
@@ -330,13 +328,13 @@ void bnxt_qplib_free_ctx(struct pci_dev *pdev,
  */
 int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
 			 struct bnxt_qplib_ctx *ctx,
-			 bool virt_fn)
+			 bool virt_fn, bool is_p5)
 {
 	int i, j, k, rc = 0;
 	int fnz_idx = -1;
 	__le64 **pbl_ptr;
 
-	if (virt_fn)
+	if (virt_fn || is_p5)
 		goto stats_alloc;
 
 	/* QPC Tables */
@@ -762,7 +760,11 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
 {
 	memset(stats, 0, sizeof(*stats));
 	stats->fw_id = -1;
-	stats->size = sizeof(struct ctx_hw_stats);
+	/* 128 byte aligned context memory is required only for 57500.
+	 * However making this unconditional, it does not harm previous
+	 * generation.
+	 */
+	stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
 	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
 					&stats->dma_map, GFP_KERNEL);
 	if (!stats->dma) {
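The core of the __alloc_pbl() change above is the switch from per-SGE iteration (for_each_sg() plus sg_virt()) to the core's per-DMA-page iterator, so the driver no longer assumes umem SGL entries come in PAGE_SIZE chunks. Below is a minimal sketch of that pattern in isolation; the helper name and parameters are illustrative only, not taken from the patch.

#include <linux/scatterlist.h>
#include <linux/types.h>

/*
 * Illustrative helper (not from the patch): record the bus address of
 * every PAGE_SIZE block of an already DMA-mapped scatterlist.
 * for_each_sg_dma_page() walks the mapped pages regardless of how the
 * individual SGL entries are sized.
 */
static void fill_pg_map(struct scatterlist *sghead, u32 nmapped,
			dma_addr_t *pg_map_arr)
{
	struct sg_dma_page_iter sg_iter;
	int i = 0;

	for_each_sg_dma_page(sghead, &sg_iter, nmapped, 0)
		pg_map_arr[i++] = sg_page_iter_dma_address(&sg_iter);
}

Because the iterator yields DMA addresses rather than scatterlist entries, no kernel virtual address is available per page, which is why the hunk sets pbl->pg_arr[i] = NULL for umem-backed PBLs instead of calling sg_virt().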