From c536277e0db1ad2e9fbb9dfd940c3565a14d9c52 Mon Sep 17 00:00:00 2001
From: Bernard Metzler
Date: Thu, 22 Aug 2019 19:37:38 +0200
Subject: RDMA/siw: Fix 64/32bit pointer inconsistency

Fixes improper casting between addresses and unsigned types.
Changes siw_pbl_get_buffer() function to return appropriate
dma_addr_t, and not u64.

Also fixes debug prints. Now any potentially kernel private
pointers are printed formatted as '%pK', to allow keeping that
information secret.

Fixes: d941bfe500be ("RDMA/siw: Change CQ flags from 64->32 bits")
Fixes: b0fff7317bb4 ("rdma/siw: completion queue methods")
Fixes: 8b6a361b8c48 ("rdma/siw: receive path")
Fixes: b9be6f18cf9e ("rdma/siw: transmit path")
Fixes: f29dd55b0236 ("rdma/siw: queue pair methods")
Fixes: 2251334dcac9 ("rdma/siw: application buffer management")
Fixes: 303ae1cdfdf7 ("rdma/siw: application interface")
Fixes: 6c52fdc244b5 ("rdma/siw: connection management")
Fixes: a531975279f3 ("rdma/siw: main include file")
Reported-by: Geert Uytterhoeven
Reported-by: Jason Gunthorpe
Reported-by: Leon Romanovsky
Signed-off-by: Bernard Metzler
Link: https://lore.kernel.org/r/20190822173738.26817-1-bmt@zurich.ibm.com
Signed-off-by: Doug Ledford
---
 drivers/infiniband/sw/siw/siw_verbs.c | 40 +++++++++++++++++------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

(limited to 'drivers/infiniband/sw/siw/siw_verbs.c')

diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index e7f3a2379d9d..da52c90e06d4 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -424,8 +424,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 		 */
 		qp->srq = to_siw_srq(attrs->srq);
 		qp->attrs.rq_size = 0;
-		siw_dbg(base_dev, "QP [%u]: [SRQ 0x%p] attached\n",
-			qp->qp_num, qp->srq);
+		siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
 	} else if (num_rqe) {
 		if (qp->kernel_verbs)
 			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
@@ -610,7 +609,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
 						  base_ucontext);
 	struct siw_qp_attrs qp_attrs;
 
-	siw_dbg_qp(qp, "state %d, cep 0x%p\n", qp->attrs.state, qp->cep);
+	siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
 
 	/*
 	 * Mark QP as in process of destruction to prevent from
@@ -662,7 +661,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
 	void *kbuf = &sqe->sge[1];
 	int num_sge = core_wr->num_sge, bytes = 0;
 
-	sqe->sge[0].laddr = (u64)kbuf;
+	sqe->sge[0].laddr = (uintptr_t)kbuf;
 	sqe->sge[0].lkey = 0;
 
 	while (num_sge--) {
@@ -825,7 +824,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
 			break;
 
 		case IB_WR_REG_MR:
-			sqe->base_mr = (uint64_t)reg_wr(wr)->mr;
+			sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
 			sqe->rkey = reg_wr(wr)->key;
 			sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
 			sqe->opcode = SIW_OP_REG_MR;
@@ -842,8 +841,9 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
 			rv = -EINVAL;
 			break;
 		}
-		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n",
-			   sqe->opcode, sqe->flags, (void *)sqe->id);
+		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
+			   sqe->opcode, sqe->flags,
+			   (void *)(uintptr_t)sqe->id);
 
 		if (unlikely(rv < 0))
 			break;
@@ -1205,8 +1205,8 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
 	int rv;
 
-	siw_dbg_pd(pd, "start: 0x%016llx, va: 0x%016llx, len: %llu\n",
-		   (unsigned long long)start, (unsigned long long)rnic_va,
+	siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
+		   (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
 		   (unsigned long long)len);
 
 	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
@@ -1363,7 +1363,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
 	struct siw_mem *mem = mr->mem;
 	struct siw_pbl *pbl = mem->pbl;
 	struct siw_pble *pble;
-	u64 pbl_size;
+	unsigned long pbl_size;
 	int i, rv;
 
 	if (!pbl) {
@@ -1402,16 +1402,18 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
 			pbl_size += sg_dma_len(slp);
 		}
 		siw_dbg_mem(mem,
-			    "sge[%d], size %llu, addr 0x%016llx, total %llu\n",
-			    i, pble->size, pble->addr, pbl_size);
+			    "sge[%d], size %u, addr 0x%p, total %lu\n",
+			    i, pble->size, (void *)(uintptr_t)pble->addr,
+			    pbl_size);
 	}
 	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
 	if (rv > 0) {
 		mem->len = base_mr->length;
 		mem->va = base_mr->iova;
 		siw_dbg_mem(mem,
-			    "%llu bytes, start 0x%016llx, %u SLE to %u entries\n",
-			    mem->len, mem->va, num_sle, pbl->num_buf);
+			    "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
+			    mem->len, (void *)(uintptr_t)mem->va, num_sle,
+			    pbl->num_buf);
 	}
 	return rv;
 }
@@ -1529,7 +1531,7 @@ int siw_create_srq(struct ib_srq *base_srq,
 	}
 	spin_lock_init(&srq->lock);
 
-	siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: success\n", srq);
+	siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
 
 	return 0;
 
@@ -1650,8 +1652,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
 
 	if (unlikely(!srq->kernel_verbs)) {
 		siw_dbg_pd(base_srq->pd,
-			   "[SRQ 0x%p]: no kernel post_recv for mapped srq\n",
-			   srq);
+			   "[SRQ]: no kernel post_recv for mapped srq\n");
 		rv = -EINVAL;
 		goto out;
 	}
@@ -1673,8 +1674,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
 		}
 		if (unlikely(wr->num_sge > srq->max_sge)) {
 			siw_dbg_pd(base_srq->pd,
-				   "[SRQ 0x%p]: too many sge's: %d\n", srq,
-				   wr->num_sge);
+				   "[SRQ]: too many sge's: %d\n", wr->num_sge);
 			rv = -EINVAL;
 			break;
 		}
@@ -1693,7 +1693,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
 	spin_unlock_irqrestore(&srq->lock, flags);
 out:
 	if (unlikely(rv < 0)) {
-		siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: error %d\n", srq, rv);
+		siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
 		*bad_wr = wr;
 	}
 	return rv;
--
cgit v1.2.3-59-g8ed1b
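
Editor's note on the pattern the patch applies, with a minimal standalone
userspace sketch (not part of the patch; the struct and variable names below
are invented for illustration). The driver stores addresses in 64-bit SQE/SGE
fields, and converting those values straight to a pointer on a 32-bit build
triggers "cast to pointer from integer of different size" warnings; routing
the conversion through uintptr_t keeps both directions well defined on 32- and
64-bit targets. The '%pK' format used in the new debug prints is a kernel
facility for hiding pointer values from unprivileged readers, so the sketch
below can only show the plain '%p' form.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the 64-bit laddr/id fields in the siw work queue elements;
 * the struct name is invented for this sketch. */
struct fake_sge {
	uint64_t laddr;
};

int main(void)
{
	int payload = 42;
	struct fake_sge sge;

	/* store: pointer -> uintptr_t -> u64; going through uintptr_t keeps
	 * the pointer/integer conversion the same width on 32-bit builds */
	sge.laddr = (uintptr_t)&payload;

	/* load: u64 -> uintptr_t -> pointer; a direct (int *)sge.laddr cast
	 * would warn about an integer-to-pointer size mismatch on 32-bit */
	int *p = (int *)(uintptr_t)sge.laddr;

	/* debug print: the kernel patch uses '%pK' so the address can be
	 * hidden from unprivileged readers; plain '%p' in userspace */
	printf("value %d at %p\n", *p, (void *)p);
	return 0;
}

Compiling the sketch with a 32-bit target (e.g. gcc -m32, where available) and
removing the intermediate uintptr_t casts reproduces the kind of size-mismatch
warnings this patch silences.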