diff options
author | Xiao Yang <yangx.jy@fujitsu.com> | 2022-07-05 22:52:11 +0800 |
---|---|---|
committer | Leon Romanovsky <leonro@nvidia.com> | 2022-07-18 14:36:11 +0300 |
commit | 882736fb3b55a48908134d41aceeea8b0ef3385b (patch) | |
tree | b60dbbd2180246abf6679f5646e407d96cec7cb3 /drivers/infiniband/sw | |
parent | RDMA/rxe: Fix BUG: KASAN: null-ptr-deref in rxe_qp_do_cleanup (diff) | |
download | linux-dev-882736fb3b55a48908134d41aceeea8b0ef3385b.tar.xz linux-dev-882736fb3b55a48908134d41aceeea8b0ef3385b.zip |
RDMA/rxe: Add common rxe_prepare_res()
It's redundant to prepare resources for Read and Atomic
requests by different functions. Replace them with a common
rxe_prepare_res() with different parameters. In addition,
the common rxe_prepare_res() can also be used by new Flush
and Atomic Write requests in the future.
Link: https://lore.kernel.org/r/20220705145212.12014-1-yangx.jy@fujitsu.com
Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
Reviewed-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r-- | drivers/infiniband/sw/rxe/rxe_resp.c | 71 |
1 file changed, 32 insertions, 39 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index ccdfc1a6b659..5536582b8fe4 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -553,27 +553,48 @@ out: return rc; } -/* Guarantee atomicity of atomic operations at the machine level. */ -static DEFINE_SPINLOCK(atomic_ops_lock); - -static struct resp_res *rxe_prepare_atomic_res(struct rxe_qp *qp, - struct rxe_pkt_info *pkt) +static struct resp_res *rxe_prepare_res(struct rxe_qp *qp, + struct rxe_pkt_info *pkt, + int type) { struct resp_res *res; + u32 pkts; res = &qp->resp.resources[qp->resp.res_head]; rxe_advance_resp_resource(qp); free_rd_atomic_resource(qp, res); - res->type = RXE_ATOMIC_MASK; - res->first_psn = pkt->psn; - res->last_psn = pkt->psn; - res->cur_psn = pkt->psn; + res->type = type; res->replay = 0; + switch (type) { + case RXE_READ_MASK: + res->read.va = qp->resp.va + qp->resp.offset; + res->read.va_org = qp->resp.va + qp->resp.offset; + res->read.resid = qp->resp.resid; + res->read.length = qp->resp.resid; + res->read.rkey = qp->resp.rkey; + + pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1); + res->first_psn = pkt->psn; + res->cur_psn = pkt->psn; + res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK; + + res->state = rdatm_res_state_new; + break; + case RXE_ATOMIC_MASK: + res->first_psn = pkt->psn; + res->last_psn = pkt->psn; + res->cur_psn = pkt->psn; + break; + } + return res; } +/* Guarantee atomicity of atomic operations at the machine level. */
+static DEFINE_SPINLOCK(atomic_ops_lock); + static enum resp_states rxe_atomic_reply(struct rxe_qp *qp, struct rxe_pkt_info *pkt) { @@ -584,7 +605,7 @@ static enum resp_states rxe_atomic_reply(struct rxe_qp *qp, u64 value; if (!res) { - res = rxe_prepare_atomic_res(qp, pkt); + res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK); qp->resp.res = res; } @@ -680,34 +701,6 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, return skb; } -static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp, - struct rxe_pkt_info *pkt) - { - struct resp_res *res; - u32 pkts; - - res = &qp->resp.resources[qp->resp.res_head]; - rxe_advance_resp_resource(qp); - free_rd_atomic_resource(qp, res); - - res->type = RXE_READ_MASK; - res->replay = 0; - res->read.va = qp->resp.va + qp->resp.offset; - res->read.va_org = qp->resp.va + qp->resp.offset; - res->read.resid = qp->resp.resid; - res->read.length = qp->resp.resid; - res->read.rkey = qp->resp.rkey; - - pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1); - res->first_psn = pkt->psn; - res->cur_psn = pkt->psn; - res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK; - - res->state = rdatm_res_state_new; - - return res; -} - /** * rxe_recheck_mr - revalidate MR from rkey and get a reference * @qp: the qp @@ -778,7 +771,7 @@ static enum resp_states read_reply(struct rxe_qp *qp, struct rxe_mr *mr; if (!res) { - res = rxe_prepare_read_res(qp, req_pkt); + res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK); qp->resp.res = res; } |