author		Arnd Bergmann <arnd@arndb.de>	2016-09-19 13:57:26 +0200
committer	Doug Ledford <dledford@redhat.com>	2016-12-12 16:31:45 -0500
commit		a0fa72683e78979ef1123d679b1c40ae28bd9096 (patch)
tree		5222fc413b272f38aa61ca3c7811c04f05444087 /drivers/infiniband/sw/rxe/rxe_req.c
parent		net/mlx5e: Add tc support for FWD rule with counter (diff)
IB/rxe: avoid putting a large struct rxe_qp on stack
A race condition fix added an rxe_qp structure to the stack in order to
be able to perform rollback in rxe_requester(), but the structure is
large enough to trigger the warning for possible stack overflow:

drivers/infiniband/sw/rxe/rxe_req.c: In function 'rxe_requester':
drivers/infiniband/sw/rxe/rxe_req.c:757:1: error: the frame size of 2064 bytes is larger than 1024 bytes [-Werror=frame-larger-than=]

This changes the rollback function to only save the psn inside the qp,
which is the only field we access in the rollback_qp anyway.

Fixes: 3050b9985024 ("IB/rxe: Fix race condition between requester and completer")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
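The diagnostic quoted above comes from GCC's -Wframe-larger-than= option, which the kernel build enables through CONFIG_FRAME_WARN (promoted to an error here by -Werror). A minimal reproduction outside the kernel, with nothing rxe-specific; the file and function names are made up for illustration:

/* frame.c -- compile with: gcc -c -Wframe-larger-than=1024 frame.c
 *
 * The 2048-byte local pushes the stack frame past the 1024-byte limit,
 * so GCC reports "the frame size of ... bytes is larger than 1024
 * bytes", just as it did for rxe_requester() with a whole struct
 * rxe_qp on the stack.
 */
void big_frame(void)
{
	volatile char buf[2048];	/* stands in for struct rxe_qp */

	buf[0] = 0;
}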
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_req.c')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_req.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 832846b73ea0..205222909e53 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -548,23 +548,23 @@ static void update_wqe_psn(struct rxe_qp *qp,
 static void save_state(struct rxe_send_wqe *wqe,
                        struct rxe_qp *qp,
                        struct rxe_send_wqe *rollback_wqe,
-                       struct rxe_qp *rollback_qp)
+                       u32 *rollback_psn)
 {
 	rollback_wqe->state = wqe->state;
 	rollback_wqe->first_psn = wqe->first_psn;
 	rollback_wqe->last_psn = wqe->last_psn;
-	rollback_qp->req.psn = qp->req.psn;
+	*rollback_psn = qp->req.psn;
 }

 static void rollback_state(struct rxe_send_wqe *wqe,
                            struct rxe_qp *qp,
                            struct rxe_send_wqe *rollback_wqe,
-                           struct rxe_qp *rollback_qp)
+                           u32 rollback_psn)
 {
 	wqe->state = rollback_wqe->state;
 	wqe->first_psn = rollback_wqe->first_psn;
 	wqe->last_psn = rollback_wqe->last_psn;
-	qp->req.psn = rollback_qp->req.psn;
+	qp->req.psn = rollback_psn;
 }

 static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
@@ -593,8 +593,8 @@ int rxe_requester(void *arg)
 	int mtu;
 	int opcode;
 	int ret;
-	struct rxe_qp rollback_qp;
 	struct rxe_send_wqe rollback_wqe;
+	u32 rollback_psn;

 next_wqe:
 	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -718,7 +718,7 @@ next_wqe:
 	 * rxe_xmit_packet().
 	 * Otherwise, completer might initiate an unjustified retry flow.
 	 */
-	save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
 	update_wqe_state(qp, wqe, &pkt);
 	update_wqe_psn(qp, wqe, &pkt, payload);
 	ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
@@ -726,7 +726,7 @@ next_wqe:
 		qp->need_req_skb = 1;
 		kfree_skb(skb);

-		rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
+		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

 		if (ret == -EAGAIN) {
 			rxe_run_task(&qp->req.task, 1);
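Taken together, the hunks shrink the rollback bookkeeping from a full struct rxe_qp copy to a single u32 snapshot. A self-contained sketch of the pattern follows; the struct and function names are illustrative stand-ins, not the real rxe definitions:

#include <stdint.h>

/* Stand-in for a large state object like struct rxe_qp; copying it
 * by value onto the stack is exactly what triggered the warning. */
struct big_qp {
	uint32_t psn;			/* the only field rollback reads */
	unsigned char rest[2048];	/* the bulk we must not copy */
};

/* Snapshot only the scalar that a later rollback may need... */
static inline void save_psn(const struct big_qp *qp, uint32_t *rollback_psn)
{
	*rollback_psn = qp->psn;
}

/* ...and restore from that scalar; no large stack copy is ever made. */
static inline void restore_psn(struct big_qp *qp, uint32_t rollback_psn)
{
	qp->psn = rollback_psn;
}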