Diffstat
-rw-r--r-- | drivers/infiniband/sw/rxe/rxe_qp.c | 263 |
1 file changed, 116 insertions, 147 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index ab72db68b58f..c5451a4488ca 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -19,33 +19,33 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
 			  int has_srq)
 {
 	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
-		rxe_dbg(rxe, "invalid send wr = %u > %d\n",
+		rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n",
 			cap->max_send_wr, rxe->attr.max_qp_wr);
 		goto err1;
 	}
 
 	if (cap->max_send_sge > rxe->attr.max_send_sge) {
-		rxe_dbg(rxe, "invalid send sge = %u > %d\n",
+		rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n",
 			cap->max_send_sge, rxe->attr.max_send_sge);
 		goto err1;
 	}
 
 	if (!has_srq) {
 		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
-			rxe_dbg(rxe, "invalid recv wr = %u > %d\n",
+			rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n",
 				cap->max_recv_wr, rxe->attr.max_qp_wr);
 			goto err1;
 		}
 
 		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
-			rxe_dbg(rxe, "invalid recv sge = %u > %d\n",
+			rxe_dbg_dev(rxe, "invalid recv sge = %u > %d\n",
 				cap->max_recv_sge, rxe->attr.max_recv_sge);
 			goto err1;
 		}
 	}
 
 	if (cap->max_inline_data > rxe->max_inline_data) {
-		rxe_dbg(rxe, "invalid max inline data = %u > %d\n",
+		rxe_dbg_dev(rxe, "invalid max inline data = %u > %d\n",
 			cap->max_inline_data, rxe->max_inline_data);
 		goto err1;
 	}
@@ -73,7 +73,7 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
 	}
 
 	if (!init->recv_cq || !init->send_cq) {
-		rxe_dbg(rxe, "missing cq\n");
+		rxe_dbg_dev(rxe, "missing cq\n");
 		goto err1;
 	}
 
@@ -82,14 +82,14 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
 
 	if (init->qp_type == IB_QPT_GSI) {
 		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
-			rxe_dbg(rxe, "invalid port = %d\n", port_num);
+			rxe_dbg_dev(rxe, "invalid port = %d\n", port_num);
 			goto err1;
 		}
 
 		port = &rxe->port;
 
 		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
-			rxe_dbg(rxe, "GSI QP exists for port %d\n", port_num);
+			rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);
 			goto err1;
 		}
 	}
@@ -231,8 +231,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
 					       QUEUE_TYPE_FROM_CLIENT);
 
-	qp->req.state = QP_STATE_RESET;
-	qp->comp.state = QP_STATE_RESET;
 	qp->req.opcode = -1;
 	qp->comp.opcode = -1;
 
@@ -287,7 +285,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 
 	qp->resp.opcode = OPCODE_NONE;
 	qp->resp.msn = 0;
-	qp->resp.state = QP_STATE_RESET;
 
 	return 0;
 }
@@ -328,8 +325,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 	if (err)
 		goto err2;
 
+	spin_lock_bh(&qp->state_lock);
 	qp->attr.qp_state = IB_QPS_RESET;
 	qp->valid = 1;
+	spin_unlock_bh(&qp->state_lock);
 
 	return 0;
 
@@ -380,30 +379,9 @@ int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
 	return 0;
 }
 
-/* called by the modify qp verb, this routine checks all the parameters before
- * making any changes
- */
 int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
 		    struct ib_qp_attr *attr, int mask)
 {
-	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
-					attr->cur_qp_state : qp->attr.qp_state;
-	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
-					attr->qp_state : cur_state;
-
-	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
-		rxe_dbg_qp(qp, "invalid mask or state\n");
-		goto err1;
-	}
-
-	if (mask & IB_QP_STATE) {
-		if (cur_state == IB_QPS_SQD) {
-			if (qp->req.state == QP_STATE_DRAIN &&
-			    new_state != IB_QPS_ERR)
-				goto err1;
-		}
-	}
-
 	if (mask & IB_QP_PORT) {
 		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
 			rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num);
@@ -473,29 +451,18 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 {
 	/* stop tasks from running */
 	rxe_disable_task(&qp->resp.task);
+	rxe_disable_task(&qp->comp.task);
+	rxe_disable_task(&qp->req.task);
 
-	/* stop request/comp */
-	if (qp->sq.queue) {
-		if (qp_type(qp) == IB_QPT_RC)
-			rxe_disable_task(&qp->comp.task);
-		rxe_disable_task(&qp->req.task);
-	}
-
-	/* move qp to the reset state */
-	qp->req.state = QP_STATE_RESET;
-	qp->comp.state = QP_STATE_RESET;
-	qp->resp.state = QP_STATE_RESET;
+	/* drain work and packet queues */
+	rxe_requester(qp);
+	rxe_completer(qp);
+	rxe_responder(qp);
 
-	/* let state machines reset themselves drain work and packet queues
-	 * etc.
-	 */
-	__rxe_do_task(&qp->resp.task);
-
-	if (qp->sq.queue) {
-		__rxe_do_task(&qp->comp.task);
-		__rxe_do_task(&qp->req.task);
+	if (qp->rq.queue)
+		rxe_queue_reset(qp->rq.queue);
+	if (qp->sq.queue)
 		rxe_queue_reset(qp->sq.queue);
-	}
 
 	/* cleanup attributes */
 	atomic_set(&qp->ssn, 0);
@@ -518,54 +485,103 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 
 	/* reenable tasks */
 	rxe_enable_task(&qp->resp.task);
-
-	if (qp->sq.queue) {
-		if (qp_type(qp) == IB_QPT_RC)
-			rxe_enable_task(&qp->comp.task);
-
-		rxe_enable_task(&qp->req.task);
-	}
-}
-
-/* drain the send queue */
-static void rxe_qp_drain(struct rxe_qp *qp)
-{
-	if (qp->sq.queue) {
-		if (qp->req.state != QP_STATE_DRAINED) {
-			qp->req.state = QP_STATE_DRAIN;
-			if (qp_type(qp) == IB_QPT_RC)
-				rxe_sched_task(&qp->comp.task);
-			else
-				__rxe_do_task(&qp->comp.task);
-			rxe_sched_task(&qp->req.task);
-		}
-	}
+	rxe_enable_task(&qp->comp.task);
+	rxe_enable_task(&qp->req.task);
 }
 
 /* move the qp to the error state */
 void rxe_qp_error(struct rxe_qp *qp)
 {
-	qp->req.state = QP_STATE_ERROR;
-	qp->resp.state = QP_STATE_ERROR;
-	qp->comp.state = QP_STATE_ERROR;
+	spin_lock_bh(&qp->state_lock);
 	qp->attr.qp_state = IB_QPS_ERR;
 
 	/* drain work and packet queues */
 	rxe_sched_task(&qp->resp.task);
+	rxe_sched_task(&qp->comp.task);
+	rxe_sched_task(&qp->req.task);
+	spin_unlock_bh(&qp->state_lock);
+}
 
-	if (qp_type(qp) == IB_QPT_RC)
-		rxe_sched_task(&qp->comp.task);
-	else
-		__rxe_do_task(&qp->comp.task);
+static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
+		       int mask)
+{
+	spin_lock_bh(&qp->state_lock);
+	qp->attr.sq_draining = 1;
+	rxe_sched_task(&qp->comp.task);
 	rxe_sched_task(&qp->req.task);
+	spin_unlock_bh(&qp->state_lock);
 }
 
+/* caller should hold qp->state_lock */
+static int __qp_chk_state(struct rxe_qp *qp, struct ib_qp_attr *attr,
+			  int mask)
+{
+	enum ib_qp_state cur_state;
+	enum ib_qp_state new_state;
+
+	cur_state = (mask & IB_QP_CUR_STATE) ?
+				attr->cur_qp_state : qp->attr.qp_state;
+	new_state = (mask & IB_QP_STATE) ?
+				attr->qp_state : cur_state;
+
+	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask))
+		return -EINVAL;
+
+	if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) {
+		if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const char *const qps2str[] = {
+	[IB_QPS_RESET]	= "RESET",
+	[IB_QPS_INIT]	= "INIT",
+	[IB_QPS_RTR]	= "RTR",
+	[IB_QPS_RTS]	= "RTS",
+	[IB_QPS_SQD]	= "SQD",
+	[IB_QPS_SQE]	= "SQE",
+	[IB_QPS_ERR]	= "ERR",
+};
+
 /* called by the modify qp verb */
 int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 		     struct ib_udata *udata)
 {
 	int err;
 
+	if (mask & IB_QP_CUR_STATE)
+		qp->attr.cur_qp_state = attr->qp_state;
+
+	if (mask & IB_QP_STATE) {
+		spin_lock_bh(&qp->state_lock);
+		err = __qp_chk_state(qp, attr, mask);
+		if (!err) {
+			qp->attr.qp_state = attr->qp_state;
+			rxe_dbg_qp(qp, "state -> %s\n",
+					qps2str[attr->qp_state]);
+		}
+		spin_unlock_bh(&qp->state_lock);
+
+		if (err)
+			return err;
+
+		switch (attr->qp_state) {
+		case IB_QPS_RESET:
+			rxe_qp_reset(qp);
+			break;
+		case IB_QPS_SQD:
+			rxe_qp_sqd(qp, attr, mask);
+			break;
+		case IB_QPS_ERR:
+			rxe_qp_error(qp);
+			break;
+		default:
+			break;
+		}
+	}
+
 	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
 		int max_rd_atomic = attr->max_rd_atomic ?
 			roundup_pow_of_two(attr->max_rd_atomic) : 0;
@@ -587,9 +603,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 		return err;
 	}
 
-	if (mask & IB_QP_CUR_STATE)
-		qp->attr.cur_qp_state = attr->qp_state;
-
 	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
 		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;
 
@@ -669,50 +682,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 	if (mask & IB_QP_DEST_QPN)
 		qp->attr.dest_qp_num = attr->dest_qp_num;
 
-	if (mask & IB_QP_STATE) {
-		qp->attr.qp_state = attr->qp_state;
-
-		switch (attr->qp_state) {
-		case IB_QPS_RESET:
-			rxe_dbg_qp(qp, "state -> RESET\n");
-			rxe_qp_reset(qp);
-			break;
-
-		case IB_QPS_INIT:
-			rxe_dbg_qp(qp, "state -> INIT\n");
-			qp->req.state = QP_STATE_INIT;
-			qp->resp.state = QP_STATE_INIT;
-			qp->comp.state = QP_STATE_INIT;
-			break;
-
-		case IB_QPS_RTR:
-			rxe_dbg_qp(qp, "state -> RTR\n");
-			qp->resp.state = QP_STATE_READY;
-			break;
-
-		case IB_QPS_RTS:
-			rxe_dbg_qp(qp, "state -> RTS\n");
-			qp->req.state = QP_STATE_READY;
-			qp->comp.state = QP_STATE_READY;
-			break;
-
-		case IB_QPS_SQD:
-			rxe_dbg_qp(qp, "state -> SQD\n");
-			rxe_qp_drain(qp);
-			break;
-
-		case IB_QPS_SQE:
-			rxe_dbg_qp(qp, "state -> SQE !!?\n");
-			/* Not possible from modify_qp. */
-			break;
-
-		case IB_QPS_ERR:
-			rxe_dbg_qp(qp, "state -> ERR\n");
-			rxe_qp_error(qp);
-			break;
-		}
-	}
-
 	return 0;
 }
 
@@ -736,18 +705,15 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
 	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
 	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);
 
-	if (qp->req.state == QP_STATE_DRAIN) {
-		attr->sq_draining = 1;
-		/* applications that get this state
-		 * typically spin on it. yield the
-		 * processor
-		 */
+	/* Applications that get this state typically spin on it.
+	 * Yield the processor
+	 */
+	spin_lock_bh(&qp->state_lock);
+	if (qp->attr.sq_draining) {
+		spin_unlock_bh(&qp->state_lock);
 		cond_resched();
-	} else {
-		attr->sq_draining = 0;
 	}
-
-	rxe_dbg_qp(qp, "attr->sq_draining = %d\n", attr->sq_draining);
+	spin_unlock_bh(&qp->state_lock);
 
 	return 0;
 }
@@ -771,26 +737,29 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
 {
 	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
 
+	spin_lock_bh(&qp->state_lock);
 	qp->valid = 0;
+	spin_unlock_bh(&qp->state_lock);
 	qp->qp_timeout_jiffies = 0;
-	rxe_cleanup_task(&qp->resp.task);
 
 	if (qp_type(qp) == IB_QPT_RC) {
 		del_timer_sync(&qp->retrans_timer);
 		del_timer_sync(&qp->rnr_nak_timer);
 	}
 
-	rxe_cleanup_task(&qp->req.task);
-	rxe_cleanup_task(&qp->comp.task);
+	if (qp->resp.task.func)
+		rxe_cleanup_task(&qp->resp.task);
 
-	/* flush out any receive wr's or pending requests */
 	if (qp->req.task.func)
-		__rxe_do_task(&qp->req.task);
+		rxe_cleanup_task(&qp->req.task);
 
-	if (qp->sq.queue) {
-		__rxe_do_task(&qp->comp.task);
-		__rxe_do_task(&qp->req.task);
-	}
+	if (qp->comp.task.func)
+		rxe_cleanup_task(&qp->comp.task);
+
+	/* flush out any receive wr's or pending requests */
+	rxe_requester(qp);
+	rxe_completer(qp);
+	rxe_responder(qp);
 
 	if (qp->sq.queue)
 		rxe_queue_cleanup(qp->sq.queue);
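Note on the pattern introduced above (editor's illustration, not part of the patch): the per-module sub-states qp->req.state, qp->comp.state and qp->resp.state are replaced by the single verbs-level state in qp->attr.qp_state (plus the qp->attr.sq_draining flag), and every check or update of that state now happens under qp->state_lock, with the state-dependent work (rxe_qp_reset(), rxe_qp_sqd(), rxe_qp_error()) kicked off only after the new state has been committed and the lock dropped. The stand-alone C sketch below shows that check-and-commit shape in userspace. It is a sketch only: a pthread mutex stands in for the kernel spinlock, the types are simplified stand-ins, and the helper names qp_chk_state()/qp_set_state() are hypothetical.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types; illustration only. */
enum ib_qp_state { IB_QPS_RESET, IB_QPS_INIT, IB_QPS_RTR, IB_QPS_RTS,
		   IB_QPS_SQD, IB_QPS_SQE, IB_QPS_ERR };

struct qp_attr {
	enum ib_qp_state qp_state;
	int sq_draining;
};

struct qp {
	pthread_mutex_t state_lock;	/* stands in for qp->state_lock */
	struct qp_attr attr;
};

/* Like __qp_chk_state(): validate a transition with the lock held.
 * Only the SQD/sq_draining rule from the patch is modeled here.
 */
static int qp_chk_state(struct qp *qp, enum ib_qp_state new_state)
{
	if (qp->attr.qp_state == IB_QPS_SQD &&
	    qp->attr.sq_draining && new_state != IB_QPS_ERR)
		return -1;	/* still draining: only a move to ERR is legal */

	return 0;
}

/* Like the IB_QP_STATE branch of rxe_qp_from_attr(): check and commit the
 * new state atomically; state-dependent work would run after the lock is
 * dropped, as the patch does via rxe_qp_reset()/rxe_qp_sqd()/rxe_qp_error().
 */
static int qp_set_state(struct qp *qp, enum ib_qp_state new_state)
{
	int err;

	pthread_mutex_lock(&qp->state_lock);
	err = qp_chk_state(qp, new_state);
	if (!err)
		qp->attr.qp_state = new_state;
	pthread_mutex_unlock(&qp->state_lock);

	return err;
}

int main(void)
{
	struct qp qp = {
		.state_lock = PTHREAD_MUTEX_INITIALIZER,
		.attr = { .qp_state = IB_QPS_SQD, .sq_draining = 1 },
	};

	/* While draining, RTS is refused (-1) but ERR is allowed (0). */
	printf("SQD(draining) -> RTS: %d\n", qp_set_state(&qp, IB_QPS_RTS));
	printf("SQD(draining) -> ERR: %d\n", qp_set_state(&qp, IB_QPS_ERR));
	return 0;
}

Built with cc -pthread, this prints -1 and then 0, mirroring the rule enforced by __qp_chk_state() in the patch.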