Diffstat (limited to 'drivers/infiniband/hw/qib/qib_rc.c')
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 179
1 file changed, 46 insertions(+), 133 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 031433cb7206..12658e3fe154 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -38,7 +38,6 @@
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
-static void rc_timeout(unsigned long arg);
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
u32 psn, u32 pmtu)
@@ -50,19 +49,10 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
ss->sg_list = wqe->sg_list + 1;
ss->num_sge = wqe->wr.num_sge;
ss->total_len = wqe->length;
- qib_skip_sge(ss, len, 0);
+ rvt_skip_sge(ss, len, false);
return wqe->length - len;
}
-static void start_timer(struct rvt_qp *qp)
-{
- qp->s_flags |= RVT_S_TIMER;
- qp->s_timer.function = rc_timeout;
- /* 4.096 usec. * (1 << qp->timeout) */
- qp->s_timer.expires = jiffies + qp->timeout_jiffies;
- add_timer(&qp->s_timer);
-}
-
/**
* qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
* @dev: the device for this QP
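For context on the timer changes throughout this patch: the removed start_timer() armed the retransmit timer from qp->timeout_jiffies, which encodes the IBTA local ACK timeout of 4.096 usec. * (1 << qp->timeout) noted in the deleted comment. A minimal sketch of that conversion, assuming standard kernel helpers (the function name here is hypothetical; the rvt retry-timer calls used later in this patch rely on the same precomputed value):

    /* Hypothetical helper: 4.096 usec. * (1 << timeout) is
     * 4096 ns << timeout, expressed in microseconds, then
     * rounded up to whole jiffies. */
    static unsigned long ib_timeout_to_jiffies(u8 timeout)
    {
            return usecs_to_jiffies((4096UL * (1UL << timeout)) / 1000UL);
    }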
@@ -144,7 +134,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
e->sent = 1;
}
- ohdr->u.aeth = qib_compute_aeth(qp);
+ ohdr->u.aeth = rvt_compute_aeth(qp);
hwords++;
qp->s_ack_rdma_psn = e->psn;
bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
@@ -153,7 +143,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
qp->s_cur_sge = NULL;
len = 0;
qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
- ohdr->u.at.aeth = qib_compute_aeth(qp);
+ ohdr->u.at.aeth = rvt_compute_aeth(qp);
ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
hwords += sizeof(ohdr->u.at) / sizeof(u32);
bth2 = e->psn & QIB_PSN_MASK;
@@ -174,7 +164,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
if (len > pmtu)
len = pmtu;
else {
- ohdr->u.aeth = qib_compute_aeth(qp);
+ ohdr->u.aeth = rvt_compute_aeth(qp);
hwords++;
qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
@@ -197,11 +187,11 @@ normal:
qp->s_cur_sge = NULL;
if (qp->s_nak_state)
ohdr->u.aeth =
- cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+ cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
(qp->s_nak_state <<
- QIB_AETH_CREDIT_SHIFT));
+ IB_AETH_CREDIT_SHIFT));
else
- ohdr->u.aeth = qib_compute_aeth(qp);
+ ohdr->u.aeth = rvt_compute_aeth(qp);
hwords++;
len = 0;
bth0 = OP(ACKNOWLEDGE) << 24;
@@ -257,7 +247,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_dma_busy)) {
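The ACCESS_ONCE() to READ_ONCE() conversion is mechanical, but the pairing the barrier comment points at is worth spelling out: post_one_send() fills in the WQE and only then advances s_head, so this consumer must take a single stable read of s_head and order dependent WQE loads after it. A hedged sketch of the two sides (abbreviated, not the full rdmavt code):

    /* Producer, as in post_one_send() (sketch): */
    wqe = rvt_get_swqe_ptr(qp, qp->s_head);
    /* ... initialize *wqe ... */
    smp_wmb();                  /* publish WQE contents before the index */
    qp->s_head = next;

    /* Consumer, as above (sketch): */
    smp_read_barrier_depends(); /* order dependent loads (DEC Alpha) */
    if (qp->s_last == READ_ONCE(qp->s_head))
            goto bail;          /* ring empty, nothing to flush */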
@@ -303,7 +293,8 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
newreq = 0;
if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */
- if (qp->s_tail == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_tail == READ_ONCE(qp->s_head))
goto bail;
/*
* If a fence is requested, wait for previous
@@ -330,7 +321,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
case IB_WR_SEND_WITH_IMM:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
- qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+ rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
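rvt_cmp_msn() replaces qib_cmp24() one for one: both compare 24-bit serial numbers modulo 2^24 by shifting the signed difference into the top 24 bits, so comparisons across the wrap point keep the right sign. A sketch of the helper with a worked wrap case:

    /* <0, 0, >0 as a is before, equal to, or after b (mod 2^24). */
    static inline int cmp24(u32 a, u32 b)
    {
            return (((int)a) - ((int)b)) << 8;
    }

    /* Wrap example: a = 0x000001 has just passed b = 0xFFFFFF:
     * (1 - 0xFFFFFF) << 8 == 0x200 > 0, so a is "after" b. */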
@@ -361,7 +352,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
- qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+ rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
@@ -657,11 +648,11 @@ void qib_send_rc_ack(struct rvt_qp *qp)
if (qp->s_mig_state == IB_MIG_MIGRATED)
bth0 |= IB_BTH_MIG_REQ;
if (qp->r_nak_state)
- ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+ ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
(qp->r_nak_state <<
- QIB_AETH_CREDIT_SHIFT));
+ IB_AETH_CREDIT_SHIFT));
else
- ohdr->u.aeth = qib_compute_aeth(qp);
+ ohdr->u.aeth = rvt_compute_aeth(qp);
lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
qp->remote_ah_attr.sl << 4;
hdr.lrh[0] = cpu_to_be16(lrh0);
@@ -836,7 +827,7 @@ done:
* Back up requester to resend the last un-ACKed request.
* The QP r_lock and s_lock should be held and interrupts disabled.
*/
-static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
+void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
struct qib_ibport *ibp;
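qib_restart_rc() loses its static qualifier because the deleted rc_timeout() was its only timer-path caller in this file; the retry timer now fires in rdmavt and reaches back into this function through a driver callback. Its locking contract, stated in the comment above, could be made machine-checked; a sketch:

    /* The documented contract, expressed with lockdep (sketch): */
    lockdep_assert_held(&qp->r_lock);
    lockdep_assert_held(&qp->s_lock);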
@@ -869,46 +860,6 @@ static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
}
/*
- * This is called from s_timer for missing responses.
- */
-static void rc_timeout(unsigned long arg)
-{
- struct rvt_qp *qp = (struct rvt_qp *)arg;
- struct qib_ibport *ibp;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->r_lock, flags);
- spin_lock(&qp->s_lock);
- if (qp->s_flags & RVT_S_TIMER) {
- ibp = to_iport(qp->ibqp.device, qp->port_num);
- ibp->rvp.n_rc_timeouts++;
- qp->s_flags &= ~RVT_S_TIMER;
- del_timer(&qp->s_timer);
- qib_restart_rc(qp, qp->s_last_psn + 1, 1);
- qib_schedule_send(qp);
- }
- spin_unlock(&qp->s_lock);
- spin_unlock_irqrestore(&qp->r_lock, flags);
-}
-
-/*
- * This is called from s_timer for RNR timeouts.
- */
-void qib_rc_rnr_retry(unsigned long arg)
-{
- struct rvt_qp *qp = (struct rvt_qp *)arg;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & RVT_S_WAIT_RNR) {
- qp->s_flags &= ~RVT_S_WAIT_RNR;
- del_timer(&qp->s_timer);
- qib_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-}
-
-/*
* Set qp->s_sending_psn to the next PSN after the given one.
* This would be psn+1 except when RDMA reads are present.
*/
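The surviving comment states the PSN-accounting rule this helper implements: most requests consume a single PSN, but an RDMA READ reserves one PSN per expected response packet. As a hedged illustration of the arithmetic (names from the surrounding code):

    /* Illustration only: PSNs consumed by one RDMA READ request.
     * A read of len bytes yields DIV_ROUND_UP(len, pmtu) response
     * packets (minimum one, even for len == 0). */
    u32 npkts = len ? DIV_ROUND_UP(len, pmtu) : 1;
    u32 next_psn = (psn + npkts) & QIB_PSN_MASK;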
@@ -944,7 +895,7 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
u32 opcode;
u32 psn;
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
return;
/* Find out where the BTH is */
@@ -971,7 +922,7 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
!(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- start_timer(qp);
+ rvt_add_retry_timer(qp);
while (qp->s_last != qp->s_acked) {
u32 s_last;
@@ -1084,12 +1035,6 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
u32 ack_psn;
int diff;
- /* Remove QP from retry timer */
- if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
- qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- }
-
/*
* Note that NAKs implicitly ACK outstanding SEND and RDMA write
* requests and implicitly NAK RDMA read and atomic requests issued
@@ -1097,7 +1042,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
* request but will include an ACK'ed request(s).
*/
ack_psn = psn;
- if (aeth >> 29)
+ if (aeth >> IB_AETH_NAK_SHIFT)
ack_psn--;
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
ibp = to_iport(qp->ibqp.device, qp->port_num);
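The bare aeth >> 29 above now reads as IB_AETH_NAK_SHIFT. For reference, the AETH word packs a 3-bit NAK type, a 5-bit credit/syndrome field, and a 24-bit MSN, which is exactly what do_rc_ack() goes on to dissect; a decode sketch using the constants this patch switches to:

    u32 nak  = aeth >> IB_AETH_NAK_SHIFT;       /* 0 ACK, 1 RNR NAK, 3 NAK */
    u32 code = (aeth >> IB_AETH_CREDIT_SHIFT) &
               IB_AETH_CREDIT_MASK;             /* credits, RNR timer, or NAK code */
    u32 msn  = aeth & IB_MSN_MASK;              /* 24-bit message sequence number */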
@@ -1177,7 +1122,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
break;
}
- switch (aeth >> 29) {
+ switch (aeth >> IB_AETH_NAK_SHIFT) {
case 0: /* ACK */
this_cpu_inc(*ibp->rvp.rc_acks);
if (qp->s_acked != qp->s_tail) {
@@ -1185,27 +1130,30 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
* We are expecting more ACKs so
* reset the retransmit timer.
*/
- start_timer(qp);
+ rvt_mod_retry_timer(qp);
/*
* We can stop resending the earlier packets and
* continue with the next packet the receiver wants.
*/
if (qib_cmp24(qp->s_psn, psn) <= 0)
reset_psn(qp, psn + 1);
- } else if (qib_cmp24(qp->s_psn, psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = psn + 1;
+ } else {
+ /* No more acks - kill all timers */
+ rvt_stop_rc_timers(qp);
+ if (qib_cmp24(qp->s_psn, psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = psn + 1;
+ }
}
if (qp->s_flags & RVT_S_WAIT_ACK) {
qp->s_flags &= ~RVT_S_WAIT_ACK;
qib_schedule_send(qp);
}
- qib_get_credit(qp, aeth);
+ rvt_get_credit(qp, aeth);
qp->s_rnr_retry = qp->s_rnr_retry_cnt;
qp->s_retry = qp->s_retry_cnt;
update_last_psn(qp, psn);
- ret = 1;
- goto bail;
+ return 1;
case 1: /* RNR NAK */
ibp->rvp.n_rnr_naks++;
@@ -1228,21 +1176,17 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
reset_psn(qp, psn);
qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
- qp->s_flags |= RVT_S_WAIT_RNR;
- qp->s_timer.function = qib_rc_rnr_retry;
- qp->s_timer.expires = jiffies + usecs_to_jiffies(
- ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
- QIB_AETH_CREDIT_MASK]);
- add_timer(&qp->s_timer);
- goto bail;
+ rvt_stop_rc_timers(qp);
+ rvt_add_rnr_timer(qp, aeth);
+ return 0;
case 3: /* NAK */
if (qp->s_acked == qp->s_tail)
goto bail;
/* The last valid PSN is the previous PSN. */
update_last_psn(qp, psn - 1);
- switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
- QIB_AETH_CREDIT_MASK) {
+ switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
+ IB_AETH_CREDIT_MASK) {
case 0: /* PSN sequence error */
ibp->rvp.n_seq_naks++;
/*
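In the RNR NAK arm above, the open-coded timer setup collapses into rvt_add_rnr_timer(qp, aeth), which selects the IBTA-defined delay from the same 5-bit timer field (encoding 1 is the 10 usec minimum, encoding 0 the 655.36 msec maximum). A sketch of the lookup the removed lines performed, keeping qib's table name:

    /* The 5-bit RNR timer field picks the wait in microseconds;
     * rvt_add_rnr_timer() sets RVT_S_WAIT_RNR and arms a timer
     * for this long, as the removed qp->s_timer code did. */
    u32 usecs = ib_qib_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
                                 IB_AETH_CREDIT_MASK];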
@@ -1290,6 +1234,7 @@ reserved:
}
bail:
+ rvt_stop_rc_timers(qp);
return ret;
}
@@ -1303,10 +1248,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
struct rvt_swqe *wqe;
/* Remove QP from retry timer */
- if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
- qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- }
+ rvt_stop_rc_timers(qp);
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
@@ -1390,7 +1332,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
/* Ignore invalid responses. */
smp_read_barrier_depends(); /* see post_one_send */
- if (qib_cmp24(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
+ if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
/* Ignore duplicate responses. */
@@ -1399,8 +1341,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
/* Update credits for "ghost" ACKs */
if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
aeth = be32_to_cpu(ohdr->u.aeth);
- if ((aeth >> 29) == 0)
- qib_get_credit(qp, aeth);
+ if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
+ rvt_get_credit(qp, aeth);
}
goto ack_done;
}
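A "ghost" ACK is a duplicate that no longer acknowledges anything new but still carries fresh flow-control state, so only its credit field is consumed. An abridged sketch of what rvt_get_credit() plausibly does, assuming it keeps qib_get_credit()'s scheme (credit_table is the pre-rdmavt qib name):

    u32 credit = (aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK;

    if (credit == IB_AETH_CREDIT_INVAL) {
            qp->s_flags |= RVT_S_UNLIMITED_CREDIT;  /* no flow control */
    } else {
            /* New limit is the AETH's MSN plus the decoded credit
             * count; s_lsn only ever moves forward (24-bit compare). */
            u32 lsn = (aeth + credit_table[credit]) & IB_MSN_MASK;
            if (rvt_cmp_msn(lsn, qp->s_lsn) > 0)
                    qp->s_lsn = lsn;
    }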
@@ -1461,8 +1403,7 @@ read_middle:
* We got a response so update the timeout.
* 4.096 usec. * (1 << qp->timeout)
*/
- qp->s_flags |= RVT_S_TIMER;
- mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
+ rvt_mod_retry_timer(qp);
if (qp->s_flags & RVT_S_WAIT_ACK) {
qp->s_flags &= ~RVT_S_WAIT_ACK;
qib_schedule_send(qp);
@@ -1764,25 +1705,6 @@ send_ack:
return 0;
}
-void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
-{
- unsigned long flags;
- int lastwqe;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- lastwqe = rvt_error_qp(qp, err);
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
-}
-
static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
{
unsigned next;
@@ -1894,17 +1816,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
break;
}
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
- qp->r_flags |= RVT_R_COMM_EST;
- if (qp->ibqp.event_handler) {
- struct ib_event ev;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_COMM_EST;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
- }
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
+ rvt_comm_est(qp);
/* OK, process the packet. */
switch (opcode) {
@@ -2196,7 +2109,7 @@ rnr_nak:
return;
nack_op_err:
- qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */
@@ -2210,7 +2123,7 @@ nack_op_err:
nack_inv_unlck:
spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
- qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
qp->r_nak_state = IB_NAK_INVALID_REQUEST;
qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */
@@ -2224,7 +2137,7 @@ nack_inv:
nack_acc_unlck:
spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
- qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
+ rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
qp->r_ack_psn = qp->r_psn;
send_ack: