Diffstat (limited to 'drivers/infiniband/hw/qib')
 drivers/infiniband/hw/qib/qib.h          |   4
 drivers/infiniband/hw/qib/qib_common.h   |  16
 drivers/infiniband/hw/qib/qib_driver.c   |   2
 drivers/infiniband/hw/qib/qib_file_ops.c | 203
 drivers/infiniband/hw/qib/qib_fs.c       |  18
 drivers/infiniband/hw/qib/qib_iba7322.c  |   4
 drivers/infiniband/hw/qib/qib_init.c     |   2
 drivers/infiniband/hw/qib/qib_qp.c       |   2
 drivers/infiniband/hw/qib/qib_rc.c       |  47
 drivers/infiniband/hw/qib/qib_sdma.c     |   2
 drivers/infiniband/hw/qib/qib_sysfs.c    |  21
 drivers/infiniband/hw/qib/qib_tx.c       |   2
 drivers/infiniband/hw/qib/qib_uc.c       |   6
 drivers/infiniband/hw/qib/qib_ud.c       |  17
 drivers/infiniband/hw/qib/qib_verbs.c    |   7
 15 files changed, 183 insertions(+), 170 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 3593983df7ba..61de0654820e 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -45,6 +45,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/completion.h>
@@ -326,6 +327,9 @@ struct qib_verbs_txreq {
#define QIB_DEFAULT_MTU 4096
+/* max number of IB ports supported per HCA */
+#define QIB_MAX_IB_PORTS 2
+
/*
* Possible IB config parameters for f_get/set_ib_table()
*/
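
The new QIB_MAX_IB_PORTS constant exists so the qib_tx.c hunk further down can size an on-stack array at compile time instead of using a variable-length array; a minimal before/after sketch, assuming only the two-port bound defined here:

	/* Before: VLA on the kernel stack, sized at runtime */
	struct qib_pportdata *pppd_vla[dd->num_pports];

	/* After: compile-time bound; safe because no supported QLogic HCA
	 * exposes more than QIB_MAX_IB_PORTS (2) IB ports. */
	struct qib_pportdata *pppd[QIB_MAX_IB_PORTS];
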
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index b3955ed8f794..145da4040883 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -279,7 +279,7 @@ struct qib_base_info {
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference.
*/
-#define QIB_USER_SWMINOR 10
+#define QIB_USER_SWMINOR 11
#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
@@ -302,6 +302,18 @@ struct qib_base_info {
#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
/*
+ * If the unit is specified via open, HCA choice is fixed. If port is
+ * specified, it's also fixed. Otherwise we try to spread contexts
+ * across ports and HCAs, using different algorithms. WITHIN is
+ * the old default, prior to this mechanism.
+ */
+#define QIB_PORT_ALG_ACROSS 0 /* round robin contexts across HCAs, then
+ * ports; this is the default */
+#define QIB_PORT_ALG_WITHIN 1 /* use all contexts on an HCA (round robin
+ * active ports within), then next HCA */
+#define QIB_PORT_ALG_COUNT 2 /* number of algorithm choices */
+
+/*
* This structure is passed to qib_userinit() to tell the driver where
* user code buffers are, sizes, etc. The offsets and sizes of the
* fields must remain unchanged, for binary compatibility. It can
@@ -319,7 +331,7 @@ struct qib_user_info {
/* size of struct base_info to write to */
__u32 spu_base_info_size;
- __u32 _spu_unused3;
+ __u32 spu_port_alg; /* which QIB_PORT_ALG_*; unused if user minor < 11 */
/*
* If two or more processes wish to share a context, each process
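
A note on how the new field is negotiated: userspace reports its version in spu_userversion, and the kernel honors spu_port_alg only when the reported minor is at least 11 (see the qib_assign_ctxt() hunk below). A hedged userspace-side sketch, where init_user_info() is a hypothetical stand-in for however the process normally fills the struct:

	static void request_port_alg(struct qib_user_info *uinfo)
	{
		init_user_info(uinfo);	/* hypothetical helper */
		uinfo->spu_userversion = QIB_USER_SWVERSION; /* minor now 11 */
		uinfo->spu_port_alg = QIB_PORT_ALG_ACROSS;
	}

Older kernels see the field as _spu_unused3 and ignore it, which is what makes renaming it binary-compatible.
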
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index f15ce076ac49..9cd193603fb1 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -335,7 +335,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
}
- for (last = 0, i = 1; !last; i += !last) {
+ for (last = 0, i = 1; !last && i <= 64; i += !last) {
hdr = dd->f_get_msgheader(dd, rhf_addr);
eflags = qib_hdrget_err_flags(rhf_addr);
etype = qib_hdrget_rcv_type(rhf_addr);
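
The new loop bound caps how many packets one qib_kreceive() call drains from the receive header queue, so a flooded context cannot monopolize the caller. A standalone sketch of the budget pattern (the names and callback here are illustrative, not driver API):

	#define RX_BUDGET 64

	/* process_one() stands in for the per-packet work in qib_kreceive();
	 * it returns nonzero when it consumed the last queued entry. */
	static int drain_queue(int (*process_one)(void *), void *q)
	{
		int i, last;

		for (last = 0, i = 1; !last && i <= RX_BUDGET; i += !last)
			last = process_one(q);
		return last;	/* 0: budget spent, more packets pending */
	}
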
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index a142a9eb5226..6b11645edf35 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1294,128 +1294,130 @@ bail:
return ret;
}
-static inline int usable(struct qib_pportdata *ppd, int active_only)
+static inline int usable(struct qib_pportdata *ppd)
{
struct qib_devdata *dd = ppd->dd;
- u32 linkok = active_only ? QIBL_LINKACTIVE :
- (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE);
return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
- (ppd->lflags & linkok);
+ (ppd->lflags & QIBL_LINKACTIVE);
}
-static int find_free_ctxt(int unit, struct file *fp,
- const struct qib_user_info *uinfo)
+/*
+ * Select a context on the given device, either using a requested port
+ * or the port based on the context number.
+ */
+static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
+ const struct qib_user_info *uinfo)
{
- struct qib_devdata *dd = qib_lookup(unit);
struct qib_pportdata *ppd = NULL;
- int ret;
- u32 ctxt;
+ int ret, ctxt;
- if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) {
- ret = -ENODEV;
- goto bail;
- }
-
- /*
- * If users requests specific port, only try that one port, else
- * select "best" port below, based on context.
- */
- if (uinfo->spu_port) {
- ppd = dd->pport + uinfo->spu_port - 1;
- if (!usable(ppd, 0)) {
+ if (port) {
+ if (!usable(dd->pport + port - 1)) {
ret = -ENETDOWN;
- goto bail;
- }
+ goto done;
+ } else
+ ppd = dd->pport + port - 1;
}
-
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
- if (dd->rcd[ctxt])
- continue;
- /*
- * The setting and clearing of user context rcd[x] protected
- * by the qib_mutex
- */
- if (!ppd) {
- /* choose port based on ctxt, if up, else 1st up */
- ppd = dd->pport + (ctxt % dd->num_pports);
- if (!usable(ppd, 0)) {
- int i;
- for (i = 0; i < dd->num_pports; i++) {
- ppd = dd->pport + i;
- if (usable(ppd, 0))
- break;
- }
- if (i == dd->num_pports) {
- ret = -ENETDOWN;
- goto bail;
- }
- }
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
+ ctxt++)
+ ;
+ if (ctxt == dd->cfgctxts) {
+ ret = -EBUSY;
+ goto done;
+ }
+ if (!ppd) {
+ u32 pidx = ctxt % dd->num_pports;
+ if (usable(dd->pport + pidx))
+ ppd = dd->pport + pidx;
+ else {
+ for (pidx = 0; pidx < dd->num_pports && !ppd;
+ pidx++)
+ if (usable(dd->pport + pidx))
+ ppd = dd->pport + pidx;
}
- ret = setup_ctxt(ppd, ctxt, fp, uinfo);
- goto bail;
}
- ret = -EBUSY;
+ ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
+done:
+ return ret;
+}
+
+static int find_free_ctxt(int unit, struct file *fp,
+ const struct qib_user_info *uinfo)
+{
+ struct qib_devdata *dd = qib_lookup(unit);
+ int ret;
+
+ if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
+ ret = -ENODEV;
+ else
+ ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
-bail:
return ret;
}
-static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo)
+static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
+ unsigned alg)
{
- struct qib_pportdata *ppd;
- int ret = 0, devmax;
- int npresent, nup;
- int ndev;
+ struct qib_devdata *udd = NULL;
+ int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
u32 port = uinfo->spu_port, ctxt;
devmax = qib_count_units(&npresent, &nup);
+ if (!npresent) {
+ ret = -ENXIO;
+ goto done;
+ }
+ if (nup == 0) {
+ ret = -ENETDOWN;
+ goto done;
+ }
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
-
- /* device portion of usable() */
- if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
- continue;
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
- if (dd->rcd[ctxt])
+ if (alg == QIB_PORT_ALG_ACROSS) {
+ unsigned inuse = ~0U;
+ /* find device (with ACTIVE ports) with fewest ctxts in use */
+ for (ndev = 0; ndev < devmax; ndev++) {
+ struct qib_devdata *dd = qib_lookup(ndev);
+ unsigned cused = 0, cfree = 0;
+ if (!dd)
continue;
- if (port) {
- if (port > dd->num_pports)
- continue;
- ppd = dd->pport + port - 1;
- if (!usable(ppd, 0))
- continue;
- } else {
- /*
- * choose port based on ctxt, if up, else
- * first port that's up for multi-port HCA
- */
- ppd = dd->pport + (ctxt % dd->num_pports);
- if (!usable(ppd, 0)) {
- int j;
-
- ppd = NULL;
- for (j = 0; j < dd->num_pports &&
- !ppd; j++)
- if (usable(dd->pport + j, 0))
- ppd = dd->pport + j;
- if (!ppd)
- continue; /* to next unit */
- }
+ if (port && port <= dd->num_pports &&
+ usable(dd->pport + port - 1))
+ dusable = 1;
+ else
+ for (i = 0; i < dd->num_pports; i++)
+ if (usable(dd->pport + i))
+ dusable++;
+ if (!dusable)
+ continue;
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
+ ctxt++)
+ if (dd->rcd[ctxt])
+ cused++;
+ else
+ cfree++;
+ if (cfree && cused < inuse) {
+ udd = dd;
+ inuse = cused;
}
- ret = setup_ctxt(ppd, ctxt, fp, uinfo);
+ }
+ if (udd) {
+ ret = choose_port_ctxt(fp, udd, port, uinfo);
goto done;
}
+ } else {
+ for (ndev = 0; ndev < devmax; ndev++) {
+ struct qib_devdata *dd = qib_lookup(ndev);
+ if (dd) {
+ ret = choose_port_ctxt(fp, dd, port, uinfo);
+ if (!ret)
+ goto done;
+ if (ret == -EBUSY)
+ dusable++;
+ }
+ }
}
-
- if (npresent) {
- if (nup == 0)
- ret = -ENETDOWN;
- else
- ret = -EBUSY;
- } else
- ret = -ENXIO;
+ ret = dusable ? -EBUSY : -ENETDOWN;
done:
return ret;
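
In words: QIB_PORT_ALG_ACROSS visits every unit, skips those with no usable port, counts contexts in use, and picks the unit that still has free contexts with the lowest in-use count; QIB_PORT_ALG_WITHIN just takes the first unit that can grant a context. A condensed sketch of the ACROSS pass (error handling elided; has_usable_port() is a hypothetical helper standing in for the port checks above):

	static struct qib_devdata *pick_across(int devmax, u32 port)
	{
		struct qib_devdata *udd = NULL;
		unsigned inuse = ~0U;
		int ndev;

		for (ndev = 0; ndev < devmax; ndev++) {
			struct qib_devdata *dd = qib_lookup(ndev);
			unsigned cused = 0, cfree = 0;
			u32 ctxt;

			if (!dd || !has_usable_port(dd, port))
				continue;
			for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++)
				if (dd->rcd[ctxt])
					cused++;
				else
					cfree++;
			if (cfree && cused < inuse) { /* fewest in use wins */
				udd = dd;
				inuse = cused;
			}
		}
		return udd;	/* NULL: caller reports -EBUSY or -ENETDOWN */
	}
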
@@ -1481,7 +1483,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
{
int ret;
int i_minor;
- unsigned swmajor, swminor;
+ unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
/* Check to be sure we haven't already initialized this file */
if (ctxt_fp(fp)) {
@@ -1498,6 +1500,9 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
swminor = uinfo->spu_userversion & 0xffff;
+ if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
+ alg = uinfo->spu_port_alg;
+
mutex_lock(&qib_mutex);
if (qib_compatible_subctxts(swmajor, swminor) &&
@@ -1514,7 +1519,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
if (i_minor)
ret = find_free_ctxt(i_minor - 1, fp, uinfo);
else
- ret = get_a_ctxt(fp, uinfo);
+ ret = get_a_ctxt(fp, uinfo, alg);
done_chk_sdma:
if (!ret) {
@@ -1862,7 +1867,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd)
{
int ret = 0;
- if (!usable(rcd->ppd, 1)) {
+ if (!usable(rcd->ppd)) {
int i;
/*
* if link is down, or otherwise not usable, delay
@@ -1881,7 +1886,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd)
set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
&rcd->user_event_mask[i]);
}
- for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++)
+ for (i = 0; !usable(rcd->ppd) && i < 300; i++)
msleep(100);
ret = -ENETDOWN;
}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 844954bf417b..9f989c0ba9d3 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -135,8 +135,8 @@ static ssize_t driver_names_read(struct file *file, char __user *buf,
}
static const struct file_operations driver_ops[] = {
- { .read = driver_stats_read, },
- { .read = driver_names_read, },
+ { .read = driver_stats_read, .llseek = generic_file_llseek, },
+ { .read = driver_names_read, .llseek = generic_file_llseek, },
};
/* read the per-device counters */
@@ -164,8 +164,8 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
}
static const struct file_operations cntr_ops[] = {
- { .read = dev_counters_read, },
- { .read = dev_names_read, },
+ { .read = dev_counters_read, .llseek = generic_file_llseek, },
+ { .read = dev_names_read, .llseek = generic_file_llseek, },
};
/*
@@ -210,9 +210,9 @@ static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
}
static const struct file_operations portcntr_ops[] = {
- { .read = portnames_read, },
- { .read = portcntrs_1_read, },
- { .read = portcntrs_2_read, },
+ { .read = portnames_read, .llseek = generic_file_llseek, },
+ { .read = portcntrs_1_read, .llseek = generic_file_llseek, },
+ { .read = portcntrs_2_read, .llseek = generic_file_llseek, },
};
/*
@@ -261,8 +261,8 @@ static ssize_t qsfp_2_read(struct file *file, char __user *buf,
}
static const struct file_operations qsfp_ops[] = {
- { .read = qsfp_1_read, },
- { .read = qsfp_2_read, },
+ { .read = qsfp_1_read, .llseek = generic_file_llseek, },
+ { .read = qsfp_2_read, .llseek = generic_file_llseek, },
};
static ssize_t flash_read(struct file *file, char __user *buf,
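
All four qib_fs.c hunks make the same fix: at the time of this patch, a file_operations with no .llseek fell back to default_llseek(), which still took the big kernel lock; naming generic_file_llseek explicitly avoids that and gives these read-only counter files sane seek semantics. The pattern, hedged as a generic sketch:

	/* example_stats_read is a hypothetical reader like driver_stats_read */
	static const struct file_operations example_stats_ops = {
		.read	= example_stats_read,
		.llseek	= generic_file_llseek,
	};
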
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5eedf83e2c3b..584d443b5335 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -5864,7 +5864,7 @@ static void write_7322_initregs(struct qib_devdata *dd)
* Doesn't clear any of the error bits that might be set.
*/
val = TIDFLOW_ERRBITS; /* these are W1C */
- for (i = 0; i < dd->ctxtcnt; i++) {
+ for (i = 0; i < dd->cfgctxts; i++) {
int flow;
for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
@@ -7271,6 +7271,8 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
data = qib_read_kreg_port(ppd, krp_serdesctrl);
+ /* Turn off IB latency mode */
+ data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
qib_write_kreg_port(ppd, krp_serdesctrl, data |
SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index a873dd596e81..f1d16d3a01f6 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -93,7 +93,7 @@ unsigned long *qib_cpulist;
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
if (!qib_cfgctxts)
- dd->cfgctxts = dd->ctxtcnt;
+ dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
else if (qib_cfgctxts < dd->num_pports)
dd->cfgctxts = dd->ctxtcnt;
else if (qib_cfgctxts <= dd->ctxtcnt)
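
The new default scales with the machine rather than enabling every chip context: on a hypothetical HCA whose first_user_ctxt is 2 (two kernel contexts) in a box with 8 online CPUs, cfgctxts now comes out as 2 + 8 = 10 unless qib_cfgctxts overrides it, presumably still bounded elsewhere by the chip's ctxtcnt.
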
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index e0f65e39076b..6c39851d2ded 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -450,7 +450,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
*
* Flushes both send and receive work queues.
* Returns true if last WQE event should be generated.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
* If we are already in error state, just return.
*/
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 40c0a373719c..a0931119bd78 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -868,7 +868,7 @@ done:
/*
* Back up requester to resend the last un-ACKed request.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
*/
static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
{
@@ -911,7 +911,8 @@ static void rc_timeout(unsigned long arg)
struct qib_ibport *ibp;
unsigned long flags;
- spin_lock_irqsave(&qp->s_lock, flags);
+ spin_lock_irqsave(&qp->r_lock, flags);
+ spin_lock(&qp->s_lock);
if (qp->s_flags & QIB_S_TIMER) {
ibp = to_iport(qp->ibqp.device, qp->port_num);
ibp->n_rc_timeouts++;
@@ -920,7 +921,8 @@ static void rc_timeout(unsigned long arg)
qib_restart_rc(qp, qp->s_last_psn + 1, 1);
qib_schedule_send(qp);
}
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
@@ -1414,10 +1416,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
spin_lock_irqsave(&qp->s_lock, flags);
- /* Double check we can process this now that we hold the s_lock. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- goto ack_done;
-
/* Ignore invalid responses. */
if (qib_cmp24(psn, qp->s_next_psn) >= 0)
goto ack_done;
@@ -1661,9 +1659,6 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
ibp->n_rc_dupreq++;
spin_lock_irqsave(&qp->s_lock, flags);
- /* Double check we can process this now that we hold the s_lock. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- goto unlock_done;
for (i = qp->r_head_ack_queue; ; i = prev) {
if (i == qp->s_tail_ack_queue)
@@ -1878,9 +1873,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
psn = be32_to_cpu(ohdr->bth[2]);
opcode >>= 24;
- /* Prevent simultaneous processing after APM on different CPUs */
- spin_lock(&qp->r_lock);
-
/*
* Process responses (ACKs) before anything else. Note that the
* packet sequence number will be for something in the send work
@@ -1891,14 +1883,14 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
hdrsize, pmtu, rcd);
- goto runlock;
+ return;
}
/* Compute 24 bits worth of difference. */
diff = qib_cmp24(psn, qp->r_psn);
if (unlikely(diff)) {
if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
- goto runlock;
+ return;
goto send_ack;
}
@@ -2090,9 +2082,6 @@ send_last:
if (next > QIB_MAX_RDMA_ATOMIC)
next = 0;
spin_lock_irqsave(&qp->s_lock, flags);
- /* Double check we can process this while holding the s_lock. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- goto srunlock;
if (unlikely(next == qp->s_tail_ack_queue)) {
if (!qp->s_ack_queue[next].sent)
goto nack_inv_unlck;
@@ -2146,7 +2135,7 @@ send_last:
qp->s_flags |= QIB_S_RESP_PENDING;
qib_schedule_send(qp);
- goto srunlock;
+ goto sunlock;
}
case OP(COMPARE_SWAP):
@@ -2165,9 +2154,6 @@ send_last:
if (next > QIB_MAX_RDMA_ATOMIC)
next = 0;
spin_lock_irqsave(&qp->s_lock, flags);
- /* Double check we can process this while holding the s_lock. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- goto srunlock;
if (unlikely(next == qp->s_tail_ack_queue)) {
if (!qp->s_ack_queue[next].sent)
goto nack_inv_unlck;
@@ -2213,7 +2199,7 @@ send_last:
qp->s_flags |= QIB_S_RESP_PENDING;
qib_schedule_send(qp);
- goto srunlock;
+ goto sunlock;
}
default:
@@ -2227,7 +2213,7 @@ send_last:
/* Send an ACK if requested or required. */
if (psn & (1 << 31))
goto send_ack;
- goto runlock;
+ return;
rnr_nak:
qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
@@ -2238,7 +2224,7 @@ rnr_nak:
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
- goto runlock;
+ return;
nack_op_err:
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
@@ -2250,7 +2236,7 @@ nack_op_err:
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
- goto runlock;
+ return;
nack_inv_unlck:
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2264,7 +2250,7 @@ nack_inv:
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
- goto runlock;
+ return;
nack_acc_unlck:
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2274,13 +2260,6 @@ nack_acc:
qp->r_ack_psn = qp->r_psn;
send_ack:
qib_send_rc_ack(qp);
-runlock:
- spin_unlock(&qp->r_lock);
- return;
-
-srunlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- spin_unlock(&qp->r_lock);
return;
sunlock:
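
The common thread through the qib_rc.c, qib_sdma.c, and qib_verbs.c hunks is a single lock order, r_lock before s_lock, so the retry timer, the SDMA error path, and the receive path can no longer race each other on the same QP. A minimal sketch of the convention, assuming only the two spinlocks:

	static void touch_both_sides(struct qib_qp *qp)
	{
		unsigned long flags;

		/* r_lock (receive state) always nests outside s_lock */
		spin_lock_irqsave(&qp->r_lock, flags);
		spin_lock(&qp->s_lock);
		/* ... e.g. qib_restart_rc() or qib_send_complete() ... */
		spin_unlock(&qp->s_lock);
		spin_unlock_irqrestore(&qp->r_lock, flags);
	}
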
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index b8456881f7f6..cad44491320b 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -656,6 +656,7 @@ unmap:
}
qp = tx->qp;
qib_put_txreq(tx);
+ spin_lock(&qp->r_lock);
spin_lock(&qp->s_lock);
if (qp->ibqp.qp_type == IB_QPT_RC) {
/* XXX what about error sending RDMA read responses? */
@@ -664,6 +665,7 @@ unmap:
} else if (qp->s_wqe)
qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->r_lock);
/* return zero to process the next send work request */
goto unlock;
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index dab4d9f4a2cc..d50a33fe8bbc 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -347,7 +347,7 @@ static struct kobj_type qib_sl2vl_ktype = {
#define QIB_DIAGC_ATTR(N) \
static struct qib_diagc_attr qib_diagc_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0444 }, \
+ .attr = { .name = __stringify(N), .mode = 0664 }, \
.counter = offsetof(struct qib_ibport, n_##N) \
}
@@ -403,8 +403,27 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
}
+static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t size)
+{
+ struct qib_diagc_attr *dattr =
+ container_of(attr, struct qib_diagc_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, diagc_kobj);
+ struct qib_ibport *qibp = &ppd->ibport_data;
+ char *endp;
+ long val = simple_strtol(buf, &endp, 0);
+
+ if (val < 0 || endp == buf)
+ return -EINVAL;
+
+ *(u32 *)((char *) qibp + dattr->counter) = val;
+ return size;
+}
+
static const struct sysfs_ops qib_diagc_ops = {
.show = diagc_attr_show,
+ .store = diagc_attr_store,
};
static struct kobj_type qib_diagc_ktype = {
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index af30232b6831..7f36454c225e 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -170,7 +170,7 @@ static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
unsigned cnt)
{
- struct qib_pportdata *ppd, *pppd[dd->num_pports];
+ struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
unsigned i;
unsigned long flags;
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 6c7fe78cca64..b9c8b6346c1b 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -272,9 +272,6 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
opcode >>= 24;
memset(&wc, 0, sizeof wc);
- /* Prevent simultaneous processing after APM on different CPUs */
- spin_lock(&qp->r_lock);
-
/* Compare the PSN verses the expected PSN. */
if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
/*
@@ -534,7 +531,6 @@ rdma_last:
}
qp->r_psn++;
qp->r_state = opcode;
- spin_unlock(&qp->r_lock);
return;
rewind:
@@ -542,12 +538,10 @@ rewind:
qp->r_sge.num_sge = 0;
drop:
ibp->n_pkt_drops++;
- spin_unlock(&qp->r_lock);
return;
op_err:
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- spin_unlock(&qp->r_lock);
return;
sunlock:
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index c838cda73347..e1b3da2a1f85 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -535,13 +535,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
wc.byte_len = tlen + sizeof(struct ib_grh);
/*
- * We need to serialize getting a receive work queue entry and
- * generating a completion for it against QPs sending to this QP
- * locally.
- */
- spin_lock(&qp->r_lock);
-
- /*
* Get the next work request entry to find where to put the data.
*/
if (qp->r_flags & QIB_R_REUSE_SGE)
@@ -552,19 +545,19 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
ret = qib_get_rwqe(qp, 0);
if (ret < 0) {
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- goto bail_unlock;
+ return;
}
if (!ret) {
if (qp->ibqp.qp_num == 0)
ibp->n_vl15_dropped++;
- goto bail_unlock;
+ return;
}
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
qp->r_flags |= QIB_R_REUSE_SGE;
ibp->n_pkt_drops++;
- goto bail_unlock;
+ return;
}
if (has_grh) {
qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -579,7 +572,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
qp->r_sge.sge = *qp->r_sge.sg_list++;
}
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
- goto bail_unlock;
+ return;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
@@ -601,7 +594,5 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail_unlock:
- spin_unlock(&qp->r_lock);
bail:;
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index cda8f4173d23..9fab40488850 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -550,10 +550,12 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
{
struct qib_ibport *ibp = &rcd->ppd->ibport_data;
+ spin_lock(&qp->r_lock);
+
/* Check for valid receive state. */
if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
ibp->n_pkt_drops++;
- return;
+ goto unlock;
}
switch (qp->ibqp.qp_type) {
@@ -577,6 +579,9 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
default:
break;
}
+
+unlock:
+ spin_unlock(&qp->r_lock);
}
/**
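
Taken with the qib_uc.c and qib_ud.c hunks above, this moves the per-protocol "prevent simultaneous processing after APM" locking into the one dispatcher, so the QIB_PROCESS_RECV_OK check happens exactly once per packet. The resulting shape, condensed from qib_qp_rcv() above (arguments as declared elsewhere in qib_verbs.c):

	spin_lock(&qp->r_lock);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;
		goto unlock;
	}
	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
		break;
	/* ... UC, UD, SMI, GSI cases ... */
	}
unlock:
	spin_unlock(&qp->r_lock);
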