author: Mike Marciniszyn <mike.marciniszyn@intel.com> 2016-07-25 13:39:39 -0700
committer: Doug Ledford <dledford@redhat.com> 2016-08-02 16:00:58 -0400
commit: 856cc4c237add46510c8ae91764f4eda31a9e1cf
tree: 346dbab7f942e9afd36b7c1b2bb25451d3106e36 /include/rdma/rdmavt_qp.h
parent: IB/hfi1: Fix trace message units
IB/hfi1: Add the capability for reserved operations
This fix allows for support of in-kernel reserved operations without impacting the ULP user. The low-level driver can register a non-zero value which will be transparently added to the send queue size and hidden from the ULP in every respect. ULP post sends will never see a full queue due to a reserved post send, and reserved operations will never exceed that registered value. The s_avail will continue to track the ULP swqe availability, and the difference between the registered reserved value and the reserved operations in use will track the reserved availability.

Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
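As a rough illustration of the accounting the message describes, the sketch below derives the ULP-visible availability and the remaining reserved availability for a send queue sized with a hidden reserve. The structure and helper names (example_qp, example_sqsize, and friends) are assumptions made for this example, not the actual rdmavt code.

#include <linux/types.h>
#include <linux/atomic.h>

/*
 * Illustrative sketch only: field names and layout are assumptions,
 * not the rdmavt implementation from this patch.
 */
struct example_qp {
	u32 s_size;               /* total send queue slots (includes the reserve) */
	u32 s_head;               /* producer index, in [0, s_size) */
	u32 s_last;               /* consumer index, in [0, s_size) */
	u32 reserved_ops;         /* value registered by the low-level driver */
	atomic_t s_reserved_used; /* reserved slots currently in use */
};

/* Queue sized at create time: the ULP request plus the hidden reserve. */
static u32 example_sqsize(u32 max_send_wr, u32 reserved_ops)
{
	return max_send_wr + 1 + reserved_ops;
}

/* Slots the ULP may still post to; the reserve is never shown to the ULP. */
static u32 example_ulp_avail(struct example_qp *qp)
{
	u32 used = qp->s_head >= qp->s_last ?
		   qp->s_head - qp->s_last :
		   qp->s_size - (qp->s_last - qp->s_head);
	u32 reserve_free = qp->reserved_ops - atomic_read(&qp->s_reserved_used);

	/* one slot stays empty to distinguish a full ring from an empty one */
	return qp->s_size - 1 - used - reserve_free;
}

/* Slots still available for in-kernel reserved operations. */
static u32 example_reserved_avail(struct example_qp *qp)
{
	return qp->reserved_ops - atomic_read(&qp->s_reserved_used);
}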
Diffstat (limited to 'include/rdma/rdmavt_qp.h')
-rw-r--r--  include/rdma/rdmavt_qp.h | 50
1 file changed, 50 insertions(+), 0 deletions(-)
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index b0ab12b30f1e..56adcfcabe0b 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -145,6 +145,11 @@
(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
/*
+ * Internal send flags
+ */
+#define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START
+
+/*
* Send work request queue entry.
* The size of the sg_list is determined when the QP is created and stored
* in qp->s_max_sge.
@@ -232,6 +237,7 @@ struct rvt_ack_entry {
#define RVT_OPERATION_ATOMIC 0x00000002
#define RVT_OPERATION_ATOMIC_SGE 0x00000004
#define RVT_OPERATION_LOCAL 0x00000008
+#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
@@ -328,6 +334,7 @@ struct rvt_qp {
u32 s_next_psn; /* PSN for next request */
u32 s_avail; /* number of entries avail */
u32 s_ssn; /* SSN of tail entry */
+ atomic_t s_reserved_used; /* reserved entries in use */
spinlock_t s_lock ____cacheline_aligned_in_smp;
u32 s_flags;
@@ -459,6 +466,49 @@ static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
rq->max_sge * sizeof(struct ib_sge)) * n);
}
+/**
+ * rvt_qp_wqe_reserve - reserve operation
+ * @qp - the rvt qp
+ * @wqe - the send wqe
+ *
+ * This routine is used in post send to record
+ * a wqe-relative reserved operation use.
+ */
+static inline void rvt_qp_wqe_reserve(
+ struct rvt_qp *qp,
+ struct rvt_swqe *wqe)
+{
+ wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
+ atomic_inc(&qp->s_reserved_used);
+}
+
+/**
+ * rvt_qp_wqe_unreserve - clean reserved operation
+ * @qp - the rvt qp
+ * @wqe - the send wqe
+ *
+ * This decrements the reserve use count.
+ *
+ * This call MUST precede the change to
+ * s_last to ensure that post send sees a stable
+ * s_avail.
+ *
+ * An smp_mb__after_atomic() is used to ensure
+ * the compiler does not reorder the s_last
+ * ring index update and the decrement of s_reserved_used.
+ */
+static inline void rvt_qp_wqe_unreserve(
+ struct rvt_qp *qp,
+ struct rvt_swqe *wqe)
+{
+ if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+ wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
+ atomic_dec(&qp->s_reserved_used);
+ /* ensure no compiler reordering up to the s_last change */
+ smp_mb__after_atomic();
+ }
+}
+
extern const int ib_rvt_state_ops[];
struct rvt_dev_info;
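For context, here is a hedged sketch of how a low-level driver might consume the new capability: flag a reserve-eligible operation with RVT_OPERATION_USE_RESERVE in its post parameters, call rvt_qp_wqe_reserve() when posting such a wqe, and call rvt_qp_wqe_unreserve() at completion time before advancing s_last. Everything outside the rvt_* names shown in this patch (the example_* functions and the sample post_parms entry) is an illustrative assumption, not code from this series.

#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

/*
 * Illustrative driver-side usage sketch; the opcode chosen here and the
 * example_* names are assumptions made for this example.
 */
static const struct rvt_operation_params example_post_parms[RVT_OPERATION_MAX] = {
	[IB_WR_RDMA_WRITE] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_RC),
		/* allow this opcode to consume reserved send queue entries */
		.flags = RVT_OPERATION_USE_RESERVE,
	},
};

/* Post path: account for an in-kernel (reserved) operation. */
static void example_post_internal_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	/* Flags the wqe and increments qp->s_reserved_used. */
	rvt_qp_wqe_reserve(qp, wqe);
	/* ... hand the wqe to the send engine here ... */
}

/* Completion path: release the reserved slot before moving s_last. */
static void example_complete_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 last)
{
	/* Must precede the s_last update so post send sees a stable s_avail. */
	rvt_qp_wqe_unreserve(qp, wqe);
	qp->s_last = last;
}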