author     Sean Hefty <sean.hefty@intel.com>  2005-10-25 10:51:39 -0700
committer  Roland Dreier <rolandd@cisco.com>  2005-10-25 10:51:39 -0700
commit     34816ad98efe4d47ffd858a0345321f9d85d9420
tree       8a5ed6a9b80e667c4c02d9993711ced06d158555 /drivers/infiniband/core/mad_rmpp.c
parent     [IB] CM: Fix initialization of QP attributes for UC QPs.
[IB] Fix MAD layer DMA mappings to avoid touching data buffer once mapped
The MAD layer was violating the DMA API by touching data buffers used for sends after the DMA mapping was done. This causes problems on non-cache-coherent architectures, because the device doing DMA won't see updates to the payload buffers that exist only in the CPU cache.

Fix this by having all MAD consumers use ib_create_send_mad() to allocate their send buffers, and moving the DMA mapping into the MAD layer so it can be done just before calling send (and after any modifications of the send buffer by the MAD layer).

Tested on a non-cache-coherent PowerPC 440SPe system.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
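For reference, the consumer-side send pattern this patch converges on can be sketched from the ib_create_send_mad()/ib_post_send_mad() calls visible in the diff below. This is a minimal illustrative sketch, not code from the patch: the wrapper name post_rmpp_ack_sketch and its argument list are invented, and it assumes the <rdma/ib_mad.h> context of this file. The rule it demonstrates is that all writes to the send buffer (payload and ah) happen before ib_post_send_mad(), since the MAD layer now performs the DMA mapping at post time.

	/*
	 * Illustrative sketch only -- not part of the patch.  Assembled
	 * from the calls in the diff below; error handling trimmed.
	 */
	static int post_rmpp_ack_sketch(struct ib_mad_agent *agent,
					struct ib_mad_recv_wc *recv_wc,
					struct ib_ah *ah)
	{
		struct ib_mad_send_buf *msg;
		int ret;

		/* The address handle is no longer a parameter here ... */
		msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
					 recv_wc->wc->pkey_index, 1,
					 IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA,
					 GFP_KERNEL);
		if (IS_ERR(msg))
			return PTR_ERR(msg);

		/* ... it is assigned to the send buffer instead. */
		msg->ah = ah;

		/*
		 * Build the MAD in msg->mad here.  No writes to the buffer
		 * may follow ib_post_send_mad(): the MAD layer DMA-maps the
		 * buffer inside the post call.
		 */
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			ib_free_send_mad(msg);
		return ret;
	}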
Diffstat (limited to 'drivers/infiniband/core/mad_rmpp.c')
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c  87
1 file changed, 39 insertions(+), 48 deletions(-)
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index e23836d0e21b..ba112cd5f93c 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -103,12 +103,12 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
static int data_offset(u8 mgmt_class)
{
if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
- return offsetof(struct ib_sa_mad, data);
+ return IB_MGMT_SA_HDR;
else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
(mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
- return offsetof(struct ib_vendor_mad, data);
+ return IB_MGMT_VENDOR_HDR;
else
- return offsetof(struct ib_rmpp_mad, data);
+ return IB_MGMT_RMPP_HDR;
}
static void format_ack(struct ib_rmpp_mad *ack,
@@ -135,21 +135,18 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
struct ib_mad_recv_wc *recv_wc)
{
struct ib_mad_send_buf *msg;
- struct ib_send_wr *bad_send_wr;
- int hdr_len, ret;
+ int ret;
- hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
- recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
- hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
- GFP_KERNEL);
+ recv_wc->wc->pkey_index, 1, IB_MGMT_RMPP_HDR,
+ IB_MGMT_RMPP_DATA, GFP_KERNEL);
if (!msg)
return;
- format_ack((struct ib_rmpp_mad *) msg->mad,
- (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
- ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
- &bad_send_wr);
+ format_ack(msg->mad, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad,
+ rmpp_recv);
+ msg->ah = rmpp_recv->ah;
+ ret = ib_post_send_mad(msg, NULL);
if (ret)
ib_free_send_mad(msg);
}
@@ -160,30 +157,31 @@ static int alloc_response_msg(struct ib_mad_agent *agent,
{
struct ib_mad_send_buf *m;
struct ib_ah *ah;
- int hdr_len;
ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
recv_wc->recv_buf.grh, agent->port_num);
if (IS_ERR(ah))
return PTR_ERR(ah);
- hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
- recv_wc->wc->pkey_index, ah, 1, hdr_len,
- sizeof(struct ib_rmpp_mad) - hdr_len,
- GFP_KERNEL);
+ recv_wc->wc->pkey_index, 1,
+ IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA, GFP_KERNEL);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
return PTR_ERR(m);
}
+ m->ah = ah;
*msg = m;
return 0;
}
-static void free_msg(struct ib_mad_send_buf *msg)
+void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
- ib_destroy_ah(msg->send_wr.wr.ud.ah);
- ib_free_send_mad(msg);
+ struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;
+
+ if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_ACK)
+ ib_destroy_ah(mad_send_wc->send_buf->ah);
+ ib_free_send_mad(mad_send_wc->send_buf);
}
static void nack_recv(struct ib_mad_agent_private *agent,
@@ -191,14 +189,13 @@ static void nack_recv(struct ib_mad_agent_private *agent,
{
struct ib_mad_send_buf *msg;
struct ib_rmpp_mad *rmpp_mad;
- struct ib_send_wr *bad_send_wr;
int ret;
ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
if (ret)
return;
- rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
+ rmpp_mad = msg->mad;
memcpy(rmpp_mad, recv_wc->recv_buf.mad,
data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));
@@ -210,9 +207,11 @@ static void nack_recv(struct ib_mad_agent_private *agent,
rmpp_mad->rmpp_hdr.seg_num = 0;
rmpp_mad->rmpp_hdr.paylen_newwin = 0;
- ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
- if (ret)
- free_msg(msg);
+ ret = ib_post_send_mad(msg, NULL);
+ if (ret) {
+ ib_destroy_ah(msg->ah);
+ ib_free_send_mad(msg);
+ }
}
static void recv_timeout_handler(void *data)
@@ -585,7 +584,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
int timeout;
u32 paylen;
- rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+ rmpp_mad = mad_send_wr->send_buf.mad;
ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);
@@ -612,7 +611,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
}
/* 2 seconds for an ACK until we can find the packet lifetime */
- timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
+ timeout = mad_send_wr->send_buf.timeout_ms;
if (!timeout || timeout > 2000)
mad_send_wr->timeout = msecs_to_jiffies(2000);
mad_send_wr->seg_num++;
@@ -640,7 +639,7 @@ static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
wc.status = IB_WC_REM_ABORT_ERR;
wc.vendor_err = rmpp_status;
- wc.wr_id = mad_send_wr->wr_id;
+ wc.send_buf = &mad_send_wr->send_buf;
ib_mad_complete_send_wr(mad_send_wr, &wc);
return;
out:
@@ -694,12 +693,12 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
if (seg_num > mad_send_wr->last_ack) {
mad_send_wr->last_ack = seg_num;
- mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
+ mad_send_wr->retries = mad_send_wr->send_buf.retries;
}
mad_send_wr->newwin = newwin;
if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
/* If no response is expected, the ACK completes the send */
- if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
+ if (!mad_send_wr->send_buf.timeout_ms) {
struct ib_mad_send_wc wc;
ib_mark_mad_done(mad_send_wr);
@@ -707,13 +706,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
wc.status = IB_WC_SUCCESS;
wc.vendor_err = 0;
- wc.wr_id = mad_send_wr->wr_id;
+ wc.send_buf = &mad_send_wr->send_buf;
ib_mad_complete_send_wr(mad_send_wr, &wc);
return;
}
if (mad_send_wr->refcount == 1)
- ib_reset_mad_timeout(mad_send_wr, mad_send_wr->
- send_wr.wr.ud.timeout_ms);
+ ib_reset_mad_timeout(mad_send_wr,
+ mad_send_wr->send_buf.timeout_ms);
} else if (mad_send_wr->refcount == 1 &&
mad_send_wr->seg_num < mad_send_wr->newwin &&
mad_send_wr->seg_num <= mad_send_wr->total_seg) {
@@ -842,7 +841,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
struct ib_rmpp_mad *rmpp_mad;
int i, total_len, ret;
- rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+ rmpp_mad = mad_send_wr->send_buf.mad;
if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
IB_MGMT_RMPP_FLAG_ACTIVE))
return IB_RMPP_RESULT_UNHANDLED;
@@ -863,7 +862,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
(sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
- mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
+ mad_send_wr->pad = total_len - IB_MGMT_RMPP_HDR -
be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
/* We need to wait for the final ACK even if there isn't a response */
@@ -878,23 +877,15 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
struct ib_mad_send_wc *mad_send_wc)
{
struct ib_rmpp_mad *rmpp_mad;
- struct ib_mad_send_buf *msg;
int ret;
- rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+ rmpp_mad = mad_send_wr->send_buf.mad;
if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
IB_MGMT_RMPP_FLAG_ACTIVE))
return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
- if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
- msg = (struct ib_mad_send_buf *) (unsigned long)
- mad_send_wc->wr_id;
- if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
- ib_free_send_mad(msg);
- else
- free_msg(msg);
+ if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
- }
if (mad_send_wc->status != IB_WC_SUCCESS ||
mad_send_wr->status != IB_WC_SUCCESS)
@@ -905,7 +896,7 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
mad_send_wr->timeout =
- msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
+ msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
return IB_RMPP_RESULT_PROCESSED; /* Send done */
}
@@ -926,7 +917,7 @@ int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
struct ib_rmpp_mad *rmpp_mad;
int ret;
- rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+ rmpp_mad = mad_send_wr->send_buf.mad;
if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
IB_MGMT_RMPP_FLAG_ACTIVE))
return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */