author	Tejun Heo <tj@kernel.org>	2012-08-21 13:18:24 -0700
committer	Tejun Heo <tj@kernel.org>	2012-08-21 13:18:24 -0700
commit	e7c2f967445dd2041f0f8e3179cca22bb8bb7f79 (patch)
tree	cb6c1d3593d2497e740d313f55592f41e8ae2039 /drivers/infiniband/core/mad.c
parent	workqueue: use irqsafe timer for delayed_work (diff)
workqueue: use mod_delayed_work() instead of __cancel + queue
Now that mod_delayed_work() is safe to call from IRQ handlers,
__cancel_delayed_work() followed by queue_delayed_work() can be
replaced with mod_delayed_work().  Most conversions are
straight-forward except for the following.

* net/core/link_watch.c: linkwatch_schedule_work() was doing a quite
  elaborate dancing around its delayed_work.  Collapse it such that
  linkwatch_work is queued for immediate execution if LW_URGENT and
  existing timer is kept otherwise.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
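For reference, a minimal sketch of the conversion pattern this series applies, not taken from the patch itself: the workqueue, work item, and function names below (example_wq, example_dwork, example_timeout_fn, reschedule_old, reschedule_new) are illustrative only. Where a caller used to cancel a pending delayed_work and immediately re-queue it to change its expiry, a single mod_delayed_work() call now adjusts the timer, queueing the work if it was not already pending.

/*
 * Illustrative sketch only -- not part of this commit.  The converted
 * callers used __cancel_delayed_work() (since removed); plain
 * cancel_delayed_work() is used here to keep the sketch buildable.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *example_wq;

static void example_timeout_fn(struct work_struct *work)
{
	pr_info("example delayed work fired\n");
}

static DECLARE_DELAYED_WORK(example_dwork, example_timeout_fn);

/* Old pattern: cancel the pending timer (if any), then queue again. */
static void reschedule_old(unsigned long delay)
{
	cancel_delayed_work(&example_dwork);
	queue_delayed_work(example_wq, &example_dwork, delay);
}

/* New pattern: one call modifies the expiry, or queues if idle. */
static void reschedule_new(unsigned long delay)
{
	mod_delayed_work(example_wq, &example_dwork, delay);
}

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_wq", 0, 0);
	if (!example_wq)
		return -ENOMEM;

	reschedule_old(msecs_to_jiffies(200));	/* old idiom */
	reschedule_new(msecs_to_jiffies(100));	/* preferred after this series */
	return 0;
}

static void __exit example_exit(void)
{
	cancel_delayed_work_sync(&example_dwork);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");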
Diffstat (limited to 'drivers/infiniband/core/mad.c')
-rw-r--r--	drivers/infiniband/core/mad.c	14
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b0d0bc8a6fb6..b5938147fc89 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2013,13 +2013,11 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
 		if (time_after(mad_agent_priv->timeout,
 			       mad_send_wr->timeout)) {
 			mad_agent_priv->timeout = mad_send_wr->timeout;
-			__cancel_delayed_work(&mad_agent_priv->timed_work);
 			delay = mad_send_wr->timeout - jiffies;
 			if ((long)delay <= 0)
 				delay = 1;
-			queue_delayed_work(mad_agent_priv->qp_info->
-					   port_priv->wq,
-					   &mad_agent_priv->timed_work, delay);
+			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+					 &mad_agent_priv->timed_work, delay);
 		}
 	}
 }
@@ -2052,11 +2050,9 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
 	list_add(&mad_send_wr->agent_list, list_item);
 
 	/* Reschedule a work item if we have a shorter timeout */
-	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-		__cancel_delayed_work(&mad_agent_priv->timed_work);
-		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
-				   &mad_agent_priv->timed_work, delay);
-	}
+	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
+		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+				 &mad_agent_priv->timed_work, delay);
 }
 
 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
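The first hunk keeps the existing jiffies arithmetic: the absolute deadline in mad_send_wr->timeout is converted to a relative delay and clamped to at least one jiffy, so an already-expired deadline still queues the work immediately. A standalone sketch of that pattern, using a hypothetical helper name that does not exist in the kernel tree:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

/*
 * Hypothetical helper mirroring the arithmetic kept by this patch:
 * convert an absolute jiffies deadline into a relative delay, clamp it
 * to a minimum of one jiffy, and let mod_delayed_work() adjust (or
 * queue) the work item in a single step.
 */
static void reschedule_for_deadline(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long deadline)
{
	unsigned long delay = deadline - jiffies;

	if ((long)delay <= 0)		/* deadline already passed */
		delay = 1;

	mod_delayed_work(wq, dwork, delay);
}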