author    David Howells <dhowells@redhat.com>    2006-11-22 14:54:01 +0000
committer David Howells <dhowells@redhat.com>    2006-11-22 14:54:01 +0000
commit    52bad64d95bd89e08c49ec5a071fa6dcbe5a1a9c
tree      5849b4e3c17daa70a7e81cfdeaddac9ac8a0e953 /net
parent    Merge branch 'merge' of master.kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc
WorkStruct: Separate delayable and non-delayable events.
Separate delayable work items from non-delayable work items by splitting them into a separate structure (delayed_work), which incorporates a work_struct and the timer_list removed from work_struct.

The work_struct struct is huge, and this limits its usefulness: on a 64-bit architecture it is nearly 100 bytes in size. This change reduces that by half for the non-delayable type of event.

Signed-Off-By: David Howells <dhowells@redhat.com>
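For reference, a minimal sketch of the resulting split, abbreviated from the workqueue header of this era; treat the exact field layout as illustrative rather than authoritative:

struct work_struct {
	unsigned long pending;
	struct list_head entry;
	void (*func)(void *);	/* handlers still take a void * in this era */
	void *data;
	void *wq_data;
	/* struct timer_list timer; -- removed by this patch */
};

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;	/* the timer now lives here */
};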
Diffstat (limited to 'net')
-rw-r--r--  net/core/link_watch.c   9
-rw-r--r--  net/sunrpc/cache.c      4
-rw-r--r--  net/sunrpc/rpc_pipe.c   3
-rw-r--r--  net/sunrpc/xprtsock.c   6
4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114744c5..f2ed09e25dfd 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -35,7 +35,7 @@ static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
 static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event, NULL);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
 			unsigned long delay = linkwatch_nextevent - jiffies;
 
 			/* If we wrap around we'll delay it by at most HZ. */
-			if (!delay || delay > HZ)
-				schedule_work(&linkwatch_work);
-			else
-				schedule_delayed_work(&linkwatch_work, delay);
+			if (delay > HZ)
+				delay = 0;
+			schedule_delayed_work(&linkwatch_work, delay);
 		}
 	}
 }
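The rewrite above folds the old two-branch logic into a single call: a zero delay now means "run immediately" (see the zero-delay conversions in the files below), and because jiffies arithmetic is unsigned, a linkwatch_nextevent that has already passed makes the subtraction wrap to a huge value, which the delay > HZ test clamps back to 0. A standalone demonstration of that arithmetic, with hypothetical values and HZ assumed to be 250:

#include <stdio.h>

int main(void)
{
	const unsigned long HZ = 250;	/* assumed value, for illustration */
	unsigned long jiffies = 1000;	/* hypothetical current time */
	unsigned long nextevent = 990;	/* next event already in the past */

	/* Wraps to ULONG_MAX - 9, far larger than HZ. */
	unsigned long delay = nextevent - jiffies;

	/* Same clamp as the patched linkwatch_fire_event(): a wrapped
	 * (or merely oversized) delay is treated as "schedule now". */
	if (delay > HZ)
		delay = 0;

	printf("effective delay: %lu jiffies\n", delay);	/* prints 0 */
	return 0;
}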
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 00cb388ece03..d5725cb1491e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -285,7 +285,7 @@ static struct file_operations content_file_operations;
 static struct file_operations cache_flush_operations;
 
 static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean, NULL);
 
 void cache_register(struct cache_detail *cd)
 {
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
 	spin_unlock(&cache_list_lock);
 
 	/* start the cleaning process */
-	schedule_work(&cache_cleaner);
+	schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)
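The schedule_work() to schedule_delayed_work(..., 0) conversions rely on a zero delay meaning "queue now". A condensed sketch of the corresponding fast path in this era's kernel/workqueue.c (recalled from the 2.6.20 source, not part of this diff):

int fastcall queue_delayed_work(struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	/* Zero delay: skip the timer entirely and queue the embedded
	 * work_struct immediately, matching the old schedule_work(). */
	if (delay == 0)
		return queue_work(wq, work);

	/* ... otherwise arm dwork->timer so that it queues the embedded
	 * work_struct after 'delay' jiffies (details elided) ... */
	return 1;
}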
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9a0b41a97f90..97be3f7fed44 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -837,7 +837,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 		INIT_LIST_HEAD(&rpci->pipe);
 		rpci->pipelen = 0;
 		init_waitqueue_head(&rpci->waitq);
-		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+		INIT_DELAYED_WORK(&rpci->queue_timeout,
+				rpc_timeout_upcall_queue, rpci);
 		rpci->ops = NULL;
 	}
 }
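Note the API shape at this point in the series: INIT_DELAYED_WORK() still takes a handler and a data pointer, so a converted handler keeps receiving its object directly (the container_of()-style work_func_t signature arrived in a later WorkStruct patch). A hypothetical illustration of the pattern; my_pipe, my_timeout_handler, and my_pipe_init are invented names:

struct my_pipe {
	struct delayed_work queue_timeout;	/* was a plain work_struct */
	int pipelen;
};

static void my_timeout_handler(void *data)	/* still void * in this era */
{
	struct my_pipe *pipe = data;
	/* ... drop upcalls that have sat in the pipe too long ... */
}

static void my_pipe_init(struct my_pipe *pipe)
{
	/* delayed_work, handler, handler argument: the same three-argument
	 * shape as the old INIT_WORK() */
	INIT_DELAYED_WORK(&pipe->queue_timeout, my_timeout_handler, pipe);
}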
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 757fc91ef25d..3c7532cd009e 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1262,7 +1262,7 @@ static void xs_connect(struct rpc_task *task)
 			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 	} else {
 		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
-		schedule_work(&xprt->connect_worker);
+		schedule_delayed_work(&xprt->connect_worker, 0);
 
 		/* flush_scheduled_work can sleep... */
 		if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1375,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	/* XXX: header size can vary due to auth type, IPv6, etc. */
 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_UDP_CONN_TO;
 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1420,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_TCP_CONN_TO;
 	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
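A closing note on the "flush_scheduled_work can sleep" comment retained in xs_connect(): after this split, a delayed_work owns a timer that must be stopped separately from any work already sitting on the queue. A sketch of the usual teardown pairing in this era (example_shutdown is a hypothetical helper; both calls are from the API of the time):

static void example_shutdown(struct rpc_xprt *xprt)
{
	/* Stop the timer if it has not fired yet ... */
	cancel_delayed_work(&xprt->connect_worker);
	/* ... and wait out anything the timer already queued; this is
	 * the call that can sleep. */
	flush_scheduled_work();
}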