author		Trond Myklebust <Trond.Myklebust@netapp.com>	2006-03-20 13:44:37 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-03-20 13:44:37 -0500
commit		3feb2d49394b7874348a6e43c076b780c1d222c5 (patch)
tree		6d27d4170fd9435574bd804033793f9d0d69a355 /fs/nfs
parent		NFS: Debugging code for nfs_direct_(read|write)_schedule() (diff)
NFS: Uninline nfs_writedata_(alloc|free) and nfs_readdata_(alloc|free)
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs')
-rw-r--r--	fs/nfs/read.c	| 32
-rw-r--r--	fs/nfs/write.c	| 32
2 files changed, 62 insertions(+), 2 deletions(-)
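
The new allocators in both files follow the same pattern: the fixed-size descriptor comes from a mempool, and its pagevec pointer either aims at a small inline array (page_array) when the request is under NFS_PAGEVEC_SIZE, or at a separately kmalloc'd vector for larger requests. The user-space C sketch below only illustrates that pattern under simplified assumptions; the names rdata, RDATA_INLINE_PAGES, rdata_alloc and rdata_free are hypothetical stand-ins, and calloc replaces the kernel's mempool_alloc/kmalloc with GFP_NOFS.

#include <stdio.h>
#include <stdlib.h>

#define RDATA_INLINE_PAGES 8	/* illustrative stand-in for NFS_PAGEVEC_SIZE */

struct rdata {
	void **pagevec;				/* points at inline or heap storage */
	void *page_array[RDATA_INLINE_PAGES];	/* inline vector for small requests */
};

static struct rdata *rdata_alloc(unsigned int pagecount)
{
	struct rdata *p = calloc(1, sizeof(*p));	/* the patch uses mempool_alloc + memset */

	if (!p)
		return NULL;
	if (pagecount < RDATA_INLINE_PAGES) {
		p->pagevec = &p->page_array[0];
	} else {
		/* one extra slot, mirroring "++pagecount" in the patch */
		p->pagevec = calloc(pagecount + 1, sizeof(void *));
		if (!p->pagevec) {
			free(p);	/* the patch returns the descriptor to its mempool here */
			return NULL;
		}
	}
	return p;
}

static void rdata_free(struct rdata *p)
{
	if (p && p->pagevec != &p->page_array[0])
		free(p->pagevec);	/* only the out-of-line vector needs a separate free */
	free(p);
}

int main(void)
{
	struct rdata *small = rdata_alloc(4);	/* fits in the inline array */
	struct rdata *large = rdata_alloc(64);	/* needs a heap-allocated vector */

	if (small && large)
		printf("small inline: %d, large out-of-line: %d\n",
		       small->pagevec == &small->page_array[0],
		       large->pagevec != &large->page_array[0]);
	rdata_free(small);
	rdata_free(large);
	return 0;
}

Keeping the small-request path on the inline array avoids a second allocation for the common case, while larger requests pay only for the vector they actually need.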
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 2da255f0247f..3961524fd4ab 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -40,10 +40,40 @@ static const struct rpc_call_ops nfs_read_partial_ops;
 static const struct rpc_call_ops nfs_read_full_ops;
 
 static kmem_cache_t *nfs_rdata_cachep;
-mempool_t *nfs_rdata_mempool;
+static mempool_t *nfs_rdata_mempool;
 
 #define MIN_POOL_READ	(32)
 
+struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
+{
+	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);
+
+	if (p) {
+		memset(p, 0, sizeof(*p));
+		INIT_LIST_HEAD(&p->pages);
+		if (pagecount < NFS_PAGEVEC_SIZE)
+			p->pagevec = &p->page_array[0];
+		else {
+			size_t size = ++pagecount * sizeof(struct page *);
+			p->pagevec = kmalloc(size, GFP_NOFS);
+			if (p->pagevec) {
+				memset(p->pagevec, 0, size);
+			} else {
+				mempool_free(p, nfs_rdata_mempool);
+				p = NULL;
+			}
+		}
+	}
+	return p;
+}
+
+void nfs_readdata_free(struct nfs_read_data *p)
+{
+	if (p && (p->pagevec != &p->page_array[0]))
+		kfree(p->pagevec);
+	mempool_free(p, nfs_rdata_mempool);
+}
+
 void nfs_readdata_release(void *data)
 {
 	nfs_readdata_free(data);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index f7c8be0beccf..647e3217c2e1 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -86,7 +86,7 @@ static const struct rpc_call_ops nfs_write_full_ops;
 static const struct rpc_call_ops nfs_commit_ops;
 
 static kmem_cache_t *nfs_wdata_cachep;
-mempool_t *nfs_wdata_mempool;
+static mempool_t *nfs_wdata_mempool;
 static mempool_t *nfs_commit_mempool;
 
 static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
@@ -119,6 +119,36 @@ void nfs_commit_free(struct nfs_write_data *p)
 	mempool_free(p, nfs_commit_mempool);
 }
 
+struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
+{
+	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
+
+	if (p) {
+		memset(p, 0, sizeof(*p));
+		INIT_LIST_HEAD(&p->pages);
+		if (pagecount < NFS_PAGEVEC_SIZE)
+			p->pagevec = &p->page_array[0];
+		else {
+			size_t size = ++pagecount * sizeof(struct page *);
+			p->pagevec = kmalloc(size, GFP_NOFS);
+			if (p->pagevec) {
+				memset(p->pagevec, 0, size);
+			} else {
+				mempool_free(p, nfs_wdata_mempool);
+				p = NULL;
+			}
+		}
+	}
+	return p;
+}
+
+void nfs_writedata_free(struct nfs_write_data *p)
+{
+	if (p && (p->pagevec != &p->page_array[0]))
+		kfree(p->pagevec);
+	mempool_free(p, nfs_wdata_mempool);
+}
+
 void nfs_writedata_release(void *wdata)
 {
 	nfs_writedata_free(wdata);
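
One small detail shared by both new allocators: because of the pre-increment in "size_t size = ++pagecount * sizeof(struct page *);", an out-of-line page vector always gets one slot more than the requested page count. The check below is purely illustrative; the example count and the use of sizeof(void *) as a stand-in for sizeof(struct page *) are assumptions, not values from the patch.

#include <stdio.h>

int main(void)
{
	unsigned int pagecount = 16;			/* example request at or above NFS_PAGEVEC_SIZE */
	size_t slot = sizeof(void *);			/* stand-in for sizeof(struct page *) */
	size_t size = (size_t)(pagecount + 1) * slot;	/* what ++pagecount * sizeof(...) evaluates to */

	printf("%u pages -> %u slots -> %zu bytes\n", pagecount, pagecount + 1, size);
	return 0;
}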