Diffstat (limited to 'drivers/staging/ramster')
-rw-r--r--  drivers/staging/ramster/Kconfig  25
-rw-r--r--  drivers/staging/ramster/Makefile  7
-rw-r--r--  drivers/staging/ramster/TODO  13
-rw-r--r--  drivers/staging/ramster/cluster/Makefile  3
-rw-r--r--  drivers/staging/ramster/ramster.h  113
-rw-r--r--  drivers/staging/ramster/ramster/heartbeat.c (renamed from drivers/staging/ramster/cluster/heartbeat.c)  6
-rw-r--r--  drivers/staging/ramster/ramster/heartbeat.h (renamed from drivers/staging/ramster/cluster/heartbeat.h)  0
-rw-r--r--  drivers/staging/ramster/ramster/masklog.c (renamed from drivers/staging/ramster/cluster/masklog.c)  0
-rw-r--r--  drivers/staging/ramster/ramster/masklog.h (renamed from drivers/staging/ramster/cluster/masklog.h)  0
-rw-r--r--  drivers/staging/ramster/ramster/nodemanager.c (renamed from drivers/staging/ramster/cluster/nodemanager.c)  15
-rw-r--r--  drivers/staging/ramster/ramster/nodemanager.h (renamed from drivers/staging/ramster/cluster/nodemanager.h)  0
-rw-r--r--  drivers/staging/ramster/ramster/r2net.c (renamed from drivers/staging/ramster/r2net.c)  79
-rw-r--r--  drivers/staging/ramster/ramster/ramster.c  985
-rw-r--r--  drivers/staging/ramster/ramster/ramster.h  161
-rw-r--r--  drivers/staging/ramster/ramster/ramster_nodemanager.h (renamed from drivers/staging/ramster/cluster/ramster_nodemanager.h)  0
-rw-r--r--  drivers/staging/ramster/ramster/tcp.c (renamed from drivers/staging/ramster/cluster/tcp.c)  53
-rw-r--r--  drivers/staging/ramster/ramster/tcp.h (renamed from drivers/staging/ramster/cluster/tcp.h)  0
-rw-r--r--  drivers/staging/ramster/ramster/tcp_internal.h (renamed from drivers/staging/ramster/cluster/tcp_internal.h)  0
-rw-r--r--  drivers/staging/ramster/tmem.c  313
-rw-r--r--  drivers/staging/ramster/tmem.h  109
-rw-r--r--  drivers/staging/ramster/xvmalloc.c  509
-rw-r--r--  drivers/staging/ramster/xvmalloc.h  30
-rw-r--r--  drivers/staging/ramster/xvmalloc_int.h  95
-rw-r--r--  drivers/staging/ramster/zbud.c  1060
-rw-r--r--  drivers/staging/ramster/zbud.h  33
-rw-r--r--  drivers/staging/ramster/zcache-main.c  3544
-rw-r--r--  drivers/staging/ramster/zcache.h  55
27 files changed, 3680 insertions, 3528 deletions
diff --git a/drivers/staging/ramster/Kconfig b/drivers/staging/ramster/Kconfig
index 8349887827dc..843c54101438 100644
--- a/drivers/staging/ramster/Kconfig
+++ b/drivers/staging/ramster/Kconfig
@@ -1,13 +1,30 @@
+config ZCACHE2
+ bool "Dynamic compression of swap pages and clean pagecache pages"
+ depends on CRYPTO=y && SWAP=y && CLEANCACHE && FRONTSWAP && !ZCACHE
+ select CRYPTO_LZO
+ default n
+ help
+ Zcache2 doubles RAM efficiency while providing a significant
+ performance boost on many workloads. Zcache2 uses
+ compression and an in-kernel implementation of transcendent
+ memory to store clean page cache pages and swap in RAM,
+ providing a noticeable reduction in disk I/O. Zcache2
+ is a complete rewrite of the older zcache; it was intended to
+ be a merge but that has been blocked due to political and
+ technical disagreements. It is intended that they will merge
+ again in the future. Until then, zcache2 is a single-node
+ version of ramster.
+
config RAMSTER
bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
- depends on (CLEANCACHE || FRONTSWAP) && CONFIGFS_FS=y && !ZCACHE && !XVMALLOC && !HIGHMEM && NET
- select LZO_COMPRESS
- select LZO_DECOMPRESS
+ depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE2=y
+ # must ensure struct page is 8-byte aligned
+ select HAVE_ALIGNED_STRUCT_PAGE if !64BIT
default n
help
RAMster allows RAM on other machines in a cluster to be utilized
dynamically and symmetrically instead of swapping to a local swap
disk, thus improving performance on memory-constrained workloads
while minimizing total RAM across the cluster. RAMster, like
- zcache, compresses swap pages into local RAM, but then remotifies
+ zcache2, compresses swap pages into local RAM, but then remotifies
the compressed pages to another node in the RAMster cluster.
diff --git a/drivers/staging/ramster/Makefile b/drivers/staging/ramster/Makefile
index bcc13c87f996..2d8b9d0a6a8b 100644
--- a/drivers/staging/ramster/Makefile
+++ b/drivers/staging/ramster/Makefile
@@ -1 +1,6 @@
-obj-$(CONFIG_RAMSTER) += zcache-main.o tmem.o r2net.o xvmalloc.o cluster/
+zcache-y := zcache-main.o tmem.o zbud.o
+zcache-$(CONFIG_RAMSTER) += ramster/ramster.o ramster/r2net.o
+zcache-$(CONFIG_RAMSTER) += ramster/nodemanager.o ramster/tcp.o
+zcache-$(CONFIG_RAMSTER) += ramster/heartbeat.o ramster/masklog.o
+
+obj-$(CONFIG_ZCACHE2) += zcache.o
diff --git a/drivers/staging/ramster/TODO b/drivers/staging/ramster/TODO
deleted file mode 100644
index 46fcf0c58acf..000000000000
--- a/drivers/staging/ramster/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-For this staging driver, RAMster duplicates code from drivers/staging/zcache
-then incorporates changes to the local copy of the code. For V5, it also
-directly incorporates the soon-to-be-removed drivers/staging/zram/xvmalloc.[ch]
-as all testing has been done with xvmalloc rather than the new zsmalloc.
-Before RAMster can be promoted from staging, the zcache and RAMster drivers
-should be either merged or reorganized to separate out common code.
-
-Until V4, RAMster duplicated code from fs/ocfs2/cluster, but this made
-RAMster incompatible with ocfs2 running in the same kernel and included
-lots of code that could be removed. As of V5, the ocfs2 code has been
-mined and made RAMster-specific, made to communicate with a userland
-ramster-tools package rather than ocfs2-tools, and can co-exist with ocfs2
-both in the same kernel and in userland on the same machine.
diff --git a/drivers/staging/ramster/cluster/Makefile b/drivers/staging/ramster/cluster/Makefile
deleted file mode 100644
index 9c6943652c01..000000000000
--- a/drivers/staging/ramster/cluster/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_RAMSTER) += ramster_nodemanager.o
-
-ramster_nodemanager-objs := heartbeat.o masklog.o nodemanager.o tcp.o
diff --git a/drivers/staging/ramster/ramster.h b/drivers/staging/ramster/ramster.h
index 0c9455e8dcd8..1b71aea2ff62 100644
--- a/drivers/staging/ramster/ramster.h
+++ b/drivers/staging/ramster/ramster.h
@@ -1,118 +1,59 @@
+
/*
- * ramster.h
+ * zcache/ramster.h
*
- * Peer-to-peer transcendent memory
+ * Placeholder to resolve ramster references when !CONFIG_RAMSTER
+ * Real ramster.h lives in ramster subdirectory.
*
* Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
*/
-#ifndef _RAMSTER_H_
-#define _RAMSTER_H_
-
-/*
- * format of remote pampd:
- * bit 0 == intransit
- * bit 1 == is_remote... if this bit is set, then
- * bit 2-9 == remotenode
- * bit 10-22 == size
- * bit 23-30 == cksum
- */
-#define FAKE_PAMPD_INTRANSIT_BITS 1
-#define FAKE_PAMPD_ISREMOTE_BITS 1
-#define FAKE_PAMPD_REMOTENODE_BITS 8
-#define FAKE_PAMPD_REMOTESIZE_BITS 13
-#define FAKE_PAMPD_CHECKSUM_BITS 8
-
-#define FAKE_PAMPD_INTRANSIT_SHIFT 0
-#define FAKE_PAMPD_ISREMOTE_SHIFT (FAKE_PAMPD_INTRANSIT_SHIFT + \
- FAKE_PAMPD_INTRANSIT_BITS)
-#define FAKE_PAMPD_REMOTENODE_SHIFT (FAKE_PAMPD_ISREMOTE_SHIFT + \
- FAKE_PAMPD_ISREMOTE_BITS)
-#define FAKE_PAMPD_REMOTESIZE_SHIFT (FAKE_PAMPD_REMOTENODE_SHIFT + \
- FAKE_PAMPD_REMOTENODE_BITS)
-#define FAKE_PAMPD_CHECKSUM_SHIFT (FAKE_PAMPD_REMOTESIZE_SHIFT + \
- FAKE_PAMPD_REMOTESIZE_BITS)
+#ifndef _ZCACHE_RAMSTER_H_
+#define _ZCACHE_RAMSTER_H_
-#define FAKE_PAMPD_MASK(x) ((1UL << (x)) - 1)
-
-static inline void *pampd_make_remote(int remotenode, size_t size,
- unsigned char cksum)
+#ifdef CONFIG_RAMSTER
+#include "ramster/ramster.h"
+#else
+static inline void ramster_init(bool x, bool y, bool z)
{
- unsigned long fake_pampd = 0;
- fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
- fake_pampd |= ((unsigned long)remotenode &
- FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS)) <<
- FAKE_PAMPD_REMOTENODE_SHIFT;
- fake_pampd |= ((unsigned long)size &
- FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS)) <<
- FAKE_PAMPD_REMOTESIZE_SHIFT;
- fake_pampd |= ((unsigned long)cksum &
- FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS)) <<
- FAKE_PAMPD_CHECKSUM_SHIFT;
- return (void *)fake_pampd;
}
-static inline unsigned int pampd_remote_node(void *pampd)
+static inline void ramster_register_pamops(struct tmem_pamops *p)
{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_REMOTENODE_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS);
}
-static inline unsigned int pampd_remote_size(void *pampd)
+static inline int ramster_remotify_pageframe(bool b)
{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_REMOTESIZE_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS);
+ return 0;
}
-static inline unsigned char pampd_remote_cksum(void *pampd)
+static inline void *ramster_pampd_free(void *v, struct tmem_pool *p,
+ struct tmem_oid *o, uint32_t u, bool b)
{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_CHECKSUM_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS);
+ return NULL;
}
-static inline bool pampd_is_remote(void *pampd)
+static inline int ramster_do_preload_flnode(struct tmem_pool *p)
{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_ISREMOTE_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_ISREMOTE_BITS);
+ return -1;
}
-static inline bool pampd_is_intransit(void *pampd)
+static inline bool pampd_is_remote(void *v)
{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_INTRANSIT_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_INTRANSIT_BITS);
+ return false;
}
-/* note that it is a BUG for intransit to be set without isremote also set */
-static inline void *pampd_mark_intransit(void *pampd)
+static inline void ramster_count_foreign_pages(bool b, int i)
{
- unsigned long fake_pampd = (unsigned long)pampd;
-
- fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
- fake_pampd |= 1UL << FAKE_PAMPD_INTRANSIT_SHIFT;
- return (void *)fake_pampd;
}
-static inline void *pampd_mask_intransit_and_remote(void *marked_pampd)
+static inline void ramster_cpu_up(int cpu)
{
- unsigned long pampd = (unsigned long)marked_pampd;
-
- pampd &= ~(1UL << FAKE_PAMPD_INTRANSIT_SHIFT);
- pampd &= ~(1UL << FAKE_PAMPD_ISREMOTE_SHIFT);
- return (void *)pampd;
}
-extern int ramster_remote_async_get(struct tmem_xhandle *,
- bool, int, size_t, uint8_t, void *extra);
-extern int ramster_remote_put(struct tmem_xhandle *, char *, size_t,
- bool, int *);
-extern int ramster_remote_flush(struct tmem_xhandle *, int);
-extern int ramster_remote_flush_object(struct tmem_xhandle *, int);
-extern int r2net_register_handlers(void);
-extern int r2net_remote_target_node_set(int);
+static inline void ramster_cpu_down(int cpu)
+{
+}
+#endif
-#endif /* _TMEM_H */
+#endif /* _ZCACHE_RAMSTER_H */
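With this placeholder in place, the core zcache code can call the ramster hooks unconditionally: when CONFIG_RAMSTER is set the real ramster/ramster.h is included, and when it is not, the empty static inlines above compile away to no-ops. A minimal sketch of the intended call-site pattern; the caller name is hypothetical and not part of this patch:

#include "ramster.h"    /* resolves to the stubs above when !CONFIG_RAMSTER */

static int example_zcache_preload(struct tmem_pool *pool)
{
        /*
         * No #ifdef CONFIG_RAMSTER is needed at the call site; the
         * stub variant simply returns -1 and generates no code.
         */
        return ramster_do_preload_flnode(pool);
}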
diff --git a/drivers/staging/ramster/cluster/heartbeat.c b/drivers/staging/ramster/ramster/heartbeat.c
index 00209490756e..75d3fe80b055 100644
--- a/drivers/staging/ramster/cluster/heartbeat.c
+++ b/drivers/staging/ramster/ramster/heartbeat.c
@@ -172,8 +172,7 @@ static void r2hb_hb_group_drop_item(struct config_group *group,
struct config_item *item)
{
if (r2hb_global_heartbeat_active()) {
- printk(KERN_NOTICE "ramster: Heartbeat %s "
- "on region %s (%s)\n",
+ pr_notice("ramster: Heartbeat %s on region %s (%s)\n",
"stopped/aborted", config_item_name(item),
"no region");
}
@@ -265,8 +264,7 @@ ssize_t r2hb_hb_group_mode_store(struct r2hb_hb_group *group,
ret = r2hb_global_hearbeat_mode_set(i);
if (!ret)
- printk(KERN_NOTICE "ramster: Heartbeat mode "
- "set to %s\n",
+ pr_notice("ramster: Heartbeat mode set to %s\n",
r2hb_heartbeat_mode_desc[i]);
return count;
}
diff --git a/drivers/staging/ramster/cluster/heartbeat.h b/drivers/staging/ramster/ramster/heartbeat.h
index 6cbc775bd63b..6cbc775bd63b 100644
--- a/drivers/staging/ramster/cluster/heartbeat.h
+++ b/drivers/staging/ramster/ramster/heartbeat.h
diff --git a/drivers/staging/ramster/cluster/masklog.c b/drivers/staging/ramster/ramster/masklog.c
index 1261d8579aae..1261d8579aae 100644
--- a/drivers/staging/ramster/cluster/masklog.c
+++ b/drivers/staging/ramster/ramster/masklog.c
diff --git a/drivers/staging/ramster/cluster/masklog.h b/drivers/staging/ramster/ramster/masklog.h
index 918ae110b699..918ae110b699 100644
--- a/drivers/staging/ramster/cluster/masklog.h
+++ b/drivers/staging/ramster/ramster/masklog.h
diff --git a/drivers/staging/ramster/cluster/nodemanager.c b/drivers/staging/ramster/ramster/nodemanager.c
index de0e5c8da6ea..c0f48158735d 100644
--- a/drivers/staging/ramster/cluster/nodemanager.c
+++ b/drivers/staging/ramster/ramster/nodemanager.c
@@ -528,7 +528,8 @@ static ssize_t r2nm_cluster_attr_idle_timeout_ms_write(
r2net_num_connected_peers());
ret = -EINVAL;
} else if (val <= cluster->cl_keepalive_delay_ms) {
- mlog(ML_NOTICE, "r2net: idle timeout must be larger "
+ mlog(ML_NOTICE,
+ "r2net: idle timeout must be larger "
"than keepalive delay\n");
ret = -EINVAL;
} else {
@@ -563,7 +564,8 @@ static ssize_t r2nm_cluster_attr_keepalive_delay_ms_write(
r2net_num_connected_peers());
ret = -EINVAL;
} else if (val >= cluster->cl_idle_timeout_ms) {
- mlog(ML_NOTICE, "r2net: keepalive delay must be "
+ mlog(ML_NOTICE,
+ "r2net: keepalive delay must be "
"smaller than idle timeout\n");
ret = -EINVAL;
} else {
@@ -612,7 +614,7 @@ static ssize_t r2nm_cluster_attr_fence_method_write(
if (strncasecmp(page, r2nm_fence_method_desc[i], count - 1))
continue;
if (cluster->cl_fence_method != i) {
- printk(KERN_INFO "ramster: Changing fence method to %s\n",
+ pr_info("ramster: Changing fence method to %s\n",
r2nm_fence_method_desc[i]);
cluster->cl_fence_method = i;
}
@@ -967,7 +969,7 @@ static int __init init_r2nm(void)
mutex_init(&r2nm_cluster_group.cs_subsys.su_mutex);
ret = configfs_register_subsystem(&r2nm_cluster_group.cs_subsys);
if (ret) {
- printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
+ pr_err("nodemanager: Registration returned %d\n", ret);
goto out_callbacks;
}
@@ -988,5 +990,6 @@ out:
MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
-module_init(init_r2nm)
-module_exit(exit_r2nm)
+/* module_init(init_r2nm) */
+late_initcall(init_r2nm);
+/* module_exit(exit_r2nm) */
diff --git a/drivers/staging/ramster/cluster/nodemanager.h b/drivers/staging/ramster/ramster/nodemanager.h
index 41a04df5842c..41a04df5842c 100644
--- a/drivers/staging/ramster/cluster/nodemanager.h
+++ b/drivers/staging/ramster/ramster/nodemanager.h
diff --git a/drivers/staging/ramster/r2net.c b/drivers/staging/ramster/ramster/r2net.c
index fc830c3ac74b..34818dc65612 100644
--- a/drivers/staging/ramster/r2net.c
+++ b/drivers/staging/ramster/ramster/r2net.c
@@ -1,7 +1,7 @@
/*
* r2net.c
*
- * Copyright (c) 2011, Dan Magenheimer, Oracle Corp.
+ * Copyright (c) 2011-2012, Dan Magenheimer, Oracle Corp.
*
* Ramster_r2net provides an interface between zcache and r2net.
*
@@ -9,10 +9,10 @@
*/
#include <linux/list.h>
-#include "cluster/tcp.h"
-#include "cluster/nodemanager.h"
-#include "tmem.h"
-#include "zcache.h"
+#include "tcp.h"
+#include "nodemanager.h"
+#include "../tmem.h"
+#include "../zcache.h"
#include "ramster.h"
#define RAMSTER_TESTING
@@ -33,7 +33,7 @@ enum {
#define RMSTR_R2NET_MAX_LEN \
(R2NET_MAX_PAYLOAD_BYTES - sizeof(struct tmem_xhandle))
-#include "cluster/tcp_internal.h"
+#include "tcp_internal.h"
static struct r2nm_node *r2net_target_node;
static int r2net_target_nodenum;
@@ -72,8 +72,8 @@ static int ramster_remote_async_get_request_handler(struct r2net_msg *msg,
*(struct tmem_xhandle *)pdata = xh;
pdata += sizeof(struct tmem_xhandle);
local_irq_save(flags);
- found = zcache_get(xh.client_id, xh.pool_id, &xh.oid, xh.index,
- pdata, &size, 1, get_and_free ? 1 : -1);
+ found = zcache_get_page(xh.client_id, xh.pool_id, &xh.oid, xh.index,
+ pdata, &size, true, get_and_free ? 1 : -1);
local_irq_restore(flags);
if (found < 0) {
/* a zero size indicates the get failed */
@@ -98,7 +98,7 @@ static int ramster_remote_async_get_reply_handler(struct r2net_msg *msg,
in += sizeof(struct tmem_xhandle);
datalen -= sizeof(struct tmem_xhandle);
BUG_ON(datalen < 0 || datalen > PAGE_SIZE);
- ret = zcache_localify(xh->pool_id, &xh->oid, xh->index,
+ ret = ramster_localify(xh->pool_id, &xh->oid, xh->index,
in, datalen, xh->extra);
#ifdef RAMSTER_TESTING
if (ret == -EEXIST)
@@ -123,8 +123,8 @@ int ramster_remote_put_handler(struct r2net_msg *msg,
p += sizeof(struct tmem_xhandle);
zcache_autocreate_pool(xh->client_id, xh->pool_id, ephemeral);
local_irq_save(flags);
- ret = zcache_put(xh->client_id, xh->pool_id, &xh->oid, xh->index,
- p, datalen, 1, ephemeral ? 1 : -1);
+ ret = zcache_put_page(xh->client_id, xh->pool_id, &xh->oid, xh->index,
+ p, datalen, true, ephemeral);
local_irq_restore(flags);
return ret;
}
@@ -137,7 +137,8 @@ int ramster_remote_flush_handler(struct r2net_msg *msg,
xh = (struct tmem_xhandle *)p;
p += sizeof(struct tmem_xhandle);
- (void)zcache_flush(xh->client_id, xh->pool_id, &xh->oid, xh->index);
+ (void)zcache_flush_page(xh->client_id, xh->pool_id,
+ &xh->oid, xh->index);
return 0;
}
@@ -153,15 +154,16 @@ int ramster_remote_flobj_handler(struct r2net_msg *msg,
return 0;
}
-int ramster_remote_async_get(struct tmem_xhandle *xh, bool free, int remotenode,
+int r2net_remote_async_get(struct tmem_xhandle *xh, bool free, int remotenode,
size_t expect_size, uint8_t expect_cksum,
void *extra)
{
- int ret = -1, status;
+ int nodenum, ret = -1, status;
struct r2nm_node *node = NULL;
struct kvec vec[1];
size_t veclen = 1;
u32 msg_type;
+ struct r2net_node *nn;
node = r2nm_get_node_by_num(remotenode);
if (node == NULL)
@@ -172,6 +174,21 @@ int ramster_remote_async_get(struct tmem_xhandle *xh, bool free, int remotenode,
xh->extra = extra;
vec[0].iov_len = sizeof(*xh);
vec[0].iov_base = xh;
+
+ node = r2net_target_node;
+ if (!node)
+ goto out;
+
+ nodenum = r2net_target_nodenum;
+
+ r2nm_node_get(node);
+ nn = r2net_nn_from_num(nodenum);
+ if (nn->nn_persistent_error || !nn->nn_sc_valid) {
+ ret = -ENOTCONN;
+ r2nm_node_put(node);
+ goto out;
+ }
+
if (free)
msg_type = RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST;
else
@@ -180,8 +197,13 @@ int ramster_remote_async_get(struct tmem_xhandle *xh, bool free, int remotenode,
vec, veclen, remotenode, &status);
r2nm_node_put(node);
if (ret < 0) {
+ if (ret == -ENOTCONN || ret == -EHOSTDOWN)
+ goto out;
+ if (ret == -EAGAIN)
+ goto out;
/* FIXME handle bad message possibilities here? */
- pr_err("UNTESTED ret<0 in ramster_remote_async_get\n");
+ pr_err("UNTESTED ret<0 in ramster_remote_async_get: ret=%d\n",
+ ret);
}
ret = status;
out:
@@ -219,7 +241,7 @@ static void ramster_check_irq_counts(void)
}
#endif
-int ramster_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
+int r2net_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
bool ephemeral, int *remotenode)
{
int nodenum, ret = -1, status;
@@ -227,9 +249,7 @@ int ramster_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
struct kvec vec[2];
size_t veclen = 2;
u32 msg_type;
-#ifdef RAMSTER_TESTING
struct r2net_node *nn;
-#endif
BUG_ON(size > RMSTR_R2NET_MAX_LEN);
xh->client_id = r2nm_this_node(); /* which node is putting */
@@ -237,6 +257,7 @@ int ramster_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
vec[0].iov_base = xh;
vec[1].iov_len = size;
vec[1].iov_base = data;
+
node = r2net_target_node;
if (!node)
goto out;
@@ -245,10 +266,12 @@ int ramster_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
r2nm_node_get(node);
-#ifdef RAMSTER_TESTING
nn = r2net_nn_from_num(nodenum);
- WARN_ON_ONCE(nn->nn_persistent_error || !nn->nn_sc_valid);
-#endif
+ if (nn->nn_persistent_error || !nn->nn_sc_valid) {
+ ret = -ENOTCONN;
+ r2nm_node_put(node);
+ goto out;
+ }
if (ephemeral)
msg_type = RMSTR_TMEM_PUT_EPH;
@@ -261,16 +284,6 @@ int ramster_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
ret = r2net_send_message_vec(msg_type, RMSTR_KEY, vec, veclen,
nodenum, &status);
-#ifdef RAMSTER_TESTING
- if (ret != 0) {
- static unsigned long cnt;
- cnt++;
- if (!(cnt&(cnt-1)))
- pr_err("ramster_remote_put: message failed, ret=%d, cnt=%lu\n",
- ret, cnt);
- ret = -1;
- }
-#endif
if (ret < 0)
ret = -1;
else {
@@ -283,7 +296,7 @@ out:
return ret;
}
-int ramster_remote_flush(struct tmem_xhandle *xh, int remotenode)
+int r2net_remote_flush(struct tmem_xhandle *xh, int remotenode)
{
int ret = -1, status;
struct r2nm_node *node = NULL;
@@ -303,7 +316,7 @@ int ramster_remote_flush(struct tmem_xhandle *xh, int remotenode)
return ret;
}
-int ramster_remote_flush_object(struct tmem_xhandle *xh, int remotenode)
+int r2net_remote_flush_object(struct tmem_xhandle *xh, int remotenode)
{
int ret = -1, status;
struct r2nm_node *node = NULL;
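Both the async-get path and the put path above now apply the same guard before sending: look up the r2net_node for the chosen target and bail out with -ENOTCONN if the socket is not valid or a persistent error has been recorded, instead of the old WARN_ON_ONCE under RAMSTER_TESTING. A hypothetical helper expressing that check (illustrative only; the patch open-codes it in both callers):

static int example_target_usable(int nodenum)
{
        struct r2net_node *nn = r2net_nn_from_num(nodenum);

        /* refuse to queue traffic for a node with no valid connection */
        if (nn->nn_persistent_error || !nn->nn_sc_valid)
                return -ENOTCONN;
        return 0;
}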
diff --git a/drivers/staging/ramster/ramster/ramster.c b/drivers/staging/ramster/ramster/ramster.c
new file mode 100644
index 000000000000..c06709f39682
--- /dev/null
+++ b/drivers/staging/ramster/ramster/ramster.c
@@ -0,0 +1,985 @@
+/*
+ * ramster.c
+ *
+ * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
+ *
+ * RAMster implements peer-to-peer transcendent memory, allowing a "cluster" of
+ * kernels to dynamically pool their RAM so that a RAM-hungry workload on one
+ * machine can temporarily and transparently utilize RAM on another machine
+ * which is presumably idle or running a non-RAM-hungry workload.
+ *
+ * RAMster combines a clustering and messaging foundation based on the ocfs2
+ * cluster layer with the in-kernel compression implementation of zcache, and
+ * adds code to glue them together. When a page is "put" to RAMster, it is
+ * compressed and stored locally. Periodically, a thread will "remotify" these
+ * pages by sending them via messages to a remote machine. When the page is
+ * later needed as indicated by a page fault, a "get" is issued. If the data
+ * is local, it is uncompressed and the fault is resolved. If the data is
+ * remote, a message is sent to fetch the data and the faulting thread sleeps;
+ * when the data arrives, the thread awakens, the data is decompressed and
+ * the fault is resolved.
+
+ * As of V5, clusters up to eight nodes are supported; each node can remotify
+ * pages to one specified node, so clusters can be configured as clients to
+ * a "memory server". Some simple policy is in place that will need to be
+ * refined over time. Larger clusters and fault-resistant protocols can also
+ * be added over time.
+ */
+
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/highmem.h>
+#include <linux/list.h>
+#include <linux/lzo.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/frontswap.h>
+#include "../tmem.h"
+#include "../zcache.h"
+#include "../zbud.h"
+#include "ramster.h"
+#include "ramster_nodemanager.h"
+#include "tcp.h"
+
+#define RAMSTER_TESTING
+
+#ifndef CONFIG_SYSFS
+#error "ramster needs sysfs to define cluster nodes to use"
+#endif
+
+static bool use_cleancache __read_mostly;
+static bool use_frontswap __read_mostly;
+static bool use_frontswap_exclusive_gets __read_mostly;
+
+/* These must be sysfs not debugfs as they are checked/used by userland!! */
+static unsigned long ramster_interface_revision __read_mostly =
+ R2NM_API_VERSION; /* interface revision must match userspace! */
+static unsigned long ramster_pers_remotify_enable __read_mostly;
+static unsigned long ramster_eph_remotify_enable __read_mostly;
+static atomic_t ramster_remote_pers_pages = ATOMIC_INIT(0);
+#define MANUAL_NODES 8
+static bool ramster_nodes_manual_up[MANUAL_NODES] __read_mostly;
+static int ramster_remote_target_nodenum __read_mostly = -1;
+
+/* these counters are made available via debugfs */
+static long ramster_flnodes;
+static atomic_t ramster_flnodes_atomic = ATOMIC_INIT(0);
+static unsigned long ramster_flnodes_max;
+static long ramster_foreign_eph_pages;
+static atomic_t ramster_foreign_eph_pages_atomic = ATOMIC_INIT(0);
+static unsigned long ramster_foreign_eph_pages_max;
+static long ramster_foreign_pers_pages;
+static atomic_t ramster_foreign_pers_pages_atomic = ATOMIC_INIT(0);
+static unsigned long ramster_foreign_pers_pages_max;
+static unsigned long ramster_eph_pages_remoted;
+static unsigned long ramster_pers_pages_remoted;
+static unsigned long ramster_eph_pages_remote_failed;
+static unsigned long ramster_pers_pages_remote_failed;
+static unsigned long ramster_remote_eph_pages_succ_get;
+static unsigned long ramster_remote_pers_pages_succ_get;
+static unsigned long ramster_remote_eph_pages_unsucc_get;
+static unsigned long ramster_remote_pers_pages_unsucc_get;
+static unsigned long ramster_pers_pages_remote_nomem;
+static unsigned long ramster_remote_objects_flushed;
+static unsigned long ramster_remote_object_flushes_failed;
+static unsigned long ramster_remote_pages_flushed;
+static unsigned long ramster_remote_page_flushes_failed;
+/* FIXME frontswap selfshrinking knobs in debugfs? */
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#define zdfs debugfs_create_size_t
+#define zdfs64 debugfs_create_u64
+static int __init ramster_debugfs_init(void)
+{
+ struct dentry *root = debugfs_create_dir("ramster", NULL);
+ if (root == NULL)
+ return -ENXIO;
+
+ zdfs("eph_pages_remoted", S_IRUGO, root, &ramster_eph_pages_remoted);
+ zdfs("pers_pages_remoted", S_IRUGO, root, &ramster_pers_pages_remoted);
+ zdfs("eph_pages_remote_failed", S_IRUGO, root,
+ &ramster_eph_pages_remote_failed);
+ zdfs("pers_pages_remote_failed", S_IRUGO, root,
+ &ramster_pers_pages_remote_failed);
+ zdfs("remote_eph_pages_succ_get", S_IRUGO, root,
+ &ramster_remote_eph_pages_succ_get);
+ zdfs("remote_pers_pages_succ_get", S_IRUGO, root,
+ &ramster_remote_pers_pages_succ_get);
+ zdfs("remote_eph_pages_unsucc_get", S_IRUGO, root,
+ &ramster_remote_eph_pages_unsucc_get);
+ zdfs("remote_pers_pages_unsucc_get", S_IRUGO, root,
+ &ramster_remote_pers_pages_unsucc_get);
+ zdfs("pers_pages_remote_nomem", S_IRUGO, root,
+ &ramster_pers_pages_remote_nomem);
+ zdfs("remote_objects_flushed", S_IRUGO, root,
+ &ramster_remote_objects_flushed);
+ zdfs("remote_pages_flushed", S_IRUGO, root,
+ &ramster_remote_pages_flushed);
+ zdfs("remote_object_flushes_failed", S_IRUGO, root,
+ &ramster_remote_object_flushes_failed);
+ zdfs("remote_page_flushes_failed", S_IRUGO, root,
+ &ramster_remote_page_flushes_failed);
+ zdfs("foreign_eph_pages", S_IRUGO, root,
+ &ramster_foreign_eph_pages);
+ zdfs("foreign_eph_pages_max", S_IRUGO, root,
+ &ramster_foreign_eph_pages_max);
+ zdfs("foreign_pers_pages", S_IRUGO, root,
+ &ramster_foreign_pers_pages);
+ zdfs("foreign_pers_pages_max", S_IRUGO, root,
+ &ramster_foreign_pers_pages_max);
+ return 0;
+}
+#undef zdebugfs
+#undef zdfs64
+#endif
+
+static LIST_HEAD(ramster_rem_op_list);
+static DEFINE_SPINLOCK(ramster_rem_op_list_lock);
+static DEFINE_PER_CPU(struct ramster_preload, ramster_preloads);
+
+static DEFINE_PER_CPU(unsigned char *, ramster_remoteputmem1);
+static DEFINE_PER_CPU(unsigned char *, ramster_remoteputmem2);
+
+static struct kmem_cache *ramster_flnode_cache __read_mostly;
+
+static struct flushlist_node *ramster_flnode_alloc(struct tmem_pool *pool)
+{
+ struct flushlist_node *flnode = NULL;
+ struct ramster_preload *kp;
+
+ kp = &__get_cpu_var(ramster_preloads);
+ flnode = kp->flnode;
+ BUG_ON(flnode == NULL);
+ kp->flnode = NULL;
+ ramster_flnodes = atomic_inc_return(&ramster_flnodes_atomic);
+ if (ramster_flnodes > ramster_flnodes_max)
+ ramster_flnodes_max = ramster_flnodes;
+ return flnode;
+}
+
+/* the "flush list" asynchronously collects pages to remotely flush */
+#define FLUSH_ENTIRE_OBJECT ((uint32_t)-1)
+static void ramster_flnode_free(struct flushlist_node *flnode,
+ struct tmem_pool *pool)
+{
+ int flnodes;
+
+ flnodes = atomic_dec_return(&ramster_flnodes_atomic);
+ BUG_ON(flnodes < 0);
+ kmem_cache_free(ramster_flnode_cache, flnode);
+}
+
+int ramster_do_preload_flnode(struct tmem_pool *pool)
+{
+ struct ramster_preload *kp;
+ struct flushlist_node *flnode;
+ int ret = -ENOMEM;
+
+ BUG_ON(!irqs_disabled());
+ if (unlikely(ramster_flnode_cache == NULL))
+ BUG();
+ kp = &__get_cpu_var(ramster_preloads);
+ flnode = kmem_cache_alloc(ramster_flnode_cache, GFP_ATOMIC);
+ if (unlikely(flnode == NULL) && kp->flnode == NULL)
+ BUG(); /* FIXME handle more gracefully, but how??? */
+ else if (kp->flnode == NULL)
+ kp->flnode = flnode;
+ else
+ kmem_cache_free(ramster_flnode_cache, flnode);
+ return ret;
+}
+
+/*
+ * Called by the message handler after a (still compressed) page has been
+ * fetched from the remote machine in response to an "is_remote" tmem_get
+ * or persistent tmem_localify. For a tmem_get, "extra" is the address of
+ * the page that is to be filled to successfully resolve the tmem_get; for
+ * a (persistent) tmem_localify, "extra" is NULL (as the data is placed only
+ * in the local zcache). "data" points to "size" bytes of (compressed) data
+ * passed in the message. In the case of a persistent remote get, if
+ * pre-allocation was successful (see ramster_repatriate_preload), the page
+ * is placed into both local zcache and at "extra".
+ */
+int ramster_localify(int pool_id, struct tmem_oid *oidp, uint32_t index,
+ char *data, unsigned int size, void *extra)
+{
+ int ret = -ENOENT;
+ unsigned long flags;
+ struct tmem_pool *pool;
+ bool eph, delete = false;
+ void *pampd, *saved_hb;
+ struct tmem_obj *obj;
+
+ pool = zcache_get_pool_by_id(LOCAL_CLIENT, pool_id);
+ if (unlikely(pool == NULL))
+ /* pool doesn't exist anymore */
+ goto out;
+ eph = is_ephemeral(pool);
+ local_irq_save(flags); /* FIXME: maybe only disable softirqs? */
+ pampd = tmem_localify_get_pampd(pool, oidp, index, &obj, &saved_hb);
+ if (pampd == NULL) {
+ /* hmmm... must have been a flush while waiting */
+#ifdef RAMSTER_TESTING
+ pr_err("UNTESTED pampd==NULL in ramster_localify\n");
+#endif
+ if (eph)
+ ramster_remote_eph_pages_unsucc_get++;
+ else
+ ramster_remote_pers_pages_unsucc_get++;
+ obj = NULL;
+ goto finish;
+ } else if (unlikely(!pampd_is_remote(pampd))) {
+ /* hmmm... must have been a dup put while waiting */
+#ifdef RAMSTER_TESTING
+ pr_err("UNTESTED dup while waiting in ramster_localify\n");
+#endif
+ if (eph)
+ ramster_remote_eph_pages_unsucc_get++;
+ else
+ ramster_remote_pers_pages_unsucc_get++;
+ obj = NULL;
+ pampd = NULL;
+ ret = -EEXIST;
+ goto finish;
+ } else if (size == 0) {
+ /* no remote data, delete the local is_remote pampd */
+ pampd = NULL;
+ if (eph)
+ ramster_remote_eph_pages_unsucc_get++;
+ else
+ BUG();
+ delete = true;
+ goto finish;
+ }
+ if (pampd_is_intransit(pampd)) {
+ /*
+ * a pampd is marked intransit if it is remote and space has
+ * been allocated for it locally (note, only happens for
+ * persistent pages, in which case the remote copy is freed)
+ */
+ BUG_ON(eph);
+ pampd = pampd_mask_intransit_and_remote(pampd);
+ zbud_copy_to_zbud(pampd, data, size);
+ } else {
+ /*
+ * setting pampd to NULL tells tmem_localify_finish to leave
+ * pampd alone... meaning it is left pointing to the
+ * remote copy
+ */
+ pampd = NULL;
+ obj = NULL;
+ }
+ /*
+ * but in all cases, we decompress direct-to-memory to complete
+ * the remotify and return success
+ */
+ BUG_ON(extra == NULL);
+ zcache_decompress_to_page(data, size, (struct page *)extra);
+ if (eph)
+ ramster_remote_eph_pages_succ_get++;
+ else
+ ramster_remote_pers_pages_succ_get++;
+ ret = 0;
+finish:
+ tmem_localify_finish(obj, index, pampd, saved_hb, delete);
+ zcache_put_pool(pool);
+ local_irq_restore(flags);
+out:
+ return ret;
+}
+
+void ramster_pampd_new_obj(struct tmem_obj *obj)
+{
+ obj->extra = NULL;
+}
+
+void ramster_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj,
+ bool pool_destroy)
+{
+ struct flushlist_node *flnode;
+
+ BUG_ON(preemptible());
+ if (obj->extra == NULL)
+ return;
+ if (pool_destroy && is_ephemeral(pool))
+ /* FIXME don't bother with remote eph data for now */
+ return;
+ BUG_ON(!pampd_is_remote(obj->extra));
+ flnode = ramster_flnode_alloc(pool);
+ flnode->xh.client_id = pampd_remote_node(obj->extra);
+ flnode->xh.pool_id = pool->pool_id;
+ flnode->xh.oid = obj->oid;
+ flnode->xh.index = FLUSH_ENTIRE_OBJECT;
+ flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_OBJ;
+ spin_lock(&ramster_rem_op_list_lock);
+ list_add(&flnode->rem_op.list, &ramster_rem_op_list);
+ spin_unlock(&ramster_rem_op_list_lock);
+}
+
+/*
+ * Called on a remote persistent tmem_get to attempt to preallocate
+ * local storage for the data contained in the remote persistent page.
+ * If successfully preallocated, returns the pampd, marked as remote and
+ * in_transit. Else returns NULL. Note that the appropriate tmem data
+ * structure must be locked.
+ */
+void *ramster_pampd_repatriate_preload(void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oidp, uint32_t index,
+ bool *intransit)
+{
+ int clen = pampd_remote_size(pampd), c;
+ void *ret_pampd = NULL;
+ unsigned long flags;
+ struct tmem_handle th;
+
+ BUG_ON(!pampd_is_remote(pampd));
+ BUG_ON(is_ephemeral(pool));
+ if (use_frontswap_exclusive_gets)
+ /* don't need local storage */
+ goto out;
+ if (pampd_is_intransit(pampd)) {
+ /*
+ * to avoid multiple allocations (and maybe a memory leak)
+ * don't preallocate if already in the process of being
+ * repatriated
+ */
+ *intransit = true;
+ goto out;
+ }
+ *intransit = false;
+ local_irq_save(flags);
+ th.client_id = pampd_remote_node(pampd);
+ th.pool_id = pool->pool_id;
+ th.oid = *oidp;
+ th.index = index;
+ ret_pampd = zcache_pampd_create(NULL, clen, true, false, &th);
+ if (ret_pampd != NULL) {
+ /*
+ * a pampd is marked intransit if it is remote and space has
+ * been allocated for it locally (note, only happens for
+ * persistent pages, in which case the remote copy is freed)
+ */
+ ret_pampd = pampd_mark_intransit(ret_pampd);
+ c = atomic_dec_return(&ramster_remote_pers_pages);
+ WARN_ON_ONCE(c < 0);
+ } else {
+ ramster_pers_pages_remote_nomem++;
+ }
+ local_irq_restore(flags);
+out:
+ return ret_pampd;
+}
+
+/*
+ * Called on a remote tmem_get to invoke a message to fetch the page.
+ * Might sleep so no tmem locks can be held. "extra" is passed
+ * all the way through the round-trip messaging to ramster_localify.
+ */
+int ramster_pampd_repatriate(void *fake_pampd, void *real_pampd,
+ struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index,
+ bool free, void *extra)
+{
+ struct tmem_xhandle xh;
+ int ret;
+
+ if (pampd_is_intransit(real_pampd))
+ /* have local space pre-reserved, so free remote copy */
+ free = true;
+ xh = tmem_xhandle_fill(LOCAL_CLIENT, pool, oid, index);
+ /* unreliable request/response for now */
+ ret = r2net_remote_async_get(&xh, free,
+ pampd_remote_node(fake_pampd),
+ pampd_remote_size(fake_pampd),
+ pampd_remote_cksum(fake_pampd),
+ extra);
+ return ret;
+}
+
+bool ramster_pampd_is_remote(void *pampd)
+{
+ return pampd_is_remote(pampd);
+}
+
+int ramster_pampd_replace_in_obj(void *new_pampd, struct tmem_obj *obj)
+{
+ int ret = -1;
+
+ if (new_pampd != NULL) {
+ if (obj->extra == NULL)
+ obj->extra = new_pampd;
+ /* enforce that all remote pages in an object reside
+ * in the same node! */
+ else if (pampd_remote_node(new_pampd) !=
+ pampd_remote_node((void *)(obj->extra)))
+ BUG();
+ ret = 0;
+ }
+ return ret;
+}
+
+void *ramster_pampd_free(void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index, bool acct)
+{
+ bool eph = is_ephemeral(pool);
+ void *local_pampd = NULL;
+ int c;
+
+ BUG_ON(preemptible());
+ BUG_ON(!pampd_is_remote(pampd));
+ WARN_ON(acct == false);
+ if (oid == NULL) {
+ /*
+ * a NULL oid means to ignore this pampd free
+ * as the remote freeing will be handled elsewhere
+ */
+ } else if (eph) {
+ /* FIXME remote flush optional but probably good idea */
+ } else if (pampd_is_intransit(pampd)) {
+ /* did a pers remote get_and_free, so just free local */
+ local_pampd = pampd_mask_intransit_and_remote(pampd);
+ } else {
+ struct flushlist_node *flnode =
+ ramster_flnode_alloc(pool);
+
+ flnode->xh.client_id = pampd_remote_node(pampd);
+ flnode->xh.pool_id = pool->pool_id;
+ flnode->xh.oid = *oid;
+ flnode->xh.index = index;
+ flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_PAGE;
+ spin_lock(&ramster_rem_op_list_lock);
+ list_add(&flnode->rem_op.list, &ramster_rem_op_list);
+ spin_unlock(&ramster_rem_op_list_lock);
+ c = atomic_dec_return(&ramster_remote_pers_pages);
+ WARN_ON_ONCE(c < 0);
+ }
+ return local_pampd;
+}
+
+void ramster_count_foreign_pages(bool eph, int count)
+{
+ int c;
+
+ BUG_ON(count != 1 && count != -1);
+ if (eph) {
+ if (count > 0) {
+ c = atomic_inc_return(
+ &ramster_foreign_eph_pages_atomic);
+ if (c > ramster_foreign_eph_pages_max)
+ ramster_foreign_eph_pages_max = c;
+ } else {
+ c = atomic_dec_return(&ramster_foreign_eph_pages_atomic);
+ WARN_ON_ONCE(c < 0);
+ }
+ ramster_foreign_eph_pages = c;
+ } else {
+ if (count > 0) {
+ c = atomic_inc_return(
+ &ramster_foreign_pers_pages_atomic);
+ if (c > ramster_foreign_pers_pages_max)
+ ramster_foreign_pers_pages_max = c;
+ } else {
+ c = atomic_dec_return(
+ &ramster_foreign_pers_pages_atomic);
+ WARN_ON_ONCE(c < 0);
+ }
+ ramster_foreign_pers_pages = c;
+ }
+}
+
+/*
+ * For now, just push over a few pages every few seconds to
+ * ensure that it basically works
+ */
+static struct workqueue_struct *ramster_remotify_workqueue;
+static void ramster_remotify_process(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ramster_remotify_worker,
+ ramster_remotify_process);
+
+static void ramster_remotify_queue_delayed_work(unsigned long delay)
+{
+ if (!queue_delayed_work(ramster_remotify_workqueue,
+ &ramster_remotify_worker, delay))
+ pr_err("ramster_remotify: bad workqueue\n");
+}
+
+static void ramster_remote_flush_page(struct flushlist_node *flnode)
+{
+ struct tmem_xhandle *xh;
+ int remotenode, ret;
+
+ preempt_disable();
+ xh = &flnode->xh;
+ remotenode = flnode->xh.client_id;
+ ret = r2net_remote_flush(xh, remotenode);
+ if (ret >= 0)
+ ramster_remote_pages_flushed++;
+ else
+ ramster_remote_page_flushes_failed++;
+ preempt_enable_no_resched();
+ ramster_flnode_free(flnode, NULL);
+}
+
+static void ramster_remote_flush_object(struct flushlist_node *flnode)
+{
+ struct tmem_xhandle *xh;
+ int remotenode, ret;
+
+ preempt_disable();
+ xh = &flnode->xh;
+ remotenode = flnode->xh.client_id;
+ ret = r2net_remote_flush_object(xh, remotenode);
+ if (ret >= 0)
+ ramster_remote_objects_flushed++;
+ else
+ ramster_remote_object_flushes_failed++;
+ preempt_enable_no_resched();
+ ramster_flnode_free(flnode, NULL);
+}
+
+int ramster_remotify_pageframe(bool eph)
+{
+ struct tmem_xhandle xh;
+ unsigned int size;
+ int remotenode, ret, zbuds;
+ struct tmem_pool *pool;
+ unsigned long flags;
+ unsigned char cksum;
+ char *p;
+ int i, j;
+ unsigned char *tmpmem[2];
+ struct tmem_handle th[2];
+ unsigned int zsize[2];
+
+ tmpmem[0] = __get_cpu_var(ramster_remoteputmem1);
+ tmpmem[1] = __get_cpu_var(ramster_remoteputmem2);
+ local_bh_disable();
+ zbuds = zbud_make_zombie_lru(&th[0], &tmpmem[0], &zsize[0], eph);
+ /* now OK to release lock set in caller */
+ local_bh_enable();
+ if (zbuds == 0)
+ goto out;
+ BUG_ON(zbuds > 2);
+ for (i = 0; i < zbuds; i++) {
+ xh.client_id = th[i].client_id;
+ xh.pool_id = th[i].pool_id;
+ xh.oid = th[i].oid;
+ xh.index = th[i].index;
+ size = zsize[i];
+ BUG_ON(size == 0 || size > zbud_max_buddy_size());
+ for (p = tmpmem[i], cksum = 0, j = 0; j < size; j++)
+ cksum += *p++;
+ ret = r2net_remote_put(&xh, tmpmem[i], size, eph, &remotenode);
+ if (ret != 0) {
+ /*
+ * This is some form of a memory leak... if the remote put
+ * fails, there will never be another attempt to remotify
+ * this page. But since we've dropped the zv pointer,
+ * the page may have been freed or the data replaced
+ * so we can't just "put it back" in the remote op list.
+ * Even if we could, not sure where to put it in the list
+ * because there may be flushes that must be strictly
+ * ordered vs the put. So leave this as a FIXME for now.
+ * But count them so we know if it becomes a problem.
+ */
+ if (eph)
+ ramster_eph_pages_remote_failed++;
+ else
+ ramster_pers_pages_remote_failed++;
+ break;
+ } else {
+ if (!eph)
+ atomic_inc(&ramster_remote_pers_pages);
+ }
+ if (eph)
+ ramster_eph_pages_remoted++;
+ else
+ ramster_pers_pages_remoted++;
+ /*
+ * data was successfully remoted so change the local version to
+ * point to the remote node where it landed
+ */
+ local_bh_disable();
+ pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh.pool_id);
+ local_irq_save(flags);
+ (void)tmem_replace(pool, &xh.oid, xh.index,
+ pampd_make_remote(remotenode, size, cksum));
+ local_irq_restore(flags);
+ zcache_put_pool(pool);
+ local_bh_enable();
+ }
+out:
+ return zbuds;
+}
+
+static void zcache_do_remotify_flushes(void)
+{
+ struct ramster_remotify_hdr *rem_op;
+ union remotify_list_node *u;
+
+ while (1) {
+ spin_lock(&ramster_rem_op_list_lock);
+ if (list_empty(&ramster_rem_op_list)) {
+ spin_unlock(&ramster_rem_op_list_lock);
+ goto out;
+ }
+ rem_op = list_first_entry(&ramster_rem_op_list,
+ struct ramster_remotify_hdr, list);
+ list_del_init(&rem_op->list);
+ spin_unlock(&ramster_rem_op_list_lock);
+ u = (union remotify_list_node *)rem_op;
+ switch (rem_op->op) {
+ case RAMSTER_REMOTIFY_FLUSH_PAGE:
+ ramster_remote_flush_page((struct flushlist_node *)u);
+ break;
+ case RAMSTER_REMOTIFY_FLUSH_OBJ:
+ ramster_remote_flush_object((struct flushlist_node *)u);
+ break;
+ default:
+ BUG();
+ }
+ }
+out:
+ return;
+}
+
+static void ramster_remotify_process(struct work_struct *work)
+{
+ static bool remotify_in_progress;
+ int i;
+
+ BUG_ON(irqs_disabled());
+ if (remotify_in_progress)
+ goto requeue;
+ if (ramster_remote_target_nodenum == -1)
+ goto requeue;
+ remotify_in_progress = true;
+ if (use_cleancache && ramster_eph_remotify_enable) {
+ for (i = 0; i < 100; i++) {
+ zcache_do_remotify_flushes();
+ (void)ramster_remotify_pageframe(true);
+ }
+ }
+ if (use_frontswap && ramster_pers_remotify_enable) {
+ for (i = 0; i < 100; i++) {
+ zcache_do_remotify_flushes();
+ (void)ramster_remotify_pageframe(false);
+ }
+ }
+ remotify_in_progress = false;
+requeue:
+ ramster_remotify_queue_delayed_work(HZ);
+}
+
+void __init ramster_remotify_init(void)
+{
+ unsigned long n = 60UL;
+ ramster_remotify_workqueue =
+ create_singlethread_workqueue("ramster_remotify");
+ ramster_remotify_queue_delayed_work(n * HZ);
+}
+
+static ssize_t ramster_manual_node_up_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int i;
+ char *p = buf;
+ for (i = 0; i < MANUAL_NODES; i++)
+ if (ramster_nodes_manual_up[i])
+ p += sprintf(p, "%d ", i);
+ p += sprintf(p, "\n");
+ return p - buf;
+}
+
+static ssize_t ramster_manual_node_up_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int err;
+ unsigned long node_num;
+
+ err = kstrtoul(buf, 10, &node_num);
+ if (err) {
+ pr_err("ramster: bad strtoul?\n");
+ return -EINVAL;
+ }
+ if (node_num >= MANUAL_NODES) {
+ pr_err("ramster: bad node_num=%lu?\n", node_num);
+ return -EINVAL;
+ }
+ if (ramster_nodes_manual_up[node_num]) {
+ pr_err("ramster: node %d already up, ignoring\n",
+ (int)node_num);
+ } else {
+ ramster_nodes_manual_up[node_num] = true;
+ r2net_hb_node_up_manual((int)node_num);
+ }
+ return count;
+}
+
+static struct kobj_attribute ramster_manual_node_up_attr = {
+ .attr = { .name = "manual_node_up", .mode = 0644 },
+ .show = ramster_manual_node_up_show,
+ .store = ramster_manual_node_up_store,
+};
+
+static ssize_t ramster_remote_target_nodenum_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ if (ramster_remote_target_nodenum == -1UL)
+ return sprintf(buf, "unset\n");
+ else
+ return sprintf(buf, "%d\n", ramster_remote_target_nodenum);
+}
+
+static ssize_t ramster_remote_target_nodenum_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int err;
+ unsigned long node_num;
+
+ err = kstrtoul(buf, 10, &node_num);
+ if (err) {
+ pr_err("ramster: bad strtoul?\n");
+ return -EINVAL;
+ } else if (node_num == -1UL) {
+ pr_err("ramster: disabling all remotification, "
+ "data may still reside on remote nodes however\n");
+ return -EINVAL;
+ } else if (node_num >= MANUAL_NODES) {
+ pr_err("ramster: bad node_num=%lu?\n", node_num);
+ return -EINVAL;
+ } else if (!ramster_nodes_manual_up[node_num]) {
+ pr_err("ramster: node %d not up, ignoring setting "
+ "of remotification target\n", (int)node_num);
+ } else if (r2net_remote_target_node_set((int)node_num) >= 0) {
+ pr_info("ramster: node %d set as remotification target\n",
+ (int)node_num);
+ ramster_remote_target_nodenum = (int)node_num;
+ } else {
+ pr_err("ramster: bad num to node node_num=%d?\n",
+ (int)node_num);
+ return -EINVAL;
+ }
+ return count;
+}
+
+static struct kobj_attribute ramster_remote_target_nodenum_attr = {
+ .attr = { .name = "remote_target_nodenum", .mode = 0644 },
+ .show = ramster_remote_target_nodenum_show,
+ .store = ramster_remote_target_nodenum_store,
+};
+
+#define RAMSTER_SYSFS_RO(_name) \
+ static ssize_t ramster_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%lu\n", ramster_##_name); \
+ } \
+ static struct kobj_attribute ramster_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = ramster_##_name##_show, \
+ }
+
+#define RAMSTER_SYSFS_RW(_name) \
+ static ssize_t ramster_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%lu\n", ramster_##_name); \
+ } \
+ static ssize_t ramster_##_name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, const char *buf, size_t count) \
+ { \
+ int err; \
+ unsigned long enable; \
+ err = kstrtoul(buf, 10, &enable); \
+ if (err) \
+ return -EINVAL; \
+ ramster_##_name = enable; \
+ return count; \
+ } \
+ static struct kobj_attribute ramster_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0644 }, \
+ .show = ramster_##_name##_show, \
+ .store = ramster_##_name##_store, \
+ }
+
+#define RAMSTER_SYSFS_RO_ATOMIC(_name) \
+ static ssize_t ramster_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%d\n", atomic_read(&ramster_##_name)); \
+ } \
+ static struct kobj_attribute ramster_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = ramster_##_name##_show, \
+ }
+
+RAMSTER_SYSFS_RO(interface_revision);
+RAMSTER_SYSFS_RO_ATOMIC(remote_pers_pages);
+RAMSTER_SYSFS_RW(pers_remotify_enable);
+RAMSTER_SYSFS_RW(eph_remotify_enable);
+
+static struct attribute *ramster_attrs[] = {
+ &ramster_interface_revision_attr.attr,
+ &ramster_remote_pers_pages_attr.attr,
+ &ramster_manual_node_up_attr.attr,
+ &ramster_remote_target_nodenum_attr.attr,
+ &ramster_pers_remotify_enable_attr.attr,
+ &ramster_eph_remotify_enable_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ramster_attr_group = {
+ .attrs = ramster_attrs,
+ .name = "ramster",
+};
+
+/*
+ * frontswap selfshrinking
+ */
+
+/* In HZ, controls frequency of worker invocation. */
+static unsigned int selfshrink_interval __read_mostly = 5;
+/* Enable/disable with sysfs. */
+static bool frontswap_selfshrinking __read_mostly;
+
+static void selfshrink_process(struct work_struct *work);
+static DECLARE_DELAYED_WORK(selfshrink_worker, selfshrink_process);
+
+/* Enable/disable with kernel boot option. */
+static bool use_frontswap_selfshrink __initdata = true;
+
+/*
+ * The default values for the following parameters were deemed reasonable
+ * by experimentation, may be workload-dependent, and can all be
+ * adjusted via sysfs.
+ */
+
+/* Control rate for frontswap shrinking. Higher hysteresis is slower. */
+static unsigned int frontswap_hysteresis __read_mostly = 20;
+
+/*
+ * Number of selfshrink worker invocations to wait before observing that
+ * frontswap selfshrinking should commence. Note that selfshrinking does
+ * not use a separate worker thread.
+ */
+static unsigned int frontswap_inertia __read_mostly = 3;
+
+/* Countdown to next invocation of frontswap_shrink() */
+static unsigned long frontswap_inertia_counter;
+
+/*
+ * Invoked by the selfshrink worker thread, uses current number of pages
+ * in frontswap (frontswap_curr_pages()), previous status, and control
+ * values (hysteresis and inertia) to determine if frontswap should be
+ * shrunk and what the new frontswap size should be. Note that
+ * frontswap_shrink is essentially a partial swapoff that immediately
+ * transfers pages from the "swap device" (frontswap) back into kernel
+ * RAM; despite the name, frontswap "shrinking" is very different from
+ * the "shrinker" interface used by the kernel MM subsystem to reclaim
+ * memory.
+ */
+static void frontswap_selfshrink(void)
+{
+ static unsigned long cur_frontswap_pages;
+ static unsigned long last_frontswap_pages;
+ static unsigned long tgt_frontswap_pages;
+
+ last_frontswap_pages = cur_frontswap_pages;
+ cur_frontswap_pages = frontswap_curr_pages();
+ if (!cur_frontswap_pages ||
+ (cur_frontswap_pages > last_frontswap_pages)) {
+ frontswap_inertia_counter = frontswap_inertia;
+ return;
+ }
+ if (frontswap_inertia_counter && --frontswap_inertia_counter)
+ return;
+ if (cur_frontswap_pages <= frontswap_hysteresis)
+ tgt_frontswap_pages = 0;
+ else
+ tgt_frontswap_pages = cur_frontswap_pages -
+ (cur_frontswap_pages / frontswap_hysteresis);
+ frontswap_shrink(tgt_frontswap_pages);
+}
+
+static int __init ramster_nofrontswap_selfshrink_setup(char *s)
+{
+ use_frontswap_selfshrink = false;
+ return 1;
+}
+
+__setup("noselfshrink", ramster_nofrontswap_selfshrink_setup);
+
+static void selfshrink_process(struct work_struct *work)
+{
+ if (frontswap_selfshrinking && frontswap_enabled) {
+ frontswap_selfshrink();
+ schedule_delayed_work(&selfshrink_worker,
+ selfshrink_interval * HZ);
+ }
+}
+
+void ramster_cpu_up(int cpu)
+{
+ unsigned char *p1 = kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT);
+ unsigned char *p2 = kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT);
+ BUG_ON(!p1 || !p2);
+ per_cpu(ramster_remoteputmem1, cpu) = p1;
+ per_cpu(ramster_remoteputmem2, cpu) = p2;
+}
+
+void ramster_cpu_down(int cpu)
+{
+ struct ramster_preload *kp;
+
+ kfree(per_cpu(ramster_remoteputmem1, cpu));
+ per_cpu(ramster_remoteputmem1, cpu) = NULL;
+ kfree(per_cpu(ramster_remoteputmem2, cpu));
+ per_cpu(ramster_remoteputmem2, cpu) = NULL;
+ kp = &per_cpu(ramster_preloads, cpu);
+ if (kp->flnode) {
+ kmem_cache_free(ramster_flnode_cache, kp->flnode);
+ kp->flnode = NULL;
+ }
+}
+
+void ramster_register_pamops(struct tmem_pamops *pamops)
+{
+ pamops->free_obj = ramster_pampd_free_obj;
+ pamops->new_obj = ramster_pampd_new_obj;
+ pamops->replace_in_obj = ramster_pampd_replace_in_obj;
+ pamops->is_remote = ramster_pampd_is_remote;
+ pamops->repatriate = ramster_pampd_repatriate;
+ pamops->repatriate_preload = ramster_pampd_repatriate_preload;
+}
+
+void __init ramster_init(bool cleancache, bool frontswap,
+ bool frontswap_exclusive_gets)
+{
+ int ret = 0;
+
+ if (cleancache)
+ use_cleancache = true;
+ if (frontswap)
+ use_frontswap = true;
+ if (frontswap_exclusive_gets)
+ use_frontswap_exclusive_gets = true;
+ ramster_debugfs_init();
+ ret = sysfs_create_group(mm_kobj, &ramster_attr_group);
+ if (ret)
+ pr_err("ramster: can't create sysfs for ramster\n");
+ (void)r2net_register_handlers();
+ INIT_LIST_HEAD(&ramster_rem_op_list);
+ ramster_flnode_cache = kmem_cache_create("ramster_flnode",
+ sizeof(struct flushlist_node), 0, 0, NULL);
+ frontswap_selfshrinking = use_frontswap_selfshrink;
+ if (frontswap_selfshrinking) {
+ pr_info("ramster: Initializing frontswap selfshrink driver.\n");
+ schedule_delayed_work(&selfshrink_worker,
+ selfshrink_interval * HZ);
+ }
+ ramster_remotify_init();
+}
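The frontswap selfshrinker above only acts once the number of pages in frontswap has stopped growing and the inertia countdown has expired; each pass then asks frontswap_shrink() to pull roughly 1/frontswap_hysteresis of the remaining pages back into kernel RAM. A small sketch of that target calculation, with made-up numbers for illustration (not part of the patch):

/*
 * Mirrors the computation in frontswap_selfshrink(): with the default
 * hysteresis of 20 and, say, 10000 pages currently in frontswap, the
 * target is 10000 - 10000/20 = 9500, i.e. about 5% of the pages are
 * swapped back into RAM per selfshrink pass.
 */
static unsigned long example_shrink_target(unsigned long cur_pages,
                                           unsigned int hysteresis)
{
        if (cur_pages <= hysteresis)
                return 0;
        return cur_pages - (cur_pages / hysteresis);
}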
diff --git a/drivers/staging/ramster/ramster/ramster.h b/drivers/staging/ramster/ramster/ramster.h
new file mode 100644
index 000000000000..12ae56f09ca4
--- /dev/null
+++ b/drivers/staging/ramster/ramster/ramster.h
@@ -0,0 +1,161 @@
+/*
+ * ramster.h
+ *
+ * Peer-to-peer transcendent memory
+ *
+ * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
+ */
+
+#ifndef _RAMSTER_RAMSTER_H_
+#define _RAMSTER_RAMSTER_H_
+
+#include "../tmem.h"
+
+enum ramster_remotify_op {
+ RAMSTER_REMOTIFY_FLUSH_PAGE,
+ RAMSTER_REMOTIFY_FLUSH_OBJ,
+};
+
+struct ramster_remotify_hdr {
+ enum ramster_remotify_op op;
+ struct list_head list;
+};
+
+struct flushlist_node {
+ struct ramster_remotify_hdr rem_op;
+ struct tmem_xhandle xh;
+};
+
+struct ramster_preload {
+ struct flushlist_node *flnode;
+};
+
+union remotify_list_node {
+ struct ramster_remotify_hdr rem_op;
+ struct {
+ struct ramster_remotify_hdr rem_op;
+ struct tmem_handle th;
+ } zbud_hdr;
+ struct flushlist_node flist;
+};
+
+/*
+ * format of remote pampd:
+ * bit 0 is reserved for zbud (in-page buddy selection)
+ * bit 1 == intransit
+ * bit 2 == is_remote... if this bit is set, then
+ * bit 3-10 == remotenode
+ * bit 11-23 == size
+ * bit 24-31 == cksum
+ */
+#define FAKE_PAMPD_INTRANSIT_BITS 1
+#define FAKE_PAMPD_ISREMOTE_BITS 1
+#define FAKE_PAMPD_REMOTENODE_BITS 8
+#define FAKE_PAMPD_REMOTESIZE_BITS 13
+#define FAKE_PAMPD_CHECKSUM_BITS 8
+
+#define FAKE_PAMPD_INTRANSIT_SHIFT 1
+#define FAKE_PAMPD_ISREMOTE_SHIFT (FAKE_PAMPD_INTRANSIT_SHIFT + \
+ FAKE_PAMPD_INTRANSIT_BITS)
+#define FAKE_PAMPD_REMOTENODE_SHIFT (FAKE_PAMPD_ISREMOTE_SHIFT + \
+ FAKE_PAMPD_ISREMOTE_BITS)
+#define FAKE_PAMPD_REMOTESIZE_SHIFT (FAKE_PAMPD_REMOTENODE_SHIFT + \
+ FAKE_PAMPD_REMOTENODE_BITS)
+#define FAKE_PAMPD_CHECKSUM_SHIFT (FAKE_PAMPD_REMOTESIZE_SHIFT + \
+ FAKE_PAMPD_REMOTESIZE_BITS)
+
+#define FAKE_PAMPD_MASK(x) ((1UL << (x)) - 1)
+
+static inline void *pampd_make_remote(int remotenode, size_t size,
+ unsigned char cksum)
+{
+ unsigned long fake_pampd = 0;
+ fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
+ fake_pampd |= ((unsigned long)remotenode &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS)) <<
+ FAKE_PAMPD_REMOTENODE_SHIFT;
+ fake_pampd |= ((unsigned long)size &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS)) <<
+ FAKE_PAMPD_REMOTESIZE_SHIFT;
+ fake_pampd |= ((unsigned long)cksum &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS)) <<
+ FAKE_PAMPD_CHECKSUM_SHIFT;
+ return (void *)fake_pampd;
+}
+
+static inline unsigned int pampd_remote_node(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_REMOTENODE_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS);
+}
+
+static inline unsigned int pampd_remote_size(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_REMOTESIZE_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS);
+}
+
+static inline unsigned char pampd_remote_cksum(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_CHECKSUM_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS);
+}
+
+static inline bool pampd_is_remote(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_ISREMOTE_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_ISREMOTE_BITS);
+}
+
+static inline bool pampd_is_intransit(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_INTRANSIT_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_INTRANSIT_BITS);
+}
+
+/* note that it is a BUG for intransit to be set without isremote also set */
+static inline void *pampd_mark_intransit(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+
+ fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
+ fake_pampd |= 1UL << FAKE_PAMPD_INTRANSIT_SHIFT;
+ return (void *)fake_pampd;
+}
+
+static inline void *pampd_mask_intransit_and_remote(void *marked_pampd)
+{
+ unsigned long pampd = (unsigned long)marked_pampd;
+
+ pampd &= ~(1UL << FAKE_PAMPD_INTRANSIT_SHIFT);
+ pampd &= ~(1UL << FAKE_PAMPD_ISREMOTE_SHIFT);
+ return (void *)pampd;
+}
+
+extern int r2net_remote_async_get(struct tmem_xhandle *,
+ bool, int, size_t, uint8_t, void *extra);
+extern int r2net_remote_put(struct tmem_xhandle *, char *, size_t,
+ bool, int *);
+extern int r2net_remote_flush(struct tmem_xhandle *, int);
+extern int r2net_remote_flush_object(struct tmem_xhandle *, int);
+extern int r2net_register_handlers(void);
+extern int r2net_remote_target_node_set(int);
+
+extern int ramster_remotify_pageframe(bool);
+extern void ramster_init(bool, bool, bool);
+extern void ramster_register_pamops(struct tmem_pamops *);
+extern int ramster_localify(int, struct tmem_oid *oidp, uint32_t, char *,
+ unsigned int, void *);
+extern void *ramster_pampd_free(void *, struct tmem_pool *, struct tmem_oid *,
+ uint32_t, bool);
+extern void ramster_count_foreign_pages(bool, int);
+extern int ramster_do_preload_flnode(struct tmem_pool *);
+extern void ramster_cpu_up(int);
+extern void ramster_cpu_down(int);
+
+#endif /* _RAMSTER_RAMSTER_H */
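The fake pampd above packs all remote-page metadata into a single pointer-sized word: bit 0 is left to zbud, bit 1 flags in-transit, bit 2 flags is_remote, and the remote node number, compressed size and checksum occupy bits 3-10, 11-23 and 24-31 respectively. A short sketch exercising the helpers with made-up values (illustrative only, not part of the patch):

static void example_fake_pampd(void)
{
        /* remote node 3, 812 bytes of compressed data, checksum 0x5a */
        void *p = pampd_make_remote(3, 812, 0x5a);

        BUG_ON(!pampd_is_remote(p));
        BUG_ON(pampd_remote_node(p) != 3);
        BUG_ON(pampd_remote_size(p) != 812);
        BUG_ON(pampd_remote_cksum(p) != 0x5a);

        /* marking in-transit keeps the is_remote bit set as well */
        p = pampd_mark_intransit(p);
        BUG_ON(!pampd_is_intransit(p));
        BUG_ON(!pampd_is_remote(p));
}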
diff --git a/drivers/staging/ramster/cluster/ramster_nodemanager.h b/drivers/staging/ramster/ramster/ramster_nodemanager.h
index 49f879d943ab..49f879d943ab 100644
--- a/drivers/staging/ramster/cluster/ramster_nodemanager.h
+++ b/drivers/staging/ramster/ramster/ramster_nodemanager.h
diff --git a/drivers/staging/ramster/cluster/tcp.c b/drivers/staging/ramster/ramster/tcp.c
index d0a07d722b61..aa2a1a763aa4 100644
--- a/drivers/staging/ramster/cluster/tcp.c
+++ b/drivers/staging/ramster/ramster/tcp.c
@@ -541,8 +541,7 @@ static void r2net_set_nn_state(struct r2net_node *nn,
}
if (was_valid && !valid) {
- printk(KERN_NOTICE "ramster: No longer connected to "
- SC_NODEF_FMT "\n",
+ pr_notice("ramster: No longer connected to " SC_NODEF_FMT "\n",
old_sc->sc_node->nd_name, old_sc->sc_node->nd_num,
&old_sc->sc_node->nd_ipv4_address,
ntohs(old_sc->sc_node->nd_ipv4_port));
@@ -551,7 +550,7 @@ static void r2net_set_nn_state(struct r2net_node *nn,
if (!was_valid && valid) {
cancel_delayed_work(&nn->nn_connect_expired);
- printk(KERN_NOTICE "ramster: %s " SC_NODEF_FMT "\n",
+ pr_notice("ramster: %s " SC_NODEF_FMT "\n",
r2nm_this_node() > sc->sc_node->nd_num ?
"Connected to" : "Accepted connection from",
sc->sc_node->nd_name, sc->sc_node->nd_num,
@@ -644,7 +643,7 @@ static void r2net_state_change(struct sock *sk)
r2net_sc_queue_work(sc, &sc->sc_connect_work);
break;
default:
- printk(KERN_INFO "ramster: Connection to "
+ pr_info("ramster: Connection to "
SC_NODEF_FMT " shutdown, state %d\n",
sc->sc_node->nd_name, sc->sc_node->nd_num,
&sc->sc_node->nd_ipv4_address,
@@ -1160,7 +1159,8 @@ int r2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
/* wait on other node's handler */
r2net_set_nst_status_time(&nst);
- wait_event(nsw.ns_wq, r2net_nsw_completed(nn, &nsw));
+ wait_event(nsw.ns_wq, r2net_nsw_completed(nn, &nsw) ||
+ nn->nn_persistent_error || !nn->nn_sc_valid);
r2net_update_send_stats(&nst, sc);
@@ -1325,8 +1325,10 @@ static int r2net_process_message(struct r2net_sock_container *sc,
if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len)
syserr = R2NET_ERR_OVERFLOW;
- if (syserr != R2NET_ERR_NONE)
+ if (syserr != R2NET_ERR_NONE) {
+ pr_err("ramster_r2net, message length problem\n");
goto out_respond;
+ }
r2net_set_func_start_time(sc);
sc->sc_msg_key = be32_to_cpu(hdr->key);
@@ -1393,7 +1395,7 @@ static int r2net_check_handshake(struct r2net_sock_container *sc)
struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
if (hand->protocol_version != cpu_to_be64(R2NET_PROTOCOL_VERSION)) {
- printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " Advertised net "
+ pr_notice("ramster: " SC_NODEF_FMT " Advertised net "
"protocol version %llu but %llu is required. "
"Disconnecting.\n", sc->sc_node->nd_name,
sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
@@ -1413,7 +1415,7 @@ static int r2net_check_handshake(struct r2net_sock_container *sc)
*/
if (be32_to_cpu(hand->r2net_idle_timeout_ms) !=
r2net_idle_timeout()) {
- printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " uses a network "
+ pr_notice("ramster: " SC_NODEF_FMT " uses a network "
"idle timeout of %u ms, but we use %u ms locally. "
"Disconnecting.\n", sc->sc_node->nd_name,
sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
@@ -1426,7 +1428,7 @@ static int r2net_check_handshake(struct r2net_sock_container *sc)
if (be32_to_cpu(hand->r2net_keepalive_delay_ms) !=
r2net_keepalive_delay()) {
- printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " uses a keepalive "
+ pr_notice("ramster: " SC_NODEF_FMT " uses a keepalive "
"delay of %u ms, but we use %u ms locally. "
"Disconnecting.\n", sc->sc_node->nd_name,
sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
@@ -1439,7 +1441,7 @@ static int r2net_check_handshake(struct r2net_sock_container *sc)
if (be32_to_cpu(hand->r2hb_heartbeat_timeout_ms) !=
R2HB_MAX_WRITE_TIMEOUT_MS) {
- printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " uses a heartbeat "
+ pr_notice("ramster: " SC_NODEF_FMT " uses a heartbeat "
"timeout of %u ms, but we use %u ms locally. "
"Disconnecting.\n", sc->sc_node->nd_name,
sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
@@ -1516,6 +1518,7 @@ static int r2net_advance_rx(struct r2net_sock_container *sc)
if (be16_to_cpu(hdr->data_len) >
R2NET_MAX_PAYLOAD_BYTES)
ret = -EOVERFLOW;
+ WARN_ON_ONCE(ret == -EOVERFLOW);
}
}
if (ret <= 0)
@@ -1583,7 +1586,6 @@ static void r2net_rx_until_empty(struct work_struct *work)
/* not permanent so read failed handshake can retry */
r2net_ensure_shutdown(nn, sc, 0);
}
-
sc_put(sc);
}
@@ -1659,6 +1661,7 @@ static void r2net_sc_send_keep_req(struct work_struct *work)
static void r2net_idle_timer(unsigned long data)
{
struct r2net_sock_container *sc = (struct r2net_sock_container *)data;
+ struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
#ifdef CONFIG_DEBUG_FS
unsigned long msecs = ktime_to_ms(ktime_get()) -
ktime_to_ms(sc->sc_tv_timer);
@@ -1666,7 +1669,7 @@ static void r2net_idle_timer(unsigned long data)
unsigned long msecs = r2net_idle_timeout();
#endif
- printk(KERN_NOTICE "ramster: Connection to " SC_NODEF_FMT " has been "
+ pr_notice("ramster: Connection to " SC_NODEF_FMT " has been "
"idle for %lu.%lu secs, shutting it down.\n",
sc->sc_node->nd_name, sc->sc_node->nd_num,
&sc->sc_node->nd_ipv4_address, ntohs(sc->sc_node->nd_ipv4_port),
@@ -1676,13 +1679,8 @@ static void r2net_idle_timer(unsigned long data)
* Initialize the nn_timeout so that the next connection attempt
* will continue in r2net_start_connect.
*/
- /* Avoid spurious shutdowns... not sure if this is still necessary */
- pr_err("ramster_idle_timer, skipping shutdown work\n");
-#if 0
- /* old code used to do these two lines */
atomic_set(&nn->nn_timeout, 1);
r2net_sc_queue_work(sc, &sc->sc_shutdown_work);
-#endif
}
static void r2net_sc_reset_idle_timer(struct r2net_sock_container *sc)
@@ -1807,7 +1805,7 @@ static void r2net_start_connect(struct work_struct *work)
out:
if (ret) {
- printk(KERN_NOTICE "ramster: Connect attempt to " SC_NODEF_FMT
+ pr_notice("ramster: Connect attempt to " SC_NODEF_FMT
" failed with errno %d\n", sc->sc_node->nd_name,
sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
ntohs(sc->sc_node->nd_ipv4_port), ret);
@@ -1833,7 +1831,7 @@ static void r2net_connect_expired(struct work_struct *work)
spin_lock(&nn->nn_lock);
if (!nn->nn_sc_valid) {
- printk(KERN_NOTICE "ramster: No connection established with "
+ pr_notice("ramster: No connection established with "
"node %u after %u.%u seconds, giving up.\n",
r2net_num_from_nn(nn),
r2net_idle_timeout() / 1000,
@@ -1969,7 +1967,7 @@ static int r2net_accept_one(struct socket *sock)
node = r2nm_get_node_by_ip(sin.sin_addr.s_addr);
if (node == NULL) {
- printk(KERN_NOTICE "ramster: Attempt to connect from unknown "
+ pr_notice("ramster: Attempt to connect from unknown "
"node at %pI4:%d\n", &sin.sin_addr.s_addr,
ntohs(sin.sin_port));
ret = -EINVAL;
@@ -1978,7 +1976,7 @@ static int r2net_accept_one(struct socket *sock)
if (r2nm_this_node() >= node->nd_num) {
local_node = r2nm_get_node_by_num(r2nm_this_node());
- printk(KERN_NOTICE "ramster: Unexpected connect attempt seen "
+ pr_notice("ramster: Unexpected connect attempt seen "
"at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
"%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
&(local_node->nd_ipv4_address),
@@ -2008,7 +2006,7 @@ static int r2net_accept_one(struct socket *sock)
ret = 0;
spin_unlock(&nn->nn_lock);
if (ret) {
- printk(KERN_NOTICE "ramster: Attempt to connect from node '%s' "
+ pr_notice("ramster: Attempt to connect from node '%s' "
"at %pI4:%d but it already has an open connection\n",
node->nd_name, &sin.sin_addr.s_addr,
ntohs(sin.sin_port));
@@ -2091,8 +2089,7 @@ static int r2net_open_listening_sock(__be32 addr, __be16 port)
ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0) {
- printk(KERN_ERR "ramster: Error %d while creating socket\n",
- ret);
+ pr_err("ramster: Error %d while creating socket\n", ret);
goto out;
}
@@ -2106,17 +2103,17 @@ static int r2net_open_listening_sock(__be32 addr, __be16 port)
r2net_listen_sock = sock;
INIT_WORK(&r2net_listen_work, r2net_accept_many);
- sock->sk->sk_reuse = SK_CAN_REUSE;
+ sock->sk->sk_reuse = /* SK_CAN_REUSE FIXME FOR 3.4 */ 1;
ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
if (ret < 0) {
- printk(KERN_ERR "ramster: Error %d while binding socket at "
- "%pI4:%u\n", ret, &addr, ntohs(port));
+ pr_err("ramster: Error %d while binding socket at %pI4:%u\n",
+ ret, &addr, ntohs(port));
goto out;
}
ret = sock->ops->listen(sock, 64);
if (ret < 0)
- printk(KERN_ERR "ramster: Error %d while listening on %pI4:%u\n",
+ pr_err("ramster: Error %d while listening on %pI4:%u\n",
ret, &addr, ntohs(port));
out:
diff --git a/drivers/staging/ramster/cluster/tcp.h b/drivers/staging/ramster/ramster/tcp.h
index 9d05833452b5..9d05833452b5 100644
--- a/drivers/staging/ramster/cluster/tcp.h
+++ b/drivers/staging/ramster/ramster/tcp.h
diff --git a/drivers/staging/ramster/cluster/tcp_internal.h b/drivers/staging/ramster/ramster/tcp_internal.h
index 4d8cc9f96fd2..4d8cc9f96fd2 100644
--- a/drivers/staging/ramster/cluster/tcp_internal.h
+++ b/drivers/staging/ramster/ramster/tcp_internal.h
diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
index 8f2f6892d8d3..a2b7e03b6062 100644
--- a/drivers/staging/ramster/tmem.c
+++ b/drivers/staging/ramster/tmem.c
@@ -1,33 +1,43 @@
/*
* In-kernel transcendent memory (generic implementation)
*
- * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
+ * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
*
* The primary purpose of Transcedent Memory ("tmem") is to map object-oriented
* "handles" (triples containing a pool id, and object id, and an index), to
* pages in a page-accessible memory (PAM). Tmem references the PAM pages via
* an abstract "pampd" (PAM page-descriptor), which can be operated on by a
* set of functions (pamops). Each pampd contains some representation of
- * PAGE_SIZE bytes worth of data. Tmem must support potentially millions of
- * pages and must be able to insert, find, and delete these pages at a
- * potential frequency of thousands per second concurrently across many CPUs,
- * (and, if used with KVM, across many vcpus across many guests).
- * Tmem is tracked with a hierarchy of data structures, organized by
- * the elements in a handle-tuple: pool_id, object_id, and page index.
- * One or more "clients" (e.g. guests) each provide one or more tmem_pools.
- * Each pool, contains a hash table of rb_trees of tmem_objs. Each
- * tmem_obj contains a radix-tree-like tree of pointers, with intermediate
- * nodes called tmem_objnodes. Each leaf pointer in this tree points to
- * a pampd, which is accessible only through a small set of callbacks
- * registered by the PAM implementation (see tmem_register_pamops). Tmem
- * does all memory allocation via a set of callbacks registered by the tmem
- * host implementation (e.g. see tmem_register_hostops).
+ * PAGE_SIZE bytes worth of data. For those familiar with key-value stores,
+ * the tmem handle is a three-level hierarchical key, and the value is always
+ * reconstituted (but not necessarily stored) as PAGE_SIZE bytes and is
+ * referenced in the datastore by the pampd. The hierarchy is required
+ * to ensure that certain invalidation functions can be performed efficiently
+ * (i.e. flush all indexes associated with this object_id, or
+ * flush all objects associated with this pool).
+ *
+ * Tmem must support potentially millions of pages and must be able to insert,
+ * find, and delete these pages at a potential frequency of thousands per
+ * second concurrently across many CPUs, (and, if used with KVM, across many
+ * vcpus across many guests). Tmem is tracked with a hierarchy of data
+ * structures, organized by the elements in the handle-tuple: pool_id,
+ * object_id, and page index. One or more "clients" (e.g. guests) each
+ * provide one or more tmem_pools. Each pool, contains a hash table of
+ * rb_trees of tmem_objs. Each tmem_obj contains a radix-tree-like tree
+ * of pointers, with intermediate nodes called tmem_objnodes. Each leaf
+ * pointer in this tree points to a pampd, which is accessible only through
+ * a small set of callbacks registered by the PAM implementation (see
+ * tmem_register_pamops).  Tmem only needs to do memory allocation for objs
+ * and objnodes and this is done via a set of callbacks that must be
+ * registered by the tmem host implementation (e.g. see tmem_register_hostops).
*/
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
+#ifdef CONFIG_RAMSTER
#include <linux/delay.h>
+#endif
#include "tmem.h"
@@ -52,7 +62,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
/*
* A tmem host implementation must use this function to register
- * callbacks for a page-accessible memory (PAM) implementation
+ * callbacks for a page-accessible memory (PAM) implementation.
*/
static struct tmem_pamops tmem_pamops;
@@ -67,42 +77,62 @@ void tmem_register_pamops(struct tmem_pamops *m)
* So an rb_tree is an ideal data structure to manage tmem_objs. But because
* of the potentially huge number of tmem_objs, each pool manages a hashtable
* of rb_trees to reduce search, insert, delete, and rebalancing time.
- * Each hashbucket also has a lock to manage concurrent access.
+ * Each hashbucket also has a lock to manage concurrent access; no
+ * searches, inserts, or deletions can be performed unless the lock is
+ * held.  As a result, care must be taken to ensure tmem routines are not
+ * called recursively: a recursive call may appear to work most of the
+ * time, but will occasionally deadlock on the hashbucket lock.
*
- * The following routines manage tmem_objs. When any tmem_obj is accessed,
- * the hashbucket lock must be held.
+ * The following routines manage tmem_objs. In all of these routines,
+ * the hashbucket lock is already held.
*/
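Editorial note: concretely, every caller follows the same pattern: hash the oid to a bucket, take that bucket's lock, and do all object work before unlocking, never re-entering tmem in between. A minimal sketch (the function name is hypothetical; it mirrors tmem_flush_object and tmem_replace below):

	static bool __maybe_unused tmem_obj_exists(struct tmem_pool *pool,
						   struct tmem_oid *oidp)
	{
		struct tmem_hashbucket *hb =
			&pool->hashbucket[tmem_oid_hash(oidp)];
		bool found;

		spin_lock(&hb->lock);
		found = tmem_obj_find(hb, oidp) != NULL;
		/* any work on the object must happen here, under hb->lock */
		spin_unlock(&hb->lock);
		return found;
	}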
-/* searches for object==oid in pool, returns locked object if found */
-static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
- struct tmem_oid *oidp)
+/* Search for object==oid in pool, returns object if found. */
+static struct tmem_obj *__tmem_obj_find(struct tmem_hashbucket *hb,
+ struct tmem_oid *oidp,
+ struct rb_node **parent,
+ struct rb_node ***link)
{
- struct rb_node *rbnode;
- struct tmem_obj *obj;
+ struct rb_node *_parent = NULL, **rbnode;
+ struct tmem_obj *obj = NULL;
- rbnode = hb->obj_rb_root.rb_node;
- while (rbnode) {
- BUG_ON(RB_EMPTY_NODE(rbnode));
- obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
+ rbnode = &hb->obj_rb_root.rb_node;
+ while (*rbnode) {
+ BUG_ON(RB_EMPTY_NODE(*rbnode));
+ _parent = *rbnode;
+ obj = rb_entry(*rbnode, struct tmem_obj,
+ rb_tree_node);
switch (tmem_oid_compare(oidp, &obj->oid)) {
case 0: /* equal */
goto out;
case -1:
- rbnode = rbnode->rb_left;
+ rbnode = &(*rbnode)->rb_left;
break;
case 1:
- rbnode = rbnode->rb_right;
+ rbnode = &(*rbnode)->rb_right;
break;
}
}
+
+ if (parent)
+ *parent = _parent;
+ if (link)
+ *link = rbnode;
obj = NULL;
out:
return obj;
}
-static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *);
+static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
+ struct tmem_oid *oidp)
+{
+ return __tmem_obj_find(hb, oidp, NULL, NULL);
+}
-/* free an object that has no more pampds in it */
+static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *, bool);
+
+/* Free an object that has no more pampds in it. */
static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
{
struct tmem_pool *pool;
@@ -113,7 +143,7 @@ static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
pool = obj->pool;
BUG_ON(pool == NULL);
if (obj->objnode_tree_root != NULL) /* may be "stump" with no leaves */
- tmem_pampd_destroy_all_in_obj(obj);
+ tmem_pampd_destroy_all_in_obj(obj, false);
BUG_ON(obj->objnode_tree_root != NULL);
BUG_ON((long)obj->objnode_count != 0);
atomic_dec(&pool->obj_count);
@@ -125,15 +155,14 @@ static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
}
/*
- * initialize, and insert an tmem_object_root (called only if find failed)
+ * Initialize and insert a tmem_object_root (called only if find failed).
*/
static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
struct tmem_pool *pool,
struct tmem_oid *oidp)
{
struct rb_root *root = &hb->obj_rb_root;
- struct rb_node **new = &(root->rb_node), *parent = NULL;
- struct tmem_obj *this;
+ struct rb_node **new = NULL, *parent = NULL;
BUG_ON(pool == NULL);
atomic_inc(&pool->obj_count);
@@ -143,24 +172,15 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
obj->oid = *oidp;
obj->objnode_count = 0;
obj->pampd_count = 0;
- (*tmem_pamops.new_obj)(obj);
+#ifdef CONFIG_RAMSTER
+ if (tmem_pamops.new_obj != NULL)
+ (*tmem_pamops.new_obj)(obj);
+#endif
SET_SENTINEL(obj, OBJ);
- while (*new) {
- BUG_ON(RB_EMPTY_NODE(*new));
- this = rb_entry(*new, struct tmem_obj, rb_tree_node);
- parent = *new;
- switch (tmem_oid_compare(oidp, &this->oid)) {
- case 0:
- BUG(); /* already present; should never happen! */
- break;
- case -1:
- new = &(*new)->rb_left;
- break;
- case 1:
- new = &(*new)->rb_right;
- break;
- }
- }
+
+ if (__tmem_obj_find(hb, oidp, &parent, &new))
+ BUG();
+
rb_link_node(&obj->rb_tree_node, parent, new);
rb_insert_color(&obj->rb_tree_node, root);
}
@@ -170,7 +190,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
* "ephemeral" vs "persistent". These attributes apply to all tmem_objs
* and all pampds that belong to a tmem_pool. A tmem_pool is created
* or deleted relatively rarely (for example, when a filesystem is
- * mounted or unmounted.
+ * mounted or unmounted).
*/
/* flush all data from a pool and, optionally, free it */
@@ -188,7 +208,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
while (rbnode != NULL) {
obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
rbnode = rb_next(rbnode);
- tmem_pampd_destroy_all_in_obj(obj);
+ tmem_pampd_destroy_all_in_obj(obj, true);
tmem_obj_free(obj, hb);
(*tmem_hostops.obj_free)(obj, pool);
}
@@ -274,7 +294,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
}
/*
- * lookup index in object and return associated pampd (or NULL if not found)
+ * Lookup index in object and return associated pampd (or NULL if not found).
*/
static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
{
@@ -316,6 +336,7 @@ static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
return slot != NULL ? *slot : NULL;
}
+#ifdef CONFIG_RAMSTER
static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
void *new_pampd, bool no_free)
{
@@ -333,6 +354,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
}
return ret;
}
+#endif
static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
void *pampd)
@@ -470,7 +492,7 @@ out:
return slot;
}
-/* recursively walk the objnode_tree destroying pampds and objnodes */
+/* Recursively walk the objnode_tree destroying pampds and objnodes. */
static void tmem_objnode_node_destroy(struct tmem_obj *obj,
struct tmem_objnode *objnode,
unsigned int ht)
@@ -495,7 +517,8 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
}
}
-static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
+static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
+ bool pool_destroy)
{
if (obj->objnode_tree_root == NULL)
return;
@@ -510,7 +533,10 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
obj->objnode_tree_height = 0;
}
obj->objnode_tree_root = NULL;
- (*tmem_pamops.free_obj)(obj->pool, obj);
+#ifdef CONFIG_RAMSTER
+ if (tmem_pamops.free_obj != NULL)
+ (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
+#endif
}
/*
@@ -523,17 +549,16 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
*/
/*
- * "Put" a page, e.g. copy a page from the kernel into newly allocated
- * PAM space (if such space is available). Tmem_put is complicated by
- * a corner case: What if a page with matching handle already exists in
- * tmem? To guarantee coherency, one of two actions is necessary: Either
- * the data for the page must be overwritten, or the page must be
- * "flushed" so that the data is not accessible to a subsequent "get".
- * Since these "duplicate puts" are relatively rare, this implementation
- * always flushes for simplicity.
+ * "Put" a page, e.g. associate the passed pampd with the passed handle.
+ * Tmem_put is complicated by a corner case: What if a page with matching
+ * handle already exists in tmem? To guarantee coherency, one of two
+ * actions is necessary: Either the data for the page must be overwritten,
+ * or the page must be "flushed" so that the data is not accessible to a
+ * subsequent "get". Since these "duplicate puts" are relatively rare,
+ * this implementation always flushes for simplicity.
*/
int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
- char *data, size_t size, bool raw, int ephemeral)
+ bool raw, void *pampd_to_use)
{
struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
void *pampd = NULL, *pampd_del = NULL;
@@ -566,19 +591,17 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
}
BUG_ON(obj == NULL);
BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
- pampd = (*tmem_pamops.create)(data, size, raw, ephemeral,
- obj->pool, &obj->oid, index);
- if (unlikely(pampd == NULL))
- goto free;
+ pampd = pampd_to_use;
+ BUG_ON(pampd_to_use == NULL);
ret = tmem_pampd_add_to_obj(obj, index, pampd);
if (unlikely(ret == -ENOMEM))
/* may have partially built objnode tree ("stump") */
goto delete_and_free;
+ (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
goto out;
delete_and_free:
(void)tmem_pampd_delete_from_obj(obj, index);
-free:
if (pampd)
(*tmem_pamops.free)(pampd, pool, NULL, 0, true);
if (objnew) {
@@ -590,6 +613,16 @@ out:
return ret;
}
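Editorial note: with the reworked interface, the PAM implementation builds the pampd before calling tmem_put(), and tmem_put() invokes pamops->create_finish() once the pampd is safely linked into the object. A hedged caller-side sketch; my_pamd_create() is a hypothetical stand-in for whatever allocator the PAM layer provides:

	static int __maybe_unused example_put(struct tmem_pool *pool,
					      struct tmem_oid *oidp,
					      uint32_t index, char *data,
					      size_t size, bool raw)
	{
		/* hypothetical PAM-layer allocator; compresses/stores data */
		void *pampd = my_pamd_create(data, size, raw,
					     is_ephemeral(pool),
					     pool, oidp, index);

		if (pampd == NULL)
			return -ENOMEM;
		/* on success, tmem_put() has already called create_finish() */
		return tmem_put(pool, oidp, index, raw, pampd);
	}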
+#ifdef CONFIG_RAMSTER
+/*
+ * For ramster only: The following routines provide a two-step sequence
+ * to allow the caller to replace a pampd in the tmem data structures with
+ * another pampd.  Here, we look up the passed handle and, if found, return the
+ * associated pampd and object, leaving the hashbucket locked and returning
+ * a reference to it.  The caller is expected to immediately call the
+ * matching tmem_localify_finish routine, which will handle the replacement
+ * and unlock the hashbucket.
+ */
void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
uint32_t index, struct tmem_obj **ret_obj,
void **saved_hb)
@@ -618,6 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
if (pampd != NULL) {
BUG_ON(obj == NULL);
(void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
+ (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
} else if (delete) {
BUG_ON(obj == NULL);
(void)tmem_pampd_delete_from_obj(obj, index);
@@ -625,6 +659,9 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
spin_unlock(&hb->lock);
}
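Editorial note: the two routines above are meant to be used back-to-back from ramster's remote-get completion path. A hedged sketch of the expected sequence (function name hypothetical, error handling elided):

	static void __maybe_unused example_localify(struct tmem_pool *pool,
						    struct tmem_oid *oidp,
						    uint32_t index,
						    void *new_pampd)
	{
		struct tmem_obj *obj;
		void *saved_hb, *old_pampd;

		/* step 1: look up the handle; hashbucket lock held on return */
		old_pampd = tmem_localify_get_pampd(pool, oidp, index,
						    &obj, &saved_hb);
		/* ... fill new_pampd from the data received off the wire ... */
		/* step 2: replace the pampd and drop the hashbucket lock */
		tmem_localify_finish(obj, index, new_pampd, saved_hb, false);
		(void)old_pampd;
	}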
+/*
+ * For ramster only. Helper function to support asynchronous tmem_get.
+ */
static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
struct tmem_pool *pool, struct tmem_oid *oidp,
uint32_t index, bool free, char *data)
@@ -633,7 +670,6 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
bool intransit = false;
int ret = 0;
-
if (!is_ephemeral(pool))
new_pampd = (*tmem_pamops.repatriate_preload)(
old_pampd, pool, oidp, index, &intransit);
@@ -646,60 +682,91 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
if (!intransit)
ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
oidp, index, free, data);
+ if (ret == -EAGAIN) {
+ /* rare I think, but should cond_resched()??? */
+ usleep_range(10, 1000);
+ } else if (ret == -ENOTCONN || ret == -EHOSTDOWN) {
+ ret = -1;
+ } else if (ret != 0 && ret != -ENOENT) {
+ ret = -1;
+ }
+ /* note hb->lock has now been unlocked */
+ return ret;
+}
+
+/*
+ * For ramster only. If a page in tmem matches the handle, replace the
+ * page so that any subsequent "get" gets the new page. Returns 0 if
+ * there was a page to replace, else returns -1.
+ */
+int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
+ uint32_t index, void *new_pampd)
+{
+ struct tmem_obj *obj;
+ int ret = -1;
+ struct tmem_hashbucket *hb;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
+ /* if we bug here, pamops wasn't properly set up for ramster */
+ BUG_ON(tmem_pamops.replace_in_obj == NULL);
+ ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
+out:
+ spin_unlock(&hb->lock);
return ret;
}
+#endif
/*
- * "Get" a page, e.g. if one can be found, copy the tmem page with the
- * matching handle from PAM space to the kernel. By tmem definition,
- * when a "get" is successful on an ephemeral page, the page is "flushed",
- * and when a "get" is successful on a persistent page, the page is retained
- * in tmem. Note that to preserve
+ * "Get" a page, e.g. if a pampd can be found matching the passed handle,
+ * use a pamops callback to recreated the page from the pampd with the
+ * matching handle. By tmem definition, when a "get" is successful on
+ * an ephemeral page, the page is "flushed", and when a "get" is successful
+ * on a persistent page, the page is retained in tmem. Note that to preserve
* coherency, "get" can never be skipped if tmem contains the data.
* That is, if a get is done with a certain handle and fails, any
* subsequent "get" must also fail (unless of course there is a
* "put" done with the same handle).
-
*/
int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
- char *data, size_t *size, bool raw, int get_and_free)
+ char *data, size_t *sizep, bool raw, int get_and_free)
{
struct tmem_obj *obj;
- void *pampd;
+ void *pampd = NULL;
bool ephemeral = is_ephemeral(pool);
int ret = -1;
struct tmem_hashbucket *hb;
bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
- bool lock_held = 0;
+ bool lock_held = false;
void **ppampd;
-again:
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- lock_held = 1;
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- ppampd = __tmem_pampd_lookup_in_obj(obj, index);
- if (ppampd == NULL)
- goto out;
- if (tmem_pamops.is_remote(*ppampd)) {
- ret = tmem_repatriate(ppampd, hb, pool, oidp,
- index, free, data);
- lock_held = 0; /* note hb->lock has been unlocked */
- if (ret == -EAGAIN) {
- /* rare I think, but should cond_resched()??? */
- usleep_range(10, 1000);
- goto again;
- } else if (ret != 0) {
- if (ret != -ENOENT)
- pr_err("UNTESTED case in tmem_get, ret=%d\n",
- ret);
- ret = -1;
+ do {
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ lock_held = true;
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ ppampd = __tmem_pampd_lookup_in_obj(obj, index);
+ if (ppampd == NULL)
goto out;
+#ifdef CONFIG_RAMSTER
+ if ((tmem_pamops.is_remote != NULL) &&
+ tmem_pamops.is_remote(*ppampd)) {
+ ret = tmem_repatriate(ppampd, hb, pool, oidp,
+ index, free, data);
+ /* tmem_repatriate releases hb->lock */
+ lock_held = false;
+ *sizep = PAGE_SIZE;
+ if (ret != -EAGAIN)
+ goto out;
}
- goto out;
- }
+#endif
+ } while (ret == -EAGAIN);
if (free)
pampd = tmem_pampd_delete_from_obj(obj, index);
else
@@ -715,10 +782,10 @@ again:
}
if (free)
ret = (*tmem_pamops.get_data_and_free)(
- data, size, raw, pampd, pool, oidp, index);
+ data, sizep, raw, pampd, pool, oidp, index);
else
ret = (*tmem_pamops.get_data)(
- data, size, raw, pampd, pool, oidp, index);
+ data, sizep, raw, pampd, pool, oidp, index);
if (ret < 0)
goto out;
ret = 0;
@@ -762,30 +829,6 @@ out:
}
/*
- * If a page in tmem matches the handle, replace the page so that any
- * subsequent "get" gets the new page. Returns the new page if
- * there was a page to replace, else returns NULL.
- */
-int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
- uint32_t index, void *new_pampd)
-{
- struct tmem_obj *obj;
- int ret = -1;
- struct tmem_hashbucket *hb;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-
-/*
* "Flush" all pages in tmem matching this oid.
*/
int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
@@ -799,7 +842,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
obj = tmem_obj_find(hb, oidp);
if (obj == NULL)
goto out;
- tmem_pampd_destroy_all_in_obj(obj);
+ tmem_pampd_destroy_all_in_obj(obj, false);
tmem_obj_free(obj, hb);
(*tmem_hostops.obj_free)(obj, pool);
ret = 0;
diff --git a/drivers/staging/ramster/tmem.h b/drivers/staging/ramster/tmem.h
index 47f1918c8314..adbe5a8f28aa 100644
--- a/drivers/staging/ramster/tmem.h
+++ b/drivers/staging/ramster/tmem.h
@@ -3,23 +3,20 @@
*
* Transcendent memory
*
- * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
+ * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
*/
#ifndef _TMEM_H_
#define _TMEM_H_
+#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/hash.h>
#include <linux/atomic.h>
/*
- * These are pre-defined by the Xen<->Linux ABI
+ * These are defined by the Xen<->Linux ABI so should remain consistent
*/
-#define TMEM_PUT_PAGE 4
-#define TMEM_GET_PAGE 5
-#define TMEM_FLUSH_PAGE 6
-#define TMEM_FLUSH_OBJECT 7
#define TMEM_POOL_PERSIST 1
#define TMEM_POOL_SHARED 2
#define TMEM_POOL_PRECOMPRESSED 4
@@ -31,7 +28,7 @@
* sentinels have proven very useful for debugging but can be removed
* or disabled before final merge.
*/
-#define SENTINELS
+#undef SENTINELS
#ifdef SENTINELS
#define DECL_SENTINEL uint32_t sentinel;
#define SET_SENTINEL(_x, _y) (_x->sentinel = _y##_SENTINEL)
@@ -46,7 +43,7 @@
#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
#endif
-#define ASSERT_SPINLOCK(_l) WARN_ON(!spin_is_locked(_l))
+#define ASSERT_SPINLOCK(_l) lockdep_assert_held(_l)
/*
* A pool is the highest-level data structure managed by tmem and
@@ -88,31 +85,6 @@ struct tmem_oid {
uint64_t oid[3];
};
-struct tmem_xhandle {
- uint8_t client_id;
- uint8_t xh_data_cksum;
- uint16_t xh_data_size;
- uint16_t pool_id;
- struct tmem_oid oid;
- uint32_t index;
- void *extra;
-};
-
-static inline struct tmem_xhandle tmem_xhandle_fill(uint16_t client_id,
- struct tmem_pool *pool,
- struct tmem_oid *oidp,
- uint32_t index)
-{
- struct tmem_xhandle xh;
- xh.client_id = client_id;
- xh.xh_data_cksum = (uint8_t)-1;
- xh.xh_data_size = (uint16_t)-1;
- xh.pool_id = pool->pool_id;
- xh.oid = *oidp;
- xh.index = index;
- return xh;
-}
-
static inline void tmem_oid_set_invalid(struct tmem_oid *oidp)
{
oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
@@ -154,6 +126,34 @@ static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
TMEM_HASH_BUCKET_BITS);
}
+#ifdef CONFIG_RAMSTER
+struct tmem_xhandle {
+ uint8_t client_id;
+ uint8_t xh_data_cksum;
+ uint16_t xh_data_size;
+ uint16_t pool_id;
+ struct tmem_oid oid;
+ uint32_t index;
+ void *extra;
+};
+
+static inline struct tmem_xhandle tmem_xhandle_fill(uint16_t client_id,
+ struct tmem_pool *pool,
+ struct tmem_oid *oidp,
+ uint32_t index)
+{
+ struct tmem_xhandle xh;
+ xh.client_id = client_id;
+ xh.xh_data_cksum = (uint8_t)-1;
+ xh.xh_data_size = (uint16_t)-1;
+ xh.pool_id = pool->pool_id;
+ xh.oid = *oidp;
+ xh.index = index;
+ return xh;
+}
+#endif
+
/*
* A tmem_obj contains an identifier (oid), pointers to the parent
* pool and the rb_tree to which it belongs, counters, and an ordered
@@ -171,11 +171,15 @@ struct tmem_obj {
unsigned int objnode_tree_height;
unsigned long objnode_count;
long pampd_count;
- /* for current design of ramster, all pages belonging to
+#ifdef CONFIG_RAMSTER
+ /*
+ * for current design of ramster, all pages belonging to
* an object reside on the same remotenode and extra is
* used to record the number of the remotenode so a
- * flush-object operation can specify it */
- void *extra; /* for use by pampd implementation */
+ * flush-object operation can specify it
+ */
+ void *extra; /* for private use by pampd implementation */
+#endif
DECL_SENTINEL
};
@@ -193,10 +197,17 @@ struct tmem_objnode {
unsigned int slots_in_use;
};
+struct tmem_handle {
+ struct tmem_oid oid; /* 24 bytes */
+ uint32_t index;
+ uint16_t pool_id;
+ uint16_t client_id;
+};
+
/* pampd abstract datatype methods provided by the PAM implementation */
struct tmem_pamops {
- void *(*create)(char *, size_t, bool, int,
- struct tmem_pool *, struct tmem_oid *, uint32_t);
+ void (*create_finish)(void *, bool);
int (*get_data)(char *, size_t *, bool, void *, struct tmem_pool *,
struct tmem_oid *, uint32_t);
int (*get_data_and_free)(char *, size_t *, bool, void *,
@@ -204,14 +215,16 @@ struct tmem_pamops {
uint32_t);
void (*free)(void *, struct tmem_pool *,
struct tmem_oid *, uint32_t, bool);
- void (*free_obj)(struct tmem_pool *, struct tmem_obj *);
- bool (*is_remote)(void *);
+#ifdef CONFIG_RAMSTER
+ void (*new_obj)(struct tmem_obj *);
+ void (*free_obj)(struct tmem_pool *, struct tmem_obj *, bool);
void *(*repatriate_preload)(void *, struct tmem_pool *,
struct tmem_oid *, uint32_t, bool *);
int (*repatriate)(void *, void *, struct tmem_pool *,
struct tmem_oid *, uint32_t, bool, void *);
- void (*new_obj)(struct tmem_obj *);
+ bool (*is_remote)(void *);
int (*replace_in_obj)(void *, struct tmem_obj *);
+#endif
};
extern void tmem_register_pamops(struct tmem_pamops *m);
@@ -226,9 +239,15 @@ extern void tmem_register_hostops(struct tmem_hostops *m);
/* core tmem accessor functions */
extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- char *, size_t, bool, int);
+ bool, void *);
extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
char *, size_t *, bool, int);
+extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
+ uint32_t index);
+extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
+extern int tmem_destroy_pool(struct tmem_pool *);
+extern void tmem_new_pool(struct tmem_pool *, uint32_t);
+#ifdef CONFIG_RAMSTER
extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
void *);
extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
@@ -236,9 +255,5 @@ extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
void **);
extern void tmem_localify_finish(struct tmem_obj *, uint32_t index,
void *, void *, bool);
-extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
- uint32_t index);
-extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
-extern int tmem_destroy_pool(struct tmem_pool *);
-extern void tmem_new_pool(struct tmem_pool *, uint32_t);
+#endif
#endif /* _TMEM_H */
diff --git a/drivers/staging/ramster/xvmalloc.c b/drivers/staging/ramster/xvmalloc.c
deleted file mode 100644
index 44ceb0b823a9..000000000000
--- a/drivers/staging/ramster/xvmalloc.c
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * xvmalloc memory allocator
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- */
-
-#ifdef CONFIG_ZRAM_DEBUG
-#define DEBUG
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/highmem.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-
-#include "xvmalloc.h"
-#include "xvmalloc_int.h"
-
-static void stat_inc(u64 *value)
-{
- *value = *value + 1;
-}
-
-static void stat_dec(u64 *value)
-{
- *value = *value - 1;
-}
-
-static int test_flag(struct block_header *block, enum blockflags flag)
-{
- return block->prev & BIT(flag);
-}
-
-static void set_flag(struct block_header *block, enum blockflags flag)
-{
- block->prev |= BIT(flag);
-}
-
-static void clear_flag(struct block_header *block, enum blockflags flag)
-{
- block->prev &= ~BIT(flag);
-}
-
-/*
- * Given <page, offset> pair, provide a dereferencable pointer.
- * This is called from xv_malloc/xv_free path, so it
- * needs to be fast.
- */
-static void *get_ptr_atomic(struct page *page, u16 offset)
-{
- unsigned char *base;
-
- base = kmap_atomic(page);
- return base + offset;
-}
-
-static void put_ptr_atomic(void *ptr)
-{
- kunmap_atomic(ptr);
-}
-
-static u32 get_blockprev(struct block_header *block)
-{
- return block->prev & PREV_MASK;
-}
-
-static void set_blockprev(struct block_header *block, u16 new_offset)
-{
- block->prev = new_offset | (block->prev & FLAGS_MASK);
-}
-
-static struct block_header *BLOCK_NEXT(struct block_header *block)
-{
- return (struct block_header *)
- ((char *)block + block->size + XV_ALIGN);
-}
-
-/*
- * Get index of free list containing blocks of maximum size
- * which is less than or equal to given size.
- */
-static u32 get_index_for_insert(u32 size)
-{
- if (unlikely(size > XV_MAX_ALLOC_SIZE))
- size = XV_MAX_ALLOC_SIZE;
- size &= ~FL_DELTA_MASK;
- return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
-}
-
-/*
- * Get index of free list having blocks of size greater than
- * or equal to requested size.
- */
-static u32 get_index(u32 size)
-{
- if (unlikely(size < XV_MIN_ALLOC_SIZE))
- size = XV_MIN_ALLOC_SIZE;
- size = ALIGN(size, FL_DELTA);
- return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
-}
-
-/**
- * find_block - find block of at least given size
- * @pool: memory pool to search from
- * @size: size of block required
- * @page: page containing required block
- * @offset: offset within the page where block is located.
- *
- * Searches two level bitmap to locate block of at least
- * the given size. If such a block is found, it provides
- * <page, offset> to identify this block and returns index
- * in freelist where we found this block.
- * Otherwise, returns 0 and <page, offset> params are not touched.
- */
-static u32 find_block(struct xv_pool *pool, u32 size,
- struct page **page, u32 *offset)
-{
- ulong flbitmap, slbitmap;
- u32 flindex, slindex, slbitstart;
-
- /* There are no free blocks in this pool */
- if (!pool->flbitmap)
- return 0;
-
- /* Get freelist index corresponding to this size */
- slindex = get_index(size);
- slbitmap = pool->slbitmap[slindex / BITS_PER_LONG];
- slbitstart = slindex % BITS_PER_LONG;
-
- /*
- * If freelist is not empty at this index, we found the
- * block - head of this list. This is approximate best-fit match.
- */
- if (test_bit(slbitstart, &slbitmap)) {
- *page = pool->freelist[slindex].page;
- *offset = pool->freelist[slindex].offset;
- return slindex;
- }
-
- /*
- * No best-fit found. Search a bit further in bitmap for a free block.
- * Second level bitmap consists of series of 32-bit chunks. Search
- * further in the chunk where we expected a best-fit, starting from
- * index location found above.
- */
- slbitstart++;
- slbitmap >>= slbitstart;
-
- /* Skip this search if we were already at end of this bitmap chunk */
- if ((slbitstart != BITS_PER_LONG) && slbitmap) {
- slindex += __ffs(slbitmap) + 1;
- *page = pool->freelist[slindex].page;
- *offset = pool->freelist[slindex].offset;
- return slindex;
- }
-
- /* Now do a full two-level bitmap search to find next nearest fit */
- flindex = slindex / BITS_PER_LONG;
-
- flbitmap = (pool->flbitmap) >> (flindex + 1);
- if (!flbitmap)
- return 0;
-
- flindex += __ffs(flbitmap) + 1;
- slbitmap = pool->slbitmap[flindex];
- slindex = (flindex * BITS_PER_LONG) + __ffs(slbitmap);
- *page = pool->freelist[slindex].page;
- *offset = pool->freelist[slindex].offset;
-
- return slindex;
-}
-
-/*
- * Insert block at <page, offset> in freelist of given pool.
- * freelist used depends on block size.
- */
-static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
- struct block_header *block)
-{
- u32 flindex, slindex;
- struct block_header *nextblock;
-
- slindex = get_index_for_insert(block->size);
- flindex = slindex / BITS_PER_LONG;
-
- block->link.prev_page = NULL;
- block->link.prev_offset = 0;
- block->link.next_page = pool->freelist[slindex].page;
- block->link.next_offset = pool->freelist[slindex].offset;
- pool->freelist[slindex].page = page;
- pool->freelist[slindex].offset = offset;
-
- if (block->link.next_page) {
- nextblock = get_ptr_atomic(block->link.next_page,
- block->link.next_offset);
- nextblock->link.prev_page = page;
- nextblock->link.prev_offset = offset;
- put_ptr_atomic(nextblock);
- /* If there was a next page then the free bits are set. */
- return;
- }
-
- __set_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
- __set_bit(flindex, &pool->flbitmap);
-}
-
-/*
- * Remove block from freelist. Index 'slindex' identifies the freelist.
- */
-static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
- struct block_header *block, u32 slindex)
-{
- u32 flindex = slindex / BITS_PER_LONG;
- struct block_header *tmpblock;
-
- if (block->link.prev_page) {
- tmpblock = get_ptr_atomic(block->link.prev_page,
- block->link.prev_offset);
- tmpblock->link.next_page = block->link.next_page;
- tmpblock->link.next_offset = block->link.next_offset;
- put_ptr_atomic(tmpblock);
- }
-
- if (block->link.next_page) {
- tmpblock = get_ptr_atomic(block->link.next_page,
- block->link.next_offset);
- tmpblock->link.prev_page = block->link.prev_page;
- tmpblock->link.prev_offset = block->link.prev_offset;
- put_ptr_atomic(tmpblock);
- }
-
- /* Is this block is at the head of the freelist? */
- if (pool->freelist[slindex].page == page
- && pool->freelist[slindex].offset == offset) {
-
- pool->freelist[slindex].page = block->link.next_page;
- pool->freelist[slindex].offset = block->link.next_offset;
-
- if (pool->freelist[slindex].page) {
- struct block_header *tmpblock;
- tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
- pool->freelist[slindex].offset);
- tmpblock->link.prev_page = NULL;
- tmpblock->link.prev_offset = 0;
- put_ptr_atomic(tmpblock);
- } else {
- /* This freelist bucket is empty */
- __clear_bit(slindex % BITS_PER_LONG,
- &pool->slbitmap[flindex]);
- if (!pool->slbitmap[flindex])
- __clear_bit(flindex, &pool->flbitmap);
- }
- }
-
- block->link.prev_page = NULL;
- block->link.prev_offset = 0;
- block->link.next_page = NULL;
- block->link.next_offset = 0;
-}
-
-/*
- * Allocate a page and add it to freelist of given pool.
- */
-static int grow_pool(struct xv_pool *pool, gfp_t flags)
-{
- struct page *page;
- struct block_header *block;
-
- page = alloc_page(flags);
- if (unlikely(!page))
- return -ENOMEM;
-
- stat_inc(&pool->total_pages);
-
- spin_lock(&pool->lock);
- block = get_ptr_atomic(page, 0);
-
- block->size = PAGE_SIZE - XV_ALIGN;
- set_flag(block, BLOCK_FREE);
- clear_flag(block, PREV_FREE);
- set_blockprev(block, 0);
-
- insert_block(pool, page, 0, block);
-
- put_ptr_atomic(block);
- spin_unlock(&pool->lock);
-
- return 0;
-}
-
-/*
- * Create a memory pool. Allocates freelist, bitmaps and other
- * per-pool metadata.
- */
-struct xv_pool *xv_create_pool(void)
-{
- u32 ovhd_size;
- struct xv_pool *pool;
-
- ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
- pool = kzalloc(ovhd_size, GFP_KERNEL);
- if (!pool)
- return NULL;
-
- spin_lock_init(&pool->lock);
-
- return pool;
-}
-EXPORT_SYMBOL_GPL(xv_create_pool);
-
-void xv_destroy_pool(struct xv_pool *pool)
-{
- kfree(pool);
-}
-EXPORT_SYMBOL_GPL(xv_destroy_pool);
-
-/**
- * xv_malloc - Allocate block of given size from pool.
- * @pool: pool to allocate from
- * @size: size of block to allocate
- * @page: page no. that holds the object
- * @offset: location of object within page
- *
- * On success, <page, offset> identifies block allocated
- * and 0 is returned. On failure, <page, offset> is set to
- * 0 and -ENOMEM is returned.
- *
- * Allocation requests with size > XV_MAX_ALLOC_SIZE will fail.
- */
-int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
- u32 *offset, gfp_t flags)
-{
- int error;
- u32 index, tmpsize, origsize, tmpoffset;
- struct block_header *block, *tmpblock;
-
- *page = NULL;
- *offset = 0;
- origsize = size;
-
- if (unlikely(!size || size > XV_MAX_ALLOC_SIZE))
- return -ENOMEM;
-
- size = ALIGN(size, XV_ALIGN);
-
- spin_lock(&pool->lock);
-
- index = find_block(pool, size, page, offset);
-
- if (!*page) {
- spin_unlock(&pool->lock);
- if (flags & GFP_NOWAIT)
- return -ENOMEM;
- error = grow_pool(pool, flags);
- if (unlikely(error))
- return error;
-
- spin_lock(&pool->lock);
- index = find_block(pool, size, page, offset);
- }
-
- if (!*page) {
- spin_unlock(&pool->lock);
- return -ENOMEM;
- }
-
- block = get_ptr_atomic(*page, *offset);
-
- remove_block(pool, *page, *offset, block, index);
-
- /* Split the block if required */
- tmpoffset = *offset + size + XV_ALIGN;
- tmpsize = block->size - size;
- tmpblock = (struct block_header *)((char *)block + size + XV_ALIGN);
- if (tmpsize) {
- tmpblock->size = tmpsize - XV_ALIGN;
- set_flag(tmpblock, BLOCK_FREE);
- clear_flag(tmpblock, PREV_FREE);
-
- set_blockprev(tmpblock, *offset);
- if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
- insert_block(pool, *page, tmpoffset, tmpblock);
-
- if (tmpoffset + XV_ALIGN + tmpblock->size != PAGE_SIZE) {
- tmpblock = BLOCK_NEXT(tmpblock);
- set_blockprev(tmpblock, tmpoffset);
- }
- } else {
- /* This block is exact fit */
- if (tmpoffset != PAGE_SIZE)
- clear_flag(tmpblock, PREV_FREE);
- }
-
- block->size = origsize;
- clear_flag(block, BLOCK_FREE);
-
- put_ptr_atomic(block);
- spin_unlock(&pool->lock);
-
- *offset += XV_ALIGN;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(xv_malloc);
-
-/*
- * Free block identified with <page, offset>
- */
-void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
-{
- void *page_start;
- struct block_header *block, *tmpblock;
-
- offset -= XV_ALIGN;
-
- spin_lock(&pool->lock);
-
- page_start = get_ptr_atomic(page, 0);
- block = (struct block_header *)((char *)page_start + offset);
-
- /* Catch double free bugs */
- BUG_ON(test_flag(block, BLOCK_FREE));
-
- block->size = ALIGN(block->size, XV_ALIGN);
-
- tmpblock = BLOCK_NEXT(block);
- if (offset + block->size + XV_ALIGN == PAGE_SIZE)
- tmpblock = NULL;
-
- /* Merge next block if its free */
- if (tmpblock && test_flag(tmpblock, BLOCK_FREE)) {
- /*
- * Blocks smaller than XV_MIN_ALLOC_SIZE
- * are not inserted in any free list.
- */
- if (tmpblock->size >= XV_MIN_ALLOC_SIZE) {
- remove_block(pool, page,
- offset + block->size + XV_ALIGN, tmpblock,
- get_index_for_insert(tmpblock->size));
- }
- block->size += tmpblock->size + XV_ALIGN;
- }
-
- /* Merge previous block if its free */
- if (test_flag(block, PREV_FREE)) {
- tmpblock = (struct block_header *)((char *)(page_start) +
- get_blockprev(block));
- offset = offset - tmpblock->size - XV_ALIGN;
-
- if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
- remove_block(pool, page, offset, tmpblock,
- get_index_for_insert(tmpblock->size));
-
- tmpblock->size += block->size + XV_ALIGN;
- block = tmpblock;
- }
-
- /* No used objects in this page. Free it. */
- if (block->size == PAGE_SIZE - XV_ALIGN) {
- put_ptr_atomic(page_start);
- spin_unlock(&pool->lock);
-
- __free_page(page);
- stat_dec(&pool->total_pages);
- return;
- }
-
- set_flag(block, BLOCK_FREE);
- if (block->size >= XV_MIN_ALLOC_SIZE)
- insert_block(pool, page, offset, block);
-
- if (offset + block->size + XV_ALIGN != PAGE_SIZE) {
- tmpblock = BLOCK_NEXT(block);
- set_flag(tmpblock, PREV_FREE);
- set_blockprev(tmpblock, offset);
- }
-
- put_ptr_atomic(page_start);
- spin_unlock(&pool->lock);
-}
-EXPORT_SYMBOL_GPL(xv_free);
-
-u32 xv_get_object_size(void *obj)
-{
- struct block_header *blk;
-
- blk = (struct block_header *)((char *)(obj) - XV_ALIGN);
- return blk->size;
-}
-EXPORT_SYMBOL_GPL(xv_get_object_size);
-
-/*
- * Returns total memory used by allocator (userdata + metadata)
- */
-u64 xv_get_total_size_bytes(struct xv_pool *pool)
-{
- return pool->total_pages << PAGE_SHIFT;
-}
-EXPORT_SYMBOL_GPL(xv_get_total_size_bytes);
diff --git a/drivers/staging/ramster/xvmalloc.h b/drivers/staging/ramster/xvmalloc.h
deleted file mode 100644
index 5b1a81aa5faf..000000000000
--- a/drivers/staging/ramster/xvmalloc.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * xvmalloc memory allocator
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- */
-
-#ifndef _XV_MALLOC_H_
-#define _XV_MALLOC_H_
-
-#include <linux/types.h>
-
-struct xv_pool;
-
-struct xv_pool *xv_create_pool(void);
-void xv_destroy_pool(struct xv_pool *pool);
-
-int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
- u32 *offset, gfp_t flags);
-void xv_free(struct xv_pool *pool, struct page *page, u32 offset);
-
-u32 xv_get_object_size(void *obj);
-u64 xv_get_total_size_bytes(struct xv_pool *pool);
-
-#endif
diff --git a/drivers/staging/ramster/xvmalloc_int.h b/drivers/staging/ramster/xvmalloc_int.h
deleted file mode 100644
index b5f1f7febcf6..000000000000
--- a/drivers/staging/ramster/xvmalloc_int.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * xvmalloc memory allocator
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- */
-
-#ifndef _XV_MALLOC_INT_H_
-#define _XV_MALLOC_INT_H_
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-/* User configurable params */
-
-/* Must be power of two */
-#ifdef CONFIG_64BIT
-#define XV_ALIGN_SHIFT 3
-#else
-#define XV_ALIGN_SHIFT 2
-#endif
-#define XV_ALIGN (1 << XV_ALIGN_SHIFT)
-#define XV_ALIGN_MASK (XV_ALIGN - 1)
-
-/* This must be greater than sizeof(link_free) */
-#define XV_MIN_ALLOC_SIZE 32
-#define XV_MAX_ALLOC_SIZE (PAGE_SIZE - XV_ALIGN)
-
-/*
- * Free lists are separated by FL_DELTA bytes
- * This value is 3 for 4k pages and 4 for 64k pages, for any
- * other page size, a conservative (PAGE_SHIFT - 9) is used.
- */
-#if PAGE_SHIFT == 16
-#define FL_DELTA_SHIFT 4
-#else
-#define FL_DELTA_SHIFT (PAGE_SHIFT - 9)
-#endif
-#define FL_DELTA (1 << FL_DELTA_SHIFT)
-#define FL_DELTA_MASK (FL_DELTA - 1)
-#define NUM_FREE_LISTS ((XV_MAX_ALLOC_SIZE - XV_MIN_ALLOC_SIZE) \
- / FL_DELTA + 1)
-
-#define MAX_FLI DIV_ROUND_UP(NUM_FREE_LISTS, BITS_PER_LONG)
-
-/* End of user params */
-
-enum blockflags {
- BLOCK_FREE,
- PREV_FREE,
- __NR_BLOCKFLAGS,
-};
-
-#define FLAGS_MASK XV_ALIGN_MASK
-#define PREV_MASK (~FLAGS_MASK)
-
-struct freelist_entry {
- struct page *page;
- u16 offset;
- u16 pad;
-};
-
-struct link_free {
- struct page *prev_page;
- struct page *next_page;
- u16 prev_offset;
- u16 next_offset;
-};
-
-struct block_header {
- union {
- /* This common header must be XV_ALIGN bytes */
- u8 common[XV_ALIGN];
- struct {
- u16 size;
- u16 prev;
- };
- };
- struct link_free link;
-};
-
-struct xv_pool {
- ulong flbitmap;
- ulong slbitmap[MAX_FLI];
- u64 total_pages; /* stats */
- struct freelist_entry freelist[NUM_FREE_LISTS];
- spinlock_t lock;
-};
-
-#endif
diff --git a/drivers/staging/ramster/zbud.c b/drivers/staging/ramster/zbud.c
new file mode 100644
index 000000000000..a7c436127aa1
--- /dev/null
+++ b/drivers/staging/ramster/zbud.c
@@ -0,0 +1,1060 @@
+/*
+ * zbud.c - Compression buddies allocator
+ *
+ * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
+ *
+ * Compression buddies ("zbud") provides for efficiently packing two
+ * (or, possibly in the future, more) compressed pages ("zpages") into
+ * a single "raw" pageframe and for tracking both zpages and pageframes
+ * so that whole pageframes can be easily reclaimed in LRU-like order.
+ * It is designed to be used in conjunction with transcendent memory
+ * ("tmem"); for example separate LRU lists are maintained for persistent
+ * vs. ephemeral pages.
+ *
+ * A zbudpage is an overlay for a struct page and thus each zbudpage
+ * refers to a physical pageframe of RAM. When the caller passes a
+ * struct page from the kernel's page allocator, zbud "transforms" it
+ * to a zbudpage which sets/uses a different set of fields than the
+ * struct-page and thus must "untransform" it back by reinitializing
+ * certain fields before the struct-page can be freed. The fields
+ * of a zbudpage include a page lock for controlling access to the
+ * corresponding pageframe, and there is a size field for each zpage.
+ * Each zbudpage also lives on two linked lists: a "budlist" which is
+ * used to support efficient buddying of zpages; and an "lru" which
+ * is used for reclaiming pageframes in approximately least-recently-used
+ * order.
+ *
+ * A zbudpageframe is a pageframe divided up into aligned 64-byte "chunks"
+ * which contain the compressed data for zero, one, or two zbuds. Contained
+ * with the compressed data is a tmem_handle which is a key to allow
+ * the same data to be found via the tmem interface so the zpage can
+ * be invalidated (for ephemeral pages) or repatriated to the swap cache
+ * (for persistent pages). The contents of a zbudpageframe must never
+ * be accessed without holding the page lock for the corresponding
+ * zbudpage and, to accommodate highmem machines, the contents may
+ * only be examined or changed when kmapped.  Thus, when in use, a
+ * kmapped zbudpageframe is referred to in the zbud code as "void *zbpg".
+ *
+ * Note that the term "zbud" refers to the combination of a zpage and
+ * a tmem_handle that is stored as one of possibly two "buddied" zpages;
+ * it also generically refers to this allocator... sorry for any confusion.
+ *
+ * A zbudref is a pointer to a struct zbudpage (which can be cast to a
+ * struct page), with the LSB either cleared or set to indicate, respectively,
+ * the first or second zpage in the zbudpageframe. Since a zbudref can be
+ * cast to a pointer, it is used as the tmem "pampd" pointer and uniquely
+ * references a stored tmem page and so is the only zbud data structure
+ * externally visible to zbud.c/zbud.h.
+ *
+ * Since we wish to reclaim entire pageframes but zpages may be randomly
+ * added and deleted to any given pageframe, we approximate LRU by
+ * promoting a pageframe to MRU when a zpage is added to it, but
+ * leaving it at the current place in the list when a zpage is deleted
+ * from it. As a side effect, zpages that are difficult to buddy (e.g.
+ * very large pages) will be reclaimed faster than average, which seems
+ * reasonable.
+ *
+ * In the current implementation, no more than two zpages may be stored in
+ * any pageframe and no zpage ever crosses a pageframe boundary. While
+ * other zpage allocation mechanisms may allow greater density, this two
+ * zpage-per-pageframe limit both ensures simple reclaim of pageframes
+ * (including garbage collection of references to the contents of those
+ * pageframes from tmem data structures) AND avoids the need for compaction.
+ * With additional complexity, zbud could be modified to support storing
+ * up to three zpages per pageframe or, to handle larger average zpages,
+ * up to three zpages per pair of pageframes, but it is not clear if the
+ * additional complexity would be worth it. So consider it an exercise
+ * for future developers.
+ *
+ * Note also that zbud does no page allocation or freeing. This is so
+ * that the caller has complete control over and, for accounting, visibility
+ * into if/when pages are allocated and freed.
+ *
+ * Finally, note that zbud limits the size of zpages it can store; the
+ * caller must check the zpage size with zbud_max_buddy_size before
+ * storing it, else BUGs will result. User beware.
+ */
+
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/pagemap.h>
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include "tmem.h"
+#include "zcache.h"
+#include "zbud.h"
+
+/*
+ * We need to ensure that a struct zbudpage is never larger than a
+ * struct page. This is checked with a BUG_ON in zbud_init.
+ *
+ * The unevictable field indicates that a zbud is being added to the
+ * zbudpage. Since this is a two-phase process (due to tmem locking),
+ * this field locks the zbudpage against eviction when a zbud match
+ * or creation is in process. Since this addition process may occur
+ * in parallel for two zbuds in one zbudpage, the field is a counter
+ * that must not exceed two.
+ */
+struct zbudpage {
+ union {
+ struct page page;
+ struct {
+ unsigned long space_for_flags;
+ struct {
+ unsigned zbud0_size:12;
+ unsigned zbud1_size:12;
+ unsigned unevictable:2;
+ };
+ struct list_head budlist;
+ struct list_head lru;
+ };
+ };
+};
+
+struct zbudref {
+ union {
+ struct zbudpage *zbudpage;
+ unsigned long zbudref;
+ };
+};
+
+#define CHUNK_SHIFT 6
+#define CHUNK_SIZE (1 << CHUNK_SHIFT)
+#define CHUNK_MASK (~(CHUNK_SIZE-1))
+#define NCHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
+#define MAX_CHUNK (NCHUNKS-1)
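+
+/*
+ * Worked numbers (illustrative, assuming a 4KiB PAGE_SIZE): chunks are
+ * 64 bytes, NCHUNKS is 64 and MAX_CHUNK is 63, so zbud_max_size() below
+ * is 63 * 64 = 4032 bytes; a stored zpage may use at most that, less
+ * sizeof(struct tmem_handle) for the prepended handle.
+ */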
+
+/*
+ * The following functions deal with the difference between struct
+ * page and struct zbudpage. Note the hack of using the pageflags
+ * from struct page; this is to avoid duplicating all the complex
+ * pageflag macros.
+ */
+static inline void zbudpage_spin_lock(struct zbudpage *zbudpage)
+{
+ struct page *page = (struct page *)zbudpage;
+
+ while (unlikely(test_and_set_bit_lock(PG_locked, &page->flags))) {
+ do {
+ cpu_relax();
+ } while (test_bit(PG_locked, &page->flags));
+ }
+}
+
+static inline void zbudpage_spin_unlock(struct zbudpage *zbudpage)
+{
+ struct page *page = (struct page *)zbudpage;
+
+ clear_bit(PG_locked, &page->flags);
+}
+
+static inline int zbudpage_spin_trylock(struct zbudpage *zbudpage)
+{
+ return trylock_page((struct page *)zbudpage);
+}
+
+static inline int zbudpage_is_locked(struct zbudpage *zbudpage)
+{
+ return PageLocked((struct page *)zbudpage);
+}
+
+static inline void *kmap_zbudpage_atomic(struct zbudpage *zbudpage)
+{
+ return kmap_atomic((struct page *)zbudpage);
+}
+
+/*
+ * A dying zbudpage is an ephemeral page in the process of being evicted.
+ * Any data contained in the zbudpage is invalid and we are just waiting for
+ * the tmem pampds to be invalidated before freeing the page
+ */
+static inline int zbudpage_is_dying(struct zbudpage *zbudpage)
+{
+ struct page *page = (struct page *)zbudpage;
+
+ return test_bit(PG_reclaim, &page->flags);
+}
+
+static inline void zbudpage_set_dying(struct zbudpage *zbudpage)
+{
+ struct page *page = (struct page *)zbudpage;
+
+ set_bit(PG_reclaim, &page->flags);
+}
+
+static inline void zbudpage_clear_dying(struct zbudpage *zbudpage)
+{
+ struct page *page = (struct page *)zbudpage;
+
+ clear_bit(PG_reclaim, &page->flags);
+}
+
+/*
+ * A zombie zbudpage is a persistent page in the process of being evicted.
+ * The data contained in the zbudpage is valid and we are just waiting for
+ * the tmem pampds to be invalidated before freeing the page.
+ */
+static inline int zbudpage_is_zombie(struct zbudpage *zbudpage)
+{
+ struct page *page = (struct page *)zbudpage;
+
+ return test_bit(PG_dirty, &page->flags);
+}
+
+static inline void zbudpage_set_zombie(struct zbudpage *zbudpage)
+{
+ struct page *page = (struct page *)zbudpage;
+
+ set_bit(PG_dirty, &page->flags);
+}
+
+static inline void zbudpage_clear_zombie(struct zbudpage *zbudpage)
+{
+ struct page *page = (struct page *)zbudpage;
+
+ clear_bit(PG_dirty, &page->flags);
+}
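+
+/*
+ * Summary of the struct page flag bits zbud borrows (see the helpers
+ * above): PG_locked serves as the per-zbudpage spinlock, PG_reclaim
+ * marks a "dying" (ephemeral, data now invalid) zbudpage, and PG_dirty
+ * marks a "zombie" (persistent, data still valid) zbudpage.
+ */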
+
+static inline void kunmap_zbudpage_atomic(void *zbpg)
+{
+ kunmap_atomic(zbpg);
+}
+
+/*
+ * zbud "translation" and helper functions
+ */
+
+static inline struct zbudpage *zbudref_to_zbudpage(struct zbudref *zref)
+{
+ unsigned long zbud = (unsigned long)zref;
+ zbud &= ~1UL;
+ return (struct zbudpage *)zbud;
+}
+
+static inline struct zbudref *zbudpage_to_zbudref(struct zbudpage *zbudpage,
+ unsigned budnum)
+{
+ unsigned long zbud = (unsigned long)zbudpage;
+ BUG_ON(budnum > 1);
+ zbud |= budnum;
+ return (struct zbudref *)zbud;
+}
+
+static inline int zbudref_budnum(struct zbudref *zbudref)
+{
+ unsigned long zbud = (unsigned long)zbudref;
+ return zbud & 1UL;
+}
+
+static inline unsigned zbud_max_size(void)
+{
+ return MAX_CHUNK << CHUNK_SHIFT;
+}
+
+static inline unsigned zbud_size_to_chunks(unsigned size)
+{
+ BUG_ON(size == 0 || size > zbud_max_size());
+ return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
+}
+
+/* can only be used between kmap_zbudpage_atomic/kunmap_zbudpage_atomic! */
+static inline char *zbud_data(void *zbpg,
+ unsigned budnum, unsigned size)
+{
+ char *p;
+
+ BUG_ON(size == 0 || size > zbud_max_size());
+ p = (char *)zbpg;
+ if (budnum == 1)
+ p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
+ return p;
+}
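+
+/*
+ * Layout sketch (illustrative): zbud 0 always begins at offset 0 of the
+ * kmapped pageframe, while zbud 1 is placed so that it ends exactly at
+ * PAGE_SIZE; e.g. a 700-byte zbud 1 is rounded up to 704 bytes (a whole
+ * number of chunks) and so begins at PAGE_SIZE - 704. Any unused chunks
+ * sit in the middle of the pageframe.
+ */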
+
+/*
+ * These are all informative and exposed through debugfs... except for
+ * the arrays... anyone know how to do that? To avoid confusion for
+ * debugfs viewers, some of these should also be atomic_long_t, but
+ * I don't know how to expose atomics via debugfs either...
+ */
+static unsigned long zbud_eph_pageframes;
+static unsigned long zbud_pers_pageframes;
+static unsigned long zbud_eph_zpages;
+static unsigned long zbud_pers_zpages;
+static u64 zbud_eph_zbytes;
+static u64 zbud_pers_zbytes;
+static unsigned long zbud_eph_evicted_pageframes;
+static unsigned long zbud_pers_evicted_pageframes;
+static unsigned long zbud_eph_cumul_zpages;
+static unsigned long zbud_pers_cumul_zpages;
+static u64 zbud_eph_cumul_zbytes;
+static u64 zbud_pers_cumul_zbytes;
+static unsigned long zbud_eph_cumul_chunk_counts[NCHUNKS];
+static unsigned long zbud_pers_cumul_chunk_counts[NCHUNKS];
+static unsigned long zbud_eph_buddied_count;
+static unsigned long zbud_pers_buddied_count;
+static unsigned long zbud_eph_unbuddied_count;
+static unsigned long zbud_pers_unbuddied_count;
+static unsigned long zbud_eph_zombie_count;
+static unsigned long zbud_pers_zombie_count;
+static atomic_t zbud_eph_zombie_atomic;
+static atomic_t zbud_pers_zombie_atomic;
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#define zdfs debugfs_create_size_t
+#define zdfs64 debugfs_create_u64
+static int zbud_debugfs_init(void)
+{
+ struct dentry *root = debugfs_create_dir("zbud", NULL);
+ if (root == NULL)
+ return -ENXIO;
+
+ /*
+ * would be nice to dump the sizes of the unbuddied
+ * arrays, like was done with sysfs, but it doesn't
+ * look like debugfs is flexible enough to do that
+ */
+ zdfs64("eph_zbytes", S_IRUGO, root, &zbud_eph_zbytes);
+ zdfs64("eph_cumul_zbytes", S_IRUGO, root, &zbud_eph_cumul_zbytes);
+ zdfs64("pers_zbytes", S_IRUGO, root, &zbud_pers_zbytes);
+ zdfs64("pers_cumul_zbytes", S_IRUGO, root, &zbud_pers_cumul_zbytes);
+ zdfs("eph_cumul_zpages", S_IRUGO, root, &zbud_eph_cumul_zpages);
+ zdfs("eph_evicted_pageframes", S_IRUGO, root,
+ &zbud_eph_evicted_pageframes);
+ zdfs("eph_zpages", S_IRUGO, root, &zbud_eph_zpages);
+ zdfs("eph_pageframes", S_IRUGO, root, &zbud_eph_pageframes);
+ zdfs("eph_buddied_count", S_IRUGO, root, &zbud_eph_buddied_count);
+ zdfs("eph_unbuddied_count", S_IRUGO, root, &zbud_eph_unbuddied_count);
+ zdfs("pers_cumul_zpages", S_IRUGO, root, &zbud_pers_cumul_zpages);
+ zdfs("pers_evicted_pageframes", S_IRUGO, root,
+ &zbud_pers_evicted_pageframes);
+ zdfs("pers_zpages", S_IRUGO, root, &zbud_pers_zpages);
+ zdfs("pers_pageframes", S_IRUGO, root, &zbud_pers_pageframes);
+ zdfs("pers_buddied_count", S_IRUGO, root, &zbud_pers_buddied_count);
+ zdfs("pers_unbuddied_count", S_IRUGO, root, &zbud_pers_unbuddied_count);
+ zdfs("pers_zombie_count", S_IRUGO, root, &zbud_pers_zombie_count);
+ return 0;
+}
+#undef zdfs
+#undef zdfs64
+#endif
+
+/* protects the buddied list and all unbuddied lists */
+static DEFINE_SPINLOCK(zbud_eph_lists_lock);
+static DEFINE_SPINLOCK(zbud_pers_lists_lock);
+
+struct zbud_unbuddied {
+ struct list_head list;
+ unsigned count;
+};
+
+/* list N contains pages with N chunks USED and NCHUNKS-N unused */
+/* element 0 is never used but optimizing that isn't worth it */
+static struct zbud_unbuddied zbud_eph_unbuddied[NCHUNKS];
+static struct zbud_unbuddied zbud_pers_unbuddied[NCHUNKS];
+static LIST_HEAD(zbud_eph_lru_list);
+static LIST_HEAD(zbud_pers_lru_list);
+static LIST_HEAD(zbud_eph_buddied_list);
+static LIST_HEAD(zbud_pers_buddied_list);
+static LIST_HEAD(zbud_eph_zombie_list);
+static LIST_HEAD(zbud_pers_zombie_list);
+
+/*
+ * Given a struct page, transform it to a zbudpage so that it can be
+ * used by zbud and initialize fields as necessary.
+ */
+static inline struct zbudpage *zbud_init_zbudpage(struct page *page, bool eph)
+{
+ struct zbudpage *zbudpage = (struct zbudpage *)page;
+
+ BUG_ON(page == NULL);
+ INIT_LIST_HEAD(&zbudpage->budlist);
+ INIT_LIST_HEAD(&zbudpage->lru);
+ zbudpage->zbud0_size = 0;
+ zbudpage->zbud1_size = 0;
+ zbudpage->unevictable = 0;
+ if (eph)
+ zbud_eph_pageframes++;
+ else
+ zbud_pers_pageframes++;
+ return zbudpage;
+}
+
+/* "Transform" a zbudpage back to a struct page suitable to free. */
+static inline struct page *zbud_unuse_zbudpage(struct zbudpage *zbudpage,
+ bool eph)
+{
+ struct page *page = (struct page *)zbudpage;
+
+ BUG_ON(!list_empty(&zbudpage->budlist));
+ BUG_ON(!list_empty(&zbudpage->lru));
+ BUG_ON(zbudpage->zbud0_size != 0);
+ BUG_ON(zbudpage->zbud1_size != 0);
+ BUG_ON(!PageLocked(page));
+ BUG_ON(zbudpage->unevictable != 0);
+ BUG_ON(zbudpage_is_dying(zbudpage));
+ BUG_ON(zbudpage_is_zombie(zbudpage));
+ if (eph)
+ zbud_eph_pageframes--;
+ else
+ zbud_pers_pageframes--;
+ zbudpage_spin_unlock(zbudpage);
+ reset_page_mapcount(page);
+ init_page_count(page);
+ page->index = 0;
+ return page;
+}
+
+/* Mark a zbud as unused and do accounting */
+static inline void zbud_unuse_zbud(struct zbudpage *zbudpage,
+ int budnum, bool eph)
+{
+ unsigned size;
+
+ BUG_ON(!zbudpage_is_locked(zbudpage));
+ if (budnum == 0) {
+ size = zbudpage->zbud0_size;
+ zbudpage->zbud0_size = 0;
+ } else {
+ size = zbudpage->zbud1_size;
+ zbudpage->zbud1_size = 0;
+ }
+ if (eph) {
+ zbud_eph_zbytes -= size;
+ zbud_eph_zpages--;
+ } else {
+ zbud_pers_zbytes -= size;
+ zbud_pers_zpages--;
+ }
+}
+
+/*
+ * Given a zbudpage/budnum/size, a tmem handle, and a kmapped pointer
+ * to some data, set up the zbud appropriately including data copying
+ * and accounting. Note that if cdata is NULL, the data copying is
+ * skipped. (This is useful for lazy writes such as for RAMster.)
+ */
+static void zbud_init_zbud(struct zbudpage *zbudpage, struct tmem_handle *th,
+ bool eph, void *cdata,
+ unsigned budnum, unsigned size)
+{
+ char *to;
+ void *zbpg;
+ struct tmem_handle *to_th;
+ unsigned nchunks = zbud_size_to_chunks(size);
+
+ BUG_ON(!zbudpage_is_locked(zbudpage));
+ zbpg = kmap_zbudpage_atomic(zbudpage);
+ to = zbud_data(zbpg, budnum, size);
+ to_th = (struct tmem_handle *)to;
+ to_th->index = th->index;
+ to_th->oid = th->oid;
+ to_th->pool_id = th->pool_id;
+ to_th->client_id = th->client_id;
+ to += sizeof(struct tmem_handle);
+ if (cdata != NULL)
+ memcpy(to, cdata, size - sizeof(struct tmem_handle));
+ kunmap_zbudpage_atomic(zbpg);
+ if (budnum == 0)
+ zbudpage->zbud0_size = size;
+ else
+ zbudpage->zbud1_size = size;
+ if (eph) {
+ zbud_eph_cumul_chunk_counts[nchunks]++;
+ zbud_eph_zpages++;
+ zbud_eph_cumul_zpages++;
+ zbud_eph_zbytes += size;
+ zbud_eph_cumul_zbytes += size;
+ } else {
+ zbud_pers_cumul_chunk_counts[nchunks]++;
+ zbud_pers_zpages++;
+ zbud_pers_cumul_zpages++;
+ zbud_pers_zbytes += size;
+ zbud_pers_cumul_zbytes += size;
+ }
+}
+
+/*
+ * Given a locked dying zbudpage, read out the tmem handles from the data,
+ * unlock the page, then use the handles to tell tmem to flush out its
+ * references
+ */
+static void zbud_evict_tmem(struct zbudpage *zbudpage)
+{
+ int i, j;
+ uint32_t pool_id[2], client_id[2];
+ uint32_t index[2];
+ struct tmem_oid oid[2];
+ struct tmem_pool *pool;
+ void *zbpg;
+ struct tmem_handle *th;
+ unsigned size;
+
+ /* read out the tmem handles from the data and set aside */
+ zbpg = kmap_zbudpage_atomic(zbudpage);
+ for (i = 0, j = 0; i < 2; i++) {
+ size = (i == 0) ? zbudpage->zbud0_size : zbudpage->zbud1_size;
+ if (size) {
+ th = (struct tmem_handle *)zbud_data(zbpg, i, size);
+ client_id[j] = th->client_id;
+ pool_id[j] = th->pool_id;
+ oid[j] = th->oid;
+ index[j] = th->index;
+ j++;
+ zbud_unuse_zbud(zbudpage, i, true);
+ }
+ }
+ kunmap_zbudpage_atomic(zbpg);
+ zbudpage_spin_unlock(zbudpage);
+ /* zbudpage is now an unlocked dying... tell tmem to flush pointers */
+ for (i = 0; i < j; i++) {
+ pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
+ if (pool != NULL) {
+ tmem_flush_page(pool, &oid[i], index[i]);
+ zcache_put_pool(pool);
+ }
+ }
+}
+
+/*
+ * Externally callable zbud handling routines.
+ */
+
+/*
+ * Return the maximum size of compressed page that can be stored (secretly
+ * setting aside space for the tmem handle).
+ */
+unsigned int zbud_max_buddy_size(void)
+{
+ return zbud_max_size() - sizeof(struct tmem_handle);
+}
+
+/*
+ * Given a zbud reference, free the corresponding zbud from all lists,
+ * mark it as unused, do accounting, and if the freeing of the zbud
+ * frees up an entire pageframe, return it to the caller (else NULL).
+ */
+struct page *zbud_free_and_delist(struct zbudref *zref, bool eph,
+ unsigned int *zsize, unsigned int *zpages)
+{
+ unsigned long budnum = zbudref_budnum(zref);
+ struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
+ struct page *page = NULL;
+ unsigned chunks, bud_size, other_bud_size;
+ spinlock_t *lists_lock =
+ eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
+ struct zbud_unbuddied *unbud =
+ eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;
+
+ spin_lock(lists_lock);
+ zbudpage_spin_lock(zbudpage);
+ if (zbudpage_is_dying(zbudpage)) {
+ /* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
+ zbudpage_spin_unlock(zbudpage);
+ spin_unlock(lists_lock);
+ *zpages = 0;
+ *zsize = 0;
+ goto out;
+ }
+ if (budnum == 0) {
+ bud_size = zbudpage->zbud0_size;
+ other_bud_size = zbudpage->zbud1_size;
+ } else {
+ bud_size = zbudpage->zbud1_size;
+ other_bud_size = zbudpage->zbud0_size;
+ }
+ *zsize = bud_size - sizeof(struct tmem_handle);
+ *zpages = 1;
+ zbud_unuse_zbud(zbudpage, budnum, eph);
+ if (other_bud_size == 0) { /* was unbuddied: unlist and free */
+ chunks = zbud_size_to_chunks(bud_size);
+ if (zbudpage_is_zombie(zbudpage)) {
+ if (eph)
+ zbud_eph_zombie_count =
+ atomic_dec_return(&zbud_eph_zombie_atomic);
+ else
+ zbud_pers_zombie_count =
+ atomic_dec_return(&zbud_pers_zombie_atomic);
+ zbudpage_clear_zombie(zbudpage);
+ } else {
+ BUG_ON(list_empty(&unbud[chunks].list));
+ list_del_init(&zbudpage->budlist);
+ unbud[chunks].count--;
+ }
+ list_del_init(&zbudpage->lru);
+ spin_unlock(lists_lock);
+ if (eph)
+ zbud_eph_unbuddied_count--;
+ else
+ zbud_pers_unbuddied_count--;
+ page = zbud_unuse_zbudpage(zbudpage, eph);
+ } else { /* was buddied: move remaining buddy to unbuddied list */
+ chunks = zbud_size_to_chunks(other_bud_size);
+ if (!zbudpage_is_zombie(zbudpage)) {
+ list_del_init(&zbudpage->budlist);
+ list_add_tail(&zbudpage->budlist, &unbud[chunks].list);
+ unbud[chunks].count++;
+ }
+ if (eph) {
+ zbud_eph_buddied_count--;
+ zbud_eph_unbuddied_count++;
+ } else {
+ zbud_pers_unbuddied_count++;
+ zbud_pers_buddied_count--;
+ }
+ /* don't mess with lru, no need to move it */
+ zbudpage_spin_unlock(zbudpage);
+ spin_unlock(lists_lock);
+ }
+out:
+ return page;
+}
+
+/*
+ * Given a tmem handle, and a kmapped pointer to compressed data of
+ * the given size, try to find an unbuddied zbudpage in which to
+ * create a zbud. If found, put it there, mark the zbudpage unevictable,
+ * and return a zbudref to it. Else return NULL.
+ */
+struct zbudref *zbud_match_prep(struct tmem_handle *th, bool eph,
+ void *cdata, unsigned size)
+{
+ struct zbudpage *zbudpage = NULL, *zbudpage2;
+ unsigned long budnum = 0UL;
+ unsigned nchunks;
+ int i, found_good_buddy = 0;
+ spinlock_t *lists_lock =
+ eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
+ struct zbud_unbuddied *unbud =
+ eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;
+
+ size += sizeof(struct tmem_handle);
+ nchunks = zbud_size_to_chunks(size);
+ for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
+ spin_lock(lists_lock);
+ if (!list_empty(&unbud[i].list)) {
+ list_for_each_entry_safe(zbudpage, zbudpage2,
+ &unbud[i].list, budlist) {
+ if (zbudpage_spin_trylock(zbudpage)) {
+ found_good_buddy = i;
+ goto found_unbuddied;
+ }
+ }
+ }
+ spin_unlock(lists_lock);
+ }
+ zbudpage = NULL;
+ goto out;
+
+found_unbuddied:
+ BUG_ON(!zbudpage_is_locked(zbudpage));
+ BUG_ON(!((zbudpage->zbud0_size == 0) ^ (zbudpage->zbud1_size == 0)));
+ if (zbudpage->zbud0_size == 0)
+ budnum = 0UL;
+ else if (zbudpage->zbud1_size == 0)
+ budnum = 1UL;
+ list_del_init(&zbudpage->budlist);
+ if (eph) {
+ list_add_tail(&zbudpage->budlist, &zbud_eph_buddied_list);
+ unbud[found_good_buddy].count--;
+ zbud_eph_unbuddied_count--;
+ zbud_eph_buddied_count++;
+ /* "promote" raw zbudpage to most-recently-used */
+ list_del_init(&zbudpage->lru);
+ list_add_tail(&zbudpage->lru, &zbud_eph_lru_list);
+ } else {
+ list_add_tail(&zbudpage->budlist, &zbud_pers_buddied_list);
+ unbud[found_good_buddy].count--;
+ zbud_pers_unbuddied_count--;
+ zbud_pers_buddied_count++;
+ /* "promote" raw zbudpage to most-recently-used */
+ list_del_init(&zbudpage->lru);
+ list_add_tail(&zbudpage->lru, &zbud_pers_lru_list);
+ }
+ zbud_init_zbud(zbudpage, th, eph, cdata, budnum, size);
+ zbudpage->unevictable++;
+ BUG_ON(zbudpage->unevictable == 3);
+ zbudpage_spin_unlock(zbudpage);
+ spin_unlock(lists_lock);
+out:
+ return zbudpage_to_zbudref(zbudpage, budnum);
+}
+
+/*
+ * Given a tmem handle, and a kmapped pointer to compressed data of
+ * the given size, and a newly allocated struct page, create an unevictable
+ * zbud in that new page and return a zbudref to it.
+ */
+struct zbudref *zbud_create_prep(struct tmem_handle *th, bool eph,
+ void *cdata, unsigned size,
+ struct page *newpage)
+{
+ struct zbudpage *zbudpage;
+ unsigned long budnum = 0;
+ unsigned nchunks;
+ spinlock_t *lists_lock =
+ eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
+ struct zbud_unbuddied *unbud =
+ eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;
+
+#if 0
+ /* this may be worth it later to support decompress-in-place? */
+ static unsigned long counter;
+ budnum = counter++ & 1; /* alternate using zbud0 and zbud1 */
+#endif
+
+ if (size > zbud_max_buddy_size())
+ return NULL;
+ if (newpage == NULL)
+ return NULL;
+
+ size += sizeof(struct tmem_handle);
+ nchunks = zbud_size_to_chunks(size);
+ spin_lock(lists_lock);
+ zbudpage = zbud_init_zbudpage(newpage, eph);
+ zbudpage_spin_lock(zbudpage);
+ list_add_tail(&zbudpage->budlist, &unbud[nchunks].list);
+ if (eph) {
+ list_add_tail(&zbudpage->lru, &zbud_eph_lru_list);
+ zbud_eph_unbuddied_count++;
+ } else {
+ list_add_tail(&zbudpage->lru, &zbud_pers_lru_list);
+ zbud_pers_unbuddied_count++;
+ }
+ unbud[nchunks].count++;
+ zbud_init_zbud(zbudpage, th, eph, cdata, budnum, size);
+ zbudpage->unevictable++;
+ BUG_ON(zbudpage->unevictable == 3);
+ zbudpage_spin_unlock(zbudpage);
+ spin_unlock(lists_lock);
+ return zbudpage_to_zbudref(zbudpage, budnum);
+}
+
+/*
+ * Finish creation of a zbud by decrementing the unevictable count; the
+ * zbudpage becomes evictable again once no zbud creation is still in
+ * progress on it.
+ */
+void zbud_create_finish(struct zbudref *zref, bool eph)
+{
+ struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
+ spinlock_t *lists_lock =
+ eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
+
+ spin_lock(lists_lock);
+ zbudpage_spin_lock(zbudpage);
+ BUG_ON(zbudpage_is_dying(zbudpage));
+ zbudpage->unevictable--;
+ BUG_ON((int)zbudpage->unevictable < 0);
+ zbudpage_spin_unlock(zbudpage);
+ spin_unlock(lists_lock);
+}
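+
+/*
+ * Illustrative caller flow (a sketch, not part of the driver): how a
+ * zcache-style put path might use the prep/finish pair above.  Here
+ * "th", "cdata" and "clen" are assumed to be already set up by the
+ * caller, and zcache_alloc_page() is a hypothetical stand-in for
+ * however the caller obtains a free pageframe, since zbud itself never
+ * allocates or frees pages.
+ */
+#if 0
+static struct zbudref *example_store_zpage(struct tmem_handle *th, bool eph,
+                                           void *cdata, unsigned clen)
+{
+        struct zbudref *zref;
+        struct page *newpage;
+
+        if (clen > zbud_max_buddy_size())
+                return NULL;            /* too large for zbud to store */
+        zref = zbud_match_prep(th, eph, cdata, clen);
+        if (zref == NULL) {             /* no suitable unbuddied pageframe */
+                newpage = zcache_alloc_page();  /* hypothetical */
+                zref = zbud_create_prep(th, eph, cdata, clen, newpage);
+                if (zref == NULL)
+                        return NULL;    /* caller still owns newpage */
+        }
+        zbud_create_finish(zref, eph);  /* allow eviction again */
+        return zref;
+}
+#endif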
+
+/*
+ * Given a zbudref and a struct page, decompress the data from
+ * the zbud into the physical page represented by the struct page
+ * by upcalling to zcache_decompress
+ */
+int zbud_decompress(struct page *data_page, struct zbudref *zref, bool eph,
+ void (*decompress)(char *, unsigned int, char *))
+{
+ struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
+ unsigned long budnum = zbudref_budnum(zref);
+ void *zbpg;
+ char *to_va, *from_va;
+ unsigned size;
+ int ret = -1;
+ spinlock_t *lists_lock =
+ eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
+
+ spin_lock(lists_lock);
+ zbudpage_spin_lock(zbudpage);
+ if (zbudpage_is_dying(zbudpage)) {
+ /* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
+ goto out;
+ }
+ zbpg = kmap_zbudpage_atomic(zbudpage);
+ to_va = kmap_atomic(data_page);
+ if (budnum == 0)
+ size = zbudpage->zbud0_size;
+ else
+ size = zbudpage->zbud1_size;
+ BUG_ON(size == 0 || size > zbud_max_size());
+ from_va = zbud_data(zbpg, budnum, size);
+ from_va += sizeof(struct tmem_handle);
+ size -= sizeof(struct tmem_handle);
+ decompress(from_va, size, to_va);
+ kunmap_atomic(to_va);
+ kunmap_zbudpage_atomic(zbpg);
+ ret = 0;
+out:
+ zbudpage_spin_unlock(zbudpage);
+ spin_unlock(lists_lock);
+ return ret;
+}
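+
+/*
+ * Illustrative only: the decompress callback passed in above is expected
+ * to expand "from" (from_len bytes) into a full PAGE_SIZE buffer at "to".
+ * A hypothetical lzo1x-based callback (assuming <linux/lzo.h>) might be:
+ */
+#if 0
+static void example_decompress(char *from, unsigned int from_len, char *to)
+{
+        size_t out_len = PAGE_SIZE;
+        int ret;
+
+        ret = lzo1x_decompress_safe((unsigned char *)from, from_len,
+                                    (unsigned char *)to, &out_len);
+        BUG_ON(ret != LZO_E_OK || out_len != PAGE_SIZE);
+}
+#endif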
+
+/*
+ * Given a zbudref and a kernel pointer, copy the data from
+ * the zbud to the kernel pointer.
+ */
+int zbud_copy_from_zbud(char *to_va, struct zbudref *zref,
+ size_t *sizep, bool eph)
+{
+ struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
+ unsigned long budnum = zbudref_budnum(zref);
+ void *zbpg;
+ char *from_va;
+ unsigned size;
+ int ret = -1;
+ spinlock_t *lists_lock =
+ eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
+
+ spin_lock(lists_lock);
+ zbudpage_spin_lock(zbudpage);
+ if (zbudpage_is_dying(zbudpage)) {
+ /* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
+ goto out;
+ }
+ zbpg = kmap_zbudpage_atomic(zbudpage);
+ if (budnum == 0)
+ size = zbudpage->zbud0_size;
+ else
+ size = zbudpage->zbud1_size;
+ BUG_ON(size == 0 || size > zbud_max_size());
+ from_va = zbud_data(zbpg, budnum, size);
+ from_va += sizeof(struct tmem_handle);
+ size -= sizeof(struct tmem_handle);
+ *sizep = size;
+ memcpy(to_va, from_va, size);
+
+ kunmap_zbudpage_atomic(zbpg);
+ ret = 0;
+out:
+ zbudpage_spin_unlock(zbudpage);
+ spin_unlock(lists_lock);
+ return ret;
+}
+
+/*
+ * Given a zbudref and a kernel pointer, copy the data from
+ * the kernel pointer to the zbud.
+ */
+int zbud_copy_to_zbud(struct zbudref *zref, char *from_va, bool eph)
+{
+ struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
+ unsigned long budnum = zbudref_budnum(zref);
+ void *zbpg;
+ char *to_va;
+ unsigned size;
+ int ret = -1;
+ spinlock_t *lists_lock =
+ eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
+
+ spin_lock(lists_lock);
+ zbudpage_spin_lock(zbudpage);
+ if (zbudpage_is_dying(zbudpage)) {
+ /* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
+ goto out;
+ }
+ zbpg = kmap_zbudpage_atomic(zbudpage);
+ if (budnum == 0)
+ size = zbudpage->zbud0_size;
+ else
+ size = zbudpage->zbud1_size;
+ BUG_ON(size == 0 || size > zbud_max_size());
+ to_va = zbud_data(zbpg, budnum, size);
+ to_va += sizeof(struct tmem_handle);
+ size -= sizeof(struct tmem_handle);
+ memcpy(to_va, from_va, size);
+
+ kunmap_zbudpage_atomic(zbpg);
+ ret = 0;
+out:
+ zbudpage_spin_unlock(zbudpage);
+ spin_unlock(lists_lock);
+ return ret;
+}
+
+/*
+ * Choose an ephemeral LRU zbudpage that is evictable (not locked), ensure
+ * there are no references to it remaining, and return the now unused
+ * (and re-init'ed) struct page and the total amount of compressed
+ * data that was evicted.
+ */
+struct page *zbud_evict_pageframe_lru(unsigned int *zsize, unsigned int *zpages)
+{
+ struct zbudpage *zbudpage = NULL, *zbudpage2;
+ struct zbud_unbuddied *unbud = zbud_eph_unbuddied;
+ struct page *page = NULL;
+ bool irqs_disabled = irqs_disabled();
+
+ /*
+ * Since this can be called indirectly from cleancache_put, which
+ * has interrupts disabled, as well as frontswap_put, which does not,
+ * we need to be able to handle both cases, even though it is ugly.
+ */
+ if (irqs_disabled)
+ spin_lock(&zbud_eph_lists_lock);
+ else
+ spin_lock_bh(&zbud_eph_lists_lock);
+ *zsize = 0;
+ if (list_empty(&zbud_eph_lru_list))
+ goto unlock_out;
+ list_for_each_entry_safe(zbudpage, zbudpage2, &zbud_eph_lru_list, lru) {
+ /* skip a locked zbudpage */
+ if (unlikely(!zbudpage_spin_trylock(zbudpage)))
+ continue;
+ /* skip an unevictable zbudpage */
+ if (unlikely(zbudpage->unevictable != 0)) {
+ zbudpage_spin_unlock(zbudpage);
+ continue;
+ }
+ /* got a locked evictable page */
+ goto evict_page;
+ }
+unlock_out:
+ /* no unlocked evictable pages, give up */
+ if (irqs_disabled)
+ spin_unlock(&zbud_eph_lists_lock);
+ else
+ spin_unlock_bh(&zbud_eph_lists_lock);
+ goto out;
+
+evict_page:
+ list_del_init(&zbudpage->budlist);
+ list_del_init(&zbudpage->lru);
+ zbudpage_set_dying(zbudpage);
+ /*
+ * the zbudpage is now "dying" and attempts to read, write,
+ * or delete data from it will be ignored
+ */
+ if (zbudpage->zbud0_size != 0 && zbudpage->zbud1_size != 0) {
+ *zsize = zbudpage->zbud0_size + zbudpage->zbud1_size -
+ (2 * sizeof(struct tmem_handle));
+ *zpages = 2;
+ } else if (zbudpage->zbud0_size != 0) {
+ unbud[zbud_size_to_chunks(zbudpage->zbud0_size)].count--;
+ *zsize = zbudpage->zbud0_size - sizeof(struct tmem_handle);
+ *zpages = 1;
+ } else if (zbudpage->zbud1_size != 0) {
+ unbud[zbud_size_to_chunks(zbudpage->zbud1_size)].count--;
+ *zsize = zbudpage->zbud1_size - sizeof(struct tmem_handle);
+ *zpages = 1;
+ } else {
+ BUG();
+ }
+ spin_unlock(&zbud_eph_lists_lock);
+ zbud_eph_evicted_pageframes++;
+ if (*zpages == 1)
+ zbud_eph_unbuddied_count--;
+ else
+ zbud_eph_buddied_count--;
+ zbud_evict_tmem(zbudpage);
+ zbudpage_spin_lock(zbudpage);
+ zbudpage_clear_dying(zbudpage);
+ page = zbud_unuse_zbudpage(zbudpage, true);
+ if (!irqs_disabled)
+ local_bh_enable();
+out:
+ return page;
+}
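+
+/*
+ * Illustrative only (a sketch, not part of the driver): a shrinker-style
+ * caller reclaiming up to "nr" ephemeral pageframes.  Since zbud never
+ * frees pages itself, the returned struct page is handed back to whatever
+ * pool it came from; zcache_free_page() is a hypothetical stand-in.
+ */
+#if 0
+static void example_evict_eph_pageframes(int nr)
+{
+        struct page *page;
+        unsigned int zsize, zpages;
+
+        while (nr-- > 0) {
+                page = zbud_evict_pageframe_lru(&zsize, &zpages);
+                if (page == NULL)
+                        break;          /* nothing evictable right now */
+                zcache_free_page(page); /* hypothetical */
+        }
+}
+#endif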
+
+/*
+ * Choose a persistent LRU zbudpage that is evictable (not locked), zombify it,
+ * read the tmem_handle(s) out of it into the passed array, and return the
+ * number of zbuds. Caller must perform necessary tmem functions and,
+ * indirectly, zbud functions to fetch any valid data and cause the
+ * now-zombified zbudpage to eventually be freed. We track the zombified
+ * zbudpage count so it is possible to observe if there is a leak.
+ * FIXME: describe (ramster) case where data pointers are passed in for memcpy
+ */
+unsigned int zbud_make_zombie_lru(struct tmem_handle *th, unsigned char **data,
+ unsigned int *zsize, bool eph)
+{
+ struct zbudpage *zbudpage = NULL, *zbudpag2;
+ struct tmem_handle *thfrom;
+ char *from_va;
+ void *zbpg;
+ unsigned size;
+ int ret = 0, i;
+ spinlock_t *lists_lock =
+ eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
+ struct list_head *lru_list =
+ eph ? &zbud_eph_lru_list : &zbud_pers_lru_list;
+
+ spin_lock_bh(lists_lock);
+ if (list_empty(lru_list))
+ goto out;
+ list_for_each_entry_safe(zbudpage, zbudpag2, lru_list, lru) {
+ /* skip a locked zbudpage */
+ if (unlikely(!zbudpage_spin_trylock(zbudpage)))
+ continue;
+ /* skip an unevictable zbudpage */
+ if (unlikely(zbudpage->unevictable != 0)) {
+ zbudpage_spin_unlock(zbudpage);
+ continue;
+ }
+ /* got a locked evictable page */
+ goto zombify_page;
+ }
+ /* no unlocked evictable pages, give up */
+ goto out;
+
+zombify_page:
+ /* got an unlocked evictable page, zombify it */
+ list_del_init(&zbudpage->budlist);
+ zbudpage_set_zombie(zbudpage);
+ /* FIXME what accounting do I need to do here? */
+ list_del_init(&zbudpage->lru);
+ if (eph) {
+ list_add_tail(&zbudpage->lru, &zbud_eph_zombie_list);
+ zbud_eph_zombie_count =
+ atomic_inc_return(&zbud_eph_zombie_atomic);
+ } else {
+ list_add_tail(&zbudpage->lru, &zbud_pers_zombie_list);
+ zbud_pers_zombie_count =
+ atomic_inc_return(&zbud_pers_zombie_atomic);
+ }
+ /* FIXME what accounting do I need to do here? */
+ zbpg = kmap_zbudpage_atomic(zbudpage);
+ for (i = 0; i < 2; i++) {
+ size = (i == 0) ? zbudpage->zbud0_size : zbudpage->zbud1_size;
+ if (size) {
+ from_va = zbud_data(zbpg, i, size);
+ thfrom = (struct tmem_handle *)from_va;
+ from_va += sizeof(struct tmem_handle);
+ size -= sizeof(struct tmem_handle);
+ if (th != NULL)
+ th[ret] = *thfrom;
+ if (data != NULL)
+ memcpy(data[ret], from_va, size);
+ if (zsize != NULL)
+ *zsize++ = size;
+ ret++;
+ }
+ }
+ kunmap_zbudpage_atomic(zbpg);
+ zbudpage_spin_unlock(zbudpage);
+out:
+ spin_unlock_bh(lists_lock);
+ return ret;
+}
+
+void __init zbud_init(void)
+{
+ int i;
+
+#ifdef CONFIG_DEBUG_FS
+ zbud_debugfs_init();
+#endif
+ BUG_ON((sizeof(struct tmem_handle) * 2 > CHUNK_SIZE));
+ BUG_ON(sizeof(struct zbudpage) > sizeof(struct page));
+ for (i = 0; i < NCHUNKS; i++) {
+ INIT_LIST_HEAD(&zbud_eph_unbuddied[i].list);
+ INIT_LIST_HEAD(&zbud_pers_unbuddied[i].list);
+ }
+}
diff --git a/drivers/staging/ramster/zbud.h b/drivers/staging/ramster/zbud.h
new file mode 100644
index 000000000000..891e8a7d5aa5
--- /dev/null
+++ b/drivers/staging/ramster/zbud.h
@@ -0,0 +1,33 @@
+/*
+ * zbud.h
+ *
+ * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
+ *
+ */
+
+#ifndef _ZBUD_H_
+#define _ZBUD_H_
+
+#include "tmem.h"
+
+struct zbudref;
+
+extern unsigned int zbud_max_buddy_size(void);
+extern struct zbudref *zbud_match_prep(struct tmem_handle *th, bool eph,
+ void *cdata, unsigned size);
+extern struct zbudref *zbud_create_prep(struct tmem_handle *th, bool eph,
+ void *cdata, unsigned size,
+ struct page *newpage);
+extern void zbud_create_finish(struct zbudref *, bool);
+extern int zbud_decompress(struct page *, struct zbudref *, bool,
+ void (*func)(char *, unsigned int, char *));
+extern int zbud_copy_from_zbud(char *, struct zbudref *, size_t *, bool);
+extern int zbud_copy_to_zbud(struct zbudref *, char *, bool);
+extern struct page *zbud_free_and_delist(struct zbudref *, bool eph,
+ unsigned int *, unsigned int *);
+extern struct page *zbud_evict_pageframe_lru(unsigned int *, unsigned int *);
+extern unsigned int zbud_make_zombie_lru(struct tmem_handle *, unsigned char **,
+ unsigned int *, bool);
+extern void zbud_init(void);
+
+#endif /* _ZBUD_H_ */
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
index d46764b5aaba..a09dd5cc1cea 100644
--- a/drivers/staging/ramster/zcache-main.c
+++ b/drivers/staging/ramster/zcache-main.c
@@ -5,1392 +5,322 @@
* Copyright (c) 2010,2011, Nitin Gupta
*
* Zcache provides an in-kernel "host implementation" for transcendent memory
- * and, thus indirectly, for cleancache and frontswap. Zcache includes two
- * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
- * 1) "compression buddies" ("zbud") is used for ephemeral pages
- * 2) xvmalloc is used for persistent pages.
- * Xvmalloc (based on the TLSF allocator) has very low fragmentation
- * so maximizes space efficiency, while zbud allows pairs (and potentially,
- * in the future, more than a pair of) compressed pages to be closely linked
- * so that reclaiming can be done via the kernel's physical-page-oriented
- * "shrinker" interface.
- *
- * [1] For a definition of page-accessible memory (aka PAM), see:
- * http://marc.info/?l=linux-mm&m=127811271605009
- * RAMSTER TODO:
- * - handle remotifying of buddied pages (see zbud_remotify_zbpg)
- * - kernel boot params: nocleancache/nofrontswap don't always work?!?
+ * ("tmem") and, thus indirectly, for cleancache and frontswap. Zcache uses
+ * compression (lzo1x by default, via the kernel crypto API) to improve
+ * density and an embedded allocator called "zbud" which "buddies" two
+ * compressed pages semi-optimally in each physical pageframe. Zbud is
+ * integrally tied into tmem to allow pageframes to be "reclaimed" efficiently.
*/
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/list.h>
-#include <linux/lzo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/math64.h>
-#include "tmem.h"
-#include "zcache.h"
-#include "ramster.h"
-#include "cluster/tcp.h"
+#include <linux/crypto.h>
-#include "xvmalloc.h" /* temporary until change to zsmalloc */
-
-#define RAMSTER_TESTING
-
-#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
-#error "ramster is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
-#endif
-#ifdef CONFIG_CLEANCACHE
#include <linux/cleancache.h>
-#endif
-#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>
-#endif
-
-enum ramster_remotify_op {
- RAMSTER_REMOTIFY_EPH_PUT,
- RAMSTER_REMOTIFY_PERS_PUT,
- RAMSTER_REMOTIFY_FLUSH_PAGE,
- RAMSTER_REMOTIFY_FLUSH_OBJ,
- RAMSTER_INTRANSIT_PERS
-};
-
-struct ramster_remotify_hdr {
- enum ramster_remotify_op op;
- struct list_head list;
-};
-
-#define ZBH_SENTINEL 0x43214321
-#define ZBPG_SENTINEL 0xdeadbeef
-
-#define ZBUD_MAX_BUDS 2
-
-struct zbud_hdr {
- struct ramster_remotify_hdr rem_op;
- uint16_t client_id;
- uint16_t pool_id;
- struct tmem_oid oid;
- uint32_t index;
- uint16_t size; /* compressed size in bytes, zero means unused */
- DECL_SENTINEL
-};
-
-#define ZVH_SENTINEL 0x43214321
-static const int zv_max_page_size = (PAGE_SIZE / 8) * 7;
-
-struct zv_hdr {
- struct ramster_remotify_hdr rem_op;
- uint16_t client_id;
- uint16_t pool_id;
- struct tmem_oid oid;
- uint32_t index;
- DECL_SENTINEL
-};
-
-struct flushlist_node {
- struct ramster_remotify_hdr rem_op;
- struct tmem_xhandle xh;
-};
-
-union {
- struct ramster_remotify_hdr rem_op;
- struct zv_hdr zv;
- struct zbud_hdr zbud;
- struct flushlist_node flist;
-} remotify_list_node;
-
-static LIST_HEAD(zcache_rem_op_list);
-static DEFINE_SPINLOCK(zcache_rem_op_list_lock);
-
-#if 0
-/* this is more aggressive but may cause other problems? */
-#define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
+#include "tmem.h"
+#include "zcache.h"
+#include "zbud.h"
+#include "ramster.h"
+#ifdef CONFIG_RAMSTER
+static int ramster_enabled;
#else
-#define ZCACHE_GFP_MASK \
- (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
+#define ramster_enabled 0
#endif
-#define MAX_POOLS_PER_CLIENT 16
-
-#define MAX_CLIENTS 16
-#define LOCAL_CLIENT ((uint16_t)-1)
-
-MODULE_LICENSE("GPL");
-
-struct zcache_client {
- struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
- struct xv_pool *xvpool;
- bool allocated;
- atomic_t refcount;
-};
-
-static struct zcache_client zcache_host;
-static struct zcache_client zcache_clients[MAX_CLIENTS];
-
-static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
+#ifndef __PG_WAS_ACTIVE
+static inline bool PageWasActive(struct page *page)
{
- BUG_ON(cli == NULL);
- if (cli == &zcache_host)
- return LOCAL_CLIENT;
- return cli - &zcache_clients[0];
+ return true;
}
-static inline bool is_local_client(struct zcache_client *cli)
+static inline void SetPageWasActive(struct page *page)
{
- return cli == &zcache_host;
-}
-
-/**********
- * Compression buddies ("zbud") provides for packing two (or, possibly
- * in the future, more) compressed ephemeral pages into a single "raw"
- * (physical) page and tracking them with data structures so that
- * the raw pages can be easily reclaimed.
- *
- * A zbud page ("zbpg") is an aligned page containing a list_head,
- * a lock, and two "zbud headers". The remainder of the physical
- * page is divided up into aligned 64-byte "chunks" which contain
- * the compressed data for zero, one, or two zbuds. Each zbpg
- * resides on: (1) an "unused list" if it has no zbuds; (2) a
- * "buddied" list if it is fully populated with two zbuds; or
- * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
- * the one unbuddied zbud uses. The data inside a zbpg cannot be
- * read or written unless the zbpg's lock is held.
- */
-
-struct zbud_page {
- struct list_head bud_list;
- spinlock_t lock;
- struct zbud_hdr buddy[ZBUD_MAX_BUDS];
- DECL_SENTINEL
- /* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */
-};
-
-#define CHUNK_SHIFT 6
-#define CHUNK_SIZE (1 << CHUNK_SHIFT)
-#define CHUNK_MASK (~(CHUNK_SIZE-1))
-#define NCHUNKS (((PAGE_SIZE - sizeof(struct zbud_page)) & \
- CHUNK_MASK) >> CHUNK_SHIFT)
-#define MAX_CHUNK (NCHUNKS-1)
-
-static struct {
- struct list_head list;
- unsigned count;
-} zbud_unbuddied[NCHUNKS];
-/* list N contains pages with N chunks USED and NCHUNKS-N unused */
-/* element 0 is never used but optimizing that isn't worth it */
-static unsigned long zbud_cumul_chunk_counts[NCHUNKS];
-
-struct list_head zbud_buddied_list;
-static unsigned long zcache_zbud_buddied_count;
-
-/* protects the buddied list and all unbuddied lists */
-static DEFINE_SPINLOCK(zbud_budlists_spinlock);
-
-static atomic_t zcache_zbud_curr_raw_pages;
-static atomic_t zcache_zbud_curr_zpages;
-static unsigned long zcache_zbud_curr_zbytes;
-static unsigned long zcache_zbud_cumul_zpages;
-static unsigned long zcache_zbud_cumul_zbytes;
-static unsigned long zcache_compress_poor;
-static unsigned long zcache_policy_percent_exceeded;
-static unsigned long zcache_mean_compress_poor;
-
-/*
- * RAMster counters
- * - Remote pages are pages with a local pampd but the data is remote
- * - Foreign pages are pages stored locally but belonging to another node
- */
-static atomic_t ramster_remote_pers_pages = ATOMIC_INIT(0);
-static unsigned long ramster_pers_remotify_enable;
-static unsigned long ramster_eph_remotify_enable;
-static unsigned long ramster_eph_pages_remoted;
-static unsigned long ramster_eph_pages_remote_failed;
-static unsigned long ramster_pers_pages_remoted;
-static unsigned long ramster_pers_pages_remote_failed;
-static unsigned long ramster_pers_pages_remote_nomem;
-static unsigned long ramster_remote_objects_flushed;
-static unsigned long ramster_remote_object_flushes_failed;
-static unsigned long ramster_remote_pages_flushed;
-static unsigned long ramster_remote_page_flushes_failed;
-static unsigned long ramster_remote_eph_pages_succ_get;
-static unsigned long ramster_remote_pers_pages_succ_get;
-static unsigned long ramster_remote_eph_pages_unsucc_get;
-static unsigned long ramster_remote_pers_pages_unsucc_get;
-static atomic_t ramster_curr_flnode_count = ATOMIC_INIT(0);
-static unsigned long ramster_curr_flnode_count_max;
-static atomic_t ramster_foreign_eph_pampd_count = ATOMIC_INIT(0);
-static unsigned long ramster_foreign_eph_pampd_count_max;
-static atomic_t ramster_foreign_pers_pampd_count = ATOMIC_INIT(0);
-static unsigned long ramster_foreign_pers_pampd_count_max;
-
-/* forward references */
-static void *zcache_get_free_page(void);
-static void zcache_free_page(void *p);
-
-/*
- * zbud helper functions
- */
-
-static inline unsigned zbud_max_buddy_size(void)
-{
- return MAX_CHUNK << CHUNK_SHIFT;
}
+#endif
-static inline unsigned zbud_size_to_chunks(unsigned size)
+#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
+static bool frontswap_has_exclusive_gets __read_mostly = true;
+#else
+static bool frontswap_has_exclusive_gets __read_mostly;
+static inline void frontswap_tmem_exclusive_gets(bool b)
{
- BUG_ON(size == 0 || size > zbud_max_buddy_size());
- return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
-}
-
-static inline int zbud_budnum(struct zbud_hdr *zh)
-{
- unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
- struct zbud_page *zbpg = NULL;
- unsigned budnum = -1U;
- int i;
-
- for (i = 0; i < ZBUD_MAX_BUDS; i++)
- if (offset == offsetof(typeof(*zbpg), buddy[i])) {
- budnum = i;
- break;
- }
- BUG_ON(budnum == -1U);
- return budnum;
-}
-
-static char *zbud_data(struct zbud_hdr *zh, unsigned size)
-{
- struct zbud_page *zbpg;
- char *p;
- unsigned budnum;
-
- ASSERT_SENTINEL(zh, ZBH);
- budnum = zbud_budnum(zh);
- BUG_ON(size == 0 || size > zbud_max_buddy_size());
- zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
- ASSERT_SPINLOCK(&zbpg->lock);
- p = (char *)zbpg;
- if (budnum == 0)
- p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
- CHUNK_MASK);
- else if (budnum == 1)
- p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
- return p;
-}
-
-static void zbud_copy_from_pampd(char *data, size_t *size, struct zbud_hdr *zh)
-{
- struct zbud_page *zbpg;
- char *p;
- unsigned budnum;
-
- ASSERT_SENTINEL(zh, ZBH);
- budnum = zbud_budnum(zh);
- zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
- spin_lock(&zbpg->lock);
- BUG_ON(zh->size > *size);
- p = (char *)zbpg;
- if (budnum == 0)
- p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
- CHUNK_MASK);
- else if (budnum == 1)
- p += PAGE_SIZE - ((zh->size + CHUNK_SIZE - 1) & CHUNK_MASK);
- /* client should be filled in by caller */
- memcpy(data, p, zh->size);
- *size = zh->size;
- spin_unlock(&zbpg->lock);
-}
-
-/*
- * zbud raw page management
- */
-
-static struct zbud_page *zbud_alloc_raw_page(void)
-{
- struct zbud_page *zbpg = NULL;
- struct zbud_hdr *zh0, *zh1;
- zbpg = zcache_get_free_page();
- if (likely(zbpg != NULL)) {
- INIT_LIST_HEAD(&zbpg->bud_list);
- zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
- spin_lock_init(&zbpg->lock);
- atomic_inc(&zcache_zbud_curr_raw_pages);
- INIT_LIST_HEAD(&zbpg->bud_list);
- SET_SENTINEL(zbpg, ZBPG);
- zh0->size = 0; zh1->size = 0;
- tmem_oid_set_invalid(&zh0->oid);
- tmem_oid_set_invalid(&zh1->oid);
- }
- return zbpg;
}
+#endif
-static void zbud_free_raw_page(struct zbud_page *zbpg)
-{
- struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];
+static int zcache_enabled __read_mostly;
+static int disable_cleancache __read_mostly;
+static int disable_frontswap __read_mostly;
+static int disable_frontswap_ignore_nonactive __read_mostly;
+static int disable_cleancache_ignore_nonactive __read_mostly;
+static char *namestr __read_mostly = "zcache";
- ASSERT_SENTINEL(zbpg, ZBPG);
- BUG_ON(!list_empty(&zbpg->bud_list));
- ASSERT_SPINLOCK(&zbpg->lock);
- BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
- BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
- INVERT_SENTINEL(zbpg, ZBPG);
- spin_unlock(&zbpg->lock);
- atomic_dec(&zcache_zbud_curr_raw_pages);
- zcache_free_page(zbpg);
-}
+#define ZCACHE_GFP_MASK \
+ (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
-/*
- * core zbud handling routines
- */
+MODULE_LICENSE("GPL");
-static unsigned zbud_free(struct zbud_hdr *zh)
-{
- unsigned size;
-
- ASSERT_SENTINEL(zh, ZBH);
- BUG_ON(!tmem_oid_valid(&zh->oid));
- size = zh->size;
- BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
- zh->size = 0;
- tmem_oid_set_invalid(&zh->oid);
- INVERT_SENTINEL(zh, ZBH);
- zcache_zbud_curr_zbytes -= size;
- atomic_dec(&zcache_zbud_curr_zpages);
- return size;
-}
-
-static void zbud_free_and_delist(struct zbud_hdr *zh)
-{
- unsigned chunks;
- struct zbud_hdr *zh_other;
- unsigned budnum = zbud_budnum(zh), size;
- struct zbud_page *zbpg =
- container_of(zh, struct zbud_page, buddy[budnum]);
-
- /* FIXME, should be BUG_ON, pool destruction path doesn't disable
- * interrupts tmem_destroy_pool()->tmem_pampd_destroy_all_in_obj()->
- * tmem_objnode_node_destroy()-> zcache_pampd_free() */
- WARN_ON(!irqs_disabled());
- spin_lock(&zbpg->lock);
- if (list_empty(&zbpg->bud_list)) {
- /* ignore zombie page... see zbud_evict_pages() */
- spin_unlock(&zbpg->lock);
- return;
- }
- size = zbud_free(zh);
- ASSERT_SPINLOCK(&zbpg->lock);
- zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
- if (zh_other->size == 0) { /* was unbuddied: unlist and free */
- chunks = zbud_size_to_chunks(size) ;
- spin_lock(&zbud_budlists_spinlock);
- BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
- list_del_init(&zbpg->bud_list);
- zbud_unbuddied[chunks].count--;
- spin_unlock(&zbud_budlists_spinlock);
- zbud_free_raw_page(zbpg);
- } else { /* was buddied: move remaining buddy to unbuddied list */
- chunks = zbud_size_to_chunks(zh_other->size) ;
- spin_lock(&zbud_budlists_spinlock);
- list_del_init(&zbpg->bud_list);
- zcache_zbud_buddied_count--;
- list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
- zbud_unbuddied[chunks].count++;
- spin_unlock(&zbud_budlists_spinlock);
- spin_unlock(&zbpg->lock);
- }
-}
+/* crypto API for zcache */
+#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
+static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly;
+static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly;
-static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
- struct tmem_oid *oid,
- uint32_t index, struct page *page,
- void *cdata, unsigned size)
-{
- struct zbud_hdr *zh0, *zh1, *zh = NULL;
- struct zbud_page *zbpg = NULL, *ztmp;
- unsigned nchunks;
- char *to;
- int i, found_good_buddy = 0;
-
- nchunks = zbud_size_to_chunks(size) ;
- for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
- spin_lock(&zbud_budlists_spinlock);
- if (!list_empty(&zbud_unbuddied[i].list)) {
- list_for_each_entry_safe(zbpg, ztmp,
- &zbud_unbuddied[i].list, bud_list) {
- if (spin_trylock(&zbpg->lock)) {
- found_good_buddy = i;
- goto found_unbuddied;
- }
- }
- }
- spin_unlock(&zbud_budlists_spinlock);
- }
- /* didn't find a good buddy, try allocating a new page */
- zbpg = zbud_alloc_raw_page();
- if (unlikely(zbpg == NULL))
- goto out;
- /* ok, have a page, now compress the data before taking locks */
- spin_lock(&zbud_budlists_spinlock);
- spin_lock(&zbpg->lock);
- list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
- zbud_unbuddied[nchunks].count++;
- zh = &zbpg->buddy[0];
- goto init_zh;
-
-found_unbuddied:
- ASSERT_SPINLOCK(&zbpg->lock);
- zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
- BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0)));
- if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */
- ASSERT_SENTINEL(zh0, ZBH);
- zh = zh1;
- } else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */
- ASSERT_SENTINEL(zh1, ZBH);
- zh = zh0;
- } else
- BUG();
- list_del_init(&zbpg->bud_list);
- zbud_unbuddied[found_good_buddy].count--;
- list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
- zcache_zbud_buddied_count++;
-
-init_zh:
- SET_SENTINEL(zh, ZBH);
- zh->size = size;
- zh->index = index;
- zh->oid = *oid;
- zh->pool_id = pool_id;
- zh->client_id = client_id;
- to = zbud_data(zh, size);
- memcpy(to, cdata, size);
- spin_unlock(&zbpg->lock);
- spin_unlock(&zbud_budlists_spinlock);
- zbud_cumul_chunk_counts[nchunks]++;
- atomic_inc(&zcache_zbud_curr_zpages);
- zcache_zbud_cumul_zpages++;
- zcache_zbud_curr_zbytes += size;
- zcache_zbud_cumul_zbytes += size;
-out:
- return zh;
-}
+enum comp_op {
+ ZCACHE_COMPOP_COMPRESS,
+ ZCACHE_COMPOP_DECOMPRESS
+};
-static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
+static inline int zcache_comp_op(enum comp_op op,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
{
- struct zbud_page *zbpg;
- unsigned budnum = zbud_budnum(zh);
- size_t out_len = PAGE_SIZE;
- char *to_va, *from_va;
- unsigned size;
- int ret = 0;
+ struct crypto_comp *tfm;
+ int ret = -1;
- zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
- spin_lock(&zbpg->lock);
- if (list_empty(&zbpg->bud_list)) {
- /* ignore zombie page... see zbud_evict_pages() */
+ BUG_ON(!zcache_comp_pcpu_tfms);
+ tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
+ BUG_ON(!tfm);
+ switch (op) {
+ case ZCACHE_COMPOP_COMPRESS:
+ ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
+ break;
+ case ZCACHE_COMPOP_DECOMPRESS:
+ ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
+ break;
+ default:
ret = -EINVAL;
- goto out;
}
- ASSERT_SENTINEL(zh, ZBH);
- BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
- to_va = kmap_atomic(page);
- size = zh->size;
- from_va = zbud_data(zh, size);
- ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
- BUG_ON(ret != LZO_E_OK);
- BUG_ON(out_len != PAGE_SIZE);
- kunmap_atomic(to_va);
-out:
- spin_unlock(&zbpg->lock);
+ put_cpu();
return ret;
}
/*
- * The following routines handle shrinking of ephemeral pages by evicting
- * pages "least valuable" first.
- */
-
-static unsigned long zcache_evicted_raw_pages;
-static unsigned long zcache_evicted_buddied_pages;
-static unsigned long zcache_evicted_unbuddied_pages;
-
-static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
- uint16_t poolid);
-static void zcache_put_pool(struct tmem_pool *pool);
-
-/*
- * Flush and free all zbuds in a zbpg, then free the pageframe
- */
-static void zbud_evict_zbpg(struct zbud_page *zbpg)
-{
- struct zbud_hdr *zh;
- int i, j;
- uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
- uint32_t index[ZBUD_MAX_BUDS];
- struct tmem_oid oid[ZBUD_MAX_BUDS];
- struct tmem_pool *pool;
- unsigned long flags;
-
- ASSERT_SPINLOCK(&zbpg->lock);
- for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
- zh = &zbpg->buddy[i];
- if (zh->size) {
- client_id[j] = zh->client_id;
- pool_id[j] = zh->pool_id;
- oid[j] = zh->oid;
- index[j] = zh->index;
- j++;
- }
- }
- spin_unlock(&zbpg->lock);
- for (i = 0; i < j; i++) {
- pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
- BUG_ON(pool == NULL);
- local_irq_save(flags);
- /* these flushes should dispose of any local storage */
- tmem_flush_page(pool, &oid[i], index[i]);
- local_irq_restore(flags);
- zcache_put_pool(pool);
- }
-}
-
-/*
- * Free nr pages. This code is funky because we want to hold the locks
- * protecting various lists for as short a time as possible, and in some
- * circumstances the list may change asynchronously when the list lock is
- * not held. In some cases we also trylock not only to avoid waiting on a
- * page in use by another cpu, but also to avoid potential deadlock due to
- * lock inversion.
- */
-static void zbud_evict_pages(int nr)
-{
- struct zbud_page *zbpg;
- int i, newly_unused_pages = 0;
-
-
- /* now try freeing unbuddied pages, starting with least space avail */
- for (i = 0; i < MAX_CHUNK; i++) {
-retry_unbud_list_i:
- spin_lock_bh(&zbud_budlists_spinlock);
- if (list_empty(&zbud_unbuddied[i].list)) {
- spin_unlock_bh(&zbud_budlists_spinlock);
- continue;
- }
- list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
- if (unlikely(!spin_trylock(&zbpg->lock)))
- continue;
- zbud_unbuddied[i].count--;
- spin_unlock(&zbud_budlists_spinlock);
- zcache_evicted_unbuddied_pages++;
- /* want budlists unlocked when doing zbpg eviction */
- zbud_evict_zbpg(zbpg);
- newly_unused_pages++;
- local_bh_enable();
- if (--nr <= 0)
- goto evict_unused;
- goto retry_unbud_list_i;
- }
- spin_unlock_bh(&zbud_budlists_spinlock);
- }
-
- /* as a last resort, free buddied pages */
-retry_bud_list:
- spin_lock_bh(&zbud_budlists_spinlock);
- if (list_empty(&zbud_buddied_list)) {
- spin_unlock_bh(&zbud_budlists_spinlock);
- goto evict_unused;
- }
- list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
- if (unlikely(!spin_trylock(&zbpg->lock)))
- continue;
- zcache_zbud_buddied_count--;
- spin_unlock(&zbud_budlists_spinlock);
- zcache_evicted_buddied_pages++;
- /* want budlists unlocked when doing zbpg eviction */
- zbud_evict_zbpg(zbpg);
- newly_unused_pages++;
- local_bh_enable();
- if (--nr <= 0)
- goto evict_unused;
- goto retry_bud_list;
- }
- spin_unlock_bh(&zbud_budlists_spinlock);
-
-evict_unused:
- return;
-}
-
-static DEFINE_PER_CPU(unsigned char *, zcache_remoteputmem);
-
-static int zbud_remotify_zbud(struct tmem_xhandle *xh, char *data,
- size_t size)
-{
- struct tmem_pool *pool;
- int i, remotenode, ret = -1;
- unsigned char cksum, *p;
- unsigned long flags;
-
- for (p = data, cksum = 0, i = 0; i < size; i++)
- cksum += *p;
- ret = ramster_remote_put(xh, data, size, true, &remotenode);
- if (ret == 0) {
- /* data was successfully remoted so change the local version
- * to point to the remote node where it landed */
- pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh->pool_id);
- BUG_ON(pool == NULL);
- local_irq_save(flags);
- /* tmem_replace will also free up any local space */
- (void)tmem_replace(pool, &xh->oid, xh->index,
- pampd_make_remote(remotenode, size, cksum));
- local_irq_restore(flags);
- zcache_put_pool(pool);
- ramster_eph_pages_remoted++;
- ret = 0;
- } else
- ramster_eph_pages_remote_failed++;
- return ret;
-}
-
-static int zbud_remotify_zbpg(struct zbud_page *zbpg)
-{
- struct zbud_hdr *zh1, *zh2 = NULL;
- struct tmem_xhandle xh1, xh2 = { 0 };
- char *data1 = NULL, *data2 = NULL;
- size_t size1 = 0, size2 = 0;
- int ret = 0;
- unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);
-
- ASSERT_SPINLOCK(&zbpg->lock);
- if (zbpg->buddy[0].size == 0)
- zh1 = &zbpg->buddy[1];
- else if (zbpg->buddy[1].size == 0)
- zh1 = &zbpg->buddy[0];
- else {
- zh1 = &zbpg->buddy[0];
- zh2 = &zbpg->buddy[1];
- }
- /* don't remotify pages that are already remotified */
- if (zh1->client_id != LOCAL_CLIENT)
- zh1 = NULL;
- if ((zh2 != NULL) && (zh2->client_id != LOCAL_CLIENT))
- zh2 = NULL;
-
- /* copy the data and metadata so can release lock */
- if (zh1 != NULL) {
- xh1.client_id = zh1->client_id;
- xh1.pool_id = zh1->pool_id;
- xh1.oid = zh1->oid;
- xh1.index = zh1->index;
- size1 = zh1->size;
- data1 = zbud_data(zh1, size1);
- memcpy(tmpmem, zbud_data(zh1, size1), size1);
- data1 = tmpmem;
- tmpmem += size1;
- }
- if (zh2 != NULL) {
- xh2.client_id = zh2->client_id;
- xh2.pool_id = zh2->pool_id;
- xh2.oid = zh2->oid;
- xh2.index = zh2->index;
- size2 = zh2->size;
- memcpy(tmpmem, zbud_data(zh2, size2), size2);
- data2 = tmpmem;
- }
- spin_unlock(&zbpg->lock);
- preempt_enable();
-
- /* OK, no locks held anymore, remotify one or both zbuds */
- if (zh1 != NULL)
- ret = zbud_remotify_zbud(&xh1, data1, size1);
- if (zh2 != NULL)
- ret |= zbud_remotify_zbud(&xh2, data2, size2);
- return ret;
-}
-
-void zbud_remotify_pages(int nr)
-{
- struct zbud_page *zbpg;
- int i, ret;
-
- /*
- * for now just try remotifying unbuddied pages, starting with
- * least space avail
- */
- for (i = 0; i < MAX_CHUNK; i++) {
-retry_unbud_list_i:
- preempt_disable(); /* enable in zbud_remotify_zbpg */
- spin_lock_bh(&zbud_budlists_spinlock);
- if (list_empty(&zbud_unbuddied[i].list)) {
- spin_unlock_bh(&zbud_budlists_spinlock);
- preempt_enable();
- continue; /* next i in for loop */
- }
- list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
- if (unlikely(!spin_trylock(&zbpg->lock)))
- continue; /* next list_for_each_entry */
- zbud_unbuddied[i].count--;
- /* want budlists unlocked when doing zbpg remotify */
- spin_unlock_bh(&zbud_budlists_spinlock);
- ret = zbud_remotify_zbpg(zbpg);
- /* preemption is re-enabled in zbud_remotify_zbpg */
- if (ret == 0) {
- if (--nr <= 0)
- goto out;
- goto retry_unbud_list_i;
- }
- /* if fail to remotify any page, quit */
- pr_err("TESTING zbud_remotify_pages failed on page,"
- " trying to re-add\n");
- spin_lock_bh(&zbud_budlists_spinlock);
- spin_lock(&zbpg->lock);
- list_add_tail(&zbpg->bud_list, &zbud_unbuddied[i].list);
- zbud_unbuddied[i].count++;
- spin_unlock(&zbpg->lock);
- spin_unlock_bh(&zbud_budlists_spinlock);
- pr_err("TESTING zbud_remotify_pages failed on page,"
- " finished re-add\n");
- goto out;
- }
- spin_unlock_bh(&zbud_budlists_spinlock);
- preempt_enable();
- }
-
-next_buddied_zbpg:
- preempt_disable(); /* enable in zbud_remotify_zbpg */
- spin_lock_bh(&zbud_budlists_spinlock);
- if (list_empty(&zbud_buddied_list))
- goto unlock_out;
- list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
- if (unlikely(!spin_trylock(&zbpg->lock)))
- continue; /* next list_for_each_entry */
- zcache_zbud_buddied_count--;
- /* want budlists unlocked when doing zbpg remotify */
- spin_unlock_bh(&zbud_budlists_spinlock);
- ret = zbud_remotify_zbpg(zbpg);
- /* preemption is re-enabled in zbud_remotify_zbpg */
- if (ret == 0) {
- if (--nr <= 0)
- goto out;
- goto next_buddied_zbpg;
- }
- /* if fail to remotify any page, quit */
- pr_err("TESTING zbud_remotify_pages failed on BUDDIED page,"
- " trying to re-add\n");
- spin_lock_bh(&zbud_budlists_spinlock);
- spin_lock(&zbpg->lock);
- list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
- zcache_zbud_buddied_count++;
- spin_unlock(&zbpg->lock);
- spin_unlock_bh(&zbud_budlists_spinlock);
- pr_err("TESTING zbud_remotify_pages failed on BUDDIED page,"
- " finished re-add\n");
- goto out;
- }
-unlock_out:
- spin_unlock_bh(&zbud_budlists_spinlock);
- preempt_enable();
-out:
- return;
-}
-
-/* the "flush list" asynchronously collects pages to remotely flush */
-#define FLUSH_ENTIRE_OBJECT ((uint32_t)-1)
-static void ramster_flnode_free(struct flushlist_node *,
- struct tmem_pool *);
-
-static void zcache_remote_flush_page(struct flushlist_node *flnode)
-{
- struct tmem_xhandle *xh;
- int remotenode, ret;
-
- preempt_disable();
- xh = &flnode->xh;
- remotenode = flnode->xh.client_id;
- ret = ramster_remote_flush(xh, remotenode);
- if (ret >= 0)
- ramster_remote_pages_flushed++;
- else
- ramster_remote_page_flushes_failed++;
- preempt_enable_no_resched();
- ramster_flnode_free(flnode, NULL);
-}
-
-static void zcache_remote_flush_object(struct flushlist_node *flnode)
-{
- struct tmem_xhandle *xh;
- int remotenode, ret;
-
- preempt_disable();
- xh = &flnode->xh;
- remotenode = flnode->xh.client_id;
- ret = ramster_remote_flush_object(xh, remotenode);
- if (ret >= 0)
- ramster_remote_objects_flushed++;
- else
- ramster_remote_object_flushes_failed++;
- preempt_enable_no_resched();
- ramster_flnode_free(flnode, NULL);
-}
-
-static void zcache_remote_eph_put(struct zbud_hdr *zbud)
-{
- /* FIXME */
-}
-
-static void zcache_remote_pers_put(struct zv_hdr *zv)
-{
- struct tmem_xhandle xh;
- uint16_t size;
- bool ephemeral;
- int remotenode, ret = -1;
- char *data;
- struct tmem_pool *pool;
- unsigned long flags;
- unsigned char cksum;
- char *p;
- int i;
- unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);
-
- ASSERT_SENTINEL(zv, ZVH);
- BUG_ON(zv->client_id != LOCAL_CLIENT);
- local_bh_disable();
- xh.client_id = zv->client_id;
- xh.pool_id = zv->pool_id;
- xh.oid = zv->oid;
- xh.index = zv->index;
- size = xv_get_object_size(zv) - sizeof(*zv);
- BUG_ON(size == 0 || size > zv_max_page_size);
- data = (char *)zv + sizeof(*zv);
- for (p = data, cksum = 0, i = 0; i < size; i++)
- cksum += *p;
- memcpy(tmpmem, data, size);
- data = tmpmem;
- pool = zcache_get_pool_by_id(zv->client_id, zv->pool_id);
- ephemeral = is_ephemeral(pool);
- zcache_put_pool(pool);
- /* now OK to release lock set in caller */
- spin_unlock(&zcache_rem_op_list_lock);
- local_bh_enable();
- preempt_disable();
- ret = ramster_remote_put(&xh, data, size, ephemeral, &remotenode);
- preempt_enable_no_resched();
- if (ret != 0) {
- /*
- * This is some form of a memory leak... if the remote put
- * fails, there will never be another attempt to remotify
- * this page. But since we've dropped the zv pointer,
- * the page may have been freed or the data replaced
- * so we can't just "put it back" in the remote op list.
- * Even if we could, not sure where to put it in the list
- * because there may be flushes that must be strictly
- * ordered vs the put. So leave this as a FIXME for now.
- * But count them so we know if it becomes a problem.
- */
- ramster_pers_pages_remote_failed++;
- goto out;
- } else
- atomic_inc(&ramster_remote_pers_pages);
- ramster_pers_pages_remoted++;
- /*
- * data was successfully remoted so change the local version to
- * point to the remote node where it landed
- */
- local_bh_disable();
- pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh.pool_id);
- local_irq_save(flags);
- (void)tmem_replace(pool, &xh.oid, xh.index,
- pampd_make_remote(remotenode, size, cksum));
- local_irq_restore(flags);
- zcache_put_pool(pool);
- local_bh_enable();
-out:
- return;
-}
-
-static void zcache_do_remotify_ops(int nr)
-{
- struct ramster_remotify_hdr *rem_op;
- union remotify_list_node *u;
-
- while (1) {
- if (!nr)
- goto out;
- spin_lock(&zcache_rem_op_list_lock);
- if (list_empty(&zcache_rem_op_list)) {
- spin_unlock(&zcache_rem_op_list_lock);
- goto out;
- }
- rem_op = list_first_entry(&zcache_rem_op_list,
- struct ramster_remotify_hdr, list);
- list_del_init(&rem_op->list);
- if (rem_op->op != RAMSTER_REMOTIFY_PERS_PUT)
- spin_unlock(&zcache_rem_op_list_lock);
- u = (union remotify_list_node *)rem_op;
- switch (rem_op->op) {
- case RAMSTER_REMOTIFY_EPH_PUT:
-BUG();
- zcache_remote_eph_put((struct zbud_hdr *)rem_op);
- break;
- case RAMSTER_REMOTIFY_PERS_PUT:
- zcache_remote_pers_put((struct zv_hdr *)rem_op);
- break;
- case RAMSTER_REMOTIFY_FLUSH_PAGE:
- zcache_remote_flush_page((struct flushlist_node *)u);
- break;
- case RAMSTER_REMOTIFY_FLUSH_OBJ:
- zcache_remote_flush_object((struct flushlist_node *)u);
- break;
- default:
- BUG();
- }
- }
-out:
- return;
-}
-
-/*
- * Communicate interface revision with userspace
- */
-#include "cluster/ramster_nodemanager.h"
-static unsigned long ramster_interface_revision = R2NM_API_VERSION;
-
-/*
- * For now, just push over a few pages every few seconds to
- * ensure that it basically works
- */
-static struct workqueue_struct *ramster_remotify_workqueue;
-static void ramster_remotify_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(ramster_remotify_worker,
- ramster_remotify_process);
-
-static void ramster_remotify_queue_delayed_work(unsigned long delay)
-{
- if (!queue_delayed_work(ramster_remotify_workqueue,
- &ramster_remotify_worker, delay))
- pr_err("ramster_remotify: bad workqueue\n");
-}
-
-
-static int use_frontswap;
-static int use_cleancache;
-static int ramster_remote_target_nodenum = -1;
-static void ramster_remotify_process(struct work_struct *work)
-{
- static bool remotify_in_progress;
-
- BUG_ON(irqs_disabled());
- if (remotify_in_progress)
- ramster_remotify_queue_delayed_work(HZ);
- else if (ramster_remote_target_nodenum != -1) {
- remotify_in_progress = true;
-#ifdef CONFIG_CLEANCACHE
- if (use_cleancache && ramster_eph_remotify_enable)
- zbud_remotify_pages(5000); /* FIXME is this a good number? */
-#endif
-#ifdef CONFIG_FRONTSWAP
- if (use_frontswap && ramster_pers_remotify_enable)
- zcache_do_remotify_ops(500); /* FIXME is this a good number? */
-#endif
- remotify_in_progress = false;
- ramster_remotify_queue_delayed_work(HZ);
- }
-}
-
-static void ramster_remotify_init(void)
-{
- unsigned long n = 60UL;
- ramster_remotify_workqueue =
- create_singlethread_workqueue("ramster_remotify");
- ramster_remotify_queue_delayed_work(n * HZ);
-}
-
-
-static void zbud_init(void)
-{
- int i;
-
- INIT_LIST_HEAD(&zbud_buddied_list);
- zcache_zbud_buddied_count = 0;
- for (i = 0; i < NCHUNKS; i++) {
- INIT_LIST_HEAD(&zbud_unbuddied[i].list);
- zbud_unbuddied[i].count = 0;
- }
-}
-
-#ifdef CONFIG_SYSFS
-/*
- * These sysfs routines show a nice distribution of how many zbpg's are
- * currently (and have ever been placed) in each unbuddied list. It's fun
- * to watch but can probably go away before final merge.
+ * policy parameters
*/
-static int zbud_show_unbuddied_list_counts(char *buf)
-{
- int i;
- char *p = buf;
-
- for (i = 0; i < NCHUNKS; i++)
- p += sprintf(p, "%u ", zbud_unbuddied[i].count);
- return p - buf;
-}
-
-static int zbud_show_cumul_chunk_counts(char *buf)
-{
- unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0;
- unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0;
- unsigned long total_chunks_lte_42 = 0;
- char *p = buf;
-
- for (i = 0; i < NCHUNKS; i++) {
- p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]);
- chunks += zbud_cumul_chunk_counts[i];
- total_chunks += zbud_cumul_chunk_counts[i];
- sum_total_chunks += i * zbud_cumul_chunk_counts[i];
- if (i == 21)
- total_chunks_lte_21 = total_chunks;
- if (i == 32)
- total_chunks_lte_32 = total_chunks;
- if (i == 42)
- total_chunks_lte_42 = total_chunks;
- }
- p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
- total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42,
- chunks == 0 ? 0 : sum_total_chunks / chunks);
- return p - buf;
-}
-#endif
-/**********
- * This "zv" PAM implementation combines the TLSF-based xvMalloc
- * with lzo1x compression to maximize the amount of data that can
- * be packed into a physical page.
- *
- * Zv represents a PAM page with the index and object (plus a "size" value
- * necessary for decompression) immediately preceding the compressed data.
- */
-
-/* rudimentary policy limits */
-/* total number of persistent pages may not exceed this percentage */
-static unsigned int zv_page_count_policy_percent = 75;
/*
* byte count defining poor compression; pages with greater zsize will be
* rejected
*/
-static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
+static unsigned int zbud_max_zsize __read_mostly = (PAGE_SIZE / 8) * 7;
/*
* byte count defining poor *mean* compression; pages with greater zsize
* will be rejected until sufficient better-compressed pages are accepted
* driving the mean below this threshold
*/
-static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
-
-static atomic_t zv_curr_dist_counts[NCHUNKS];
-static atomic_t zv_cumul_dist_counts[NCHUNKS];
+static unsigned int zbud_max_mean_zsize __read_mostly = (PAGE_SIZE / 8) * 5;
-
-static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
- struct tmem_oid *oid, uint32_t index,
- void *cdata, unsigned clen)
-{
- struct page *page;
- struct zv_hdr *zv = NULL;
- uint32_t offset;
- int alloc_size = clen + sizeof(struct zv_hdr);
- int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
- int ret;
-
- BUG_ON(!irqs_disabled());
- BUG_ON(chunks >= NCHUNKS);
- ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr),
- &page, &offset, ZCACHE_GFP_MASK);
- if (unlikely(ret))
- goto out;
- atomic_inc(&zv_curr_dist_counts[chunks]);
- atomic_inc(&zv_cumul_dist_counts[chunks]);
- zv = kmap_atomic(page) + offset;
- zv->index = index;
- zv->oid = *oid;
- zv->pool_id = pool_id;
- SET_SENTINEL(zv, ZVH);
- INIT_LIST_HEAD(&zv->rem_op.list);
- zv->client_id = get_client_id_from_client(cli);
- zv->rem_op.op = RAMSTER_REMOTIFY_PERS_PUT;
- if (zv->client_id == LOCAL_CLIENT) {
- spin_lock(&zcache_rem_op_list_lock);
- list_add_tail(&zv->rem_op.list, &zcache_rem_op_list);
- spin_unlock(&zcache_rem_op_list_lock);
- }
- memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
- kunmap_atomic(zv);
-out:
- return zv;
-}
-
-/* similar to zv_create, but just reserve space, no data yet */
-static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index,
- unsigned clen)
-{
- struct zcache_client *cli = pool->client;
- struct page *page;
- struct zv_hdr *zv = NULL;
- uint32_t offset;
- int ret;
-
- BUG_ON(!irqs_disabled());
- BUG_ON(!is_local_client(pool->client));
- ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr),
- &page, &offset, ZCACHE_GFP_MASK);
- if (unlikely(ret))
- goto out;
- zv = kmap_atomic(page) + offset;
- SET_SENTINEL(zv, ZVH);
- INIT_LIST_HEAD(&zv->rem_op.list);
- zv->client_id = LOCAL_CLIENT;
- zv->rem_op.op = RAMSTER_INTRANSIT_PERS;
- zv->index = index;
- zv->oid = *oid;
- zv->pool_id = pool->pool_id;
- kunmap_atomic(zv);
-out:
- return zv;
-}
-
-static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
-{
- unsigned long flags;
- struct page *page;
- uint32_t offset;
- uint16_t size = xv_get_object_size(zv);
- int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
-
- ASSERT_SENTINEL(zv, ZVH);
- BUG_ON(chunks >= NCHUNKS);
- atomic_dec(&zv_curr_dist_counts[chunks]);
- size -= sizeof(*zv);
- spin_lock(&zcache_rem_op_list_lock);
- size = xv_get_object_size(zv) - sizeof(*zv);
- BUG_ON(size == 0);
- INVERT_SENTINEL(zv, ZVH);
- if (!list_empty(&zv->rem_op.list))
- list_del_init(&zv->rem_op.list);
- spin_unlock(&zcache_rem_op_list_lock);
- page = virt_to_page(zv);
- offset = (unsigned long)zv & ~PAGE_MASK;
- local_irq_save(flags);
- xv_free(xvpool, page, offset);
- local_irq_restore(flags);
-}
-
-static void zv_decompress(struct page *page, struct zv_hdr *zv)
-{
- size_t clen = PAGE_SIZE;
- char *to_va;
- unsigned size;
- int ret;
-
- ASSERT_SENTINEL(zv, ZVH);
- size = xv_get_object_size(zv) - sizeof(*zv);
- BUG_ON(size == 0);
- to_va = kmap_atomic(page);
- ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
- size, to_va, &clen);
- kunmap_atomic(to_va);
- BUG_ON(ret != LZO_E_OK);
- BUG_ON(clen != PAGE_SIZE);
-}
-
-static void zv_copy_from_pampd(char *data, size_t *bufsize, struct zv_hdr *zv)
-{
- unsigned size;
-
- ASSERT_SENTINEL(zv, ZVH);
- size = xv_get_object_size(zv) - sizeof(*zv);
- BUG_ON(size == 0 || size > zv_max_page_size);
- BUG_ON(size > *bufsize);
- memcpy(data, (char *)zv + sizeof(*zv), size);
- *bufsize = size;
-}
-
-static void zv_copy_to_pampd(struct zv_hdr *zv, char *data, size_t size)
-{
- unsigned zv_size;
-
- ASSERT_SENTINEL(zv, ZVH);
- zv_size = xv_get_object_size(zv) - sizeof(*zv);
- BUG_ON(zv_size != size);
- BUG_ON(zv_size == 0 || zv_size > zv_max_page_size);
- memcpy((char *)zv + sizeof(*zv), data, size);
-}
-
-#ifdef CONFIG_SYSFS
/*
- * show a distribution of compression stats for zv pages.
+ * for now, use named slabs so we can easily track usage; later we can
+ * either just use kmalloc, or perhaps add a slab-like allocator
+ * to more carefully manage total memory utilization
*/
+static struct kmem_cache *zcache_objnode_cache;
+static struct kmem_cache *zcache_obj_cache;
-static int zv_curr_dist_counts_show(char *buf)
-{
- unsigned long i, n, chunks = 0, sum_total_chunks = 0;
- char *p = buf;
-
- for (i = 0; i < NCHUNKS; i++) {
- n = atomic_read(&zv_curr_dist_counts[i]);
- p += sprintf(p, "%lu ", n);
- chunks += n;
- sum_total_chunks += i * n;
- }
- p += sprintf(p, "mean:%lu\n",
- chunks == 0 ? 0 : sum_total_chunks / chunks);
- return p - buf;
-}
-
-static int zv_cumul_dist_counts_show(char *buf)
-{
- unsigned long i, n, chunks = 0, sum_total_chunks = 0;
- char *p = buf;
-
- for (i = 0; i < NCHUNKS; i++) {
- n = atomic_read(&zv_cumul_dist_counts[i]);
- p += sprintf(p, "%lu ", n);
- chunks += n;
- sum_total_chunks += i * n;
- }
- p += sprintf(p, "mean:%lu\n",
- chunks == 0 ? 0 : sum_total_chunks / chunks);
- return p - buf;
-}
+static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
-/*
- * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
- * pages that don't compress to less than this value (including metadata
- * overhead) to be rejected. We don't allow the value to get too close
- * to PAGE_SIZE.
- */
-static ssize_t zv_max_zsize_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%u\n", zv_max_zsize);
+/* we try to keep these statistics SMP-consistent */
+static long zcache_obj_count;
+static atomic_t zcache_obj_atomic = ATOMIC_INIT(0);
+static long zcache_obj_count_max;
+static long zcache_objnode_count;
+static atomic_t zcache_objnode_atomic = ATOMIC_INIT(0);
+static long zcache_objnode_count_max;
+static u64 zcache_eph_zbytes;
+static atomic_long_t zcache_eph_zbytes_atomic = ATOMIC_INIT(0);
+static u64 zcache_eph_zbytes_max;
+static u64 zcache_pers_zbytes;
+static atomic_long_t zcache_pers_zbytes_atomic = ATOMIC_INIT(0);
+static u64 zcache_pers_zbytes_max;
+static long zcache_eph_pageframes;
+static atomic_t zcache_eph_pageframes_atomic = ATOMIC_INIT(0);
+static long zcache_eph_pageframes_max;
+static long zcache_pers_pageframes;
+static atomic_t zcache_pers_pageframes_atomic = ATOMIC_INIT(0);
+static long zcache_pers_pageframes_max;
+static long zcache_pageframes_alloced;
+static atomic_t zcache_pageframes_alloced_atomic = ATOMIC_INIT(0);
+static long zcache_pageframes_freed;
+static atomic_t zcache_pageframes_freed_atomic = ATOMIC_INIT(0);
+static long zcache_eph_zpages;
+static atomic_t zcache_eph_zpages_atomic = ATOMIC_INIT(0);
+static long zcache_eph_zpages_max;
+static long zcache_pers_zpages;
+static atomic_t zcache_pers_zpages_atomic = ATOMIC_INIT(0);
+static long zcache_pers_zpages_max;
+
+/* but for the rest of these, counting races are ok */
+static unsigned long zcache_flush_total;
+static unsigned long zcache_flush_found;
+static unsigned long zcache_flobj_total;
+static unsigned long zcache_flobj_found;
+static unsigned long zcache_failed_eph_puts;
+static unsigned long zcache_failed_pers_puts;
+static unsigned long zcache_failed_getfreepages;
+static unsigned long zcache_failed_alloc;
+static unsigned long zcache_put_to_flush;
+static unsigned long zcache_compress_poor;
+static unsigned long zcache_mean_compress_poor;
+static unsigned long zcache_eph_ate_tail;
+static unsigned long zcache_eph_ate_tail_failed;
+static unsigned long zcache_pers_ate_eph;
+static unsigned long zcache_pers_ate_eph_failed;
+static unsigned long zcache_evicted_eph_zpages;
+static unsigned long zcache_evicted_eph_pageframes;
+static unsigned long zcache_last_active_file_pageframes;
+static unsigned long zcache_last_inactive_file_pageframes;
+static unsigned long zcache_last_active_anon_pageframes;
+static unsigned long zcache_last_inactive_anon_pageframes;
+static unsigned long zcache_eph_nonactive_puts_ignored;
+static unsigned long zcache_pers_nonactive_puts_ignored;
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#define zdfs debugfs_create_size_t
+#define zdfs64 debugfs_create_u64
+static int zcache_debugfs_init(void)
+{
+ struct dentry *root = debugfs_create_dir("zcache", NULL);
+ if (root == NULL)
+ return -ENXIO;
+
+ zdfs("obj_count", S_IRUGO, root, &zcache_obj_count);
+ zdfs("obj_count_max", S_IRUGO, root, &zcache_obj_count_max);
+ zdfs("objnode_count", S_IRUGO, root, &zcache_objnode_count);
+ zdfs("objnode_count_max", S_IRUGO, root, &zcache_objnode_count_max);
+ zdfs("flush_total", S_IRUGO, root, &zcache_flush_total);
+ zdfs("flush_found", S_IRUGO, root, &zcache_flush_found);
+ zdfs("flobj_total", S_IRUGO, root, &zcache_flobj_total);
+ zdfs("flobj_found", S_IRUGO, root, &zcache_flobj_found);
+ zdfs("failed_eph_puts", S_IRUGO, root, &zcache_failed_eph_puts);
+ zdfs("failed_pers_puts", S_IRUGO, root, &zcache_failed_pers_puts);
+ zdfs("failed_get_free_pages", S_IRUGO, root,
+ &zcache_failed_getfreepages);
+ zdfs("failed_alloc", S_IRUGO, root, &zcache_failed_alloc);
+ zdfs("put_to_flush", S_IRUGO, root, &zcache_put_to_flush);
+ zdfs("compress_poor", S_IRUGO, root, &zcache_compress_poor);
+ zdfs("mean_compress_poor", S_IRUGO, root, &zcache_mean_compress_poor);
+ zdfs("eph_ate_tail", S_IRUGO, root, &zcache_eph_ate_tail);
+ zdfs("eph_ate_tail_failed", S_IRUGO, root, &zcache_eph_ate_tail_failed);
+ zdfs("pers_ate_eph", S_IRUGO, root, &zcache_pers_ate_eph);
+ zdfs("pers_ate_eph_failed", S_IRUGO, root, &zcache_pers_ate_eph_failed);
+ zdfs("evicted_eph_zpages", S_IRUGO, root, &zcache_evicted_eph_zpages);
+ zdfs("evicted_eph_pageframes", S_IRUGO, root,
+ &zcache_evicted_eph_pageframes);
+ zdfs("eph_pageframes", S_IRUGO, root, &zcache_eph_pageframes);
+ zdfs("eph_pageframes_max", S_IRUGO, root, &zcache_eph_pageframes_max);
+ zdfs("pers_pageframes", S_IRUGO, root, &zcache_pers_pageframes);
+ zdfs("pers_pageframes_max", S_IRUGO, root, &zcache_pers_pageframes_max);
+ zdfs("eph_zpages", S_IRUGO, root, &zcache_eph_zpages);
+ zdfs("eph_zpages_max", S_IRUGO, root, &zcache_eph_zpages_max);
+ zdfs("pers_zpages", S_IRUGO, root, &zcache_pers_zpages);
+ zdfs("pers_zpages_max", S_IRUGO, root, &zcache_pers_zpages_max);
+ zdfs("last_active_file_pageframes", S_IRUGO, root,
+ &zcache_last_active_file_pageframes);
+ zdfs("last_inactive_file_pageframes", S_IRUGO, root,
+ &zcache_last_inactive_file_pageframes);
+ zdfs("last_active_anon_pageframes", S_IRUGO, root,
+ &zcache_last_active_anon_pageframes);
+ zdfs("last_inactive_anon_pageframes", S_IRUGO, root,
+ &zcache_last_inactive_anon_pageframes);
+ zdfs("eph_nonactive_puts_ignored", S_IRUGO, root,
+ &zcache_eph_nonactive_puts_ignored);
+ zdfs("pers_nonactive_puts_ignored", S_IRUGO, root,
+ &zcache_pers_nonactive_puts_ignored);
+ zdfs64("eph_zbytes", S_IRUGO, root, &zcache_eph_zbytes);
+ zdfs64("eph_zbytes_max", S_IRUGO, root, &zcache_eph_zbytes_max);
+ zdfs64("pers_zbytes", S_IRUGO, root, &zcache_pers_zbytes);
+ zdfs64("pers_zbytes_max", S_IRUGO, root, &zcache_pers_zbytes_max);
+ return 0;
}
+#undef zdfs
+#undef zdfs64
+#endif
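
Editorial note, not part of this patch: the statistics above pair an atomic_t, which is what the hot paths actually update, with a plain "shadow" variable holding the same value, because debugfs_create_size_t()/debugfs_create_u64() want the address of an ordinary variable to export. A minimal sketch of that pattern, using hypothetical names:

static long example_count;			/* shadow, exported via debugfs */
static long example_count_max;
static atomic_t example_atomic = ATOMIC_INIT(0);	/* authoritative counter */

static void example_count_inc(void)
{
	/* bump the atomic, then mirror the result into the shadow */
	example_count = atomic_inc_return(&example_atomic);
	if (example_count > example_count_max)
		example_count_max = example_count;
}
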
-static ssize_t zv_max_zsize_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned long val;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- err = kstrtoul(buf, 10, &val);
- if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
- return -EINVAL;
- zv_max_zsize = val;
- return count;
+#define ZCACHE_DEBUG
+#ifdef ZCACHE_DEBUG
+/* developers can call this in case of ooms, e.g. to find memory leaks */
+void zcache_dump(void)
+{
+ pr_info("zcache: obj_count=%lu\n", zcache_obj_count);
+ pr_info("zcache: obj_count_max=%lu\n", zcache_obj_count_max);
+ pr_info("zcache: objnode_count=%lu\n", zcache_objnode_count);
+ pr_info("zcache: objnode_count_max=%lu\n", zcache_objnode_count_max);
+ pr_info("zcache: flush_total=%lu\n", zcache_flush_total);
+ pr_info("zcache: flush_found=%lu\n", zcache_flush_found);
+ pr_info("zcache: flobj_total=%lu\n", zcache_flobj_total);
+ pr_info("zcache: flobj_found=%lu\n", zcache_flobj_found);
+ pr_info("zcache: failed_eph_puts=%lu\n", zcache_failed_eph_puts);
+ pr_info("zcache: failed_pers_puts=%lu\n", zcache_failed_pers_puts);
+ pr_info("zcache: failed_get_free_pages=%lu\n",
+ zcache_failed_getfreepages);
+ pr_info("zcache: failed_alloc=%lu\n", zcache_failed_alloc);
+ pr_info("zcache: put_to_flush=%lu\n", zcache_put_to_flush);
+ pr_info("zcache: compress_poor=%lu\n", zcache_compress_poor);
+ pr_info("zcache: mean_compress_poor=%lu\n",
+ zcache_mean_compress_poor);
+ pr_info("zcache: eph_ate_tail=%lu\n", zcache_eph_ate_tail);
+ pr_info("zcache: eph_ate_tail_failed=%lu\n",
+ zcache_eph_ate_tail_failed);
+ pr_info("zcache: pers_ate_eph=%lu\n", zcache_pers_ate_eph);
+ pr_info("zcache: pers_ate_eph_failed=%lu\n",
+ zcache_pers_ate_eph_failed);
+ pr_info("zcache: evicted_eph_zpages=%lu\n", zcache_evicted_eph_zpages);
+ pr_info("zcache: evicted_eph_pageframes=%lu\n",
+ zcache_evicted_eph_pageframes);
+ pr_info("zcache: eph_pageframes=%lu\n", zcache_eph_pageframes);
+ pr_info("zcache: eph_pageframes_max=%lu\n", zcache_eph_pageframes_max);
+ pr_info("zcache: pers_pageframes=%lu\n", zcache_pers_pageframes);
+ pr_info("zcache: pers_pageframes_max=%lu\n",
+ zcache_pers_pageframes_max);
+ pr_info("zcache: eph_zpages=%lu\n", zcache_eph_zpages);
+ pr_info("zcache: eph_zpages_max=%lu\n", zcache_eph_zpages_max);
+ pr_info("zcache: pers_zpages=%lu\n", zcache_pers_zpages);
+ pr_info("zcache: pers_zpages_max=%lu\n", zcache_pers_zpages_max);
+ pr_info("zcache: eph_zbytes=%llu\n",
+ (unsigned long long)zcache_eph_zbytes);
+ pr_info("zcache: eph_zbytes_max=%llu\n",
+ (unsigned long long)zcache_eph_zbytes_max);
+ pr_info("zcache: pers_zbytes=%llu\n",
+ (unsigned long long)zcache_pers_zbytes);
+ pr_info("zcache: pers_zbytes_max=%llu\n",
+ (unsigned long long)zcache_pers_zbytes_max);
}
+#endif
/*
- * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
- * pages that don't compress to less than this value (including metadata
- * overhead) to be rejected UNLESS the mean compression is also smaller
- * than this value. In other words, we are load-balancing-by-zsize the
- * accepted pages. Again, we don't allow the value to get too close
- * to PAGE_SIZE.
+ * zcache core code starts here
*/
-static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%u\n", zv_max_mean_zsize);
-}
-
-static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned long val;
- int err;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- err = kstrtoul(buf, 10, &val);
- if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
- return -EINVAL;
- zv_max_mean_zsize = val;
- return count;
-}
+static struct zcache_client zcache_host;
+static struct zcache_client zcache_clients[MAX_CLIENTS];
-/*
- * setting zv_page_count_policy_percent via sysfs sets an upper bound of
- * persistent (e.g. swap) pages that will be retained according to:
- * (zv_page_count_policy_percent * totalram_pages) / 100)
- * when that limit is reached, further puts will be rejected (until
- * some pages have been flushed). Note that, due to compression,
- * this number may exceed 100; it defaults to 75 and we set an
- * arbitrary limit of 150. A poor choice will almost certainly result
- * in OOM's, so this value should only be changed prudently.
- */
-static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+static inline bool is_local_client(struct zcache_client *cli)
{
- return sprintf(buf, "%u\n", zv_page_count_policy_percent);
+ return cli == &zcache_host;
}
-static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static struct zcache_client *zcache_get_client_by_id(uint16_t cli_id)
{
- unsigned long val;
- int err;
+ struct zcache_client *cli = &zcache_host;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- err = kstrtoul(buf, 10, &val);
- if (err || (val == 0) || (val > 150))
- return -EINVAL;
- zv_page_count_policy_percent = val;
- return count;
+ if (cli_id != LOCAL_CLIENT) {
+ if (cli_id >= MAX_CLIENTS)
+ goto out;
+ cli = &zcache_clients[cli_id];
+ }
+out:
+ return cli;
}
-static struct kobj_attribute zcache_zv_max_zsize_attr = {
- .attr = { .name = "zv_max_zsize", .mode = 0644 },
- .show = zv_max_zsize_show,
- .store = zv_max_zsize_store,
-};
-
-static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
- .attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
- .show = zv_max_mean_zsize_show,
- .store = zv_max_mean_zsize_store,
-};
-
-static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
- .attr = { .name = "zv_page_count_policy_percent",
- .mode = 0644 },
- .show = zv_page_count_policy_percent_show,
- .store = zv_page_count_policy_percent_store,
-};
-#endif
-
-/*
- * zcache core code starts here
- */
-
-/* useful stats not collected by cleancache or frontswap */
-static unsigned long zcache_flush_total;
-static unsigned long zcache_flush_found;
-static unsigned long zcache_flobj_total;
-static unsigned long zcache_flobj_found;
-static unsigned long zcache_failed_eph_puts;
-static unsigned long zcache_nonactive_puts;
-static unsigned long zcache_failed_pers_puts;
-
/*
* Tmem operations assume the poolid implies the invoking client.
* Zcache only has one client (the kernel itself): LOCAL_CLIENT.
@@ -1398,21 +328,16 @@ static unsigned long zcache_failed_pers_puts;
* of zcache would have one client per guest and each client might
* have a poolid==N.
*/
-static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
+struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
{
struct tmem_pool *pool = NULL;
struct zcache_client *cli = NULL;
- if (cli_id == LOCAL_CLIENT)
- cli = &zcache_host;
- else {
- if (cli_id >= MAX_CLIENTS)
- goto out;
- cli = &zcache_clients[cli_id];
- if (cli == NULL)
- goto out;
+ cli = zcache_get_client_by_id(cli_id);
+ if (cli == NULL)
+ goto out;
+ if (!is_local_client(cli))
atomic_inc(&cli->refcount);
- }
if (poolid < MAX_POOLS_PER_CLIENT) {
pool = cli->tmem_pools[poolid];
if (pool != NULL)
@@ -1422,7 +347,7 @@ out:
return pool;
}
-static void zcache_put_pool(struct tmem_pool *pool)
+void zcache_put_pool(struct tmem_pool *pool)
{
struct zcache_client *cli = NULL;
@@ -1430,173 +355,26 @@ static void zcache_put_pool(struct tmem_pool *pool)
BUG();
cli = pool->client;
atomic_dec(&pool->refcount);
- atomic_dec(&cli->refcount);
+ if (!is_local_client(cli))
+ atomic_dec(&cli->refcount);
}
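
Editorial note, not part of this patch: callers are expected to bracket any use of a pool with this get/put pair so that a foreign client's refcount is held for the duration; a minimal usage sketch (hypothetical caller, error handling elided):

	struct tmem_pool *pool;

	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (pool != NULL) {
		/* ... operate on the pool; the client refcount is held ... */
		zcache_put_pool(pool);
	}
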
int zcache_new_client(uint16_t cli_id)
{
- struct zcache_client *cli = NULL;
+ struct zcache_client *cli;
int ret = -1;
- if (cli_id == LOCAL_CLIENT)
- cli = &zcache_host;
- else if ((unsigned int)cli_id < MAX_CLIENTS)
- cli = &zcache_clients[cli_id];
+ cli = zcache_get_client_by_id(cli_id);
if (cli == NULL)
goto out;
if (cli->allocated)
goto out;
cli->allocated = 1;
-#ifdef CONFIG_FRONTSWAP
- cli->xvpool = xv_create_pool();
- if (cli->xvpool == NULL)
- goto out;
-#endif
- ret = 0;
-out:
- return ret;
-}
-
-/* counters for debugging */
-static unsigned long zcache_failed_get_free_pages;
-static unsigned long zcache_failed_alloc;
-static unsigned long zcache_put_to_flush;
-
-/*
- * for now, used named slabs so can easily track usage; later can
- * either just use kmalloc, or perhaps add a slab-like allocator
- * to more carefully manage total memory utilization
- */
-static struct kmem_cache *zcache_objnode_cache;
-static struct kmem_cache *zcache_obj_cache;
-static struct kmem_cache *ramster_flnode_cache;
-static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
-static unsigned long zcache_curr_obj_count_max;
-static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
-static unsigned long zcache_curr_objnode_count_max;
-
-/*
- * to avoid memory allocation recursion (e.g. due to direct reclaim), we
- * preload all necessary data structures so the hostops callbacks never
- * actually do a malloc
- */
-struct zcache_preload {
- void *page;
- struct tmem_obj *obj;
- int nr;
- struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
- struct flushlist_node *flnode;
-};
-static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
-
-static int zcache_do_preload(struct tmem_pool *pool)
-{
- struct zcache_preload *kp;
- struct tmem_objnode *objnode;
- struct tmem_obj *obj;
- struct flushlist_node *flnode;
- void *page;
- int ret = -ENOMEM;
-
- if (unlikely(zcache_objnode_cache == NULL))
- goto out;
- if (unlikely(zcache_obj_cache == NULL))
- goto out;
- preempt_disable();
- kp = &__get_cpu_var(zcache_preloads);
- while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
- preempt_enable_no_resched();
- objnode = kmem_cache_alloc(zcache_objnode_cache,
- ZCACHE_GFP_MASK);
- if (unlikely(objnode == NULL)) {
- zcache_failed_alloc++;
- goto out;
- }
- preempt_disable();
- kp = &__get_cpu_var(zcache_preloads);
- if (kp->nr < ARRAY_SIZE(kp->objnodes))
- kp->objnodes[kp->nr++] = objnode;
- else
- kmem_cache_free(zcache_objnode_cache, objnode);
- }
- preempt_enable_no_resched();
- obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
- if (unlikely(obj == NULL)) {
- zcache_failed_alloc++;
- goto out;
- }
- flnode = kmem_cache_alloc(ramster_flnode_cache, ZCACHE_GFP_MASK);
- if (unlikely(flnode == NULL)) {
- zcache_failed_alloc++;
- goto out;
- }
- if (is_ephemeral(pool)) {
- page = (void *)__get_free_page(ZCACHE_GFP_MASK);
- if (unlikely(page == NULL)) {
- zcache_failed_get_free_pages++;
- kmem_cache_free(zcache_obj_cache, obj);
- kmem_cache_free(ramster_flnode_cache, flnode);
- goto out;
- }
- }
- preempt_disable();
- kp = &__get_cpu_var(zcache_preloads);
- if (kp->obj == NULL)
- kp->obj = obj;
- else
- kmem_cache_free(zcache_obj_cache, obj);
- if (kp->flnode == NULL)
- kp->flnode = flnode;
- else
- kmem_cache_free(ramster_flnode_cache, flnode);
- if (is_ephemeral(pool)) {
- if (kp->page == NULL)
- kp->page = page;
- else
- free_page((unsigned long)page);
- }
ret = 0;
out:
return ret;
}
-static int ramster_do_preload_flnode_only(struct tmem_pool *pool)
-{
- struct zcache_preload *kp;
- struct flushlist_node *flnode;
- int ret = -ENOMEM;
-
- BUG_ON(!irqs_disabled());
- if (unlikely(ramster_flnode_cache == NULL))
- BUG();
- kp = &__get_cpu_var(zcache_preloads);
- flnode = kmem_cache_alloc(ramster_flnode_cache, GFP_ATOMIC);
- if (unlikely(flnode == NULL) && kp->flnode == NULL)
- BUG(); /* FIXME handle more gracefully, but how??? */
- else if (kp->flnode == NULL)
- kp->flnode = flnode;
- else
- kmem_cache_free(ramster_flnode_cache, flnode);
- return ret;
-}
-
-static void *zcache_get_free_page(void)
-{
- struct zcache_preload *kp;
- void *page;
-
- kp = &__get_cpu_var(zcache_preloads);
- page = kp->page;
- BUG_ON(page == NULL);
- kp->page = NULL;
- return page;
-}
-
-static void zcache_free_page(void *p)
-{
- free_page((unsigned long)p);
-}
-
/*
* zcache implementation for tmem host ops
*/
@@ -1604,78 +382,56 @@ static void zcache_free_page(void *p)
static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
{
struct tmem_objnode *objnode = NULL;
- unsigned long count;
struct zcache_preload *kp;
+ int i;
kp = &__get_cpu_var(zcache_preloads);
- if (kp->nr <= 0)
- goto out;
- objnode = kp->objnodes[kp->nr - 1];
+ for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
+ objnode = kp->objnodes[i];
+ if (objnode != NULL) {
+ kp->objnodes[i] = NULL;
+ break;
+ }
+ }
BUG_ON(objnode == NULL);
- kp->objnodes[kp->nr - 1] = NULL;
- kp->nr--;
- count = atomic_inc_return(&zcache_curr_objnode_count);
- if (count > zcache_curr_objnode_count_max)
- zcache_curr_objnode_count_max = count;
-out:
+ zcache_objnode_count = atomic_inc_return(&zcache_objnode_atomic);
+ if (zcache_objnode_count > zcache_objnode_count_max)
+ zcache_objnode_count_max = zcache_objnode_count;
return objnode;
}
static void zcache_objnode_free(struct tmem_objnode *objnode,
struct tmem_pool *pool)
{
- atomic_dec(&zcache_curr_objnode_count);
- BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0);
+ zcache_objnode_count =
+ atomic_dec_return(&zcache_objnode_atomic);
+ BUG_ON(zcache_objnode_count < 0);
kmem_cache_free(zcache_objnode_cache, objnode);
}
static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
{
struct tmem_obj *obj = NULL;
- unsigned long count;
struct zcache_preload *kp;
kp = &__get_cpu_var(zcache_preloads);
obj = kp->obj;
BUG_ON(obj == NULL);
kp->obj = NULL;
- count = atomic_inc_return(&zcache_curr_obj_count);
- if (count > zcache_curr_obj_count_max)
- zcache_curr_obj_count_max = count;
+ zcache_obj_count = atomic_inc_return(&zcache_obj_atomic);
+ if (zcache_obj_count > zcache_obj_count_max)
+ zcache_obj_count_max = zcache_obj_count;
return obj;
}
static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
{
- atomic_dec(&zcache_curr_obj_count);
- BUG_ON(atomic_read(&zcache_curr_obj_count) < 0);
+ zcache_obj_count =
+ atomic_dec_return(&zcache_obj_atomic);
+ BUG_ON(zcache_obj_count < 0);
kmem_cache_free(zcache_obj_cache, obj);
}
-static struct flushlist_node *ramster_flnode_alloc(struct tmem_pool *pool)
-{
- struct flushlist_node *flnode = NULL;
- struct zcache_preload *kp;
- int count;
-
- kp = &__get_cpu_var(zcache_preloads);
- flnode = kp->flnode;
- BUG_ON(flnode == NULL);
- kp->flnode = NULL;
- count = atomic_inc_return(&ramster_curr_flnode_count);
- if (count > ramster_curr_flnode_count_max)
- ramster_curr_flnode_count_max = count;
- return flnode;
-}
-
-static void ramster_flnode_free(struct flushlist_node *flnode,
- struct tmem_pool *pool)
-{
- atomic_dec(&ramster_curr_flnode_count);
- BUG_ON(atomic_read(&ramster_curr_flnode_count) < 0);
- kmem_cache_free(ramster_flnode_cache, flnode);
-}
-
static struct tmem_hostops zcache_hostops = {
.obj_alloc = zcache_obj_alloc,
.obj_free = zcache_obj_free,
@@ -1683,220 +439,363 @@ static struct tmem_hostops zcache_hostops = {
.objnode_free = zcache_objnode_free,
};
-/*
- * zcache implementations for PAM page descriptor ops
- */
+static struct page *zcache_alloc_page(void)
+{
+ struct page *page = alloc_page(ZCACHE_GFP_MASK);
+ if (page != NULL)
+ zcache_pageframes_alloced =
+ atomic_inc_return(&zcache_pageframes_alloced_atomic);
+ return page;
+}
-static inline void dec_and_check(atomic_t *pvar)
+#ifdef FRONTSWAP_HAS_UNUSE
+static void zcache_unacct_page(void)
{
- atomic_dec(pvar);
- /* later when all accounting is fixed, make this a BUG */
- WARN_ON_ONCE(atomic_read(pvar) < 0);
+ zcache_pageframes_freed =
+ atomic_inc_return(&zcache_pageframes_freed_atomic);
}
+#endif
+
+static void zcache_free_page(struct page *page)
+{
+ long curr_pageframes;
+ static long max_pageframes, min_pageframes;
-static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
-static unsigned long zcache_curr_eph_pampd_count_max;
-static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
-static unsigned long zcache_curr_pers_pampd_count_max;
+ if (page == NULL)
+ BUG();
+ __free_page(page);
+ zcache_pageframes_freed =
+ atomic_inc_return(&zcache_pageframes_freed_atomic);
+ curr_pageframes = zcache_pageframes_alloced -
+ atomic_read(&zcache_pageframes_freed_atomic) -
+ atomic_read(&zcache_eph_pageframes_atomic) -
+ atomic_read(&zcache_pers_pageframes_atomic);
+ if (curr_pageframes > max_pageframes)
+ max_pageframes = curr_pageframes;
+ if (curr_pageframes < min_pageframes)
+ min_pageframes = curr_pageframes;
+#ifdef ZCACHE_DEBUG
+ if (curr_pageframes > 2L || curr_pageframes < -2L) {
+ /* pr_info here */
+ }
+#endif
+}
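
Editorial note, not part of this patch: the debug arithmetic above tracks pageframes that zcache has allocated but not yet attributed to (or has already released from) the ephemeral and persistent pools, i.e.

	curr_pageframes = zcache_pageframes_alloced
			  - zcache_pageframes_freed
			  - zcache_eph_pageframes
			  - zcache_pers_pageframes;

which in steady state should stay within a couple of pages of zero; the empty ZCACHE_DEBUG branch is the hook where a developer could pr_info() any larger excursion.
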
+
+/*
+ * zcache implementations for PAM page descriptor ops
+ */
/* forward reference */
-static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
+static void zcache_compress(struct page *from,
+ void **out_va, unsigned *out_len);
-static int zcache_pampd_eph_create(char *data, size_t size, bool raw,
- struct tmem_pool *pool, struct tmem_oid *oid,
- uint32_t index, void **pampd)
+static struct page *zcache_evict_eph_pageframe(void);
+
+static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
+ struct tmem_handle *th)
{
- int ret = -1;
- void *cdata = data;
- size_t clen = size;
- struct zcache_client *cli = pool->client;
- uint16_t client_id = get_client_id_from_client(cli);
- struct page *page = NULL;
- unsigned long count;
+ void *pampd = NULL, *cdata = data;
+ unsigned clen = size;
+ struct page *page = (struct page *)(data), *newpage;
if (!raw) {
- page = virt_to_page(data);
- ret = zcache_compress(page, &cdata, &clen);
- if (ret == 0)
- goto out;
- if (clen == 0 || clen > zbud_max_buddy_size()) {
+ zcache_compress(page, &cdata, &clen);
+ if (clen > zbud_max_buddy_size()) {
zcache_compress_poor++;
goto out;
}
+ } else {
+ BUG_ON(clen > zbud_max_buddy_size());
}
- *pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
- index, page, cdata, clen);
- if (*pampd == NULL) {
- ret = -ENOMEM;
+
+ /* look for space via an existing match first */
+ pampd = (void *)zbud_match_prep(th, true, cdata, clen);
+ if (pampd != NULL)
+ goto got_pampd;
+
+ /* no match, now we need to find (or free up) a full page */
+ newpage = zcache_alloc_page();
+ if (newpage != NULL)
+ goto create_in_new_page;
+
+ zcache_failed_getfreepages++;
+ /* can't allocate a page, evict an ephemeral page via LRU */
+ newpage = zcache_evict_eph_pageframe();
+ if (newpage == NULL) {
+ zcache_eph_ate_tail_failed++;
goto out;
}
- ret = 0;
- count = atomic_inc_return(&zcache_curr_eph_pampd_count);
- if (count > zcache_curr_eph_pampd_count_max)
- zcache_curr_eph_pampd_count_max = count;
- if (client_id != LOCAL_CLIENT) {
- count = atomic_inc_return(&ramster_foreign_eph_pampd_count);
- if (count > ramster_foreign_eph_pampd_count_max)
- ramster_foreign_eph_pampd_count_max = count;
- }
+ zcache_eph_ate_tail++;
+
+create_in_new_page:
+ pampd = (void *)zbud_create_prep(th, true, cdata, clen, newpage);
+ BUG_ON(pampd == NULL);
+ zcache_eph_pageframes =
+ atomic_inc_return(&zcache_eph_pageframes_atomic);
+ if (zcache_eph_pageframes > zcache_eph_pageframes_max)
+ zcache_eph_pageframes_max = zcache_eph_pageframes;
+
+got_pampd:
+ zcache_eph_zbytes =
+ atomic_long_add_return(clen, &zcache_eph_zbytes_atomic);
+ if (zcache_eph_zbytes > zcache_eph_zbytes_max)
+ zcache_eph_zbytes_max = zcache_eph_zbytes;
+ zcache_eph_zpages = atomic_inc_return(&zcache_eph_zpages_atomic);
+ if (zcache_eph_zpages > zcache_eph_zpages_max)
+ zcache_eph_zpages_max = zcache_eph_zpages;
+ if (ramster_enabled && raw)
+ ramster_count_foreign_pages(true, 1);
out:
- return ret;
+ return pampd;
}
-static int zcache_pampd_pers_create(char *data, size_t size, bool raw,
- struct tmem_pool *pool, struct tmem_oid *oid,
- uint32_t index, void **pampd)
+static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
+ struct tmem_handle *th)
{
- int ret = -1;
- void *cdata = data;
- size_t clen = size;
- struct zcache_client *cli = pool->client;
- struct page *page;
- unsigned long count;
- unsigned long zv_mean_zsize;
- struct zv_hdr *zv;
- long curr_pers_pampd_count;
- u64 total_zsize;
-#ifdef RAMSTER_TESTING
- static bool pampd_neg_warned;
-#endif
+ void *pampd = NULL, *cdata = data;
+ unsigned clen = size;
+ struct page *page = (struct page *)(data), *newpage;
+ unsigned long zbud_mean_zsize;
+ unsigned long curr_pers_zpages, total_zsize;
- curr_pers_pampd_count = atomic_read(&zcache_curr_pers_pampd_count) -
- atomic_read(&ramster_remote_pers_pages);
-#ifdef RAMSTER_TESTING
- /* should always be positive, but warn if accounting is off */
- if (!pampd_neg_warned) {
- pr_warn("ramster: bad accounting for curr_pers_pampd_count\n");
- pampd_neg_warned = true;
+ if (data == NULL) {
+ BUG_ON(!ramster_enabled);
+ goto create_pampd;
}
-#endif
- if (curr_pers_pampd_count >
- (zv_page_count_policy_percent * totalram_pages) / 100) {
- zcache_policy_percent_exceeded++;
- goto out;
- }
- if (raw)
- goto ok_to_create;
- page = virt_to_page(data);
- if (zcache_compress(page, &cdata, &clen) == 0)
- goto out;
+ curr_pers_zpages = zcache_pers_zpages;
+/* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
+ if (!raw)
+ zcache_compress(page, &cdata, &clen);
/* reject if compression is too poor */
- if (clen > zv_max_zsize) {
+ if (clen > zbud_max_zsize) {
zcache_compress_poor++;
goto out;
}
/* reject if mean compression is too poor */
- if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
- total_zsize = xv_get_total_size_bytes(cli->xvpool);
- zv_mean_zsize = div_u64(total_zsize, curr_pers_pampd_count);
- if (zv_mean_zsize > zv_max_mean_zsize) {
+ if ((clen > zbud_max_mean_zsize) && (curr_pers_zpages > 0)) {
+ total_zsize = zcache_pers_zbytes;
+ if ((long)total_zsize < 0)
+ total_zsize = 0;
+ zbud_mean_zsize = div_u64(total_zsize,
+ curr_pers_zpages);
+ if (zbud_mean_zsize > zbud_max_mean_zsize) {
zcache_mean_compress_poor++;
goto out;
}
}
-ok_to_create:
- *pampd = (void *)zv_create(cli, pool->pool_id, oid, index, cdata, clen);
- if (*pampd == NULL) {
- ret = -ENOMEM;
+
+create_pampd:
+ /* look for space via an existing match first */
+ pampd = (void *)zbud_match_prep(th, false, cdata, clen);
+ if (pampd != NULL)
+ goto got_pampd;
+
+ /* no match, now we need to find (or free up) a full page */
+ newpage = zcache_alloc_page();
+ if (newpage != NULL)
+ goto create_in_new_page;
+ /*
+ * FIXME do the following only if eph is oversized?
+ * if (zcache_eph_pageframes >
+ * (global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE) +
+ * global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE)))
+ */
+ zcache_failed_getfreepages++;
+ /* can't allocate a page, evict an ephemeral page via LRU */
+ newpage = zcache_evict_eph_pageframe();
+ if (newpage == NULL) {
+ zcache_pers_ate_eph_failed++;
goto out;
}
- ret = 0;
- count = atomic_inc_return(&zcache_curr_pers_pampd_count);
- if (count > zcache_curr_pers_pampd_count_max)
- zcache_curr_pers_pampd_count_max = count;
- if (is_local_client(cli))
- goto out;
- zv = *(struct zv_hdr **)pampd;
- count = atomic_inc_return(&ramster_foreign_pers_pampd_count);
- if (count > ramster_foreign_pers_pampd_count_max)
- ramster_foreign_pers_pampd_count_max = count;
+ zcache_pers_ate_eph++;
+
+create_in_new_page:
+ pampd = (void *)zbud_create_prep(th, false, cdata, clen, newpage);
+ BUG_ON(pampd == NULL);
+ zcache_pers_pageframes =
+ atomic_inc_return(&zcache_pers_pageframes_atomic);
+ if (zcache_pers_pageframes > zcache_pers_pageframes_max)
+ zcache_pers_pageframes_max = zcache_pers_pageframes;
+
+got_pampd:
+ zcache_pers_zpages = atomic_inc_return(&zcache_pers_zpages_atomic);
+ if (zcache_pers_zpages > zcache_pers_zpages_max)
+ zcache_pers_zpages_max = zcache_pers_zpages;
+ zcache_pers_zbytes =
+ atomic_long_add_return(clen, &zcache_pers_zbytes_atomic);
+ if (zcache_pers_zbytes > zcache_pers_zbytes_max)
+ zcache_pers_zbytes_max = zcache_pers_zbytes;
+ if (ramster_enabled && raw)
+ ramster_count_foreign_pages(false, 1);
out:
- return ret;
+ return pampd;
}
-static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
- struct tmem_pool *pool, struct tmem_oid *oid,
- uint32_t index)
+/*
+ * This is called directly from zcache_put_page to pre-allocate space
+ * to store a zpage.
+ */
+void *zcache_pampd_create(char *data, unsigned int size, bool raw,
+ int eph, struct tmem_handle *th)
{
void *pampd = NULL;
- int ret;
- bool ephemeral;
+ struct zcache_preload *kp;
+ struct tmem_objnode *objnode;
+ struct tmem_obj *obj;
+ int i;
- BUG_ON(preemptible());
- ephemeral = (eph == 1) || ((eph == 0) && is_ephemeral(pool));
- if (ephemeral)
- ret = zcache_pampd_eph_create(data, size, raw, pool,
- oid, index, &pampd);
+ BUG_ON(!irqs_disabled());
+ /* pre-allocate per-cpu metadata */
+ BUG_ON(zcache_objnode_cache == NULL);
+ BUG_ON(zcache_obj_cache == NULL);
+ kp = &__get_cpu_var(zcache_preloads);
+ for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
+ objnode = kp->objnodes[i];
+ if (objnode == NULL) {
+ objnode = kmem_cache_alloc(zcache_objnode_cache,
+ ZCACHE_GFP_MASK);
+ if (unlikely(objnode == NULL)) {
+ zcache_failed_alloc++;
+ goto out;
+ }
+ kp->objnodes[i] = objnode;
+ }
+ }
+ if (kp->obj == NULL) {
+ obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
+ kp->obj = obj;
+ }
+ if (unlikely(kp->obj == NULL)) {
+ zcache_failed_alloc++;
+ goto out;
+ }
+ /*
+ * ok, we have all the metadata pre-allocated; now do the data.
+ * Since how we allocate the data depends on whether the pool is
+ * ephemeral or persistent, split the call into sub-functions here.
+ */
+ if (eph)
+ pampd = zcache_pampd_eph_create(data, size, raw, th);
else
- ret = zcache_pampd_pers_create(data, size, raw, pool,
- oid, index, &pampd);
- /* FIXME add some counters here for failed creates? */
+ pampd = zcache_pampd_pers_create(data, size, raw, th);
+out:
return pampd;
}
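
Editorial note, not part of this patch: this function tops up the per-CPU zcache_preloads stash before handing off to the eph/pers sub-functions, so the tmem host ops above (zcache_obj_alloc()/zcache_objnode_alloc()) can later consume metadata without calling the slab allocator in a context where that is unsafe. A minimal sketch of the consume side, with a hypothetical helper name:

static struct tmem_objnode *example_take_objnode(struct zcache_preload *kp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
		if (kp->objnodes[i] != NULL) {
			struct tmem_objnode *objnode = kp->objnodes[i];

			kp->objnodes[i] = NULL;	/* refilled on the next create */
			return objnode;
		}
	}
	return NULL;	/* an empty stash here would be a bug */
}
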
/*
+ * This is a pamop, called via tmem_put, that is necessary to "finish"
+ * a pampd creation.
+ */
+void zcache_pampd_create_finish(void *pampd, bool eph)
+{
+ zbud_create_finish((struct zbudref *)pampd, eph);
+}
+
+/*
+ * This is passed as a function parameter to zbud_decompress so that
+ * zbud need not be familiar with the details of crypto. It assumes that
+ * the bytes from_va and to_va through from_va+size-1 and to_va+size-1 are
+ * kmapped. It must be successful, else there is a logic bug somewhere.
+ */
+static void zcache_decompress(char *from_va, unsigned int size, char *to_va)
+{
+ int ret;
+ unsigned int outlen = PAGE_SIZE;
+
+ ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
+ to_va, &outlen);
+ BUG_ON(ret);
+ BUG_ON(outlen != PAGE_SIZE);
+}
+
+/*
+ * Decompress from the kernel va to a pageframe
+ */
+void zcache_decompress_to_page(char *from_va, unsigned int size,
+ struct page *to_page)
+{
+ char *to_va = kmap_atomic(to_page);
+ zcache_decompress(from_va, size, to_va);
+ kunmap_atomic(to_va);
+}
+
+/*
* fill the pageframe corresponding to the struct page with the data
* from the passed pampd
*/
-static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
+static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
void *pampd, struct tmem_pool *pool,
struct tmem_oid *oid, uint32_t index)
{
- int ret = 0;
+ int ret;
+ bool eph = !is_persistent(pool);
BUG_ON(preemptible());
- BUG_ON(is_ephemeral(pool)); /* Fix later for shared pools? */
+ BUG_ON(eph); /* fix later if shared pools get implemented */
BUG_ON(pampd_is_remote(pampd));
if (raw)
- zv_copy_from_pampd(data, bufsize, pampd);
- else
- zv_decompress(virt_to_page(data), pampd);
+ ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
+ sizep, eph);
+ else {
+ ret = zbud_decompress((struct page *)(data),
+ (struct zbudref *)pampd, false,
+ zcache_decompress);
+ *sizep = PAGE_SIZE;
+ }
return ret;
}
-static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
+/*
+ * fill the pageframe corresponding to the struct page with the data
+ * from the passed pampd, then free the pampd (and its pageframe, if
+ * the zbud page it lived in is now empty)
+ */
+static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
void *pampd, struct tmem_pool *pool,
struct tmem_oid *oid, uint32_t index)
{
- int ret = 0;
- unsigned long flags;
- struct zcache_client *cli = pool->client;
+ int ret;
+ bool eph = !is_persistent(pool);
+ struct page *page = NULL;
+ unsigned int zsize, zpages;
BUG_ON(preemptible());
BUG_ON(pampd_is_remote(pampd));
- if (is_ephemeral(pool)) {
- local_irq_save(flags);
- if (raw)
- zbud_copy_from_pampd(data, bufsize, pampd);
- else
- ret = zbud_decompress(virt_to_page(data), pampd);
- zbud_free_and_delist((struct zbud_hdr *)pampd);
- local_irq_restore(flags);
- if (!is_local_client(cli))
- dec_and_check(&ramster_foreign_eph_pampd_count);
- dec_and_check(&zcache_curr_eph_pampd_count);
+ if (raw)
+ ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
+ sizep, eph);
+ else {
+ ret = zbud_decompress((struct page *)(data),
+ (struct zbudref *)pampd, eph,
+ zcache_decompress);
+ *sizep = PAGE_SIZE;
+ }
+ page = zbud_free_and_delist((struct zbudref *)pampd, eph,
+ &zsize, &zpages);
+ if (eph) {
+ if (page)
+ zcache_eph_pageframes =
+ atomic_dec_return(&zcache_eph_pageframes_atomic);
+ zcache_eph_zpages =
+ atomic_sub_return(zpages, &zcache_eph_zpages_atomic);
+ zcache_eph_zbytes =
+ atomic_long_sub_return(zsize, &zcache_eph_zbytes_atomic);
} else {
- if (is_local_client(cli))
- BUG();
- if (raw)
- zv_copy_from_pampd(data, bufsize, pampd);
- else
- zv_decompress(virt_to_page(data), pampd);
- zv_free(cli->xvpool, pampd);
- if (!is_local_client(cli))
- dec_and_check(&ramster_foreign_pers_pampd_count);
- dec_and_check(&zcache_curr_pers_pampd_count);
- ret = 0;
- }
+ if (page)
+ zcache_pers_pageframes =
+ atomic_dec_return(&zcache_pers_pageframes_atomic);
+ zcache_pers_zpages =
+ atomic_sub_return(zpages, &zcache_pers_zpages_atomic);
+ zcache_pers_zbytes =
+ atomic_long_sub_return(zsize, &zcache_pers_zbytes_atomic);
+ }
+ if (!is_local_client(pool->client))
+ ramster_count_foreign_pages(eph, -1);
+ if (page)
+ zcache_free_page(page);
return ret;
}
-static bool zcache_pampd_is_remote(void *pampd)
-{
- return pampd_is_remote(pampd);
-}
-
/*
* free the pampd and remove it from any zcache lists
* pampd must no longer be pointed to from any tmem data structures!
@@ -1904,362 +803,134 @@ static bool zcache_pampd_is_remote(void *pampd)
static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
struct tmem_oid *oid, uint32_t index, bool acct)
{
- struct zcache_client *cli = pool->client;
- bool eph = is_ephemeral(pool);
- struct zv_hdr *zv;
+ struct page *page = NULL;
+ unsigned int zsize, zpages;
BUG_ON(preemptible());
if (pampd_is_remote(pampd)) {
- WARN_ON(acct == false);
- if (oid == NULL) {
- /*
- * a NULL oid means to ignore this pampd free
- * as the remote freeing will be handled elsewhere
- */
- } else if (eph) {
- /* FIXME remote flush optional but probably good idea */
- /* FIXME get these working properly again */
- dec_and_check(&zcache_curr_eph_pampd_count);
- } else if (pampd_is_intransit(pampd)) {
- /* did a pers remote get_and_free, so just free local */
- pampd = pampd_mask_intransit_and_remote(pampd);
- goto local_pers;
- } else {
- struct flushlist_node *flnode =
- ramster_flnode_alloc(pool);
-
- flnode->xh.client_id = pampd_remote_node(pampd);
- flnode->xh.pool_id = pool->pool_id;
- flnode->xh.oid = *oid;
- flnode->xh.index = index;
- flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_PAGE;
- spin_lock(&zcache_rem_op_list_lock);
- list_add(&flnode->rem_op.list, &zcache_rem_op_list);
- spin_unlock(&zcache_rem_op_list_lock);
- dec_and_check(&zcache_curr_pers_pampd_count);
- dec_and_check(&ramster_remote_pers_pages);
- }
- } else if (eph) {
- zbud_free_and_delist((struct zbud_hdr *)pampd);
- if (!is_local_client(pool->client))
- dec_and_check(&ramster_foreign_eph_pampd_count);
- if (acct)
- /* FIXME get these working properly again */
- dec_and_check(&zcache_curr_eph_pampd_count);
- } else {
-local_pers:
- zv = (struct zv_hdr *)pampd;
- if (!is_local_client(pool->client))
- dec_and_check(&ramster_foreign_pers_pampd_count);
- zv_free(cli->xvpool, zv);
- if (acct)
- /* FIXME get these working properly again */
- dec_and_check(&zcache_curr_pers_pampd_count);
+ BUG_ON(!ramster_enabled);
+ pampd = ramster_pampd_free(pampd, pool, oid, index, acct);
+ if (pampd == NULL)
+ return;
}
-}
-
-static void zcache_pampd_free_obj(struct tmem_pool *pool,
- struct tmem_obj *obj)
-{
- struct flushlist_node *flnode;
-
- BUG_ON(preemptible());
- if (obj->extra == NULL)
- return;
- BUG_ON(!pampd_is_remote(obj->extra));
- flnode = ramster_flnode_alloc(pool);
- flnode->xh.client_id = pampd_remote_node(obj->extra);
- flnode->xh.pool_id = pool->pool_id;
- flnode->xh.oid = obj->oid;
- flnode->xh.index = FLUSH_ENTIRE_OBJECT;
- flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_OBJ;
- spin_lock(&zcache_rem_op_list_lock);
- list_add(&flnode->rem_op.list, &zcache_rem_op_list);
- spin_unlock(&zcache_rem_op_list_lock);
-}
-
-void zcache_pampd_new_obj(struct tmem_obj *obj)
-{
- obj->extra = NULL;
-}
-
-int zcache_pampd_replace_in_obj(void *new_pampd, struct tmem_obj *obj)
-{
- int ret = -1;
-
- if (new_pampd != NULL) {
- if (obj->extra == NULL)
- obj->extra = new_pampd;
- /* enforce that all remote pages in an object reside
- * in the same node! */
- else if (pampd_remote_node(new_pampd) !=
- pampd_remote_node((void *)(obj->extra)))
- BUG();
- ret = 0;
- }
- return ret;
-}
-
-/*
- * Called by the message handler after a (still compressed) page has been
- * fetched from the remote machine in response to an "is_remote" tmem_get
- * or persistent tmem_localify. For a tmem_get, "extra" is the address of
- * the page that is to be filled to successfully resolve the tmem_get; for
- * a (persistent) tmem_localify, "extra" is NULL (as the data is placed only
- * in the local zcache). "data" points to "size" bytes of (compressed) data
- * passed in the message. In the case of a persistent remote get, if
- * pre-allocation was successful (see zcache_repatriate_preload), the page
- * is placed into both local zcache and at "extra".
- */
-int zcache_localify(int pool_id, struct tmem_oid *oidp,
- uint32_t index, char *data, size_t size,
- void *extra)
-{
- int ret = -ENOENT;
- unsigned long flags;
- struct tmem_pool *pool;
- bool ephemeral, delete = false;
- size_t clen = PAGE_SIZE;
- void *pampd, *saved_hb;
- struct tmem_obj *obj;
-
- pool = zcache_get_pool_by_id(LOCAL_CLIENT, pool_id);
- if (unlikely(pool == NULL))
- /* pool doesn't exist anymore */
- goto out;
- ephemeral = is_ephemeral(pool);
- local_irq_save(flags); /* FIXME: maybe only disable softirqs? */
- pampd = tmem_localify_get_pampd(pool, oidp, index, &obj, &saved_hb);
- if (pampd == NULL) {
- /* hmmm... must have been a flush while waiting */
-#ifdef RAMSTER_TESTING
- pr_err("UNTESTED pampd==NULL in zcache_localify\n");
-#endif
- if (ephemeral)
- ramster_remote_eph_pages_unsucc_get++;
- else
- ramster_remote_pers_pages_unsucc_get++;
- obj = NULL;
- goto finish;
- } else if (unlikely(!pampd_is_remote(pampd))) {
- /* hmmm... must have been a dup put while waiting */
-#ifdef RAMSTER_TESTING
- pr_err("UNTESTED dup while waiting in zcache_localify\n");
-#endif
- if (ephemeral)
- ramster_remote_eph_pages_unsucc_get++;
- else
- ramster_remote_pers_pages_unsucc_get++;
- obj = NULL;
- pampd = NULL;
- ret = -EEXIST;
- goto finish;
- } else if (size == 0) {
- /* no remote data, delete the local is_remote pampd */
- pampd = NULL;
- if (ephemeral)
- ramster_remote_eph_pages_unsucc_get++;
- else
- BUG();
- delete = true;
- goto finish;
- }
- if (!ephemeral && pampd_is_intransit(pampd)) {
- /* localify to zcache */
- pampd = pampd_mask_intransit_and_remote(pampd);
- zv_copy_to_pampd(pampd, data, size);
+ if (is_ephemeral(pool)) {
+ page = zbud_free_and_delist((struct zbudref *)pampd,
+ true, &zsize, &zpages);
+ if (page)
+ zcache_eph_pageframes =
+ atomic_dec_return(&zcache_eph_pageframes_atomic);
+ zcache_eph_zpages =
+ atomic_sub_return(zpages, &zcache_eph_zpages_atomic);
+ zcache_eph_zbytes =
+ atomic_long_sub_return(zsize, &zcache_eph_zbytes_atomic);
+ /* FIXME CONFIG_RAMSTER... check acct parameter? */
} else {
- pampd = NULL;
- obj = NULL;
- }
- if (extra != NULL) {
- /* decompress direct-to-memory to complete remotify */
- ret = lzo1x_decompress_safe((char *)data, size,
- (char *)extra, &clen);
- BUG_ON(ret != LZO_E_OK);
- BUG_ON(clen != PAGE_SIZE);
- }
- if (ephemeral)
- ramster_remote_eph_pages_succ_get++;
- else
- ramster_remote_pers_pages_succ_get++;
- ret = 0;
-finish:
- tmem_localify_finish(obj, index, pampd, saved_hb, delete);
- zcache_put_pool(pool);
- local_irq_restore(flags);
-out:
- return ret;
-}
-
-/*
- * Called on a remote persistent tmem_get to attempt to preallocate
- * local storage for the data contained in the remote persistent page.
- * If successfully preallocated, returns the pampd, marked as remote and
- * in_transit. Else returns NULL. Note that the appropriate tmem data
- * structure must be locked.
- */
-static void *zcache_pampd_repatriate_preload(void *pampd,
- struct tmem_pool *pool,
- struct tmem_oid *oid,
- uint32_t index,
- bool *intransit)
-{
- int clen = pampd_remote_size(pampd);
- void *ret_pampd = NULL;
- unsigned long flags;
-
- if (!pampd_is_remote(pampd))
- BUG();
- if (is_ephemeral(pool))
- BUG();
- if (pampd_is_intransit(pampd)) {
- /*
- * to avoid multiple allocations (and maybe a memory leak)
- * don't preallocate if already in the process of being
- * repatriated
- */
- *intransit = true;
- goto out;
- }
- *intransit = false;
- local_irq_save(flags);
- ret_pampd = (void *)zv_alloc(pool, oid, index, clen);
- if (ret_pampd != NULL) {
- /*
- * a pampd is marked intransit if it is remote and space has
- * been allocated for it locally (note, only happens for
- * persistent pages, in which case the remote copy is freed)
- */
- ret_pampd = pampd_mark_intransit(ret_pampd);
- dec_and_check(&ramster_remote_pers_pages);
- } else
- ramster_pers_pages_remote_nomem++;
- local_irq_restore(flags);
-out:
- return ret_pampd;
-}
-
-/*
- * Called on a remote tmem_get to invoke a message to fetch the page.
- * Might sleep so no tmem locks can be held. "extra" is passed
- * all the way through the round-trip messaging to zcache_localify.
- */
-static int zcache_pampd_repatriate(void *fake_pampd, void *real_pampd,
- struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index,
- bool free, void *extra)
-{
- struct tmem_xhandle xh;
- int ret;
-
- if (pampd_is_intransit(real_pampd))
- /* have local space pre-reserved, so free remote copy */
- free = true;
- xh = tmem_xhandle_fill(LOCAL_CLIENT, pool, oid, index);
- /* unreliable request/response for now */
- ret = ramster_remote_async_get(&xh, free,
- pampd_remote_node(fake_pampd),
- pampd_remote_size(fake_pampd),
- pampd_remote_cksum(fake_pampd),
- extra);
-#ifdef RAMSTER_TESTING
- if (ret != 0 && ret != -ENOENT)
- pr_err("TESTING zcache_pampd_repatriate returns, ret=%d\n",
- ret);
-#endif
- return ret;
+ page = zbud_free_and_delist((struct zbudref *)pampd,
+ false, &zsize, &zpages);
+ if (page)
+ zcache_pers_pageframes =
+ atomic_dec_return(&zcache_pers_pageframes_atomic);
+ zcache_pers_zpages =
+ atomic_sub_return(zpages, &zcache_pers_zpages_atomic);
+ zcache_pers_zbytes =
+ atomic_long_sub_return(zsize, &zcache_pers_zbytes_atomic);
+ }
+ if (!is_local_client(pool->client))
+ ramster_count_foreign_pages(is_ephemeral(pool), -1);
+ if (page)
+ zcache_free_page(page);
}
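
Annotation, not part of this patch: the accounting above pairs each atomic_t counter with a plain shadow variable; the atomic does the real bookkeeping and its return value is copied into the shadow so the reporting code elsewhere in this file can read it without atomics. A minimal sketch of the assumed declarations (hypothetical helper name; the exact types used earlier in zcache-main.c may differ):

	/* sketch only -- not part of this patch */
	#include <linux/atomic.h>

	static atomic_t zcache_eph_pageframes_atomic = ATOMIC_INIT(0);
	static long zcache_eph_pageframes;	/* shadow copy read for reporting */

	static inline void zcache_eph_pageframe_dec(void)
	{
		/* update the atomic, then publish the new value */
		zcache_eph_pageframes =
			atomic_dec_return(&zcache_eph_pageframes_atomic);
	}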
static struct tmem_pamops zcache_pamops = {
- .create = zcache_pampd_create,
+ .create_finish = zcache_pampd_create_finish,
.get_data = zcache_pampd_get_data,
- .free = zcache_pampd_free,
.get_data_and_free = zcache_pampd_get_data_and_free,
- .free_obj = zcache_pampd_free_obj,
- .is_remote = zcache_pampd_is_remote,
- .repatriate_preload = zcache_pampd_repatriate_preload,
- .repatriate = zcache_pampd_repatriate,
- .new_obj = zcache_pampd_new_obj,
- .replace_in_obj = zcache_pampd_replace_in_obj,
+ .free = zcache_pampd_free,
};
/*
* zcache compression/decompression and related per-cpu stuff
*/
-#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
-#define LZO_DSTMEM_PAGE_ORDER 1
-static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
+#define ZCACHE_DSTMEM_ORDER 1
-static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
+static void zcache_compress(struct page *from, void **out_va, unsigned *out_len)
{
- int ret = 0;
+ int ret;
unsigned char *dmem = __get_cpu_var(zcache_dstmem);
- unsigned char *wmem = __get_cpu_var(zcache_workmem);
char *from_va;
BUG_ON(!irqs_disabled());
- if (unlikely(dmem == NULL || wmem == NULL))
- goto out; /* no buffer, so can't compress */
+ /* no buffer or no compressor so can't compress */
+ BUG_ON(dmem == NULL);
+ *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
from_va = kmap_atomic(from);
mb();
- ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
- BUG_ON(ret != LZO_E_OK);
+ ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
+ out_len);
+ BUG_ON(ret);
*out_va = dmem;
kunmap_atomic(from_va);
- ret = 1;
-out:
- return ret;
}
+static int zcache_comp_cpu_up(int cpu)
+{
+ struct crypto_comp *tfm;
+
+ tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
+ if (IS_ERR(tfm))
+ return NOTIFY_BAD;
+ *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
+ return NOTIFY_OK;
+}
+
+static void zcache_comp_cpu_down(int cpu)
+{
+ struct crypto_comp *tfm;
+
+ tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
+ crypto_free_comp(tfm);
+ *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
+}
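
Sketch, not part of this patch: zcache_compress() above delegates to zcache_comp_op(), which is defined earlier in zcache-main.c and not shown in this hunk. Assuming it simply dispatches to the kernel crypto compression API on the per-CPU transform allocated by zcache_comp_cpu_up(), it would look roughly like this:

	/* illustrative sketch of the assumed zcache_comp_op() */
	enum comp_op { ZCACHE_COMPOP_COMPRESS, ZCACHE_COMPOP_DECOMPRESS };

	static int zcache_comp_op(enum comp_op op, const u8 *src, unsigned int slen,
				  u8 *dst, unsigned int *dlen)
	{
		struct crypto_comp *tfm;
		int ret = -1;

		BUG_ON(!irqs_disabled());
		tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, smp_processor_id());
		BUG_ON(!tfm);
		switch (op) {
		case ZCACHE_COMPOP_COMPRESS:
			ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
			break;
		case ZCACHE_COMPOP_DECOMPRESS:
			ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
			break;
		}
		return ret;
	}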
static int zcache_cpu_notifier(struct notifier_block *nb,
unsigned long action, void *pcpu)
{
- int cpu = (long)pcpu;
+ int ret, i, cpu = (long)pcpu;
struct zcache_preload *kp;
switch (action) {
case CPU_UP_PREPARE:
+ ret = zcache_comp_cpu_up(cpu);
+ if (ret != NOTIFY_OK) {
+ pr_err("%s: can't allocate compressor xform\n",
+ namestr);
+ return ret;
+ }
per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_REPEAT,
- LZO_DSTMEM_PAGE_ORDER),
- per_cpu(zcache_workmem, cpu) =
- kzalloc(LZO1X_MEM_COMPRESS,
- GFP_KERNEL | __GFP_REPEAT);
- per_cpu(zcache_remoteputmem, cpu) =
- kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT);
+ GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
+ if (ramster_enabled)
+ ramster_cpu_up(cpu);
break;
case CPU_DEAD:
case CPU_UP_CANCELED:
- kfree(per_cpu(zcache_remoteputmem, cpu));
- per_cpu(zcache_remoteputmem, cpu) = NULL;
+ zcache_comp_cpu_down(cpu);
free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
- LZO_DSTMEM_PAGE_ORDER);
+ ZCACHE_DSTMEM_ORDER);
per_cpu(zcache_dstmem, cpu) = NULL;
- kfree(per_cpu(zcache_workmem, cpu));
- per_cpu(zcache_workmem, cpu) = NULL;
kp = &per_cpu(zcache_preloads, cpu);
- while (kp->nr) {
- kmem_cache_free(zcache_objnode_cache,
- kp->objnodes[kp->nr - 1]);
- kp->objnodes[kp->nr - 1] = NULL;
- kp->nr--;
+ for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
+ if (kp->objnodes[i])
+ kmem_cache_free(zcache_objnode_cache,
+ kp->objnodes[i]);
}
if (kp->obj) {
kmem_cache_free(zcache_obj_cache, kp->obj);
kp->obj = NULL;
}
- if (kp->flnode) {
- kmem_cache_free(ramster_flnode_cache, kp->flnode);
- kp->flnode = NULL;
- }
- if (kp->page) {
- free_page((unsigned long)kp->page);
- kp->page = NULL;
- }
+ if (ramster_enabled)
+ ramster_cpu_down(cpu);
break;
default:
break;
@@ -2271,314 +942,104 @@ static struct notifier_block zcache_cpu_notifier_block = {
.notifier_call = zcache_cpu_notifier
};
-#ifdef CONFIG_SYSFS
-#define ZCACHE_SYSFS_RO(_name) \
- static ssize_t zcache_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%lu\n", zcache_##_name); \
- } \
- static struct kobj_attribute zcache_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = zcache_##_name##_show, \
- }
-
-#define ZCACHE_SYSFS_RO_ATOMIC(_name) \
- static ssize_t zcache_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
- } \
- static struct kobj_attribute zcache_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = zcache_##_name##_show, \
- }
-
-#define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
- static ssize_t zcache_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return _func(buf); \
- } \
- static struct kobj_attribute zcache_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = zcache_##_name##_show, \
- }
-
-ZCACHE_SYSFS_RO(curr_obj_count_max);
-ZCACHE_SYSFS_RO(curr_objnode_count_max);
-ZCACHE_SYSFS_RO(flush_total);
-ZCACHE_SYSFS_RO(flush_found);
-ZCACHE_SYSFS_RO(flobj_total);
-ZCACHE_SYSFS_RO(flobj_found);
-ZCACHE_SYSFS_RO(failed_eph_puts);
-ZCACHE_SYSFS_RO(nonactive_puts);
-ZCACHE_SYSFS_RO(failed_pers_puts);
-ZCACHE_SYSFS_RO(zbud_curr_zbytes);
-ZCACHE_SYSFS_RO(zbud_cumul_zpages);
-ZCACHE_SYSFS_RO(zbud_cumul_zbytes);
-ZCACHE_SYSFS_RO(zbud_buddied_count);
-ZCACHE_SYSFS_RO(evicted_raw_pages);
-ZCACHE_SYSFS_RO(evicted_unbuddied_pages);
-ZCACHE_SYSFS_RO(evicted_buddied_pages);
-ZCACHE_SYSFS_RO(failed_get_free_pages);
-ZCACHE_SYSFS_RO(failed_alloc);
-ZCACHE_SYSFS_RO(put_to_flush);
-ZCACHE_SYSFS_RO(compress_poor);
-ZCACHE_SYSFS_RO(mean_compress_poor);
-ZCACHE_SYSFS_RO(policy_percent_exceeded);
-ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
-ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
-ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
-ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count);
-ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
- zbud_show_unbuddied_list_counts);
-ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
- zbud_show_cumul_chunk_counts);
-ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
- zv_curr_dist_counts_show);
-ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
- zv_cumul_dist_counts_show);
-
-static struct attribute *zcache_attrs[] = {
- &zcache_curr_obj_count_attr.attr,
- &zcache_curr_obj_count_max_attr.attr,
- &zcache_curr_objnode_count_attr.attr,
- &zcache_curr_objnode_count_max_attr.attr,
- &zcache_flush_total_attr.attr,
- &zcache_flobj_total_attr.attr,
- &zcache_flush_found_attr.attr,
- &zcache_flobj_found_attr.attr,
- &zcache_failed_eph_puts_attr.attr,
- &zcache_nonactive_puts_attr.attr,
- &zcache_failed_pers_puts_attr.attr,
- &zcache_policy_percent_exceeded_attr.attr,
- &zcache_compress_poor_attr.attr,
- &zcache_mean_compress_poor_attr.attr,
- &zcache_zbud_curr_raw_pages_attr.attr,
- &zcache_zbud_curr_zpages_attr.attr,
- &zcache_zbud_curr_zbytes_attr.attr,
- &zcache_zbud_cumul_zpages_attr.attr,
- &zcache_zbud_cumul_zbytes_attr.attr,
- &zcache_zbud_buddied_count_attr.attr,
- &zcache_evicted_raw_pages_attr.attr,
- &zcache_evicted_unbuddied_pages_attr.attr,
- &zcache_evicted_buddied_pages_attr.attr,
- &zcache_failed_get_free_pages_attr.attr,
- &zcache_failed_alloc_attr.attr,
- &zcache_put_to_flush_attr.attr,
- &zcache_zbud_unbuddied_list_counts_attr.attr,
- &zcache_zbud_cumul_chunk_counts_attr.attr,
- &zcache_zv_curr_dist_counts_attr.attr,
- &zcache_zv_cumul_dist_counts_attr.attr,
- &zcache_zv_max_zsize_attr.attr,
- &zcache_zv_max_mean_zsize_attr.attr,
- &zcache_zv_page_count_policy_percent_attr.attr,
- NULL,
-};
-
-static struct attribute_group zcache_attr_group = {
- .attrs = zcache_attrs,
- .name = "zcache",
-};
-
-#define RAMSTER_SYSFS_RO(_name) \
- static ssize_t ramster_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%lu\n", ramster_##_name); \
- } \
- static struct kobj_attribute ramster_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = ramster_##_name##_show, \
- }
-
-#define RAMSTER_SYSFS_RW(_name) \
- static ssize_t ramster_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%lu\n", ramster_##_name); \
- } \
- static ssize_t ramster_##_name##_store(struct kobject *kobj, \
- struct kobj_attribute *attr, const char *buf, size_t count) \
- { \
- int err; \
- unsigned long enable; \
- err = kstrtoul(buf, 10, &enable); \
- if (err) \
- return -EINVAL; \
- ramster_##_name = enable; \
- return count; \
- } \
- static struct kobj_attribute ramster_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0644 }, \
- .show = ramster_##_name##_show, \
- .store = ramster_##_name##_store, \
- }
-
-#define RAMSTER_SYSFS_RO_ATOMIC(_name) \
- static ssize_t ramster_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%d\n", atomic_read(&ramster_##_name)); \
- } \
- static struct kobj_attribute ramster_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = ramster_##_name##_show, \
- }
-
-RAMSTER_SYSFS_RO(interface_revision);
-RAMSTER_SYSFS_RO_ATOMIC(remote_pers_pages);
-RAMSTER_SYSFS_RW(pers_remotify_enable);
-RAMSTER_SYSFS_RW(eph_remotify_enable);
-RAMSTER_SYSFS_RO(eph_pages_remoted);
-RAMSTER_SYSFS_RO(eph_pages_remote_failed);
-RAMSTER_SYSFS_RO(pers_pages_remoted);
-RAMSTER_SYSFS_RO(pers_pages_remote_failed);
-RAMSTER_SYSFS_RO(pers_pages_remote_nomem);
-RAMSTER_SYSFS_RO(remote_pages_flushed);
-RAMSTER_SYSFS_RO(remote_page_flushes_failed);
-RAMSTER_SYSFS_RO(remote_objects_flushed);
-RAMSTER_SYSFS_RO(remote_object_flushes_failed);
-RAMSTER_SYSFS_RO(remote_eph_pages_succ_get);
-RAMSTER_SYSFS_RO(remote_eph_pages_unsucc_get);
-RAMSTER_SYSFS_RO(remote_pers_pages_succ_get);
-RAMSTER_SYSFS_RO(remote_pers_pages_unsucc_get);
-RAMSTER_SYSFS_RO_ATOMIC(foreign_eph_pampd_count);
-RAMSTER_SYSFS_RO(foreign_eph_pampd_count_max);
-RAMSTER_SYSFS_RO_ATOMIC(foreign_pers_pampd_count);
-RAMSTER_SYSFS_RO(foreign_pers_pampd_count_max);
-RAMSTER_SYSFS_RO_ATOMIC(curr_flnode_count);
-RAMSTER_SYSFS_RO(curr_flnode_count_max);
-
-#define MANUAL_NODES 8
-static bool ramster_nodes_manual_up[MANUAL_NODES];
-static ssize_t ramster_manual_node_up_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- int i;
- char *p = buf;
- for (i = 0; i < MANUAL_NODES; i++)
- if (ramster_nodes_manual_up[i])
- p += sprintf(p, "%d ", i);
- p += sprintf(p, "\n");
- return p - buf;
-}
+/*
+ * The following code interacts with the zbud eviction and zbud
+ * zombify code to access LRU pages
+ */
-static ssize_t ramster_manual_node_up_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
+static struct page *zcache_evict_eph_pageframe(void)
{
- int err;
- unsigned long node_num;
+ struct page *page;
+ unsigned int zsize = 0, zpages = 0;
- err = kstrtoul(buf, 10, &node_num);
- if (err) {
- pr_err("ramster: bad strtoul?\n");
- return -EINVAL;
- }
- if (node_num >= MANUAL_NODES) {
- pr_err("ramster: bad node_num=%lu?\n", node_num);
- return -EINVAL;
- }
- if (ramster_nodes_manual_up[node_num]) {
- pr_err("ramster: node %d already up, ignoring\n",
- (int)node_num);
- } else {
- ramster_nodes_manual_up[node_num] = true;
- r2net_hb_node_up_manual((int)node_num);
- }
- return count;
+ page = zbud_evict_pageframe_lru(&zsize, &zpages);
+ if (page == NULL)
+ goto out;
+ zcache_eph_zbytes = atomic_long_sub_return(zsize,
+ &zcache_eph_zbytes_atomic);
+ zcache_eph_zpages = atomic_sub_return(zpages,
+ &zcache_eph_zpages_atomic);
+ zcache_evicted_eph_zpages++;
+ zcache_eph_pageframes =
+ atomic_dec_return(&zcache_eph_pageframes_atomic);
+ zcache_evicted_eph_pageframes++;
+out:
+ return page;
}
-static struct kobj_attribute ramster_manual_node_up_attr = {
- .attr = { .name = "manual_node_up", .mode = 0644 },
- .show = ramster_manual_node_up_show,
- .store = ramster_manual_node_up_store,
-};
+#ifdef FRONTSWAP_HAS_UNUSE
+static void unswiz(struct tmem_oid oid, u32 index,
+ unsigned *type, pgoff_t *offset);
-static ssize_t ramster_remote_target_nodenum_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+/*
+ * Choose an LRU persistent pageframe and attempt to "unuse" it by
+ * calling frontswap_unuse on both zpages.
+ *
+ * This is work-in-progress.
+ */
+
+static int zcache_frontswap_unuse(void)
{
- if (ramster_remote_target_nodenum == -1UL)
- return sprintf(buf, "unset\n");
- else
- return sprintf(buf, "%d\n", ramster_remote_target_nodenum);
-}
-
-static ssize_t ramster_remote_target_nodenum_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
-{
- int err;
- unsigned long node_num;
-
- err = kstrtoul(buf, 10, &node_num);
- if (err) {
- pr_err("ramster: bad strtoul?\n");
- return -EINVAL;
- } else if (node_num == -1UL) {
- pr_err("ramster: disabling all remotification, "
- "data may still reside on remote nodes however\n");
- return -EINVAL;
- } else if (node_num >= MANUAL_NODES) {
- pr_err("ramster: bad node_num=%lu?\n", node_num);
- return -EINVAL;
- } else if (!ramster_nodes_manual_up[node_num]) {
- pr_err("ramster: node %d not up, ignoring setting "
- "of remotification target\n", (int)node_num);
- } else if (r2net_remote_target_node_set((int)node_num) >= 0) {
- pr_info("ramster: node %d set as remotification target\n",
- (int)node_num);
- ramster_remote_target_nodenum = (int)node_num;
- } else {
- pr_err("ramster: bad num to node node_num=%d?\n",
- (int)node_num);
- return -EINVAL;
+ struct tmem_handle th[2];
+ int ret = -ENOMEM;
+ int nzbuds, unuse_ret;
+ unsigned type;
+ struct page *newpage1 = NULL, *newpage2 = NULL;
+ struct page *evictpage1 = NULL, *evictpage2 = NULL;
+ pgoff_t offset;
+
+ newpage1 = alloc_page(ZCACHE_GFP_MASK);
+ newpage2 = alloc_page(ZCACHE_GFP_MASK);
+ if (newpage1 == NULL)
+ evictpage1 = zcache_evict_eph_pageframe();
+ if (newpage2 == NULL)
+ evictpage2 = zcache_evict_eph_pageframe();
+ if (evictpage1 == NULL || evictpage2 == NULL)
+ goto free_and_out;
+ /* ok, we have two pages pre-allocated */
+ nzbuds = zbud_make_zombie_lru(&th[0], NULL, NULL, false);
+ if (nzbuds == 0) {
+ ret = -ENOENT;
+ goto free_and_out;
+ }
+ unswiz(th[0].oid, th[0].index, &type, &offset);
+ unuse_ret = frontswap_unuse(type, offset,
+ newpage1 != NULL ? newpage1 : evictpage1,
+ ZCACHE_GFP_MASK);
+ if (unuse_ret != 0)
+ goto free_and_out;
+ else if (evictpage1 != NULL)
+ zcache_unacct_page();
+ newpage1 = NULL;
+ evictpage1 = NULL;
+ if (nzbuds == 2) {
+ unswiz(th[1].oid, th[1].index, &type, &offset);
+ unuse_ret = frontswap_unuse(type, offset,
+ newpage2 != NULL ? newpage2 : evictpage2,
+ ZCACHE_GFP_MASK);
+ if (unuse_ret != 0) {
+ goto free_and_out;
+ } else if (evictpage2 != NULL) {
+ zcache_unacct_page();
+ }
}
- return count;
+ ret = 0;
+ goto out;
+
+free_and_out:
+ if (newpage1 != NULL)
+ __free_page(newpage1);
+ if (newpage2 != NULL)
+ __free_page(newpage2);
+ if (evictpage1 != NULL)
+ zcache_free_page(evictpage1);
+ if (evictpage2 != NULL)
+ zcache_free_page(evictpage2);
+out:
+ return ret;
}
+#endif
-static struct kobj_attribute ramster_remote_target_nodenum_attr = {
- .attr = { .name = "remote_target_nodenum", .mode = 0644 },
- .show = ramster_remote_target_nodenum_show,
- .store = ramster_remote_target_nodenum_store,
-};
-
-
-static struct attribute *ramster_attrs[] = {
- &ramster_interface_revision_attr.attr,
- &ramster_pers_remotify_enable_attr.attr,
- &ramster_eph_remotify_enable_attr.attr,
- &ramster_remote_pers_pages_attr.attr,
- &ramster_eph_pages_remoted_attr.attr,
- &ramster_eph_pages_remote_failed_attr.attr,
- &ramster_pers_pages_remoted_attr.attr,
- &ramster_pers_pages_remote_failed_attr.attr,
- &ramster_pers_pages_remote_nomem_attr.attr,
- &ramster_remote_pages_flushed_attr.attr,
- &ramster_remote_page_flushes_failed_attr.attr,
- &ramster_remote_objects_flushed_attr.attr,
- &ramster_remote_object_flushes_failed_attr.attr,
- &ramster_remote_eph_pages_succ_get_attr.attr,
- &ramster_remote_eph_pages_unsucc_get_attr.attr,
- &ramster_remote_pers_pages_succ_get_attr.attr,
- &ramster_remote_pers_pages_unsucc_get_attr.attr,
- &ramster_foreign_eph_pampd_count_attr.attr,
- &ramster_foreign_eph_pampd_count_max_attr.attr,
- &ramster_foreign_pers_pampd_count_attr.attr,
- &ramster_foreign_pers_pampd_count_max_attr.attr,
- &ramster_curr_flnode_count_attr.attr,
- &ramster_curr_flnode_count_max_attr.attr,
- &ramster_manual_node_up_attr.attr,
- &ramster_remote_target_nodenum_attr.attr,
- NULL,
-};
-
-static struct attribute_group ramster_attr_group = {
- .attrs = ramster_attrs,
- .name = "ramster",
-};
-
-#endif /* CONFIG_SYSFS */
/*
* When zcache is disabled ("frozen"), pools can be created and destroyed,
* but all puts (and thus all other operations that require memory allocation)
@@ -2589,23 +1050,74 @@ static struct attribute_group ramster_attr_group = {
static bool zcache_freeze;
/*
- * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
+ * This zcache shrinker interface reduces the number of ephemeral pageframes
+ * used by zcache to approximately the same as the total number of LRU_FILE
+ * pageframes in use.
*/
static int shrink_zcache_memory(struct shrinker *shrink,
struct shrink_control *sc)
{
+ static bool in_progress;
int ret = -1;
int nr = sc->nr_to_scan;
- gfp_t gfp_mask = sc->gfp_mask;
+ int nr_evict = 0;
+ int nr_unuse = 0;
+ struct page *page;
+#ifdef FRONTSWAP_HAS_UNUSE
+ int unuse_ret;
+#endif
- if (nr >= 0) {
- if (!(gfp_mask & __GFP_FS))
- /* does this case really need to be skipped? */
- goto out;
- zbud_evict_pages(nr);
+ if (nr <= 0)
+ goto skip_evict;
+
+ /* don't allow more than one eviction thread at a time */
+ if (in_progress)
+ goto skip_evict;
+
+ in_progress = true;
+
+ /* we are going to ignore nr, and target a different value */
+ zcache_last_active_file_pageframes =
+ global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
+ zcache_last_inactive_file_pageframes =
+ global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
+ nr_evict = zcache_eph_pageframes - zcache_last_active_file_pageframes +
+ zcache_last_inactive_file_pageframes;
+ while (nr_evict-- > 0) {
+ page = zcache_evict_eph_pageframe();
+ if (page == NULL)
+ break;
+ zcache_free_page(page);
+ }
+
+ zcache_last_active_anon_pageframes =
+ global_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON);
+ zcache_last_inactive_anon_pageframes =
+ global_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON);
+ nr_unuse = zcache_pers_pageframes - zcache_last_active_anon_pageframes +
+ zcache_last_inactive_anon_pageframes;
+#ifdef FRONTSWAP_HAS_UNUSE
+ /* rate limit for testing */
+ if (nr_unuse > 32)
+ nr_unuse = 32;
+ while (nr_unuse-- > 0) {
+ unuse_ret = zcache_frontswap_unuse();
+ if (unuse_ret == -ENOMEM)
+ break;
}
- ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
-out:
+#endif
+ in_progress = false;
+
+skip_evict:
+ /* resample: has changed, but maybe not all the way yet */
+ zcache_last_active_file_pageframes =
+ global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
+ zcache_last_inactive_file_pageframes =
+ global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
+ ret = zcache_eph_pageframes - zcache_last_active_file_pageframes +
+ zcache_last_inactive_file_pageframes;
+ if (ret < 0)
+ ret = 0;
return ret;
}
@@ -2618,30 +1130,46 @@ static struct shrinker zcache_shrinker = {
* zcache shims between cleancache/frontswap ops and tmem
*/
-int zcache_put(int cli_id, int pool_id, struct tmem_oid *oidp,
- uint32_t index, char *data, size_t size,
- bool raw, int ephemeral)
+/* FIXME rename these core routines to zcache_tmemput etc? */
+int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
+ uint32_t index, void *page,
+ unsigned int size, bool raw, int ephemeral)
{
struct tmem_pool *pool;
+ struct tmem_handle th;
int ret = -1;
+ void *pampd = NULL;
BUG_ON(!irqs_disabled());
pool = zcache_get_pool_by_id(cli_id, pool_id);
if (unlikely(pool == NULL))
goto out;
- if (!zcache_freeze && zcache_do_preload(pool) == 0) {
- /* preload does preempt_disable on success */
- ret = tmem_put(pool, oidp, index, data, size, raw, ephemeral);
- if (ret < 0) {
- if (is_ephemeral(pool))
+ if (!zcache_freeze) {
+ ret = 0;
+ th.client_id = cli_id;
+ th.pool_id = pool_id;
+ th.oid = *oidp;
+ th.index = index;
+ pampd = zcache_pampd_create((char *)page, size, raw,
+ ephemeral, &th);
+ if (pampd == NULL) {
+ ret = -ENOMEM;
+ if (ephemeral)
zcache_failed_eph_puts++;
else
zcache_failed_pers_puts++;
+ } else {
+ if (ramster_enabled)
+ ramster_do_preload_flnode(pool);
+ ret = tmem_put(pool, oidp, index, 0, pampd);
+ if (ret < 0)
+ BUG();
}
zcache_put_pool(pool);
- preempt_enable_no_resched();
} else {
zcache_put_to_flush++;
+ if (ramster_enabled)
+ ramster_do_preload_flnode(pool);
if (atomic_read(&pool->obj_count) > 0)
/* the put fails whether the flush succeeds or not */
(void)tmem_flush_page(pool, oidp, index);
@@ -2651,9 +1179,9 @@ out:
return ret;
}
-int zcache_get(int cli_id, int pool_id, struct tmem_oid *oidp,
- uint32_t index, char *data, size_t *sizep,
- bool raw, int get_and_free)
+int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
+ uint32_t index, void *page,
+ size_t *sizep, bool raw, int get_and_free)
{
struct tmem_pool *pool;
int ret = -1;
@@ -2667,22 +1195,21 @@ int zcache_get(int cli_id, int pool_id, struct tmem_oid *oidp,
eph = is_ephemeral(pool);
if (likely(pool != NULL)) {
if (atomic_read(&pool->obj_count) > 0)
- ret = tmem_get(pool, oidp, index, data, sizep,
- raw, get_and_free);
+ ret = tmem_get(pool, oidp, index, (char *)(page),
+ sizep, raw, get_and_free);
zcache_put_pool(pool);
}
- WARN_ONCE((!eph && (ret != 0)), "zcache_get fails on persistent pool, "
- "bad things are very likely to happen soon\n");
+ WARN_ONCE((!is_ephemeral(pool) && (ret != 0)),
+ "zcache_get fails on persistent pool, "
+ "bad things are very likely to happen soon\n");
#ifdef RAMSTER_TESTING
if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
#endif
- if (ret == -EAGAIN)
- BUG(); /* FIXME... don't need this anymore??? let's ensure */
return ret;
}
-int zcache_flush(int cli_id, int pool_id,
+int zcache_flush_page(int cli_id, int pool_id,
struct tmem_oid *oidp, uint32_t index)
{
struct tmem_pool *pool;
@@ -2692,7 +1219,8 @@ int zcache_flush(int cli_id, int pool_id,
local_irq_save(flags);
zcache_flush_total++;
pool = zcache_get_pool_by_id(cli_id, pool_id);
- ramster_do_preload_flnode_only(pool);
+ if (ramster_enabled)
+ ramster_do_preload_flnode(pool);
if (likely(pool != NULL)) {
if (atomic_read(&pool->obj_count) > 0)
ret = tmem_flush_page(pool, oidp, index);
@@ -2704,7 +1232,8 @@ int zcache_flush(int cli_id, int pool_id,
return ret;
}
-int zcache_flush_object(int cli_id, int pool_id, struct tmem_oid *oidp)
+int zcache_flush_object(int cli_id, int pool_id,
+ struct tmem_oid *oidp)
{
struct tmem_pool *pool;
int ret = -1;
@@ -2713,7 +1242,8 @@ int zcache_flush_object(int cli_id, int pool_id, struct tmem_oid *oidp)
local_irq_save(flags);
zcache_flobj_total++;
pool = zcache_get_pool_by_id(cli_id, pool_id);
- ramster_do_preload_flnode_only(pool);
+ if (ramster_enabled)
+ ramster_do_preload_flnode(pool);
if (likely(pool != NULL)) {
if (atomic_read(&pool->obj_count) > 0)
ret = tmem_flush_object(pool, oidp);
@@ -2725,7 +1255,7 @@ int zcache_flush_object(int cli_id, int pool_id, struct tmem_oid *oidp)
return ret;
}
-int zcache_client_destroy_pool(int cli_id, int pool_id)
+static int zcache_client_destroy_pool(int cli_id, int pool_id)
{
struct tmem_pool *pool = NULL;
struct zcache_client *cli = NULL;
@@ -2752,16 +1282,15 @@ int zcache_client_destroy_pool(int cli_id, int pool_id)
ret = tmem_destroy_pool(pool);
local_bh_enable();
kfree(pool);
- pr_info("ramster: destroyed pool id=%d cli_id=%d\n", pool_id, cli_id);
+ if (cli_id == LOCAL_CLIENT)
+ pr_info("%s: destroyed local pool id=%d\n", namestr, pool_id);
+ else
+ pr_info("%s: destroyed pool id=%d, client=%d\n",
+ namestr, pool_id, cli_id);
out:
return ret;
}
-static int zcache_destroy_pool(int pool_id)
-{
- return zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
-}
-
int zcache_new_pool(uint16_t cli_id, uint32_t flags)
{
int poolid = -1;
@@ -2777,7 +1306,7 @@ int zcache_new_pool(uint16_t cli_id, uint32_t flags)
atomic_inc(&cli->refcount);
pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
if (pool == NULL) {
- pr_info("ramster: pool creation failed: out of memory\n");
+ pr_info("%s: pool creation failed: out of memory\n", namestr);
goto out;
}
@@ -2785,7 +1314,7 @@ int zcache_new_pool(uint16_t cli_id, uint32_t flags)
if (cli->tmem_pools[poolid] == NULL)
break;
if (poolid >= MAX_POOLS_PER_CLIENT) {
- pr_info("ramster: pool creation failed: max exceeded\n");
+ pr_info("%s: pool creation failed: max exceeded\n", namestr);
kfree(pool);
poolid = -1;
goto out;
@@ -2796,11 +1325,11 @@ int zcache_new_pool(uint16_t cli_id, uint32_t flags)
tmem_new_pool(pool, flags);
cli->tmem_pools[poolid] = pool;
if (cli_id == LOCAL_CLIENT)
- pr_info("ramster: created %s tmem pool, id=%d, local client\n",
+ pr_info("%s: created %s local tmem pool, id=%d\n", namestr,
flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
poolid);
else
- pr_info("ramster: created %s tmem pool, id=%d, client=%d\n",
+ pr_info("%s: created %s tmem pool, id=%d, client=%d\n", namestr,
flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
poolid, cli_id);
out:
@@ -2814,30 +1343,37 @@ static int zcache_local_new_pool(uint32_t flags)
return zcache_new_pool(LOCAL_CLIENT, flags);
}
-int zcache_autocreate_pool(int cli_id, int pool_id, bool ephemeral)
+int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph)
{
struct tmem_pool *pool;
- struct zcache_client *cli = NULL;
- uint32_t flags = ephemeral ? 0 : TMEM_POOL_PERSIST;
+ struct zcache_client *cli;
+ uint32_t flags = eph ? 0 : TMEM_POOL_PERSIST;
int ret = -1;
+ BUG_ON(!ramster_enabled);
if (cli_id == LOCAL_CLIENT)
goto out;
if (pool_id >= MAX_POOLS_PER_CLIENT)
goto out;
- else if ((unsigned int)cli_id < MAX_CLIENTS)
- cli = &zcache_clients[cli_id];
- if ((ephemeral && !use_cleancache) || (!ephemeral && !use_frontswap))
- BUG(); /* FIXME, handle more gracefully later */
+ if (cli_id >= MAX_CLIENTS)
+ goto out;
+
+ cli = &zcache_clients[cli_id];
+ if ((eph && disable_cleancache) || (!eph && disable_frontswap)) {
+ pr_err("zcache_autocreate_pool: pool type disabled\n");
+ goto out;
+ }
if (!cli->allocated) {
- if (zcache_new_client(cli_id))
- BUG(); /* FIXME, handle more gracefully later */
+ if (zcache_new_client(cli_id)) {
+ pr_err("zcache_autocreate_pool: can't create client\n");
+ goto out;
+ }
cli = &zcache_clients[cli_id];
}
atomic_inc(&cli->refcount);
pool = cli->tmem_pools[pool_id];
if (pool != NULL) {
- if (pool->persistent && ephemeral) {
+ if (pool->persistent && eph) {
pr_err("zcache_autocreate_pool: type mismatch\n");
goto out;
}
@@ -2846,7 +1382,7 @@ int zcache_autocreate_pool(int cli_id, int pool_id, bool ephemeral)
}
pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
if (pool == NULL) {
- pr_info("ramster: pool creation failed: out of memory\n");
+ pr_info("%s: pool creation failed: out of memory\n", namestr);
goto out;
}
atomic_set(&pool->refcount, 0);
@@ -2854,14 +1390,11 @@ int zcache_autocreate_pool(int cli_id, int pool_id, bool ephemeral)
pool->pool_id = pool_id;
tmem_new_pool(pool, flags);
cli->tmem_pools[pool_id] = pool;
- pr_info("ramster: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
- flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
+ pr_info("%s: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
+ namestr, flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
pool_id, cli_id);
ret = 0;
out:
- if (cli == NULL)
- BUG(); /* FIXME, handle more gracefully later */
- /* pr_err("zcache_autocreate_pool: failed\n"); */
if (cli != NULL)
atomic_dec(&cli->refcount);
return ret;
@@ -2875,7 +1408,6 @@ out:
* to translate in-kernel semantics to zcache semantics.
*/
-#ifdef CONFIG_CLEANCACHE
static void zcache_cleancache_put_page(int pool_id,
struct cleancache_filekey key,
pgoff_t index, struct page *page)
@@ -2883,18 +1415,13 @@ static void zcache_cleancache_put_page(int pool_id,
u32 ind = (u32) index;
struct tmem_oid oid = *(struct tmem_oid *)&key;
-#ifdef __PG_WAS_ACTIVE
- if (!PageWasActive(page)) {
- zcache_nonactive_puts++;
+ if (!disable_cleancache_ignore_nonactive && !PageWasActive(page)) {
+ zcache_eph_nonactive_puts_ignored++;
return;
}
-#endif
- if (likely(ind == index)) {
- char *kva = page_address(page);
-
- (void)zcache_put(LOCAL_CLIENT, pool_id, &oid, index,
- kva, PAGE_SIZE, 0, 1);
- }
+ if (likely(ind == index))
+ (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index,
+ page, PAGE_SIZE, false, 1);
}
static int zcache_cleancache_get_page(int pool_id,
@@ -2903,21 +1430,16 @@ static int zcache_cleancache_get_page(int pool_id,
{
u32 ind = (u32) index;
struct tmem_oid oid = *(struct tmem_oid *)&key;
+ size_t size;
int ret = -1;
- preempt_disable();
if (likely(ind == index)) {
- char *kva = page_address(page);
- size_t size = PAGE_SIZE;
-
- ret = zcache_get(LOCAL_CLIENT, pool_id, &oid, index,
- kva, &size, 0, 0);
-#ifdef __PG_WAS_ACTIVE
+ ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index,
+ page, &size, false, 0);
+ BUG_ON(ret >= 0 && size != PAGE_SIZE);
if (ret == 0)
SetPageWasActive(page);
-#endif
}
- preempt_enable();
return ret;
}
@@ -2929,7 +1451,7 @@ static void zcache_cleancache_flush_page(int pool_id,
struct tmem_oid oid = *(struct tmem_oid *)&key;
if (likely(ind == index))
- (void)zcache_flush(LOCAL_CLIENT, pool_id, &oid, ind);
+ (void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
}
static void zcache_cleancache_flush_inode(int pool_id,
@@ -2943,7 +1465,7 @@ static void zcache_cleancache_flush_inode(int pool_id,
static void zcache_cleancache_flush_fs(int pool_id)
{
if (pool_id >= 0)
- (void)zcache_destroy_pool(pool_id);
+ (void)zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
}
static int zcache_cleancache_init_fs(size_t pagesize)
@@ -2980,15 +1502,15 @@ struct cleancache_ops zcache_cleancache_register_ops(void)
return old_ops;
}
-#endif
-#ifdef CONFIG_FRONTSWAP
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
-static int zcache_frontswap_poolid = -1;
+static int zcache_frontswap_poolid __read_mostly = -1;
/*
* Swizzling increases objects per swaptype, increasing tmem concurrency
* for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
+ * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
+ * frontswap_get_page(), but has side-effects. Hence using 8.
*/
#define SWIZ_BITS 8
#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
@@ -3002,47 +1524,64 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
return oid;
}
-static int zcache_frontswap_store(unsigned type, pgoff_t offset,
- struct page *page)
+#ifdef FRONTSWAP_HAS_UNUSE
+static void unswiz(struct tmem_oid oid, u32 index,
+ unsigned *type, pgoff_t *offset)
+{
+ *type = (unsigned)(oid.oid[0] >> SWIZ_BITS);
+ *offset = (pgoff_t)((index << SWIZ_BITS) |
+ (oid.oid[0] & SWIZ_MASK));
+}
+#endif
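
Worked example, not part of this patch: with SWIZ_BITS = 8, swap type 1 and page offset 0x12345 are swizzled into oid.oid[0] = (1 << 8) | (0x12345 & 0xff) = 0x145 and tmem index 0x12345 >> 8 = 0x123; unswiz() above recovers type 0x145 >> 8 = 1 and offset (0x123 << 8) | 0x45 = 0x12345. The forward mapping oswiz()/iswiz() is defined earlier in the file and not shown in this hunk; assuming it mirrors unswiz(), it is roughly:

	/* illustrative sketch of the assumed oswiz()/iswiz() */
	static inline struct tmem_oid oswiz_sketch(unsigned type, u32 ind)
	{
		struct tmem_oid oid = { .oid = { 0 } };

		/* low SWIZ_BITS of the offset select the object, the rest the index */
		oid.oid[0] = ((u64)type << SWIZ_BITS) | (ind & SWIZ_MASK);
		return oid;
	}
	#define iswiz_sketch(_ind)	((_ind) >> SWIZ_BITS)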
+
+static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+ struct page *page)
{
u64 ind64 = (u64)offset;
u32 ind = (u32)offset;
struct tmem_oid oid = oswiz(type, ind);
int ret = -1;
unsigned long flags;
- char *kva;
BUG_ON(!PageLocked(page));
+ if (!disable_frontswap_ignore_nonactive && !PageWasActive(page)) {
+ zcache_pers_nonactive_puts_ignored++;
+ ret = -ERANGE;
+ goto out;
+ }
if (likely(ind64 == ind)) {
local_irq_save(flags);
- kva = page_address(page);
- ret = zcache_put(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind), kva, PAGE_SIZE, 0, 0);
+ ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
+ &oid, iswiz(ind),
+ page, PAGE_SIZE, false, 0);
local_irq_restore(flags);
}
+out:
return ret;
}
/* returns 0 if the page was successfully gotten from frontswap, -1 if
* was not present (should never happen!) */
-static int zcache_frontswap_load(unsigned type, pgoff_t offset,
- struct page *page)
+static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+ struct page *page)
{
u64 ind64 = (u64)offset;
u32 ind = (u32)offset;
struct tmem_oid oid = oswiz(type, ind);
- int ret = -1;
+ size_t size;
+ int ret = -1, get_and_free;
- preempt_disable(); /* FIXME, remove this? */
+ if (frontswap_has_exclusive_gets)
+ get_and_free = 1;
+ else
+ get_and_free = -1;
BUG_ON(!PageLocked(page));
if (likely(ind64 == ind)) {
- char *kva = page_address(page);
- size_t size = PAGE_SIZE;
-
- ret = zcache_get(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind), kva, &size, 0, -1);
+ ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
+ &oid, iswiz(ind),
+ page, &size, false, get_and_free);
+ BUG_ON(ret >= 0 && size != PAGE_SIZE);
}
- preempt_enable(); /* FIXME, remove this? */
return ret;
}
@@ -3054,7 +1593,7 @@ static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
struct tmem_oid oid = oswiz(type, ind);
if (likely(ind64 == ind))
- (void)zcache_flush(LOCAL_CLIENT, zcache_frontswap_poolid,
+ (void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
&oid, iswiz(ind));
}
@@ -3076,12 +1615,12 @@ static void zcache_frontswap_init(unsigned ignored)
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
if (zcache_frontswap_poolid < 0)
zcache_frontswap_poolid =
- zcache_local_new_pool(TMEM_POOL_PERSIST);
+ zcache_local_new_pool(TMEM_POOL_PERSIST);
}
static struct frontswap_ops zcache_frontswap_ops = {
- .store = zcache_frontswap_store,
- .load = zcache_frontswap_load,
+ .store = zcache_frontswap_put_page,
+ .load = zcache_frontswap_get_page,
.invalidate_page = zcache_frontswap_flush_page,
.invalidate_area = zcache_frontswap_flush_area,
.init = zcache_frontswap_init
@@ -3094,179 +1633,135 @@ struct frontswap_ops zcache_frontswap_register_ops(void)
return old_ops;
}
-#endif
/*
- * frontswap selfshrinking
- */
-
-#ifdef CONFIG_FRONTSWAP
-/* In HZ, controls frequency of worker invocation. */
-static unsigned int selfshrink_interval __read_mostly = 5;
-
-static void selfshrink_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(selfshrink_worker, selfshrink_process);
-
-/* Enable/disable with sysfs. */
-static bool frontswap_selfshrinking __read_mostly;
-
-/* Enable/disable with kernel boot option. */
-static bool use_frontswap_selfshrink __initdata = true;
-
-/*
- * The default values for the following parameters were deemed reasonable
- * by experimentation, may be workload-dependent, and can all be
- * adjusted via sysfs.
- */
-
-/* Control rate for frontswap shrinking. Higher hysteresis is slower. */
-static unsigned int frontswap_hysteresis __read_mostly = 20;
-
-/*
- * Number of selfshrink worker invocations to wait before observing that
- * frontswap selfshrinking should commence. Note that selfshrinking does
- * not use a separate worker thread.
+ * zcache initialization
+ * NOTE FOR NOW zcache or ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER
+ * OR NOTHING HAPPENS!
*/
-static unsigned int frontswap_inertia __read_mostly = 3;
-/* Countdown to next invocation of frontswap_shrink() */
-static unsigned long frontswap_inertia_counter;
-
-/*
- * Invoked by the selfshrink worker thread, uses current number of pages
- * in frontswap (frontswap_curr_pages()), previous status, and control
- * values (hysteresis and inertia) to determine if frontswap should be
- * shrunk and what the new frontswap size should be. Note that
- * frontswap_shrink is essentially a partial swapoff that immediately
- * transfers pages from the "swap device" (frontswap) back into kernel
- * RAM; despite the name, frontswap "shrinking" is very different from
- * the "shrinker" interface used by the kernel MM subsystem to reclaim
- * memory.
- */
-static void frontswap_selfshrink(void)
+static int __init enable_zcache(char *s)
{
- static unsigned long cur_frontswap_pages;
- static unsigned long last_frontswap_pages;
- static unsigned long tgt_frontswap_pages;
-
- last_frontswap_pages = cur_frontswap_pages;
- cur_frontswap_pages = frontswap_curr_pages();
- if (!cur_frontswap_pages ||
- (cur_frontswap_pages > last_frontswap_pages)) {
- frontswap_inertia_counter = frontswap_inertia;
- return;
- }
- if (frontswap_inertia_counter && --frontswap_inertia_counter)
- return;
- if (cur_frontswap_pages <= frontswap_hysteresis)
- tgt_frontswap_pages = 0;
- else
- tgt_frontswap_pages = cur_frontswap_pages -
- (cur_frontswap_pages / frontswap_hysteresis);
- frontswap_shrink(tgt_frontswap_pages);
+ zcache_enabled = 1;
+ return 1;
}
+__setup("zcache", enable_zcache);
-static int __init ramster_nofrontswap_selfshrink_setup(char *s)
+static int __init enable_ramster(char *s)
{
- use_frontswap_selfshrink = false;
+ zcache_enabled = 1;
+#ifdef CONFIG_RAMSTER
+ ramster_enabled = 1;
+#endif
return 1;
}
+__setup("ramster", enable_ramster);
-__setup("noselfshrink", ramster_nofrontswap_selfshrink_setup);
+/* allow independent dynamic disabling of cleancache and frontswap */
-static void selfshrink_process(struct work_struct *work)
+static int __init no_cleancache(char *s)
{
- if (frontswap_selfshrinking && frontswap_enabled) {
- frontswap_selfshrink();
- schedule_delayed_work(&selfshrink_worker,
- selfshrink_interval * HZ);
- }
+ disable_cleancache = 1;
+ return 1;
}
-static int ramster_enabled;
+__setup("nocleancache", no_cleancache);
-static int __init ramster_selfshrink_init(void)
+static int __init no_frontswap(char *s)
{
- frontswap_selfshrinking = ramster_enabled && use_frontswap_selfshrink;
- if (frontswap_selfshrinking)
- pr_info("ramster: Initializing frontswap "
- "selfshrinking driver.\n");
- else
- return -ENODEV;
-
- schedule_delayed_work(&selfshrink_worker, selfshrink_interval * HZ);
-
- return 0;
+ disable_frontswap = 1;
+ return 1;
}
-subsys_initcall(ramster_selfshrink_init);
-#endif
+__setup("nofrontswap", no_frontswap);
-/*
- * zcache initialization
- * NOTE FOR NOW ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR
- * NOTHING HAPPENS!
- */
+static int __init no_frontswap_exclusive_gets(char *s)
+{
+ frontswap_has_exclusive_gets = false;
+ return 1;
+}
-static int ramster_enabled;
+__setup("nofrontswapexclusivegets", no_frontswap_exclusive_gets);
-static int __init enable_ramster(char *s)
+static int __init no_frontswap_ignore_nonactive(char *s)
{
- ramster_enabled = 1;
+ disable_frontswap_ignore_nonactive = 1;
return 1;
}
-__setup("ramster", enable_ramster);
-/* allow independent dynamic disabling of cleancache and frontswap */
+__setup("nofrontswapignorenonactive", no_frontswap_ignore_nonactive);
-static int use_cleancache = 1;
-
-static int __init no_cleancache(char *s)
+static int __init no_cleancache_ignore_nonactive(char *s)
{
- pr_info("INIT no_cleancache called\n");
- use_cleancache = 0;
+ disable_cleancache_ignore_nonactive = 1;
return 1;
}
-/*
- * FIXME: need to guarantee this gets checked before zcache_init is called
- * What is the correct way to achieve this?
- */
-early_param("nocleancache", no_cleancache);
-
-static int use_frontswap = 1;
+__setup("nocleancacheignorenonactive", no_cleancache_ignore_nonactive);
-static int __init no_frontswap(char *s)
+static int __init enable_zcache_compressor(char *s)
{
- pr_info("INIT no_frontswap called\n");
- use_frontswap = 0;
+ strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
+ zcache_enabled = 1;
return 1;
}
+__setup("zcache=", enable_zcache_compressor);
-__setup("nofrontswap", no_frontswap);
-static int __init zcache_init(void)
+static int __init zcache_comp_init(void)
{
int ret = 0;
-#ifdef CONFIG_SYSFS
- ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
- ret = sysfs_create_group(mm_kobj, &ramster_attr_group);
- if (ret) {
- pr_err("ramster: can't create sysfs\n");
+ /* check crypto algorithm */
+ if (*zcache_comp_name != '\0') {
+ ret = crypto_has_comp(zcache_comp_name, 0, 0);
+ if (!ret)
+ pr_info("zcache: %s not supported\n",
+ zcache_comp_name);
+ }
+ if (!ret)
+ strcpy(zcache_comp_name, "lzo");
+ ret = crypto_has_comp(zcache_comp_name, 0, 0);
+ if (!ret) {
+ ret = 1;
goto out;
}
-#endif /* CONFIG_SYSFS */
-#if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP)
+ pr_info("zcache: using %s compressor\n", zcache_comp_name);
+
+ /* alloc percpu transforms */
+ ret = 0;
+ zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
+ if (!zcache_comp_pcpu_tfms)
+ ret = 1;
+out:
+ return ret;
+}
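
Usage note, not part of this patch: nothing in this driver runs unless it is enabled on the kernel command line, as the comment above zcache_init() warns. Based only on the __setup() handlers and zcache_comp_init() shown above, some illustrative boot-parameter combinations:

	zcache                      enable single-node zcache2; compressor defaults to lzo
	zcache=deflate              enable zcache2 and request the "deflate" crypto compressor
	                            (zcache_comp_init() falls back to lzo if it is unavailable)
	ramster                     enable zcache2 plus the cross-node ramster layer (CONFIG_RAMSTER=y)
	ramster nofrontswap         ramster with the frontswap (swap) side disabled
	zcache nocleancacheignorenonactive
	                            also cache pagecache pages that were never activated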
+
+static int __init zcache_init(void)
+{
+ int ret = 0;
+
if (ramster_enabled) {
+ namestr = "ramster";
+ ramster_register_pamops(&zcache_pamops);
+ }
+#ifdef CONFIG_DEBUG_FS
+ zcache_debugfs_init();
+#endif
+ if (zcache_enabled) {
unsigned int cpu;
- (void)r2net_register_handlers();
tmem_register_hostops(&zcache_hostops);
tmem_register_pamops(&zcache_pamops);
ret = register_cpu_notifier(&zcache_cpu_notifier_block);
if (ret) {
- pr_err("ramster: can't register cpu notifier\n");
+ pr_err("%s: can't register cpu notifier\n", namestr);
+ goto out;
+ }
+ ret = zcache_comp_init();
+ if (ret) {
+ pr_err("%s: compressor initialization failed\n",
+ namestr);
goto out;
}
for_each_online_cpu(cpu) {
@@ -3279,42 +1774,47 @@ static int __init zcache_init(void)
sizeof(struct tmem_objnode), 0, 0, NULL);
zcache_obj_cache = kmem_cache_create("zcache_obj",
sizeof(struct tmem_obj), 0, 0, NULL);
- ramster_flnode_cache = kmem_cache_create("ramster_flnode",
- sizeof(struct flushlist_node), 0, 0, NULL);
-#endif
-#ifdef CONFIG_CLEANCACHE
- pr_info("INIT ramster_enabled=%d use_cleancache=%d\n",
- ramster_enabled, use_cleancache);
- if (ramster_enabled && use_cleancache) {
+ ret = zcache_new_client(LOCAL_CLIENT);
+ if (ret) {
+ pr_err("%s: can't create client\n", namestr);
+ goto out;
+ }
+ zbud_init();
+ if (zcache_enabled && !disable_cleancache) {
struct cleancache_ops old_ops;
- zbud_init();
register_shrinker(&zcache_shrinker);
old_ops = zcache_cleancache_register_ops();
- pr_info("ramster: cleancache enabled using kernel "
- "transcendent memory and compression buddies\n");
+ pr_info("%s: cleancache enabled using kernel transcendent "
+ "memory and compression buddies\n", namestr);
+#ifdef ZCACHE_DEBUG
+ pr_info("%s: cleancache: ignorenonactive = %d\n",
+ namestr, !disable_cleancache_ignore_nonactive);
+#endif
if (old_ops.init_fs != NULL)
- pr_warning("ramster: cleancache_ops overridden");
+ pr_warn("%s: cleancache_ops overridden\n", namestr);
}
-#endif
-#ifdef CONFIG_FRONTSWAP
- pr_info("INIT ramster_enabled=%d use_frontswap=%d\n",
- ramster_enabled, use_frontswap);
- if (ramster_enabled && use_frontswap) {
+ if (zcache_enabled && !disable_frontswap) {
struct frontswap_ops old_ops;
- zcache_new_client(LOCAL_CLIENT);
old_ops = zcache_frontswap_register_ops();
- pr_info("ramster: frontswap enabled using kernel "
- "transcendent memory and xvmalloc\n");
+ if (frontswap_has_exclusive_gets)
+ frontswap_tmem_exclusive_gets(true);
+ pr_info("%s: frontswap enabled using kernel transcendent "
+ "memory and compression buddies\n", namestr);
+#ifdef ZCACHE_DEBUG
+ pr_info("%s: frontswap: excl gets = %d active only = %d\n",
+ namestr, frontswap_has_exclusive_gets,
+ !disable_frontswap_ignore_nonactive);
+#endif
if (old_ops.init != NULL)
- pr_warning("ramster: frontswap_ops overridden");
+ pr_warn("%s: frontswap_ops overridden\n", namestr);
}
- if (ramster_enabled && (use_frontswap || use_cleancache))
- ramster_remotify_init();
-#endif
+ if (ramster_enabled)
+ ramster_init(!disable_cleancache, !disable_frontswap,
+ frontswap_has_exclusive_gets);
out:
return ret;
}
-module_init(zcache_init)
+late_initcall(zcache_init);
diff --git a/drivers/staging/ramster/zcache.h b/drivers/staging/ramster/zcache.h
index 250b121c22e5..81722b33b087 100644
--- a/drivers/staging/ramster/zcache.h
+++ b/drivers/staging/ramster/zcache.h
@@ -1,22 +1,53 @@
+
/*
* zcache.h
*
- * External zcache functions
- *
- * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
+ * Copyright (c) 2012, Dan Magenheimer, Oracle Corp.
*/
#ifndef _ZCACHE_H_
#define _ZCACHE_H_
-extern int zcache_put(int, int, struct tmem_oid *, uint32_t,
- char *, size_t, bool, int);
-extern int zcache_autocreate_pool(int, int, bool);
-extern int zcache_get(int, int, struct tmem_oid *, uint32_t,
- char *, size_t *, bool, int);
-extern int zcache_flush(int, int, struct tmem_oid *, uint32_t);
+struct zcache_preload {
+ struct tmem_obj *obj;
+ struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
+};
+
+struct tmem_pool;
+
+#define MAX_POOLS_PER_CLIENT 16
+
+#define MAX_CLIENTS 16
+#define LOCAL_CLIENT ((uint16_t)-1)
+
+struct zcache_client {
+ struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
+ bool allocated;
+ atomic_t refcount;
+};
+
+extern struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
+ uint16_t poolid);
+extern void zcache_put_pool(struct tmem_pool *pool);
+
+extern int zcache_put_page(int, int, struct tmem_oid *,
+ uint32_t, void *,
+ unsigned int, bool, int);
+extern int zcache_get_page(int, int, struct tmem_oid *, uint32_t,
+ void *, size_t *, bool, int);
+extern int zcache_flush_page(int, int, struct tmem_oid *, uint32_t);
extern int zcache_flush_object(int, int, struct tmem_oid *);
-extern int zcache_localify(int, struct tmem_oid *, uint32_t,
- char *, size_t, void *);
+extern void zcache_decompress_to_page(char *, unsigned int, struct page *);
+
+#ifdef CONFIG_RAMSTER
+extern void *zcache_pampd_create(char *, unsigned int, bool, int,
+ struct tmem_handle *);
+int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph);
+#endif
+
+#define MAX_POOLS_PER_CLIENT 16
+
+#define MAX_CLIENTS 16
+#define LOCAL_CLIENT ((uint16_t)-1)
-#endif /* _ZCACHE_H */
+#endif /* _ZCACHE_H_ */