Diffstat (limited to 'drivers/staging/zram/zram_drv.c')
-rw-r--r--  drivers/staging/zram/zram_drv.c  95
1 file changed, 77 insertions(+), 18 deletions(-)
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index e77fb6ea40c9..91d94b564433 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -169,7 +169,7 @@ static inline int is_partial_io(struct bio_vec *bvec)
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
u64 start, end, bound;
-
+
/* unaligned request */
if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
return 0;
@@ -418,14 +418,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
- /*
- * System overwrites unused sectors. Free memory associated
- * with this sector now.
- */
- if (meta->table[index].handle ||
- zram_test_flag(meta, index, ZRAM_ZERO))
- zram_free_page(zram, index);
-
user_mem = kmap_atomic(page);
if (is_partial_io(bvec)) {
@@ -439,12 +431,23 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem);
+ /* Free memory associated with this sector now. */
+ zram_free_page(zram, index);
+
zram->stats.pages_zero++;
zram_set_flag(meta, index, ZRAM_ZERO);
ret = 0;
goto out;
}
+ /*
+ * zram_slot_free_notify could have missed a free, so
+ * double-check here.
+ */
+ if (unlikely(meta->table[index].handle ||
+ zram_test_flag(meta, index, ZRAM_ZERO)))
+ zram_free_page(zram, index);
+
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
meta->compress_workmem);
@@ -486,6 +489,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
zs_unmap_object(meta->mem_pool, handle);
+ /*
+ * Free memory associated with this sector
+ * before overwriting unused sectors.
+ */
+ zram_free_page(zram, index);
+
meta->table[index].handle = handle;
meta->table[index].size = clen;
@@ -504,6 +513,20 @@ out:
return ret;
}
+static void handle_pending_slot_free(struct zram *zram)
+{
+ struct zram_slot_free *free_rq;
+
+ spin_lock(&zram->slot_free_lock);
+ while (zram->slot_free_rq) {
+ free_rq = zram->slot_free_rq;
+ zram->slot_free_rq = free_rq->next;
+ zram_free_page(zram, free_rq->index);
+ kfree(free_rq);
+ }
+ spin_unlock(&zram->slot_free_lock);
+}
+
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, struct bio *bio, int rw)
{
@@ -511,10 +534,12 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
if (rw == READ) {
down_read(&zram->lock);
+ handle_pending_slot_free(zram);
ret = zram_bvec_read(zram, bvec, index, offset, bio);
up_read(&zram->lock);
} else {
down_write(&zram->lock);
+ handle_pending_slot_free(zram);
ret = zram_bvec_write(zram, bvec, index, offset);
up_write(&zram->lock);
}
@@ -522,11 +547,13 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
return ret;
}
-static void zram_reset_device(struct zram *zram)
+static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
size_t index;
struct zram_meta *meta;
+ flush_work(&zram->free_work);
+
down_write(&zram->init_lock);
if (!zram->init_done) {
up_write(&zram->init_lock);
@@ -551,7 +578,8 @@ static void zram_reset_device(struct zram *zram)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
- set_capacity(zram->disk, 0);
+ if (reset_capacity)
+ set_capacity(zram->disk, 0);
up_write(&zram->init_lock);
}
@@ -635,7 +663,7 @@ static ssize_t reset_store(struct device *dev,
if (bdev)
fsync_bdev(bdev);
- zram_reset_device(zram);
+ zram_reset_device(zram, true);
return len;
}
@@ -720,16 +748,40 @@ error:
bio_io_error(bio);
}
+static void zram_slot_free(struct work_struct *work)
+{
+ struct zram *zram;
+
+ zram = container_of(work, struct zram, free_work);
+ down_write(&zram->lock);
+ handle_pending_slot_free(zram);
+ up_write(&zram->lock);
+}
+
+static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
+{
+ spin_lock(&zram->slot_free_lock);
+ free_rq->next = zram->slot_free_rq;
+ zram->slot_free_rq = free_rq;
+ spin_unlock(&zram->slot_free_lock);
+}
+
static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
+ struct zram_slot_free *free_rq;
zram = bdev->bd_disk->private_data;
- down_write(&zram->lock);
- zram_free_page(zram, index);
- up_write(&zram->lock);
atomic64_inc(&zram->stats.notify_free);
+
+ free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
+ if (!free_rq)
+ return;
+
+ free_rq->index = index;
+ add_slot_free(zram, free_rq);
+ schedule_work(&zram->free_work);
}
static const struct block_device_operations zram_devops = {
@@ -776,6 +828,10 @@ static int create_device(struct zram *zram, int device_id)
init_rwsem(&zram->lock);
init_rwsem(&zram->init_lock);
+ INIT_WORK(&zram->free_work, zram_slot_free);
+ spin_lock_init(&zram->slot_free_lock);
+ zram->slot_free_rq = NULL;
+
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
@@ -902,10 +958,12 @@ static void __exit zram_exit(void)
for (i = 0; i < num_devices; i++) {
zram = &zram_devices[i];
- get_disk(zram->disk);
destroy_device(zram);
- zram_reset_device(zram);
- put_disk(zram->disk);
+ /*
+ * Shouldn't access zram->disk after destroy_device
+ * because destroy_device already released zram->disk.
+ */
+ zram_reset_device(zram, false);
}
unregister_blkdev(zram_major, "zram");
@@ -923,3 +981,4 @@ MODULE_PARM_DESC(num_devices, "Number of zram devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
+MODULE_ALIAS("devname:zram");
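
For readers skimming the diff, the change visible above is that zram_slot_free_notify() no longer calls zram_free_page() directly under zram->lock; instead it allocates a zram_slot_free node with GFP_ATOMIC, pushes it onto a spinlock-protected list, and schedules free_work, and handle_pending_slot_free() later drains that list while zram->lock is held (both from the work item and at the start of each read/write). Below is a minimal userspace sketch of that deferred-free pattern, modelled with pthreads. It is not kernel code, and the names fake_zram, fake_slot_free_notify, fake_free_page and drain_pending are hypothetical stand-ins; build with gcc -pthread.

/*
 * Illustrative userspace analogue of the deferred slot-free pattern:
 * a caller that must not block on the heavy lock only queues indices
 * on a spinlock-protected singly-linked list, and a worker later
 * drains the list while holding that heavy lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct free_req {
	unsigned long index;
	struct free_req *next;
};

struct fake_zram {
	pthread_spinlock_t slot_free_lock;	/* protects slot_free_rq */
	struct free_req *slot_free_rq;		/* pending frees (LIFO) */
	pthread_rwlock_t lock;			/* stands in for zram->lock */
};

/* Stand-in for zram_free_page(): just report which slot is freed. */
static void fake_free_page(struct fake_zram *z, unsigned long index)
{
	(void)z;
	printf("freeing slot %lu\n", index);
}

/* Stand-in for zram_slot_free_notify(): does not take z->lock, only queues. */
static void fake_slot_free_notify(struct fake_zram *z, unsigned long index)
{
	struct free_req *rq = malloc(sizeof(*rq));

	if (!rq)
		return;			/* best effort, as in the patch */
	rq->index = index;
	pthread_spin_lock(&z->slot_free_lock);
	rq->next = z->slot_free_rq;
	z->slot_free_rq = rq;
	pthread_spin_unlock(&z->slot_free_lock);
}

/* Stand-in for handle_pending_slot_free(): caller holds z->lock. */
static void drain_pending(struct fake_zram *z)
{
	pthread_spin_lock(&z->slot_free_lock);
	while (z->slot_free_rq) {
		struct free_req *rq = z->slot_free_rq;

		z->slot_free_rq = rq->next;
		fake_free_page(z, rq->index);
		free(rq);
	}
	pthread_spin_unlock(&z->slot_free_lock);
}

int main(void)
{
	struct fake_zram z = { .slot_free_rq = NULL };

	pthread_spin_init(&z.slot_free_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_rwlock_init(&z.lock, NULL);

	/* Notify side: queue two frees without touching the heavy lock. */
	fake_slot_free_notify(&z, 3);
	fake_slot_free_notify(&z, 7);

	/* Work-item side: take the write lock, then drain the backlog. */
	pthread_rwlock_wrlock(&z.lock);
	drain_pending(&z);
	pthread_rwlock_unlock(&z.lock);

	pthread_rwlock_destroy(&z.lock);
	pthread_spin_destroy(&z.slot_free_lock);
	return 0;
}

Note on the design as it appears in the diff: because zram_bvec_rw() also calls handle_pending_slot_free() before every read and write under zram->lock, a slot whose free is still queued cannot be read or overwritten as if it were still valid, and zram_bvec_write() additionally re-checks the table entry in case a notify-side free was missed.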