-rw-r--r--  drivers/md/dm-crypt.c    2
-rw-r--r--  drivers/md/dm-io.c       2
-rw-r--r--  drivers/md/dm.c          2
-rw-r--r--  fs/bio-integrity.c       2
-rw-r--r--  fs/bio.c               191
-rw-r--r--  include/linux/bio.h      6
6 files changed, 170 insertions, 35 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ce26c84af064..3326750ec02c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1060,7 +1060,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_page_pool;
}
- cc->bs = bioset_create(MIN_IOS, MIN_IOS);
+ cc->bs = bioset_create(MIN_IOS, 0);
if (!cc->bs) {
ti->error = "Cannot allocate crypt bioset";
goto bad_bs;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2fd6d4450637..a34338567a2a 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -56,7 +56,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
if (!client->pool)
goto bad;
- client->bios = bioset_create(16, 16);
+ client->bios = bioset_create(16, 0);
if (!client->bios)
goto bad;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 343094c3feeb..421c9f02d8ca 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1093,7 +1093,7 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->tio_pool)
goto bad_tio_pool;
- md->bs = bioset_create(16, 16);
+ md->bs = bioset_create(16, 0);
if (!md->bs)
goto bad_no_bioset;
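
Note that the meaning of bioset_create()'s second argument changes with this
patch: it was previously a separate bvec pool size, and is now the number of
bytes of front padding, with the bvec pool sized from the first argument
instead. Callers that want no padding, like the dm code above, now pass 0.
A minimal illustration of the old versus new call:

        struct bio_set *bs;

        /* old: bioset_create(bio_pool_size, bvec_pool_size) */
        /* new: bioset_create(pool_size, front_pad)          */
        bs = bioset_create(16, 0);      /* 16 bios and bvecs pooled, no front pad */
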
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 19caf7c962ac..77ebc3c263d6 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -111,7 +111,7 @@ void bio_integrity_free(struct bio *bio, struct bio_set *bs)
&& bip->bip_buf != NULL)
kfree(bip->bip_buf);
- mempool_free(bip->bip_vec, bs->bvec_pools[bip->bip_pool]);
+ bvec_free_bs(bs, bip->bip_vec, bip->bip_pool);
mempool_free(bip, bs->bio_integrity_pool);
bio->bi_integrity = NULL;
diff --git a/fs/bio.c b/fs/bio.c
index 1ab8986b0411..0146f80789e9 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -31,8 +31,6 @@
DEFINE_TRACE(block_split);
-static struct kmem_cache *bio_slab __read_mostly;
-
static mempool_t *bio_split_pool __read_mostly;
/*
@@ -40,9 +38,8 @@ static mempool_t *bio_split_pool __read_mostly;
* break badly! cannot be bigger than what you can fit into an
* unsigned short
*/
-
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
@@ -53,11 +50,119 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
*/
struct bio_set *fs_bio_set;
+/*
+ * Our slab pool management
+ */
+struct bio_slab {
+ struct kmem_cache *slab;
+ unsigned int slab_ref;
+ unsigned int slab_size;
+ char name[8];
+};
+static DEFINE_MUTEX(bio_slab_lock);
+static struct bio_slab *bio_slabs;
+static unsigned int bio_slab_nr, bio_slab_max;
+
+static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+{
+ unsigned int sz = sizeof(struct bio) + extra_size;
+ struct kmem_cache *slab = NULL;
+ struct bio_slab *bslab;
+ unsigned int i, entry = -1;
+
+ mutex_lock(&bio_slab_lock);
+
+ i = 0;
+ while (i < bio_slab_nr) {
+ struct bio_slab *bslab = &bio_slabs[i];
+
+ if (!bslab->slab && entry == -1)
+ entry = i;
+ else if (bslab->slab_size == sz) {
+ slab = bslab->slab;
+ bslab->slab_ref++;
+ break;
+ }
+ i++;
+ }
+
+ if (slab)
+ goto out_unlock;
+
+ if (bio_slab_nr == bio_slab_max && entry == -1) {
+ bio_slab_max <<= 1;
+ bio_slabs = krealloc(bio_slabs,
+ bio_slab_max * sizeof(struct bio_slab),
+ GFP_KERNEL);
+ if (!bio_slabs)
+ goto out_unlock;
+ }
+ if (entry == -1)
+ entry = bio_slab_nr++;
+
+ bslab = &bio_slabs[entry];
+
+ snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
+ slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!slab)
+ goto out_unlock;
+
+	printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
+ bslab->slab = slab;
+ bslab->slab_ref = 1;
+ bslab->slab_size = sz;
+out_unlock:
+ mutex_unlock(&bio_slab_lock);
+ return slab;
+}
+
+static void bio_put_slab(struct bio_set *bs)
+{
+ struct bio_slab *bslab = NULL;
+ unsigned int i;
+
+ mutex_lock(&bio_slab_lock);
+
+ for (i = 0; i < bio_slab_nr; i++) {
+ if (bs->bio_slab == bio_slabs[i].slab) {
+ bslab = &bio_slabs[i];
+ break;
+ }
+ }
+
+ if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
+ goto out;
+
+ WARN_ON(!bslab->slab_ref);
+
+ if (--bslab->slab_ref)
+ goto out;
+
+ kmem_cache_destroy(bslab->slab);
+ bslab->slab = NULL;
+
+out:
+ mutex_unlock(&bio_slab_lock);
+}
+
unsigned int bvec_nr_vecs(unsigned short idx)
{
return bvec_slabs[idx].nr_vecs;
}
+void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
+{
+ BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
+
+ if (idx == BIOVEC_MAX_IDX)
+ mempool_free(bv, bs->bvec_pool);
+ else {
+ struct biovec_slab *bvs = bvec_slabs + idx;
+
+ kmem_cache_free(bvs->slab, bv);
+ }
+}
+
struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
struct bio_set *bs)
{
@@ -134,24 +239,22 @@ fallback:
void bio_free(struct bio *bio, struct bio_set *bs)
{
- if (bio->bi_io_vec) {
- const int pool_idx = BIO_POOL_IDX(bio);
+ void *p;
- BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
-
- if (pool_idx == BIOVEC_MAX_IDX)
- mempool_free(bio->bi_io_vec, bs->bvec_pool);
- else {
- struct biovec_slab *bvs = bvec_slabs + pool_idx;
-
- kmem_cache_free(bvs->slab, bio->bi_io_vec);
- }
- }
+ if (bio->bi_io_vec)
+ bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
if (bio_integrity(bio))
bio_integrity_free(bio, bs);
- mempool_free(bio, bs->bio_pool);
+ /*
+ * If we have front padding, adjust the bio pointer before freeing
+ */
+ p = bio;
+ if (bs->front_pad)
+ p -= bs->front_pad;
+
+ mempool_free(p, bs->bio_pool);
}
/*
@@ -188,16 +291,20 @@ void bio_init(struct bio *bio)
* for a &struct bio to become free. If a %NULL @bs is passed in, we will
* fall back to just using @kmalloc to allocate the required memory.
*
- * allocate bio and iovecs from the memory pools specified by the
- * bio_set structure, or @kmalloc if none given.
+ * Note that the caller must set ->bi_destructor on successful return
+ * of a bio, to do the appropriate freeing of the bio once the reference
+ * count drops to zero.
**/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
- struct bio *bio;
+ struct bio *bio = NULL;
+
+ if (bs) {
+ void *p = mempool_alloc(bs->bio_pool, gfp_mask);
- if (bs)
- bio = mempool_alloc(bs->bio_pool, gfp_mask);
- else
+ if (p)
+ bio = p + bs->front_pad;
+ } else
bio = kmalloc(sizeof(*bio), gfp_mask);
if (likely(bio)) {
@@ -1398,11 +1505,25 @@ void bioset_free(struct bio_set *bs)
bioset_integrity_free(bs);
biovec_free_pools(bs);
+ bio_put_slab(bs);
kfree(bs);
}
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
+/**
+ * bioset_create - Create a bio_set
+ * @pool_size: Number of bios and bio_vecs to cache in the mempool
+ * @front_pad: Number of bytes to allocate in front of the returned bio
+ *
+ * Description:
+ * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
+ * to ask for a number of bytes to be allocated in front of the bio.
+ * Front pad allocation is useful for embedding the bio inside
+ * another structure, to avoid allocating extra data to go with the bio.
+ * Note that the bio must always be embedded at the END of that structure,
+ * or things will break badly.
+ */
+struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
struct bio_set *bs;
@@ -1410,16 +1531,22 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
if (!bs)
return NULL;
- bs->bio_slab = bio_slab;
+ bs->front_pad = front_pad;
- bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bs->bio_slab);
+ bs->bio_slab = bio_find_or_create_slab(front_pad);
+ if (!bs->bio_slab) {
+ kfree(bs);
+ return NULL;
+ }
+
+ bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
if (!bs->bio_pool)
goto bad;
- if (bioset_integrity_create(bs, bio_pool_size))
+ if (bioset_integrity_create(bs, pool_size))
goto bad;
- if (!biovec_create_pools(bs, bvec_pool_size))
+ if (!biovec_create_pools(bs, pool_size))
return bs;
bad:
@@ -1443,12 +1570,16 @@ static void __init biovec_init_slabs(void)
static int __init init_bio(void)
{
- bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ bio_slab_max = 2;
+ bio_slab_nr = 0;
+ bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
+ if (!bio_slabs)
+ panic("bio: can't allocate bios\n");
bio_integrity_init_slab();
biovec_init_slabs();
- fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
+ fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
if (!fs_bio_set)
panic("bio: can't allocate bios\n");
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 9340098d75dc..4b80d3537f97 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -334,7 +334,7 @@ struct bio_pair {
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
-extern struct bio_set *bioset_create(int, int);
+extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern struct bio *bio_alloc(gfp_t, int);
@@ -379,6 +379,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
+extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
/*
@@ -401,6 +402,8 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
struct bio_set {
struct kmem_cache *bio_slab;
+ unsigned int front_pad;
+
mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
mempool_t *bio_integrity_pool;
@@ -415,6 +418,7 @@ struct biovec_slab {
};
extern struct bio_set *fs_bio_set;
+extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
/*
* a small number of entries is fine, not going to be performance critical.
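
Finally, note the slab bookkeeping this patch introduces: biosets whose bios
have the same padded size share a single "bio-N" kmem_cache.
bio_find_or_create_slab() takes a reference when it finds a size match, and
bio_put_slab(), called from bioset_free(), destroys the cache only once the
last bioset using it goes away. A sketch of the resulting lifecycle (a and b
are illustrative):

        struct bio_set *a, *b;

        a = bioset_create(16, 0);       /* finds or creates a "bio-N" cache, takes a ref */
        b = bioset_create(32, 0);       /* same bio size: shares that cache, ref++ */

        bioset_free(a);                 /* bio_put_slab() drops one reference */
        bioset_free(b);                 /* last of these refs dropped; the cache is
                                           destroyed unless another bioset (e.g.
                                           fs_bio_set) still holds it */
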