From cafe563591446cf80bfbc2fe3bc72a2e36cf1060 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sat, 23 Mar 2013 16:11:31 -0700
Subject: bcache: A block layer cache

Does writethrough and writeback caching, handles unclean shutdown, and
has a bunch of other nifty features motivated by real world usage.

See the wiki at http://bcache.evilpiepirate.org for more.

Signed-off-by: Kent Overstreet
---
 drivers/md/bcache/request.h | 62 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)
 create mode 100644 drivers/md/bcache/request.h

(limited to 'drivers/md/bcache/request.h')

diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
new file mode 100644
index 000000000000..254d9ab5707c
--- /dev/null
+++ b/drivers/md/bcache/request.h
@@ -0,0 +1,62 @@
+#ifndef _BCACHE_REQUEST_H_
+#define _BCACHE_REQUEST_H_
+
+#include <linux/cgroup.h>
+
+struct search {
+	/* Stack frame for bio_complete */
+	struct closure		cl;
+
+	struct bcache_device	*d;
+	struct task_struct	*task;
+
+	struct bbio		bio;
+	struct bio		*orig_bio;
+	struct bio		*cache_miss;
+	unsigned		cache_bio_sectors;
+
+	unsigned		recoverable:1;
+	unsigned		unaligned_bvec:1;
+
+	unsigned		write:1;
+	unsigned		writeback:1;
+
+	/* IO error returned to s->bio */
+	short			error;
+	unsigned long		start_time;
+
+	/* Anything past op->keys won't get zeroed in do_bio_hook */
+	struct btree_op		op;
+};
+
+void bch_cache_read_endio(struct bio *, int);
+int bch_get_congested(struct cache_set *);
+void bch_insert_data(struct closure *cl);
+void bch_btree_insert_async(struct closure *);
+void bch_cache_read_endio(struct bio *, int);
+
+void bch_open_buckets_free(struct cache_set *);
+int bch_open_buckets_alloc(struct cache_set *);
+
+void bch_cached_dev_request_init(struct cached_dev *dc);
+void bch_flash_dev_request_init(struct bcache_device *d);
+
+extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
+
+struct bch_cgroup {
+#ifdef CONFIG_CGROUP_BCACHE
+	struct cgroup_subsys_state	css;
+#endif
+	/*
+	 * We subtract one from the index into bch_cache_modes[], so that
+	 * default == -1; this makes it so the rest match up with d->cache_mode,
+	 * and we use d->cache_mode if cgrp->cache_mode < 0
+	 */
+	short				cache_mode;
+	bool				verify;
+	struct cache_stat_collector	stats;
+};
+
+struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);
+
+#endif /* _BCACHE_REQUEST_H_ */
--
cgit v1.2.3-59-g8ed1b
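
The comment in struct bch_cgroup above describes an offset-by-one convention: the per-cgroup cache_mode defaults to -1, so non-negative values line up with d->cache_mode and anything below zero falls back to the device's own mode. The standalone sketch below is only an illustration of that fallback convention, not the kernel implementation; the names demo_cgroup, demo_device and effective_cache_mode are invented for the example, and the enum values are assumed for demonstration.

    #include <stdio.h>

    /* Illustrative stand-ins for the cache modes (values assumed). */
    enum cache_mode {
    	CACHE_MODE_WRITETHROUGH,
    	CACHE_MODE_WRITEBACK,
    	CACHE_MODE_WRITEAROUND,
    	CACHE_MODE_NONE,
    };

    /* Simplified stand-ins for struct bch_cgroup and the cached device. */
    struct demo_cgroup {
    	short cache_mode;	/* -1 means "no override, use the device's mode" */
    };

    struct demo_device {
    	enum cache_mode cache_mode;
    };

    /*
     * Resolve the effective cache mode: a non-negative per-cgroup value
     * overrides the device default, mirroring the "default == -1"
     * convention described in the header comment.
     */
    static enum cache_mode effective_cache_mode(const struct demo_cgroup *cg,
    					     const struct demo_device *d)
    {
    	if (cg->cache_mode >= 0)
    		return (enum cache_mode)cg->cache_mode;
    	return d->cache_mode;
    }

    int main(void)
    {
    	struct demo_device dev = { .cache_mode = CACHE_MODE_WRITEBACK };
    	struct demo_cgroup defaults = { .cache_mode = -1 };
    	struct demo_cgroup override = { .cache_mode = CACHE_MODE_WRITETHROUGH };

    	printf("default cgroup  -> mode %d\n", effective_cache_mode(&defaults, &dev));
    	printf("override cgroup -> mode %d\n", effective_cache_mode(&override, &dev));
    	return 0;
    }

Built with any C compiler, the program prints the device's writeback mode for the default cgroup (cache_mode == -1) and the writethrough override otherwise, which is the behaviour the header comment is documenting.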