Diffstat (limited to 'drivers/staging/lustre/lustre/include/cl_object.h')
-rw-r--r--  drivers/staging/lustre/lustre/include/cl_object.h | 491
1 file changed, 251 insertions, 240 deletions
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index bd7acc2a1219..fb971ded5a1b 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -157,7 +157,8 @@ struct cl_device {
};
/** \addtogroup cl_object cl_object
- * @{ */
+ * @{
+ */
/**
* "Data attributes" of cl_object. Data attributes can be updated
* independently for a sub-object, and top-object's attributes are calculated
@@ -288,13 +289,14 @@ struct cl_object_conf {
enum {
/** configure layout, set up a new stripe, must be called while
- * holding layout lock. */
+ * holding layout lock.
+ */
OBJECT_CONF_SET = 0,
/** invalidate the current stripe configuration due to losing
- * layout lock. */
+ * layout lock.
+ */
OBJECT_CONF_INVALIDATE = 1,
- /** wait for old layout to go away so that new layout can be
- * set up. */
+ /** wait for old layout to go away so that new layout can be set up. */
OBJECT_CONF_WAIT = 2
};
@@ -320,7 +322,7 @@ struct cl_object_operations {
* to be used instead of newly created.
*/
int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, struct page *vmpage);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
@@ -393,7 +395,8 @@ struct cl_object_operations {
*/
struct cl_object_header {
/** Standard lu_object_header. cl_object::co_lu::lo_header points
- * here. */
+ * here.
+ */
struct lu_object_header coh_lu;
/** \name locks
* \todo XXX move locks below to the separate cache-lines, they are
@@ -464,7 +467,8 @@ struct cl_object_header {
#define CL_PAGE_EOF ((pgoff_t)~0ull)
/** \addtogroup cl_page cl_page
- * @{ */
+ * @{
+ */
/** \struct cl_page
* Layered client page.
@@ -687,12 +691,14 @@ enum cl_page_state {
enum cl_page_type {
/** Host page, the page is from the host inode which the cl_page
- * belongs to. */
+ * belongs to.
+ */
CPT_CACHEABLE = 1,
/** Transient page, the transient cl_page is used to bind a cl_page
* to vmpage which is not belonging to the same object of cl_page.
- * it is used in DirectIO, lockless IO and liblustre. */
+ * it is used in DirectIO and lockless IO.
+ */
CPT_TRANSIENT,
};
@@ -728,7 +734,8 @@ struct cl_page {
/** Parent page, NULL for top-level page. Immutable after creation. */
struct cl_page *cp_parent;
/** Lower-layer page. NULL for bottommost page. Immutable after
- * creation. */
+ * creation.
+ */
struct cl_page *cp_child;
/**
* Page state. This field is const to avoid accidental update, it is
@@ -842,7 +849,7 @@ struct cl_page_operations {
* \return the underlying VM page. Optional.
*/
struct page *(*cpo_vmpage)(const struct lu_env *env,
- const struct cl_page_slice *slice);
+ const struct cl_page_slice *slice);
/**
* Called when \a io acquires this page into the exclusive
* ownership. When this method returns, it is guaranteed that the is
@@ -1126,7 +1133,8 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
/** @} cl_page */
/** \addtogroup cl_lock cl_lock
- * @{ */
+ * @{
+ */
/** \struct cl_lock
*
* Extent locking on the client.
@@ -1641,7 +1649,8 @@ struct cl_lock {
struct cl_lock_slice {
struct cl_lock *cls_lock;
/** Object slice corresponding to this lock slice. Immutable after
- * creation. */
+ * creation.
+ */
struct cl_object *cls_obj;
const struct cl_lock_operations *cls_ops;
/** Linkage into cl_lock::cll_layers. Immutable after creation. */
@@ -1885,7 +1894,8 @@ struct cl_2queue {
/** @} cl_page_list */
/** \addtogroup cl_io cl_io
- * @{ */
+ * @{
+ */
/** \struct cl_io
* I/O
*
@@ -2041,8 +2051,8 @@ struct cl_io_operations {
*
* \see cl_io_operations::cio_iter_fini()
*/
- int (*cio_iter_init) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ int (*cio_iter_init)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
/**
* Finalize io iteration.
*
@@ -2052,8 +2062,8 @@ struct cl_io_operations {
*
* \see cl_io_operations::cio_iter_init()
*/
- void (*cio_iter_fini) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ void (*cio_iter_fini)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
/**
* Collect locks for the current iteration of io.
*
@@ -2063,8 +2073,8 @@ struct cl_io_operations {
* cl_io_lock_add(). Once all locks are collected, they are
* sorted and enqueued in the proper order.
*/
- int (*cio_lock) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ int (*cio_lock)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
/**
* Finalize unlocking.
*
@@ -2089,8 +2099,8 @@ struct cl_io_operations {
* Called top-to-bottom at the end of io loop. Here layer
* might wait for an unfinished asynchronous io.
*/
- void (*cio_end) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ void (*cio_end)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
/**
* Called bottom-to-top to notify layers that read/write IO
* iteration finished, with \a nob bytes transferred.
@@ -2101,8 +2111,8 @@ struct cl_io_operations {
/**
* Called once per io, bottom-to-top to release io resources.
*/
- void (*cio_fini) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ void (*cio_fini)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
} op[CIT_OP_NR];
struct {
/**
@@ -2222,7 +2232,7 @@ struct cl_io_lock_link {
struct cl_lock *cill_lock;
/** optional destructor */
void (*cill_fini)(const struct lu_env *env,
- struct cl_io_lock_link *link);
+ struct cl_io_lock_link *link);
};
/**
@@ -2272,7 +2282,7 @@ enum cl_io_lock_dmd {
CILR_MANDATORY = 0,
/** Layers are free to decide between local and global locking. */
CILR_MAYBE,
- /** Never lock: there is no cache (e.g., liblustre). */
+ /** Never lock: there is no cache (e.g., lockless IO). */
CILR_NEVER
};
@@ -2284,7 +2294,8 @@ enum cl_fsync_mode {
/** discard all of dirty pages in a specific file range */
CL_FSYNC_DISCARD = 2,
/** start writeback and make sure they have reached storage before
- * return. OST_SYNC RPC must be issued and finished */
+ * return. OST_SYNC RPC must be issued and finished
+ */
CL_FSYNC_ALL = 3
};
@@ -2403,7 +2414,8 @@ struct cl_io {
/** @} cl_io */
/** \addtogroup cl_req cl_req
- * @{ */
+ * @{
+ */
/** \struct cl_req
* Transfer.
*
@@ -2582,7 +2594,8 @@ enum cache_stats_item {
/** how many entities are in the cache right now */
CS_total,
/** how many entities in the cache are actively used (and cannot be
- * evicted) right now */
+ * evicted) right now
+ */
CS_busy,
/** how many entities were created at all */
CS_create,
@@ -2600,7 +2613,7 @@ struct cache_stats {
};
/** These are not exported so far */
-void cache_stats_init (struct cache_stats *cs, const char *name);
+void cache_stats_init(struct cache_stats *cs, const char *name);
/**
* Client-side site. This represents particular client stack. "Global"
@@ -2613,7 +2626,7 @@ struct cl_site {
* Statistical counters. Atomics do not scale, something better like
* per-cpu counters is needed.
*
- * These are exported as /proc/fs/lustre/llite/.../site
+ * These are exported as /sys/kernel/debug/lustre/llite/.../site
*
* When interpreting keep in mind that both sub-locks (and sub-pages)
* and top-locks (and top-pages) are accounted here.
@@ -2624,8 +2637,8 @@ struct cl_site {
atomic_t cs_locks_state[CLS_NR];
};
-int cl_site_init (struct cl_site *s, struct cl_device *top);
-void cl_site_fini (struct cl_site *s);
+int cl_site_init(struct cl_site *s, struct cl_device *top);
+void cl_site_fini(struct cl_site *s);
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
/**
@@ -2653,7 +2666,7 @@ static inline int lu_device_is_cl(const struct lu_device *d)
static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
{
- LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
+ LASSERT(!d || IS_ERR(d) || lu_device_is_cl(d));
return container_of0(d, struct cl_device, cd_lu_dev);
}
@@ -2664,7 +2677,7 @@ static inline struct lu_device *cl2lu_dev(struct cl_device *d)
static inline struct cl_object *lu2cl(const struct lu_object *o)
{
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
+ LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
return container_of0(o, struct cl_object, co_lu);
}
@@ -2681,7 +2694,7 @@ static inline struct cl_object *cl_object_next(const struct cl_object *obj)
static inline struct cl_device *cl_object_device(const struct cl_object *o)
{
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
+ LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev);
}
@@ -2725,27 +2738,28 @@ void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
/** @} helpers */
/** \defgroup cl_object cl_object
- * @{ */
-struct cl_object *cl_object_top (struct cl_object *o);
+ * @{
+ */
+struct cl_object *cl_object_top(struct cl_object *o);
struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
const struct lu_fid *fid,
const struct cl_object_conf *c);
int cl_object_header_init(struct cl_object_header *h);
-void cl_object_put (const struct lu_env *env, struct cl_object *o);
-void cl_object_get (struct cl_object *o);
-void cl_object_attr_lock (struct cl_object *o);
+void cl_object_put(const struct lu_env *env, struct cl_object *o);
+void cl_object_get(struct cl_object *o);
+void cl_object_attr_lock(struct cl_object *o);
void cl_object_attr_unlock(struct cl_object *o);
-int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr);
-int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
-int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj,
- struct ost_lvb *lvb);
-int cl_conf_set (const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf);
-void cl_object_prune (const struct lu_env *env, struct cl_object *obj);
-void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
+int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
+ struct cl_attr *attr);
+int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid);
+int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
+ struct ost_lvb *lvb);
+int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_object_conf *conf);
+void cl_object_prune(const struct lu_env *env, struct cl_object *obj);
+void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.
@@ -2770,7 +2784,8 @@ static inline void *cl_object_page_slice(struct cl_object *clob,
/** @} cl_object */
/** \defgroup cl_page cl_page
- * @{ */
+ * @{
+ */
enum {
CLP_GANG_OKAY = 0,
CLP_GANG_RESCHED,
@@ -2781,34 +2796,26 @@ enum {
/* callback of cl_page_gang_lookup() */
typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
struct cl_page *, void *);
-int cl_page_gang_lookup (const struct lu_env *env,
- struct cl_object *obj,
- struct cl_io *io,
- pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata);
-struct cl_page *cl_page_lookup (struct cl_object_header *hdr,
- pgoff_t index);
-struct cl_page *cl_page_find (const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type);
-struct cl_page *cl_page_find_sub (const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
+int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io, pgoff_t start, pgoff_t end,
+ cl_page_gang_cb_t cb, void *cbdata);
+struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index);
+struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type);
+struct cl_page *cl_page_find_sub(const struct lu_env *env,
+ struct cl_object *obj,
+ pgoff_t idx, struct page *vmpage,
struct cl_page *parent);
-void cl_page_get (struct cl_page *page);
-void cl_page_put (const struct lu_env *env,
- struct cl_page *page);
-void cl_page_print (const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_page *pg);
-void cl_page_header_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_page *pg);
-struct page *cl_page_vmpage (const struct lu_env *env,
- struct cl_page *page);
-struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
-struct cl_page *cl_page_top (struct cl_page *page);
+void cl_page_get(struct cl_page *page);
+void cl_page_put(const struct lu_env *env, struct cl_page *page);
+void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer,
+ const struct cl_page *pg);
+void cl_page_header_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct cl_page *pg);
+struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page);
+struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj);
+struct cl_page *cl_page_top(struct cl_page *page);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
const struct lu_device_type *dtype);
@@ -2820,17 +2827,17 @@ const struct cl_page_slice *cl_page_at(const struct cl_page *page,
*/
/** @{ */
-int cl_page_own (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-int cl_page_own_try (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-void cl_page_assume (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-void cl_page_unassume (const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg);
-void cl_page_disown (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io);
+int cl_page_own(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+int cl_page_own_try(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+void cl_page_assume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+void cl_page_unassume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *pg);
+void cl_page_disown(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io);
/** @} ownership */
@@ -2841,19 +2848,19 @@ int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io);
* tracking transfer state.
*/
/** @{ */
-int cl_page_prep (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt);
-void cl_page_completion (const struct lu_env *env,
- struct cl_page *pg, enum cl_req_type crt, int ioret);
-int cl_page_make_ready (const struct lu_env *env, struct cl_page *pg,
- enum cl_req_type crt);
-int cl_page_cache_add (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt);
-void cl_page_clip (const struct lu_env *env, struct cl_page *pg,
- int from, int to);
-int cl_page_cancel (const struct lu_env *env, struct cl_page *page);
-int cl_page_flush (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
+int cl_page_prep(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg, enum cl_req_type crt);
+void cl_page_completion(const struct lu_env *env,
+ struct cl_page *pg, enum cl_req_type crt, int ioret);
+int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
+ enum cl_req_type crt);
+int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg, enum cl_req_type crt);
+void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
+ int from, int to);
+int cl_page_cancel(const struct lu_env *env, struct cl_page *page);
+int cl_page_flush(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg);
/** @} transfer */
@@ -2862,24 +2869,22 @@ int cl_page_flush (const struct lu_env *env, struct cl_io *io,
* Functions to discard, delete and export a cl_page.
*/
/** @{ */
-void cl_page_discard (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
-void cl_page_delete (const struct lu_env *env, struct cl_page *pg);
-int cl_page_unmap (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
-int cl_page_is_vmlocked (const struct lu_env *env,
- const struct cl_page *pg);
-void cl_page_export (const struct lu_env *env,
- struct cl_page *pg, int uptodate);
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
-loff_t cl_offset (const struct cl_object *obj, pgoff_t idx);
-pgoff_t cl_index (const struct cl_object *obj, loff_t offset);
-int cl_page_size (const struct cl_object *obj);
-int cl_pages_prune (const struct lu_env *env, struct cl_object *obj);
-
-void cl_lock_print (const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_lock *lock);
+void cl_page_discard(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg);
+void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
+int cl_page_unmap(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg);
+int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg);
+void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate);
+int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page);
+loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
+pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
+int cl_page_size(const struct cl_object *obj);
+int cl_pages_prune(const struct lu_env *env, struct cl_object *obj);
+
+void cl_lock_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct cl_lock *lock);
void cl_lock_descr_print(const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_lock_descr *descr);
@@ -2888,7 +2893,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
/** @} cl_page */
/** \defgroup cl_lock cl_lock
- * @{ */
+ * @{
+ */
struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
const struct cl_lock_descr *need,
@@ -2917,19 +2923,19 @@ static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
const struct lu_device_type *dtype);
-void cl_lock_get (struct cl_lock *lock);
-void cl_lock_get_trust (struct cl_lock *lock);
-void cl_lock_put (const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
+void cl_lock_get(struct cl_lock *lock);
+void cl_lock_get_trust(struct cl_lock *lock);
+void cl_lock_put(const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source);
void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
const char *scope, const void *source);
-void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_release (const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source);
+void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source);
+void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock);
int cl_lock_is_intransit(struct cl_lock *lock);
@@ -2966,52 +2972,53 @@ int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock,
*
* cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD
*
- * @{ */
+ * @{
+ */
-int cl_wait (const struct lu_env *env, struct cl_lock *lock);
-void cl_unuse (const struct lu_env *env, struct cl_lock *lock);
-int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 flags);
-int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock);
-int cl_wait_try (const struct lu_env *env, struct cl_lock *lock);
-int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic);
+int cl_wait(const struct lu_env *env, struct cl_lock *lock);
+void cl_unuse(const struct lu_env *env, struct cl_lock *lock);
+int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_io *io, __u32 flags);
+int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock);
+int cl_wait_try(const struct lu_env *env, struct cl_lock *lock);
+int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic);
/** @} statemachine */
-void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state);
-int cl_queue_match (const struct list_head *queue,
- const struct cl_lock_descr *need);
-
-void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_is_mutexed (struct cl_lock *lock);
-int cl_lock_nr_mutexed (const struct lu_env *env);
-int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_ext_match (const struct cl_lock_descr *has,
- const struct cl_lock_descr *need);
-int cl_lock_descr_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need);
-int cl_lock_mode_match (enum cl_lock_mode has, enum cl_lock_mode need);
-int cl_lock_modify (const struct lu_env *env, struct cl_lock *lock,
- const struct cl_lock_descr *desc);
-
-void cl_lock_closure_init (const struct lu_env *env,
- struct cl_lock_closure *closure,
- struct cl_lock *origin, int wait);
-void cl_lock_closure_fini (struct cl_lock_closure *closure);
-int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure);
-void cl_lock_disclosure (const struct lu_env *env,
- struct cl_lock_closure *closure);
-int cl_lock_enclosure (const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure);
+void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
+ enum cl_lock_state state);
+int cl_queue_match(const struct list_head *queue,
+ const struct cl_lock_descr *need);
+
+void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_is_mutexed(struct cl_lock *lock);
+int cl_lock_nr_mutexed(const struct lu_env *env);
+int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_ext_match(const struct cl_lock_descr *has,
+ const struct cl_lock_descr *need);
+int cl_lock_descr_match(const struct cl_lock_descr *has,
+ const struct cl_lock_descr *need);
+int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need);
+int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
+ const struct cl_lock_descr *desc);
+
+void cl_lock_closure_init(const struct lu_env *env,
+ struct cl_lock_closure *closure,
+ struct cl_lock *origin, int wait);
+void cl_lock_closure_fini(struct cl_lock_closure *closure);
+int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_lock_closure *closure);
+void cl_lock_disclosure(const struct lu_env *env,
+ struct cl_lock_closure *closure);
+int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_lock_closure *closure);
void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error);
+void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error);
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait);
unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
@@ -3019,39 +3026,40 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
/** @} cl_lock */
/** \defgroup cl_io cl_io
- * @{ */
-
-int cl_io_init (const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj);
-int cl_io_sub_init (const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj);
-int cl_io_rw_init (const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, loff_t pos, size_t count);
-int cl_io_loop (const struct lu_env *env, struct cl_io *io);
-
-void cl_io_fini (const struct lu_env *env, struct cl_io *io);
-int cl_io_iter_init (const struct lu_env *env, struct cl_io *io);
-void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io);
-int cl_io_lock (const struct lu_env *env, struct cl_io *io);
-void cl_io_unlock (const struct lu_env *env, struct cl_io *io);
-int cl_io_start (const struct lu_env *env, struct cl_io *io);
-void cl_io_end (const struct lu_env *env, struct cl_io *io);
-int cl_io_lock_add (const struct lu_env *env, struct cl_io *io,
- struct cl_io_lock_link *link);
-int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
- struct cl_lock_descr *descr);
-int cl_io_read_page (const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
-int cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
-int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue);
-int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue,
- long timeout);
-int cl_io_is_going (const struct lu_env *env);
+ * @{
+ */
+
+int cl_io_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj);
+int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj);
+int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, loff_t pos, size_t count);
+int cl_io_loop(const struct lu_env *env, struct cl_io *io);
+
+void cl_io_fini(const struct lu_env *env, struct cl_io *io);
+int cl_io_iter_init(const struct lu_env *env, struct cl_io *io);
+void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io);
+int cl_io_lock(const struct lu_env *env, struct cl_io *io);
+void cl_io_unlock(const struct lu_env *env, struct cl_io *io);
+int cl_io_start(const struct lu_env *env, struct cl_io *io);
+void cl_io_end(const struct lu_env *env, struct cl_io *io);
+int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
+ struct cl_io_lock_link *link);
+int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock_descr *descr);
+int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page);
+int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, unsigned from, unsigned to);
+int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, unsigned from, unsigned to);
+int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
+ enum cl_req_type iot, struct cl_2queue *queue);
+int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
+ enum cl_req_type iot, struct cl_2queue *queue,
+ long timeout);
+int cl_io_is_going(const struct lu_env *env);
/**
* True, iff \a io is an O_APPEND write(2).
@@ -3094,7 +3102,8 @@ do { \
/** @} cl_io */
/** \defgroup cl_page_list cl_page_list
- * @{ */
+ * @{
+ */
/**
* Last page in the page list.
@@ -3117,40 +3126,41 @@ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
#define cl_page_list_for_each_safe(page, temp, list) \
list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
-void cl_page_list_init (struct cl_page_list *plist);
-void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
-void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src,
- struct cl_page *page);
-void cl_page_list_splice (struct cl_page_list *list,
- struct cl_page_list *head);
-void cl_page_list_disown (const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
-
-void cl_2queue_init (struct cl_2queue *queue);
-void cl_2queue_disown (const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_discard (const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue);
+void cl_page_list_init(struct cl_page_list *plist);
+void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page);
+void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page);
+void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head);
+void cl_page_list_disown(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist);
+
+void cl_2queue_init(struct cl_2queue *queue);
+void cl_2queue_disown(const struct lu_env *env,
+ struct cl_io *io, struct cl_2queue *queue);
+void cl_2queue_discard(const struct lu_env *env,
+ struct cl_io *io, struct cl_2queue *queue);
+void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue);
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
/** @} cl_page_list */
/** \defgroup cl_req cl_req
- * @{ */
+ * @{
+ */
struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
enum cl_req_type crt, int nr_objects);
-void cl_req_page_add (const struct lu_env *env, struct cl_req *req,
- struct cl_page *page);
-void cl_req_page_done (const struct lu_env *env, struct cl_page *page);
-int cl_req_prep (const struct lu_env *env, struct cl_req *req);
-void cl_req_attr_set (const struct lu_env *env, struct cl_req *req,
- struct cl_req_attr *attr, u64 flags);
+void cl_req_page_add(const struct lu_env *env, struct cl_req *req,
+ struct cl_page *page);
+void cl_req_page_done(const struct lu_env *env, struct cl_page *page);
+int cl_req_prep(const struct lu_env *env, struct cl_req *req);
+void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
+ struct cl_req_attr *attr, u64 flags);
void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
/** \defgroup cl_sync_io cl_sync_io
- * @{ */
+ * @{
+ */
/**
* Anchor for synchronous transfer. This is allocated on a stack by thread
@@ -3214,22 +3224,23 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
* - cl_env_reexit(cl_env_reenter had to be called priorly)
*
* \see lu_env, lu_context, lu_context_key
- * @{ */
+ * @{
+ */
struct cl_env_nest {
int cen_refcheck;
void *cen_cookie;
};
-struct lu_env *cl_env_get (int *refcheck);
-struct lu_env *cl_env_alloc (int *refcheck, __u32 tags);
-struct lu_env *cl_env_nested_get (struct cl_env_nest *nest);
-void cl_env_put (struct lu_env *env, int *refcheck);
-void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env);
-void *cl_env_reenter (void);
-void cl_env_reexit (void *cookie);
-void cl_env_implant (struct lu_env *env, int *refcheck);
-void cl_env_unplant (struct lu_env *env, int *refcheck);
+struct lu_env *cl_env_get(int *refcheck);
+struct lu_env *cl_env_alloc(int *refcheck, __u32 tags);
+struct lu_env *cl_env_nested_get(struct cl_env_nest *nest);
+void cl_env_put(struct lu_env *env, int *refcheck);
+void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env);
+void *cl_env_reenter(void);
+void cl_env_reexit(void *cookie);
+void cl_env_implant(struct lu_env *env, int *refcheck);
+void cl_env_unplant(struct lu_env *env, int *refcheck);
/** @} cl_env */