Diffstat (limited to 'drivers/staging/lustre/lustre/include')
33 files changed, 1229 insertions, 1781 deletions
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h index bd7acc2a1219..fb971ded5a1b 100644 --- a/drivers/staging/lustre/lustre/include/cl_object.h +++ b/drivers/staging/lustre/lustre/include/cl_object.h @@ -157,7 +157,8 @@ struct cl_device { }; /** \addtogroup cl_object cl_object - * @{ */ + * @{ + */ /** * "Data attributes" of cl_object. Data attributes can be updated * independently for a sub-object, and top-object's attributes are calculated @@ -288,13 +289,14 @@ struct cl_object_conf { enum { /** configure layout, set up a new stripe, must be called while - * holding layout lock. */ + * holding layout lock. + */ OBJECT_CONF_SET = 0, /** invalidate the current stripe configuration due to losing - * layout lock. */ + * layout lock. + */ OBJECT_CONF_INVALIDATE = 1, - /** wait for old layout to go away so that new layout can be - * set up. */ + /** wait for old layout to go away so that new layout can be set up. */ OBJECT_CONF_WAIT = 2 }; @@ -320,7 +322,7 @@ struct cl_object_operations { * to be used instead of newly created. */ int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage); + struct cl_page *page, struct page *vmpage); /** * Initialize lock slice for this layer. Called top-to-bottom through * every object layer when a new cl_lock is instantiated. Layer @@ -393,7 +395,8 @@ struct cl_object_operations { */ struct cl_object_header { /** Standard lu_object_header. cl_object::co_lu::lo_header points - * here. */ + * here. + */ struct lu_object_header coh_lu; /** \name locks * \todo XXX move locks below to the separate cache-lines, they are @@ -464,7 +467,8 @@ struct cl_object_header { #define CL_PAGE_EOF ((pgoff_t)~0ull) /** \addtogroup cl_page cl_page - * @{ */ + * @{ + */ /** \struct cl_page * Layered client page. @@ -687,12 +691,14 @@ enum cl_page_state { enum cl_page_type { /** Host page, the page is from the host inode which the cl_page - * belongs to. */ + * belongs to. + */ CPT_CACHEABLE = 1, /** Transient page, the transient cl_page is used to bind a cl_page * to vmpage which is not belonging to the same object of cl_page. - * it is used in DirectIO, lockless IO and liblustre. */ + * it is used in DirectIO and lockless IO. + */ CPT_TRANSIENT, }; @@ -728,7 +734,8 @@ struct cl_page { /** Parent page, NULL for top-level page. Immutable after creation. */ struct cl_page *cp_parent; /** Lower-layer page. NULL for bottommost page. Immutable after - * creation. */ + * creation. + */ struct cl_page *cp_child; /** * Page state. This field is const to avoid accidental update, it is @@ -842,7 +849,7 @@ struct cl_page_operations { * \return the underlying VM page. Optional. */ struct page *(*cpo_vmpage)(const struct lu_env *env, - const struct cl_page_slice *slice); + const struct cl_page_slice *slice); /** * Called when \a io acquires this page into the exclusive * ownership. When this method returns, it is guaranteed that the is @@ -1126,7 +1133,8 @@ static inline int __page_in_use(const struct cl_page *page, int refc) /** @} cl_page */ /** \addtogroup cl_lock cl_lock - * @{ */ + * @{ + */ /** \struct cl_lock * * Extent locking on the client. @@ -1641,7 +1649,8 @@ struct cl_lock { struct cl_lock_slice { struct cl_lock *cls_lock; /** Object slice corresponding to this lock slice. Immutable after - * creation. */ + * creation. + */ struct cl_object *cls_obj; const struct cl_lock_operations *cls_ops; /** Linkage into cl_lock::cll_layers. Immutable after creation. 
*/ @@ -1885,7 +1894,8 @@ struct cl_2queue { /** @} cl_page_list */ /** \addtogroup cl_io cl_io - * @{ */ + * @{ + */ /** \struct cl_io * I/O * @@ -2041,8 +2051,8 @@ struct cl_io_operations { * * \see cl_io_operations::cio_iter_fini() */ - int (*cio_iter_init) (const struct lu_env *env, - const struct cl_io_slice *slice); + int (*cio_iter_init)(const struct lu_env *env, + const struct cl_io_slice *slice); /** * Finalize io iteration. * @@ -2052,8 +2062,8 @@ struct cl_io_operations { * * \see cl_io_operations::cio_iter_init() */ - void (*cio_iter_fini) (const struct lu_env *env, - const struct cl_io_slice *slice); + void (*cio_iter_fini)(const struct lu_env *env, + const struct cl_io_slice *slice); /** * Collect locks for the current iteration of io. * @@ -2063,8 +2073,8 @@ struct cl_io_operations { * cl_io_lock_add(). Once all locks are collected, they are * sorted and enqueued in the proper order. */ - int (*cio_lock) (const struct lu_env *env, - const struct cl_io_slice *slice); + int (*cio_lock)(const struct lu_env *env, + const struct cl_io_slice *slice); /** * Finalize unlocking. * @@ -2089,8 +2099,8 @@ struct cl_io_operations { * Called top-to-bottom at the end of io loop. Here layer * might wait for an unfinished asynchronous io. */ - void (*cio_end) (const struct lu_env *env, - const struct cl_io_slice *slice); + void (*cio_end)(const struct lu_env *env, + const struct cl_io_slice *slice); /** * Called bottom-to-top to notify layers that read/write IO * iteration finished, with \a nob bytes transferred. @@ -2101,8 +2111,8 @@ struct cl_io_operations { /** * Called once per io, bottom-to-top to release io resources. */ - void (*cio_fini) (const struct lu_env *env, - const struct cl_io_slice *slice); + void (*cio_fini)(const struct lu_env *env, + const struct cl_io_slice *slice); } op[CIT_OP_NR]; struct { /** @@ -2222,7 +2232,7 @@ struct cl_io_lock_link { struct cl_lock *cill_lock; /** optional destructor */ void (*cill_fini)(const struct lu_env *env, - struct cl_io_lock_link *link); + struct cl_io_lock_link *link); }; /** @@ -2272,7 +2282,7 @@ enum cl_io_lock_dmd { CILR_MANDATORY = 0, /** Layers are free to decide between local and global locking. */ CILR_MAYBE, - /** Never lock: there is no cache (e.g., liblustre). */ + /** Never lock: there is no cache (e.g., lockless IO). */ CILR_NEVER }; @@ -2284,7 +2294,8 @@ enum cl_fsync_mode { /** discard all of dirty pages in a specific file range */ CL_FSYNC_DISCARD = 2, /** start writeback and make sure they have reached storage before - * return. OST_SYNC RPC must be issued and finished */ + * return. OST_SYNC RPC must be issued and finished + */ CL_FSYNC_ALL = 3 }; @@ -2403,7 +2414,8 @@ struct cl_io { /** @} cl_io */ /** \addtogroup cl_req cl_req - * @{ */ + * @{ + */ /** \struct cl_req * Transfer. * @@ -2582,7 +2594,8 @@ enum cache_stats_item { /** how many entities are in the cache right now */ CS_total, /** how many entities in the cache are actively used (and cannot be - * evicted) right now */ + * evicted) right now + */ CS_busy, /** how many entities were created at all */ CS_create, @@ -2600,7 +2613,7 @@ struct cache_stats { }; /** These are not exported so far */ -void cache_stats_init (struct cache_stats *cs, const char *name); +void cache_stats_init(struct cache_stats *cs, const char *name); /** * Client-side site. This represents particular client stack. "Global" @@ -2613,7 +2626,7 @@ struct cl_site { * Statistical counters. Atomics do not scale, something better like * per-cpu counters is needed. 
* - * These are exported as /proc/fs/lustre/llite/.../site + * These are exported as /sys/kernel/debug/lustre/llite/.../site * * When interpreting keep in mind that both sub-locks (and sub-pages) * and top-locks (and top-pages) are accounted here. @@ -2624,8 +2637,8 @@ struct cl_site { atomic_t cs_locks_state[CLS_NR]; }; -int cl_site_init (struct cl_site *s, struct cl_device *top); -void cl_site_fini (struct cl_site *s); +int cl_site_init(struct cl_site *s, struct cl_device *top); +void cl_site_fini(struct cl_site *s); void cl_stack_fini(const struct lu_env *env, struct cl_device *cl); /** @@ -2653,7 +2666,7 @@ static inline int lu_device_is_cl(const struct lu_device *d) static inline struct cl_device *lu2cl_dev(const struct lu_device *d) { - LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d)); + LASSERT(!d || IS_ERR(d) || lu_device_is_cl(d)); return container_of0(d, struct cl_device, cd_lu_dev); } @@ -2664,7 +2677,7 @@ static inline struct lu_device *cl2lu_dev(struct cl_device *d) static inline struct cl_object *lu2cl(const struct lu_object *o) { - LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev)); + LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->lo_dev)); return container_of0(o, struct cl_object, co_lu); } @@ -2681,7 +2694,7 @@ static inline struct cl_object *cl_object_next(const struct cl_object *obj) static inline struct cl_device *cl_object_device(const struct cl_object *o) { - LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev)); + LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev)); return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev); } @@ -2725,27 +2738,28 @@ void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice, /** @} helpers */ /** \defgroup cl_object cl_object - * @{ */ -struct cl_object *cl_object_top (struct cl_object *o); + * @{ + */ +struct cl_object *cl_object_top(struct cl_object *o); struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd, const struct lu_fid *fid, const struct cl_object_conf *c); int cl_object_header_init(struct cl_object_header *h); -void cl_object_put (const struct lu_env *env, struct cl_object *o); -void cl_object_get (struct cl_object *o); -void cl_object_attr_lock (struct cl_object *o); +void cl_object_put(const struct lu_env *env, struct cl_object *o); +void cl_object_get(struct cl_object *o); +void cl_object_attr_lock(struct cl_object *o); void cl_object_attr_unlock(struct cl_object *o); -int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr); -int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj, - const struct cl_attr *attr, unsigned valid); -int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj, - struct ost_lvb *lvb); -int cl_conf_set (const struct lu_env *env, struct cl_object *obj, - const struct cl_object_conf *conf); -void cl_object_prune (const struct lu_env *env, struct cl_object *obj); -void cl_object_kill (const struct lu_env *env, struct cl_object *obj); +int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, + struct cl_attr *attr); +int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj, + const struct cl_attr *attr, unsigned valid); +int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj, + struct ost_lvb *lvb); +int cl_conf_set(const struct lu_env *env, struct cl_object *obj, + const struct cl_object_conf *conf); +void cl_object_prune(const struct lu_env *env, struct cl_object *obj); +void cl_object_kill(const struct 
lu_env *env, struct cl_object *obj); /** * Returns true, iff \a o0 and \a o1 are slices of the same object. @@ -2770,7 +2784,8 @@ static inline void *cl_object_page_slice(struct cl_object *clob, /** @} cl_object */ /** \defgroup cl_page cl_page - * @{ */ + * @{ + */ enum { CLP_GANG_OKAY = 0, CLP_GANG_RESCHED, @@ -2781,34 +2796,26 @@ enum { /* callback of cl_page_gang_lookup() */ typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *, struct cl_page *, void *); -int cl_page_gang_lookup (const struct lu_env *env, - struct cl_object *obj, - struct cl_io *io, - pgoff_t start, pgoff_t end, - cl_page_gang_cb_t cb, void *cbdata); -struct cl_page *cl_page_lookup (struct cl_object_header *hdr, - pgoff_t index); -struct cl_page *cl_page_find (const struct lu_env *env, - struct cl_object *obj, - pgoff_t idx, struct page *vmpage, - enum cl_page_type type); -struct cl_page *cl_page_find_sub (const struct lu_env *env, - struct cl_object *obj, - pgoff_t idx, struct page *vmpage, +int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io, pgoff_t start, pgoff_t end, + cl_page_gang_cb_t cb, void *cbdata); +struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index); +struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj, + pgoff_t idx, struct page *vmpage, + enum cl_page_type type); +struct cl_page *cl_page_find_sub(const struct lu_env *env, + struct cl_object *obj, + pgoff_t idx, struct page *vmpage, struct cl_page *parent); -void cl_page_get (struct cl_page *page); -void cl_page_put (const struct lu_env *env, - struct cl_page *page); -void cl_page_print (const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct cl_page *pg); -void cl_page_header_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct cl_page *pg); -struct page *cl_page_vmpage (const struct lu_env *env, - struct cl_page *page); -struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj); -struct cl_page *cl_page_top (struct cl_page *page); +void cl_page_get(struct cl_page *page); +void cl_page_put(const struct lu_env *env, struct cl_page *page); +void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer, + const struct cl_page *pg); +void cl_page_header_print(const struct lu_env *env, void *cookie, + lu_printer_t printer, const struct cl_page *pg); +struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page); +struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj); +struct cl_page *cl_page_top(struct cl_page *page); const struct cl_page_slice *cl_page_at(const struct cl_page *page, const struct lu_device_type *dtype); @@ -2820,17 +2827,17 @@ const struct cl_page_slice *cl_page_at(const struct cl_page *page, */ /** @{ */ -int cl_page_own (const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -int cl_page_own_try (const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -void cl_page_assume (const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -void cl_page_unassume (const struct lu_env *env, - struct cl_io *io, struct cl_page *pg); -void cl_page_disown (const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io); +int cl_page_own(const struct lu_env *env, + struct cl_io *io, struct cl_page *page); +int cl_page_own_try(const struct lu_env *env, + struct cl_io *io, struct cl_page *page); +void 
cl_page_assume(const struct lu_env *env, + struct cl_io *io, struct cl_page *page); +void cl_page_unassume(const struct lu_env *env, + struct cl_io *io, struct cl_page *pg); +void cl_page_disown(const struct lu_env *env, + struct cl_io *io, struct cl_page *page); +int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io); /** @} ownership */ @@ -2841,19 +2848,19 @@ int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io); * tracking transfer state. */ /** @{ */ -int cl_page_prep (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg, enum cl_req_type crt); -void cl_page_completion (const struct lu_env *env, - struct cl_page *pg, enum cl_req_type crt, int ioret); -int cl_page_make_ready (const struct lu_env *env, struct cl_page *pg, - enum cl_req_type crt); -int cl_page_cache_add (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg, enum cl_req_type crt); -void cl_page_clip (const struct lu_env *env, struct cl_page *pg, - int from, int to); -int cl_page_cancel (const struct lu_env *env, struct cl_page *page); -int cl_page_flush (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); +int cl_page_prep(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg, enum cl_req_type crt); +void cl_page_completion(const struct lu_env *env, + struct cl_page *pg, enum cl_req_type crt, int ioret); +int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg, + enum cl_req_type crt); +int cl_page_cache_add(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg, enum cl_req_type crt); +void cl_page_clip(const struct lu_env *env, struct cl_page *pg, + int from, int to); +int cl_page_cancel(const struct lu_env *env, struct cl_page *page); +int cl_page_flush(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); /** @} transfer */ @@ -2862,24 +2869,22 @@ int cl_page_flush (const struct lu_env *env, struct cl_io *io, * Functions to discard, delete and export a cl_page. 
*/ /** @{ */ -void cl_page_discard (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); -void cl_page_delete (const struct lu_env *env, struct cl_page *pg); -int cl_page_unmap (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); -int cl_page_is_vmlocked (const struct lu_env *env, - const struct cl_page *pg); -void cl_page_export (const struct lu_env *env, - struct cl_page *pg, int uptodate); -int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, - struct cl_page *page); -loff_t cl_offset (const struct cl_object *obj, pgoff_t idx); -pgoff_t cl_index (const struct cl_object *obj, loff_t offset); -int cl_page_size (const struct cl_object *obj); -int cl_pages_prune (const struct lu_env *env, struct cl_object *obj); - -void cl_lock_print (const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct cl_lock *lock); +void cl_page_discard(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); +void cl_page_delete(const struct lu_env *env, struct cl_page *pg); +int cl_page_unmap(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); +int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg); +void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate); +int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, + struct cl_page *page); +loff_t cl_offset(const struct cl_object *obj, pgoff_t idx); +pgoff_t cl_index(const struct cl_object *obj, loff_t offset); +int cl_page_size(const struct cl_object *obj); +int cl_pages_prune(const struct lu_env *env, struct cl_object *obj); + +void cl_lock_print(const struct lu_env *env, void *cookie, + lu_printer_t printer, const struct cl_lock *lock); void cl_lock_descr_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_lock_descr *descr); @@ -2888,7 +2893,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie, /** @} cl_page */ /** \defgroup cl_lock cl_lock - * @{ */ + * @{ + */ struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io, const struct cl_lock_descr *need, @@ -2917,19 +2923,19 @@ static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env, const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, const struct lu_device_type *dtype); -void cl_lock_get (struct cl_lock *lock); -void cl_lock_get_trust (struct cl_lock *lock); -void cl_lock_put (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); +void cl_lock_get(struct cl_lock *lock); +void cl_lock_get_trust(struct cl_lock *lock); +void cl_lock_put(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock, + const char *scope, const void *source); void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source); -void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); -void cl_lock_release (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); -void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock); +void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock, + const char *scope, const void *source); +void cl_lock_release(const struct lu_env *env, struct cl_lock *lock, + const char 
*scope, const void *source); +void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock); int cl_lock_is_intransit(struct cl_lock *lock); @@ -2966,52 +2972,53 @@ int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock, * * cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD * - * @{ */ + * @{ + */ -int cl_wait (const struct lu_env *env, struct cl_lock *lock); -void cl_unuse (const struct lu_env *env, struct cl_lock *lock); -int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, - struct cl_io *io, __u32 flags); -int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock); -int cl_wait_try (const struct lu_env *env, struct cl_lock *lock); -int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic); +int cl_wait(const struct lu_env *env, struct cl_lock *lock); +void cl_unuse(const struct lu_env *env, struct cl_lock *lock); +int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, + struct cl_io *io, __u32 flags); +int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock); +int cl_wait_try(const struct lu_env *env, struct cl_lock *lock); +int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic); /** @} statemachine */ -void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock, - enum cl_lock_state state); -int cl_queue_match (const struct list_head *queue, - const struct cl_lock_descr *need); - -void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_is_mutexed (struct cl_lock *lock); -int cl_lock_nr_mutexed (const struct lu_env *env); -int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock); -int cl_lock_ext_match (const struct cl_lock_descr *has, - const struct cl_lock_descr *need); -int cl_lock_descr_match(const struct cl_lock_descr *has, - const struct cl_lock_descr *need); -int cl_lock_mode_match (enum cl_lock_mode has, enum cl_lock_mode need); -int cl_lock_modify (const struct lu_env *env, struct cl_lock *lock, - const struct cl_lock_descr *desc); - -void cl_lock_closure_init (const struct lu_env *env, - struct cl_lock_closure *closure, - struct cl_lock *origin, int wait); -void cl_lock_closure_fini (struct cl_lock_closure *closure); -int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, - struct cl_lock_closure *closure); -void cl_lock_disclosure (const struct lu_env *env, - struct cl_lock_closure *closure); -int cl_lock_enclosure (const struct lu_env *env, struct cl_lock *lock, - struct cl_lock_closure *closure); +void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock, + enum cl_lock_state state); +int cl_queue_match(const struct list_head *queue, + const struct cl_lock_descr *need); + +void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_is_mutexed(struct cl_lock *lock); +int cl_lock_nr_mutexed(const struct lu_env *env); +int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_ext_match(const struct cl_lock_descr *has, + const 
struct cl_lock_descr *need); +int cl_lock_descr_match(const struct cl_lock_descr *has, + const struct cl_lock_descr *need); +int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need); +int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock, + const struct cl_lock_descr *desc); + +void cl_lock_closure_init(const struct lu_env *env, + struct cl_lock_closure *closure, + struct cl_lock *origin, int wait); +void cl_lock_closure_fini(struct cl_lock_closure *closure); +int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, + struct cl_lock_closure *closure); +void cl_lock_disclosure(const struct lu_env *env, + struct cl_lock_closure *closure); +int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock, + struct cl_lock_closure *closure); void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock); void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock); -void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error); +void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error); void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait); unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock); @@ -3019,39 +3026,40 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock); /** @} cl_lock */ /** \defgroup cl_io cl_io - * @{ */ - -int cl_io_init (const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj); -int cl_io_sub_init (const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj); -int cl_io_rw_init (const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, loff_t pos, size_t count); -int cl_io_loop (const struct lu_env *env, struct cl_io *io); - -void cl_io_fini (const struct lu_env *env, struct cl_io *io); -int cl_io_iter_init (const struct lu_env *env, struct cl_io *io); -void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io); -int cl_io_lock (const struct lu_env *env, struct cl_io *io); -void cl_io_unlock (const struct lu_env *env, struct cl_io *io); -int cl_io_start (const struct lu_env *env, struct cl_io *io); -void cl_io_end (const struct lu_env *env, struct cl_io *io); -int cl_io_lock_add (const struct lu_env *env, struct cl_io *io, - struct cl_io_lock_link *link); -int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, - struct cl_lock_descr *descr); -int cl_io_read_page (const struct lu_env *env, struct cl_io *io, - struct cl_page *page); -int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, unsigned from, unsigned to); -int cl_io_commit_write (const struct lu_env *env, struct cl_io *io, - struct cl_page *page, unsigned from, unsigned to); -int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue); -int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue, - long timeout); -int cl_io_is_going (const struct lu_env *env); + * @{ + */ + +int cl_io_init(const struct lu_env *env, struct cl_io *io, + enum cl_io_type iot, struct cl_object *obj); +int cl_io_sub_init(const struct lu_env *env, struct cl_io *io, + enum cl_io_type iot, struct cl_object *obj); +int cl_io_rw_init(const struct lu_env *env, struct cl_io *io, + enum cl_io_type iot, loff_t pos, size_t count); +int cl_io_loop(const struct lu_env *env, struct cl_io *io); + +void cl_io_fini(const struct lu_env *env, struct cl_io 
*io); +int cl_io_iter_init(const struct lu_env *env, struct cl_io *io); +void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io); +int cl_io_lock(const struct lu_env *env, struct cl_io *io); +void cl_io_unlock(const struct lu_env *env, struct cl_io *io); +int cl_io_start(const struct lu_env *env, struct cl_io *io); +void cl_io_end(const struct lu_env *env, struct cl_io *io); +int cl_io_lock_add(const struct lu_env *env, struct cl_io *io, + struct cl_io_lock_link *link); +int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, + struct cl_lock_descr *descr); +int cl_io_read_page(const struct lu_env *env, struct cl_io *io, + struct cl_page *page); +int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, + struct cl_page *page, unsigned from, unsigned to); +int cl_io_commit_write(const struct lu_env *env, struct cl_io *io, + struct cl_page *page, unsigned from, unsigned to); +int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, + enum cl_req_type iot, struct cl_2queue *queue); +int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, + enum cl_req_type iot, struct cl_2queue *queue, + long timeout); +int cl_io_is_going(const struct lu_env *env); /** * True, iff \a io is an O_APPEND write(2). @@ -3094,7 +3102,8 @@ do { \ /** @} cl_io */ /** \defgroup cl_page_list cl_page_list - * @{ */ + * @{ + */ /** * Last page in the page list. @@ -3117,40 +3126,41 @@ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist) #define cl_page_list_for_each_safe(page, temp, list) \ list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch) -void cl_page_list_init (struct cl_page_list *plist); -void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page); -void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src, - struct cl_page *page); -void cl_page_list_splice (struct cl_page_list *list, - struct cl_page_list *head); -void cl_page_list_disown (const struct lu_env *env, - struct cl_io *io, struct cl_page_list *plist); - -void cl_2queue_init (struct cl_2queue *queue); -void cl_2queue_disown (const struct lu_env *env, - struct cl_io *io, struct cl_2queue *queue); -void cl_2queue_discard (const struct lu_env *env, - struct cl_io *io, struct cl_2queue *queue); -void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue); +void cl_page_list_init(struct cl_page_list *plist); +void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page); +void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src, + struct cl_page *page); +void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head); +void cl_page_list_disown(const struct lu_env *env, + struct cl_io *io, struct cl_page_list *plist); + +void cl_2queue_init(struct cl_2queue *queue); +void cl_2queue_disown(const struct lu_env *env, + struct cl_io *io, struct cl_2queue *queue); +void cl_2queue_discard(const struct lu_env *env, + struct cl_io *io, struct cl_2queue *queue); +void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue); void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page); /** @} cl_page_list */ /** \defgroup cl_req cl_req - * @{ */ + * @{ + */ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, enum cl_req_type crt, int nr_objects); -void cl_req_page_add (const struct lu_env *env, struct cl_req *req, - struct cl_page *page); -void cl_req_page_done (const struct lu_env *env, struct cl_page *page); -int cl_req_prep (const struct lu_env 
*env, struct cl_req *req); -void cl_req_attr_set (const struct lu_env *env, struct cl_req *req, - struct cl_req_attr *attr, u64 flags); +void cl_req_page_add(const struct lu_env *env, struct cl_req *req, + struct cl_page *page); +void cl_req_page_done(const struct lu_env *env, struct cl_page *page); +int cl_req_prep(const struct lu_env *env, struct cl_req *req); +void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, + struct cl_req_attr *attr, u64 flags); void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret); /** \defgroup cl_sync_io cl_sync_io - * @{ */ + * @{ + */ /** * Anchor for synchronous transfer. This is allocated on a stack by thread @@ -3214,22 +3224,23 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret); * - cl_env_reexit(cl_env_reenter had to be called priorly) * * \see lu_env, lu_context, lu_context_key - * @{ */ + * @{ + */ struct cl_env_nest { int cen_refcheck; void *cen_cookie; }; -struct lu_env *cl_env_get (int *refcheck); -struct lu_env *cl_env_alloc (int *refcheck, __u32 tags); -struct lu_env *cl_env_nested_get (struct cl_env_nest *nest); -void cl_env_put (struct lu_env *env, int *refcheck); -void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env); -void *cl_env_reenter (void); -void cl_env_reexit (void *cookie); -void cl_env_implant (struct lu_env *env, int *refcheck); -void cl_env_unplant (struct lu_env *env, int *refcheck); +struct lu_env *cl_env_get(int *refcheck); +struct lu_env *cl_env_alloc(int *refcheck, __u32 tags); +struct lu_env *cl_env_nested_get(struct cl_env_nest *nest); +void cl_env_put(struct lu_env *env, int *refcheck); +void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env); +void *cl_env_reenter(void); +void cl_env_reexit(void *cookie); +void cl_env_implant(struct lu_env *env, int *refcheck); +void cl_env_unplant(struct lu_env *env, int *refcheck); /** @} cl_env */ diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h index 36e7a6767e71..5d839a9f789f 100644 --- a/drivers/staging/lustre/lustre/include/lclient.h +++ b/drivers/staging/lustre/lustre/include/lclient.h @@ -127,7 +127,7 @@ static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env) struct ccc_thread_info *info; info = lu_context_key_get(&env->le_ctx, &ccc_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -156,7 +156,7 @@ static inline struct ccc_session *ccc_env_session(const struct lu_env *env) struct ccc_session *ses; ses = lu_context_key_get(env->le_ses, &ccc_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -383,7 +383,8 @@ void cl_put_grouplock(struct ccc_grouplock *cg); * * NB: If you find you have to use these interfaces for your new code, please * think about it again. These interfaces may be removed in the future for - * better layering. */ + * better layering. 
+ */ struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj); void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm); int lov_read_and_clear_async_rc(struct cl_object *clob); diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h index 468bc28be895..3907bf4ce07c 100644 --- a/drivers/staging/lustre/lustre/include/linux/obd.h +++ b/drivers/staging/lustre/lustre/include/linux/obd.h @@ -57,23 +57,23 @@ struct ll_iattr { #define CLIENT_OBD_LIST_LOCK_DEBUG 1 -typedef struct { +struct client_obd_lock { spinlock_t lock; unsigned long time; struct task_struct *task; const char *func; int line; -} client_obd_lock_t; +}; -static inline void __client_obd_list_lock(client_obd_lock_t *lock, +static inline void __client_obd_list_lock(struct client_obd_lock *lock, const char *func, int line) { unsigned long cur = jiffies; while (1) { if (spin_trylock(&lock->lock)) { - LASSERT(lock->task == NULL); + LASSERT(!lock->task); lock->task = current; lock->func = func; lock->line = line; @@ -85,7 +85,7 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock, time_before(lock->time + 5 * HZ, jiffies)) { struct task_struct *task = lock->task; - if (task == NULL) + if (!task) continue; LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n", @@ -106,20 +106,20 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock, #define client_obd_list_lock(lock) \ __client_obd_list_lock(lock, __func__, __LINE__) -static inline void client_obd_list_unlock(client_obd_lock_t *lock) +static inline void client_obd_list_unlock(struct client_obd_lock *lock) { - LASSERT(lock->task != NULL); + LASSERT(lock->task); lock->task = NULL; lock->time = jiffies; spin_unlock(&lock->lock); } -static inline void client_obd_list_lock_init(client_obd_lock_t *lock) +static inline void client_obd_list_lock_init(struct client_obd_lock *lock) { spin_lock_init(&lock->lock); } -static inline void client_obd_list_lock_done(client_obd_lock_t *lock) +static inline void client_obd_list_lock_done(struct client_obd_lock *lock) {} #endif /* __LINUX_OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h index 0ac8e0edcc48..4146c9c3999f 100644 --- a/drivers/staging/lustre/lustre/include/lprocfs_status.h +++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h @@ -54,7 +54,7 @@ struct lprocfs_vars { struct file_operations *fops; void *data; /** - * /proc file mode. + * sysfs file mode. 
*/ umode_t proc_mode; }; @@ -175,7 +175,8 @@ struct lprocfs_percpu { enum lprocfs_stats_flags { LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */ LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu - * area and need locking */ + * area and need locking + */ LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */ }; @@ -196,7 +197,8 @@ struct lprocfs_stats { unsigned short ls_biggest_alloc_num; enum lprocfs_stats_flags ls_flags; /* Lock used when there are no percpu stats areas; For percpu stats, - * it is used to protect ls_biggest_alloc_num change */ + * it is used to protect ls_biggest_alloc_num change + */ spinlock_t ls_lock; /* has ls_num of counter headers */ @@ -274,20 +276,7 @@ static inline int opcode_offset(__u32 opc) OPC_RANGE(OST)); } else if (opc < FLD_LAST_OPC) { /* FLD opcode */ - return (opc - FLD_FIRST_OPC + - OPC_RANGE(SEC) + - OPC_RANGE(SEQ) + - OPC_RANGE(QUOTA) + - OPC_RANGE(LLOG) + - OPC_RANGE(OBD) + - OPC_RANGE(MGS) + - OPC_RANGE(LDLM) + - OPC_RANGE(MDS) + - OPC_RANGE(OST)); - } else if (opc < UPDATE_LAST_OPC) { - /* update opcode */ - return (opc - UPDATE_FIRST_OPC + - OPC_RANGE(FLD) + + return (opc - FLD_FIRST_OPC + OPC_RANGE(SEC) + OPC_RANGE(SEQ) + OPC_RANGE(QUOTA) + @@ -312,8 +301,7 @@ static inline int opcode_offset(__u32 opc) OPC_RANGE(SEC) + \ OPC_RANGE(SEQ) + \ OPC_RANGE(SEC) + \ - OPC_RANGE(FLD) + \ - OPC_RANGE(UPDATE)) + OPC_RANGE(FLD)) #define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \ OPC_RANGE(EXTRA)) @@ -407,7 +395,7 @@ static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc, } else { unsigned int cpuid = get_cpu(); - if (unlikely(stats->ls_percpu[cpuid] == NULL)) { + if (unlikely(!stats->ls_percpu[cpuid])) { rc = lprocfs_stats_alloc_one(stats, cpuid); if (rc < 0) { put_cpu(); @@ -438,12 +426,10 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc, case LPROCFS_GET_SMP_ID: if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) { - if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) { - spin_unlock_irqrestore(&stats->ls_lock, - *flags); - } else { + if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) + spin_unlock_irqrestore(&stats->ls_lock, *flags); + else spin_unlock(&stats->ls_lock); - } } else { put_cpu(); } @@ -451,12 +437,10 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc, case LPROCFS_GET_NUM_CPU: if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) { - if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) { - spin_unlock_irqrestore(&stats->ls_lock, - *flags); - } else { + if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) + spin_unlock_irqrestore(&stats->ls_lock, *flags); + else spin_unlock(&stats->ls_lock); - } } return; } @@ -521,11 +505,11 @@ static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats, unsigned long flags = 0; __u64 ret = 0; - LASSERT(stats != NULL); + LASSERT(stats); num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_cpu; i++) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; ret += lprocfs_read_helper( lprocfs_stats_counter_get(stats, i, idx), @@ -608,7 +592,7 @@ int lprocfs_write_helper(const char __user *buffer, unsigned long count, int *val); int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count, __u64 *val); -int lprocfs_write_frac_u64_helper(const char *buffer, +int lprocfs_write_frac_u64_helper(const char __user *buffer, unsigned long count, __u64 *val, int mult); char *lprocfs_find_named_value(const char *buffer, const 
char *name, @@ -625,9 +609,10 @@ int lprocfs_single_release(struct inode *, struct file *); int lprocfs_seq_release(struct inode *, struct file *); /* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only - proc entries; otherwise, you will define name##_seq_write function also for - a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally, - call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); */ + * proc entries; otherwise, you will define name##_seq_write function also for + * a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally, + * call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); + */ #define __LPROC_SEQ_FOPS(name, custom_seq_write) \ static int name##_single_open(struct inode *inode, struct file *file) \ { \ diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h index 1d79341a495d..b5088b13a305 100644 --- a/drivers/staging/lustre/lustre/include/lu_object.h +++ b/drivers/staging/lustre/lustre/include/lu_object.h @@ -164,11 +164,12 @@ struct lu_device_operations { /** * For lu_object_conf flags */ -typedef enum { +enum loc_flags { /* This is a new object to be allocated, or the file - * corresponding to the object does not exists. */ + * corresponding to the object does not exists. + */ LOC_F_NEW = 0x00000001, -} loc_flags_t; +}; /** * Object configuration, describing particulars of object being created. On @@ -179,7 +180,7 @@ struct lu_object_conf { /** * Some hints for obj find and alloc. */ - loc_flags_t loc_flags; + enum loc_flags loc_flags; }; /** @@ -392,7 +393,7 @@ struct lu_device_type_operations { static inline int lu_device_is_md(const struct lu_device *d) { - return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_MD); + return ergo(d, d->ld_type->ldt_tags & LU_DEVICE_MD); } /** @@ -488,7 +489,7 @@ enum lu_object_header_flags { /** * Mark this object has already been taken out of cache. */ - LU_OBJECT_UNHASHED = 1 + LU_OBJECT_UNHASHED = 1, }; enum lu_object_header_attr { @@ -756,7 +757,7 @@ static inline const struct lu_fid *lu_object_fid(const struct lu_object *o) /** * return device operations vector for this object */ -static const inline struct lu_device_operations * +static inline const struct lu_device_operations * lu_object_ops(const struct lu_object *o) { return o->lo_dev->ld_ops; @@ -895,7 +896,8 @@ enum lu_xattr_flags { /** @} helpers */ /** \name lu_context - * @{ */ + * @{ + */ /** For lu_context health-checks */ enum lu_context_state { @@ -1119,7 +1121,7 @@ struct lu_context_key { CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \ \ value = kzalloc(sizeof(*value), GFP_NOFS); \ - if (value == NULL) \ + if (!value) \ value = ERR_PTR(-ENOMEM); \ \ return value; \ @@ -1174,7 +1176,7 @@ void lu_context_key_revive (struct lu_context_key *key); do { \ LU_CONTEXT_KEY_INIT(key); \ key = va_arg(args, struct lu_context_key *); \ - } while (key != NULL); \ + } while (key); \ va_end(args); \ } diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h index 97cd157dd35a..f7dfd83951ee 100644 --- a/drivers/staging/lustre/lustre/include/lu_ref.h +++ b/drivers/staging/lustre/lustre/include/lu_ref.h @@ -17,10 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* - * You should have received a copy of the GNU General Public License - * along with Lustre; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * */ #ifndef __LUSTRE_LU_REF_H diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h index 09088f40ba88..07d45de69dd9 100644 --- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h +++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h @@ -47,9 +47,11 @@ struct ll_fiemap_extent { __u64 fe_logical; /* logical offset in bytes for the start of - * the extent from the beginning of the file */ + * the extent from the beginning of the file + */ __u64 fe_physical; /* physical offset in bytes for the start - * of the extent from the beginning of the disk */ + * of the extent from the beginning of the disk + */ __u64 fe_length; /* length in bytes for this extent */ __u64 fe_reserved64[2]; __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */ @@ -59,9 +61,11 @@ struct ll_fiemap_extent { struct ll_user_fiemap { __u64 fm_start; /* logical offset (inclusive) at - * which to start mapping (in) */ + * which to start mapping (in) + */ __u64 fm_length; /* logical length of mapping which - * userspace wants (in) */ + * userspace wants (in) + */ __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */ __u32 fm_mapped_extents;/* number of extents that were mapped (out) */ __u32 fm_extent_count; /* size of fm_extents array (in) */ @@ -71,28 +75,38 @@ struct ll_user_fiemap { #define FIEMAP_MAX_OFFSET (~0ULL) -#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */ -#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */ - -#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */ -#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */ -#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending. - * Sets EXTENT_UNKNOWN. */ -#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read - * while fs is unmounted */ -#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs. - * Sets EXTENT_NO_DIRECT. */ +#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before + * map + */ +#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute + * tree + */ +#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */ +#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */ +#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending. + * Sets EXTENT_UNKNOWN. + */ +#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read + * while fs is unmounted + */ +#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs. + * Sets EXTENT_NO_DIRECT. + */ #define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be - * block aligned. */ + * block aligned. + */ #define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata. * Sets EXTENT_NOT_ALIGNED.*/ -#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block. - * Sets EXTENT_NOT_ALIGNED.*/ -#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but - * no data (i.e. zero). */ -#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively +#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block. + * Sets EXTENT_NOT_ALIGNED. + */ +#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but + * no data (i.e. zero). 
+ */ +#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively * support extents. Result - * merged for efficiency. */ + * merged for efficiency. + */ static inline size_t fiemap_count_to_size(size_t extent_count) { @@ -114,7 +128,8 @@ static inline unsigned fiemap_size_to_count(size_t array_size) /* Lustre specific flags - use a high bit, don't conflict with upstream flag */ #define FIEMAP_EXTENT_NO_DIRECT 0x40000000 /* Data mapping undefined */ -#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely. - * Sets NO_DIRECT flag */ +#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely. + * Sets NO_DIRECT flag + */ #endif /* _LUSTRE_FIEMAP_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h b/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h deleted file mode 100644 index 93a3d7db3010..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h +++ /dev/null @@ -1,2 +0,0 @@ -#define BUILD_VERSION "v2_3_64_0-g6e62c21-CHANGED-3.9.0" -#define LUSTRE_RELEASE 3.9.0_g6e62c21 diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h index b064b5821e3f..da8bc6eadd13 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h @@ -113,25 +113,25 @@ #define CONNMGR_REQUEST_PORTAL 1 #define CONNMGR_REPLY_PORTAL 2 -//#define OSC_REQUEST_PORTAL 3 +/*#define OSC_REQUEST_PORTAL 3 */ #define OSC_REPLY_PORTAL 4 -//#define OSC_BULK_PORTAL 5 +/*#define OSC_BULK_PORTAL 5 */ #define OST_IO_PORTAL 6 #define OST_CREATE_PORTAL 7 #define OST_BULK_PORTAL 8 -//#define MDC_REQUEST_PORTAL 9 +/*#define MDC_REQUEST_PORTAL 9 */ #define MDC_REPLY_PORTAL 10 -//#define MDC_BULK_PORTAL 11 +/*#define MDC_BULK_PORTAL 11 */ #define MDS_REQUEST_PORTAL 12 -//#define MDS_REPLY_PORTAL 13 +/*#define MDS_REPLY_PORTAL 13 */ #define MDS_BULK_PORTAL 14 #define LDLM_CB_REQUEST_PORTAL 15 #define LDLM_CB_REPLY_PORTAL 16 #define LDLM_CANCEL_REQUEST_PORTAL 17 #define LDLM_CANCEL_REPLY_PORTAL 18 -//#define PTLBD_REQUEST_PORTAL 19 -//#define PTLBD_REPLY_PORTAL 20 -//#define PTLBD_BULK_PORTAL 21 +/*#define PTLBD_REQUEST_PORTAL 19 */ +/*#define PTLBD_REPLY_PORTAL 20 */ +/*#define PTLBD_BULK_PORTAL 21 */ #define MDS_SETATTR_PORTAL 22 #define MDS_READPAGE_PORTAL 23 #define OUT_PORTAL 24 @@ -146,7 +146,9 @@ #define SEQ_CONTROLLER_PORTAL 32 #define MGS_BULK_PORTAL 33 -/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */ +/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, + * n8851@cray.com + */ /* packet types */ #define PTL_RPC_MSG_REQUEST 4711 @@ -295,7 +297,8 @@ static inline int range_compare_loc(const struct lu_seq_range *r1, fld_range_is_mdt(range) ? "mdt" : "ost" /** \defgroup lu_fid lu_fid - * @{ */ + * @{ + */ /** * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat. @@ -307,7 +310,8 @@ enum lma_compat { LMAC_SOM = 0x00000002, LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */ LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is - * under /O/<seq>/d<x>. */ + * under /O/<seq>/d<x>. 
+ */ }; /** @@ -319,7 +323,8 @@ enum lma_incompat { LMAI_RELEASED = 0x00000001, /* file is released */ LMAI_AGENT = 0x00000002, /* agent inode */ LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object - is on the remote MDT */ + * is on the remote MDT + */ }; #define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT) @@ -395,12 +400,14 @@ enum fid_seq { FID_SEQ_LOCAL_FILE = 0x200000001ULL, FID_SEQ_DOT_LUSTRE = 0x200000002ULL, /* sequence is used for local named objects FIDs generated - * by local_object_storage library */ + * by local_object_storage library + */ FID_SEQ_LOCAL_NAME = 0x200000003ULL, /* Because current FLD will only cache the fid sequence, instead * of oid on the client side, if the FID needs to be exposed to * clients sides, it needs to make sure all of fids under one - * sequence will be located in one MDT. */ + * sequence will be located in one MDT. + */ FID_SEQ_SPECIAL = 0x200000004ULL, FID_SEQ_QUOTA = 0x200000005ULL, FID_SEQ_QUOTA_GLB = 0x200000006ULL, @@ -601,7 +608,8 @@ static inline void ostid_set_seq(struct ost_id *oi, __u64 seq) oi->oi_fid.f_seq = seq; /* Note: if f_oid + f_ver is zero, we need init it * to be 1, otherwise, ostid_seq will treat this - * as old ostid (oi_seq == 0) */ + * as old ostid (oi_seq == 0) + */ if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0) oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID; } @@ -630,15 +638,13 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid) { if (fid_seq_is_mdt0(ostid_seq(oi))) { if (oid >= IDIF_MAX_OID) { - CERROR("Bad %llu to set "DOSTID"\n", - oid, POSTID(oi)); + CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi)); return; } oi->oi.oi_id = oid; } else { if (oid > OBIF_MAX_OID) { - CERROR("Bad %llu to set "DOSTID"\n", - oid, POSTID(oi)); + CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi)); return; } oi->oi_fid.f_oid = oid; @@ -689,11 +695,12 @@ static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, * that we map into the IDIF namespace. It allows up to 2^48 * objects per OST, as this is the object namespace that has * been in production for years. This can handle create rates - * of 1M objects/s/OST for 9 years, or combinations thereof. */ + * of 1M objects/s/OST for 9 years, or combinations thereof. + */ if (ostid_id(ostid) >= IDIF_MAX_OID) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", - POSTID(ostid), ost_idx); - return -EBADF; + CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n", + POSTID(ostid), ost_idx); + return -EBADF; } fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx); /* truncate to 32 bits by assignment */ @@ -704,10 +711,11 @@ static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, /* This is either an IDIF object, which identifies objects across * all OSTs, or a regular FID. The IDIF namespace maps legacy * OST objects into the FID namespace. In both cases, we just - * pass the FID through, no conversion needed. */ + * pass the FID through, no conversion needed. 
+ */ if (ostid->oi_fid.f_ver != 0) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", - POSTID(ostid), ost_idx); + CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n", + POSTID(ostid), ost_idx); return -EBADF; } *fid = ostid->oi_fid; @@ -807,7 +815,7 @@ static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src) static inline int fid_is_sane(const struct lu_fid *fid) { - return fid != NULL && + return fid && ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) || fid_is_igif(fid) || fid_is_idif(fid) || fid_seq_is_rsvd(fid_seq(fid))); @@ -868,7 +876,8 @@ static inline void ostid_le_to_cpu(const struct ost_id *src_oi, /** @} lu_fid */ /** \defgroup lu_dir lu_dir - * @{ */ + * @{ + */ /** * Enumeration of possible directory entry attributes. @@ -880,24 +889,8 @@ enum lu_dirent_attrs { LUDA_FID = 0x0001, LUDA_TYPE = 0x0002, LUDA_64BITHASH = 0x0004, - - /* The following attrs are used for MDT internal only, - * not visible to client */ - - /* Verify the dirent consistency */ - LUDA_VERIFY = 0x8000, - /* Only check but not repair the dirent inconsistency */ - LUDA_VERIFY_DRYRUN = 0x4000, - /* The dirent has been repaired, or to be repaired (dryrun). */ - LUDA_REPAIR = 0x2000, - /* The system is upgraded, has beed or to be repaired (dryrun). */ - LUDA_UPGRADE = 0x1000, - /* Ignore this record, go to next directly. */ - LUDA_IGNORE = 0x0800, }; -#define LU_DIRENT_ATTRS_MASK 0xf800 - /** * Layout of readdir pages, as transmitted on wire. */ @@ -1128,7 +1121,8 @@ struct ptlrpc_body_v2 { __u32 pb_conn_cnt; __u32 pb_timeout; /* for req, the deadline, for rep, the service est */ __u32 pb_service_time; /* for rep, actual service time, also used for - net_latency of req */ + * net_latency of req + */ __u32 pb_limit; __u64 pb_slv; /* VBR: pre-versions */ @@ -1174,7 +1168,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); /* #define MSG_AT_SUPPORT 0x0008 * This was used in early prototypes of adaptive timeouts, and while there * shouldn't be any users of that code there also isn't a need for using this - * bits. Defer usage until at least 1.10 to avoid potential conflict. */ + * bits. Defer usage until at least 1.10 to avoid potential conflict. 
+ */ #define MSG_DELAY_REPLAY 0x0010 #define MSG_VERSION_REPLAY 0x0020 #define MSG_REQ_REPLAY_DONE 0x0040 @@ -1187,7 +1182,7 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define MSG_CONNECT_RECOVERING 0x00000001 #define MSG_CONNECT_RECONNECT 0x00000002 #define MSG_CONNECT_REPLAYABLE 0x00000004 -//#define MSG_CONNECT_PEER 0x8 +/*#define MSG_CONNECT_PEER 0x8 */ #define MSG_CONNECT_LIBCLIENT 0x00000010 #define MSG_CONNECT_INITIAL 0x00000020 #define MSG_CONNECT_ASYNC 0x00000040 @@ -1195,60 +1190,65 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */ /* Connect flags */ -#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/ -#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */ -#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */ -#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */ -#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */ -#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */ -#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */ -#define OBD_CONNECT_ACL 0x80ULL /*access control lists */ -#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */ +#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/ +#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */ +#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */ +#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */ +#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */ +#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */ +#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */ +#define OBD_CONNECT_ACL 0x80ULL /*access control lists */ +#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */ #define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/ -#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */ -#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */ -#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/ +#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */ +#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */ +#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/ #define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated. *We do not support JOIN FILE *anymore, reserve this flags *just for preventing such bit - *to be reused.*/ -#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/ -#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/ -#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */ + *to be reused. + */ +#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/ +#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/ +#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */ #define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */ -#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */ -#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */ -#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */ -#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */ -#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. 
*/ -#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */ -#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */ +#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */ +#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */ +#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */ +#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */ +#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */ +#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */ +#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */ #define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */ -#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ +#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ #define OBD_CONNECT_REAL 0x8000000ULL /*real connection */ #define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */ -#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ -#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ -#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */ -#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */ +#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ +#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ +#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */ +#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */ #define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */ #define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */ #define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */ #define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */ #define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */ #define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits - * directory hash */ + * directory hash + */ #define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */ #define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */ #define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */ #define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */ #define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS - * RPC error properly */ + * RPC error properly + */ #define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for - * finer space reservation */ + * finer space reservation + */ #define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8 - * policy and 2.x server */ + * policy and 2.x server + */ #define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */ #define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */ #define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */ @@ -1264,61 +1264,19 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); * submit a small patch against EVERY branch that ONLY adds the new flag, * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the * flag to check_obd_connect_data(), and updates wiretests accordingly, so it - * can be approved and landed easily to reserve the flag for future use. */ + * can be approved and landed easily to reserve the flag for future use. + */ /* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS * connection. 
It is a temporary bug fix for Imperative Recovery interop * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for - * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. */ + * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. + */ #define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS #define OCD_HAS_FLAG(ocd, flg) \ (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg)) -#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE - -#define MDT_CONNECT_SUPPORTED (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \ - OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \ - OBD_CONNECT_IBITS | \ - OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \ - OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ - OBD_CONNECT_RMT_CLIENT | \ - OBD_CONNECT_RMT_CLIENT_FORCE | \ - OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \ - OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \ - OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \ - OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \ - OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \ - OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \ - OBD_CONNECT_EINPROGRESS | \ - OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \ - OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\ - OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\ - OBD_CONNECT_FLOCK_DEAD | \ - OBD_CONNECT_DISP_STRIPE) - -#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \ - OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \ - OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \ - OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \ - OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ - LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \ - OBD_CONNECT_RMT_CLIENT | \ - OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \ - OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \ - OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \ - OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \ - OBD_CONNECT_MAX_EASIZE | \ - OBD_CONNECT_EINPROGRESS | \ - OBD_CONNECT_JOBSTATS | \ - OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\ - OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \ - OBD_CONNECT_PINGLESS) -#define ECHO_CONNECT_SUPPORTED (0) -#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \ - OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \ - OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS) - /* Features required for this version of the client to work with server */ #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \ OBD_CONNECT_FULL20) @@ -1334,7 +1292,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); /* This structure is used for both request and reply. * * If we eventually have separate connect data for different types, which we - * almost certainly will, then perhaps we stick a union in here. */ + * almost certainly will, then perhaps we stick a union in here. 
+ */ struct obd_connect_data_v1 { __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ __u32 ocd_version; /* lustre release version number */ @@ -1364,7 +1323,7 @@ struct obd_connect_data { __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */ __u8 ocd_inodespace; /* log2 of the per-inode space consumption */ __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */ - __u32 ocd_unused; /* also fix lustre_swab_connect */ + __u32 ocd_unused; /* also fix lustre_swab_connect */ __u64 ocd_transno; /* first transno from client to be replayed */ __u32 ocd_group; /* MDS group on OST */ __u32 ocd_cksum_types; /* supported checksum algorithms */ @@ -1374,7 +1333,8 @@ struct obd_connect_data { /* Fields after ocd_maxbytes are only accessible by the receiver * if the corresponding flag in ocd_connect_flags is set. Accessing * any field after ocd_maxbytes on the receiver without a valid flag - * may result in out-of-bound memory access and kernel oops. */ + * may result in out-of-bound memory access and kernel oops. + */ __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */ @@ -1398,7 +1358,8 @@ struct obd_connect_data { * with senior engineers before starting to use a new field. Then, submit * a small patch against EVERY branch that ONLY adds the new field along with * the matching OBD_CONNECT flag, so that can be approved and landed easily to - * reserve the flag for future use. */ + * reserve the flag for future use. + */ void lustre_swab_connect(struct obd_connect_data *ocd); @@ -1408,18 +1369,18 @@ void lustre_swab_connect(struct obd_connect_data *ocd); * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new * algorithm and also the OBD_FL_CKSUM* flags. */ -typedef enum { +enum cksum_type { OBD_CKSUM_CRC32 = 0x00000001, OBD_CKSUM_ADLER = 0x00000002, OBD_CKSUM_CRC32C = 0x00000004, -} cksum_type_t; +}; /* * OST requests: OBDO & OBD request records */ /* opcodes */ -typedef enum { +enum ost_cmd { OST_REPLY = 0, /* reply ? */ OST_GETATTR = 1, OST_SETATTR = 2, @@ -1440,14 +1401,14 @@ typedef enum { OST_QUOTACTL = 19, OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */ OST_LAST_OPC -} ost_cmd_t; +}; #define OST_FIRST_OPC OST_REPLY enum obdo_flags { OBD_FL_INLINEDATA = 0x00000001, OBD_FL_OBDMDEXISTS = 0x00000002, OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */ - OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */ + OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */ OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/ OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */ OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */ @@ -1461,14 +1422,16 @@ enum obdo_flags { OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */ OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */ OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */ - OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client. + OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client. * XXX: obsoleted - reserved for old - * clients prior than 2.2 */ + * clients prior than 2.2 + */ OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */ OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */ /* Note that while these checksum values are currently separate bits, - * in 2.x we can actually allow all values from 1-31 if we wanted. 
*/ + * in 2.x we can actually allow all values from 1-31 if we wanted. + */ OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER | OBD_FL_CKSUM_CRC32C, @@ -1657,7 +1620,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) } } -#define OBD_MD_FLID (0x00000001ULL) /* object ID */ +#define OBD_MD_FLID (0x00000001ULL) /* object ID */ #define OBD_MD_FLATIME (0x00000002ULL) /* access time */ #define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */ #define OBD_MD_FLCTIME (0x00000008ULL) /* change time */ @@ -1683,22 +1646,23 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLGROUP (0x01000000ULL) /* group */ #define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */ #define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */ - /* ->mds if epoch opens or closes */ + /* ->mds if epoch opens or closes + */ #define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */ #define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */ #define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */ #define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */ #define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */ -#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */ +#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */ #define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */ -#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */ +#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */ #define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */ #define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */ #define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */ #define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */ -#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */ +#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */ #define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */ #define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */ #define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */ @@ -1707,7 +1671,8 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes * under lock; for xattr * requests means the - * client holds the lock */ + * client holds the lock + */ #define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */ #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */ @@ -1727,7 +1692,8 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS) /* don't forget obdo_fid which is way down at the bottom so it can - * come after the definition of llog_cookie */ + * come after the definition of llog_cookie + */ enum hss_valid { HSS_SETMASK = 0x01, @@ -1749,19 +1715,20 @@ void lustre_swab_obd_statfs(struct obd_statfs *os); /* ost_body.data values for OST_BRW */ -#define OBD_BRW_READ 0x01 -#define OBD_BRW_WRITE 0x02 -#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE) -#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous +#define OBD_BRW_READ 0x01 +#define OBD_BRW_WRITE 0x02 +#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE) +#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous * transfer and is not accounted in - * the grant. */ -#define OBD_BRW_CHECK 0x10 + * the grant. 
+ */ +#define OBD_BRW_CHECK 0x10 #define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */ -#define OBD_BRW_GRANTED 0x40 /* the ost manages this */ -#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */ -#define OBD_BRW_NOQUOTA 0x100 -#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */ -#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */ +#define OBD_BRW_GRANTED 0x40 /* the ost manages this */ +#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */ +#define OBD_BRW_NOQUOTA 0x100 +#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */ +#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */ #define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */ #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */ #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */ @@ -1775,7 +1742,8 @@ struct obd_ioobj { struct ost_id ioo_oid; /* object ID, if multi-obj BRW */ __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4, * now (PTLRPC_BULK_OPS_COUNT - 1) in - * high 16 bits in 2.4 and later */ + * high 16 bits in 2.4 and later + */ __u32 ioo_bufcnt; /* number of niobufs for this object */ }; @@ -1799,7 +1767,8 @@ void lustre_swab_niobuf_remote(struct niobuf_remote *nbr); /* lock value block communicated between the filter and llite */ /* OST_LVB_ERR_INIT is needed because the return code in rc is - * negative, i.e. because ((MASK + rc) & MASK) != MASK. */ + * negative, i.e. because ((MASK + rc) & MASK) != MASK. + */ #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL #define OST_LVB_IS_ERR(blocks) \ @@ -1836,23 +1805,12 @@ void lustre_swab_ost_lvb(struct ost_lvb *lvb); * lquota data structures */ -#ifndef QUOTABLOCK_BITS -#define QUOTABLOCK_BITS 10 -#endif - -#ifndef QUOTABLOCK_SIZE -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) -#endif - -#ifndef toqb -#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS) -#endif - /* The lquota_id structure is an union of all the possible identifier types that * can be used with quota, this includes: * - 64-bit user ID * - 64-bit group ID - * - a FID which can be used for per-directory quota in the future */ + * - a FID which can be used for per-directory quota in the future + */ union lquota_id { struct lu_fid qid_fid; /* FID for per-directory quota */ __u64 qid_uid; /* user identifier */ @@ -1889,89 +1847,6 @@ do { \ Q_COPY(out, in, qc_dqblk); \ } while (0) -/* Body of quota request used for quota acquire/release RPCs between quota - * master (aka QMT) and slaves (ak QSD). */ -struct quota_body { - struct lu_fid qb_fid; /* FID of global index packing the pool ID - * and type (data or metadata) as well as - * the quota type (user or group). */ - union lquota_id qb_id; /* uid or gid or directory FID */ - __u32 qb_flags; /* see below */ - __u32 qb_padding; - __u64 qb_count; /* acquire/release count (kbytes/inodes) */ - __u64 qb_usage; /* current slave usage (kbytes/inodes) */ - __u64 qb_slv_ver; /* slave index file version */ - struct lustre_handle qb_lockh; /* per-ID lock handle */ - struct lustre_handle qb_glb_lockh; /* global lock handle */ - __u64 qb_padding1[4]; -}; - -/* When the quota_body is used in the reply of quota global intent - * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. 
*/ -#define qb_slv_fid qb_fid -/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in - * quota reply */ -#define qb_qunit qb_usage - -#define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */ -#define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */ -#define QUOTA_DQACQ_FL_REL 0x4 /* release quota */ -#define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */ - -void lustre_swab_quota_body(struct quota_body *b); - -/* Quota types currently supported */ -enum { - LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */ - LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */ - LQUOTA_TYPE_MAX -}; - -/* There are 2 different resource types on which a quota limit can be enforced: - * - inodes on the MDTs - * - blocks on the OSTs */ -enum { - LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */ - LQUOTA_RES_DT = 0x02, - LQUOTA_LAST_RES, - LQUOTA_FIRST_RES = LQUOTA_RES_MD -}; - -#define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1) - -/* - * Space accounting support - * Format of an accounting record, providing disk usage information for a given - * user or group - */ -struct lquota_acct_rec { /* 16 bytes */ - __u64 bspace; /* current space in use */ - __u64 ispace; /* current # inodes in use */ -}; - -/* - * Global quota index support - * Format of a global record, providing global quota settings for a given quota - * identifier - */ -struct lquota_glb_rec { /* 32 bytes */ - __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */ - __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */ - __u64 qbr_time; /* grace time, in seconds */ - __u64 qbr_granted; /* how much is granted to slaves, in #inodes or - * kbytes */ -}; - -/* - * Slave index support - * Format of a slave record, recording how much space is granted to a given - * slave - */ -struct lquota_slv_rec { /* 8 bytes */ - __u64 qsr_granted; /* space granted to the slave for the key=ID, - * in #inodes or kbytes */ -}; - /* Data structures associated with the quota locks */ /* Glimpse descriptor used for the index & per-ID quota locks */ @@ -1985,9 +1860,6 @@ struct ldlm_gl_lquota_desc { __u64 gl_pad2; }; -#define gl_qunit gl_hardlimit /* current qunit value used when - * glimpsing per-ID quota locks */ - /* quota glimpse flags */ #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */ @@ -2002,15 +1874,12 @@ struct lquota_lvb { void lustre_swab_lquota_lvb(struct lquota_lvb *lvb); -/* LVB used with global quota lock */ -#define lvb_glb_ver lvb_id_may_rel /* current version of the global index */ - /* op codes */ -typedef enum { +enum quota_cmd { QUOTA_DQACQ = 601, QUOTA_DQREL = 602, QUOTA_LAST_OPC -} quota_cmd_t; +}; #define QUOTA_FIRST_OPC QUOTA_DQACQ /* @@ -2018,7 +1887,7 @@ typedef enum { */ /* opcodes */ -typedef enum { +enum mds_cmd { MDS_GETATTR = 33, MDS_GETATTR_NAME = 34, MDS_CLOSE = 35, @@ -2049,23 +1918,15 @@ typedef enum { MDS_HSM_CT_UNREGISTER = 60, MDS_SWAP_LAYOUTS = 61, MDS_LAST_OPC -} mds_cmd_t; +}; #define MDS_FIRST_OPC MDS_GETATTR -/* opcodes for object update */ -typedef enum { - UPDATE_OBJ = 1000, - UPDATE_LAST_OPC -} update_cmd_t; - -#define UPDATE_FIRST_OPC UPDATE_OBJ - /* * Do not exceed 63 */ -typedef enum { +enum mdt_reint_cmd { REINT_SETATTR = 1, REINT_CREATE = 2, REINT_LINK = 3, @@ -2074,9 +1935,9 @@ typedef enum { REINT_OPEN = 6, REINT_SETXATTR = 7, REINT_RMENTRY = 8, -// REINT_WRITE = 9, +/* REINT_WRITE = 9, */ REINT_MAX -} mds_reint_t, mdt_reint_t; +}; void lustre_swab_generic_32s(__u32 *val); @@ -2097,7 +1958,8 @@ void lustre_swab_generic_32s(__u32 *val); /* INODE LOCK 
PARTS */ #define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also * was used to protect permission (mode, - * owner, group etc) before 2.4. */ + * owner, group etc) before 2.4. + */ #define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */ #define MDS_INODELOCK_OPEN 0x000004 /* For opened files */ #define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */ @@ -2110,7 +1972,8 @@ void lustre_swab_generic_32s(__u32 *val); * For local directory, MDT will always grant UPDATE_LOCK|PERM_LOCK together. * For Remote directory, the master MDT, where the remote directory is, will * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is, - * will grant LOOKUP_LOCK. */ + * will grant LOOKUP_LOCK. + */ #define MDS_INODELOCK_PERM 0x000010 #define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */ @@ -2120,7 +1983,8 @@ void lustre_swab_generic_32s(__u32 *val); /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2], * but was moved into name[1] along with the OID to avoid consuming the - * name[2,3] fields that need to be used for the quota id (also a FID). */ + * name[2,3] fields that need to be used for the quota id (also a FID). + */ enum { LUSTRE_RES_ID_SEQ_OFF = 0, LUSTRE_RES_ID_VER_OID_OFF = 1, @@ -2156,7 +2020,8 @@ enum md_op_flags { #define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1 /* these should be identical to their EXT4_*_FL counterparts, they are - * redefined here only to avoid dragging in fs/ext4/ext4.h */ + * redefined here only to avoid dragging in fs/ext4/ext4.h + */ #define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */ #define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */ #define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */ @@ -2168,15 +2033,14 @@ enum md_op_flags { * protocol equivalents of LDISKFS_*_FL values stored on disk, while * the S_* flags are kernel-internal values that change between kernel * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS. - * See b=16526 for a full history. */ + * See b=16526 for a full history. + */ static inline int ll_ext_to_inode_flags(int flags) { return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) | ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) | ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) | -#if defined(S_DIRSYNC) ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) | -#endif ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0)); } @@ -2185,9 +2049,7 @@ static inline int ll_inode_to_ext_flags(int iflags) return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) | ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) | ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) | -#if defined(S_DIRSYNC) ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) | -#endif ((iflags & S_IMMUTABLE) ? 
LUSTRE_IMMUTABLE_FL : 0)); } @@ -2207,9 +2069,10 @@ struct mdt_body { __s64 ctime; __u64 blocks; /* XID, in the case of MDS_READPAGE */ __u64 ioepoch; - __u64 t_state; /* transient file state defined in - * enum md_transient_state - * was "ino" until 2.4.0 */ + __u64 t_state; /* transient file state defined in + * enum md_transient_state + * was "ino" until 2.4.0 + */ __u32 fsuid; __u32 fsgid; __u32 capability; @@ -2219,7 +2082,7 @@ struct mdt_body { __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */ __u32 rdev; __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */ - __u32 unused2; /* was "generation" until 2.4.0 */ + __u32 unused2; /* was "generation" until 2.4.0 */ __u32 suppgid; __u32 eadatasize; __u32 aclsize; @@ -2256,7 +2119,8 @@ enum { }; /* inode access permission for remote user, the inode info are omitted, - * for client knows them. */ + * for client knows them. + */ struct mdt_remote_perm { __u32 rp_uid; __u32 rp_gid; @@ -2306,13 +2170,13 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); * since the client and MDS may run different kernels (see bug 13828) * Therefore, we should only use MDS_ATTR_* attributes for sa_valid. */ -#define MDS_ATTR_MODE 0x1ULL /* = 1 */ -#define MDS_ATTR_UID 0x2ULL /* = 2 */ -#define MDS_ATTR_GID 0x4ULL /* = 4 */ -#define MDS_ATTR_SIZE 0x8ULL /* = 8 */ -#define MDS_ATTR_ATIME 0x10ULL /* = 16 */ -#define MDS_ATTR_MTIME 0x20ULL /* = 32 */ -#define MDS_ATTR_CTIME 0x40ULL /* = 64 */ +#define MDS_ATTR_MODE 0x1ULL /* = 1 */ +#define MDS_ATTR_UID 0x2ULL /* = 2 */ +#define MDS_ATTR_GID 0x4ULL /* = 4 */ +#define MDS_ATTR_SIZE 0x8ULL /* = 8 */ +#define MDS_ATTR_ATIME 0x10ULL /* = 16 */ +#define MDS_ATTR_MTIME 0x20ULL /* = 32 */ +#define MDS_ATTR_CTIME 0x40ULL /* = 64 */ #define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */ #define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */ #define MDS_ATTR_FORCE 0x200ULL /* = 512, Not a change, but a change it */ @@ -2320,14 +2184,11 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); #define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */ #define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */ #define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */ -#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, ie O_TRUNC */ +#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, + * ie O_TRUNC + */ #define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */ -#ifndef FMODE_READ -#define FMODE_READ 00000001 -#define FMODE_WRITE 00000002 -#endif - #define MDS_FMODE_CLOSED 00000000 #define MDS_FMODE_EXEC 00000004 /* IO Epoch is opened on a closed file. */ @@ -2354,9 +2215,10 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); * We do not support JOIN FILE * anymore, reserve this flags * just for preventing such bit - * to be reused. */ + * to be reused. 
+ */ -#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */ +#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */ #define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */ #define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */ #define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */ @@ -2409,7 +2271,8 @@ struct mdt_rec_create { __u32 cr_bias; /* use of helpers set/get_mrc_cr_flags() is needed to access * 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to - * extend cr_flags size without breaking 1.8 compat */ + * extend cr_flags size without breaking 1.8 compat + */ __u32 cr_flags_l; /* for use with open, low 32 bits */ __u32 cr_flags_h; /* for use with open, high 32 bits */ __u32 cr_umask; /* umask for create */ @@ -2630,7 +2493,8 @@ enum seq_op { #define LOV_MAX_UUID_BUFFER_SIZE 8192 /* The size of the buffer the lov/mdc reserves for the * array of UUIDs returned by the MDS. With the current - * protocol, this will limit the max number of OSTs per LOV */ + * protocol, this will limit the max number of OSTs per LOV + */ #define LOV_DESC_MAGIC 0xB0CCDE5C #define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */ @@ -2639,13 +2503,13 @@ enum seq_op { /* LOV settings descriptor (should only contain static info) */ struct lov_desc { __u32 ld_tgt_count; /* how many OBD's */ - __u32 ld_active_tgt_count; /* how many active */ - __u32 ld_default_stripe_count; /* how many objects are used */ - __u32 ld_pattern; /* default PATTERN_RAID0 */ - __u64 ld_default_stripe_size; /* in bytes */ - __u64 ld_default_stripe_offset; /* in bytes */ + __u32 ld_active_tgt_count; /* how many active */ + __u32 ld_default_stripe_count; /* how many objects are used */ + __u32 ld_pattern; /* default PATTERN_RAID0 */ + __u64 ld_default_stripe_size; /* in bytes */ + __u64 ld_default_stripe_offset; /* in bytes */ __u32 ld_padding_0; /* unused */ - __u32 ld_qos_maxage; /* in second */ + __u32 ld_qos_maxage; /* in second */ __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */ __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */ struct obd_uuid ld_uuid; @@ -2659,7 +2523,7 @@ void lustre_swab_lov_desc(struct lov_desc *ld); * LDLM requests: */ /* opcodes -- MUST be distinct from OST/MDS opcodes */ -typedef enum { +enum ldlm_cmd { LDLM_ENQUEUE = 101, LDLM_CONVERT = 102, LDLM_CANCEL = 103, @@ -2668,7 +2532,7 @@ typedef enum { LDLM_GL_CALLBACK = 106, LDLM_SET_INFO = 107, LDLM_LAST_OPC -} ldlm_cmd_t; +}; #define LDLM_FIRST_OPC LDLM_ENQUEUE #define RES_NAME_SIZE 4 @@ -2687,7 +2551,7 @@ static inline int ldlm_res_eq(const struct ldlm_res_id *res0, } /* lock types */ -typedef enum { +enum ldlm_mode { LCK_MINMODE = 0, LCK_EX = 1, LCK_PW = 2, @@ -2698,17 +2562,17 @@ typedef enum { LCK_GROUP = 64, LCK_COS = 128, LCK_MAXMODE -} ldlm_mode_t; +}; #define LCK_MODE_NUM 8 -typedef enum { +enum ldlm_type { LDLM_PLAIN = 10, LDLM_EXTENT = 11, LDLM_FLOCK = 12, LDLM_IBITS = 13, LDLM_MAX_TYPE -} ldlm_type_t; +}; #define LDLM_MIN_TYPE LDLM_PLAIN @@ -2747,7 +2611,8 @@ struct ldlm_flock_wire { * the first fields of the ldlm_flock structure because there is only * one ldlm_swab routine to process the ldlm_policy_data_t union. if * this ever changes we will need to swab the union differently based - * on the resource type. */ + * on the resource type. 
+ */ typedef union { struct ldlm_extent l_extent; @@ -2768,15 +2633,15 @@ struct ldlm_intent { void lustre_swab_ldlm_intent(struct ldlm_intent *i); struct ldlm_resource_desc { - ldlm_type_t lr_type; + enum ldlm_type lr_type; __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */ struct ldlm_res_id lr_name; }; struct ldlm_lock_desc { struct ldlm_resource_desc l_resource; - ldlm_mode_t l_req_mode; - ldlm_mode_t l_granted_mode; + enum ldlm_mode l_req_mode; + enum ldlm_mode l_granted_mode; ldlm_wire_policy_data_t l_policy_data; }; @@ -2793,7 +2658,8 @@ struct ldlm_request { void lustre_swab_ldlm_request(struct ldlm_request *rq); /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available. - * Otherwise, 2 are available. */ + * Otherwise, 2 are available. + */ #define ldlm_request_bufsize(count, type) \ ({ \ int _avail = LDLM_LOCKREQ_HANDLES; \ @@ -2820,7 +2686,7 @@ void lustre_swab_ldlm_reply(struct ldlm_reply *r); /* * Opcodes for mountconf (mgs and mgc) */ -typedef enum { +enum mgs_cmd { MGS_CONNECT = 250, MGS_DISCONNECT, MGS_EXCEPTION, /* node died, etc. */ @@ -2829,7 +2695,7 @@ typedef enum { MGS_SET_INFO, MGS_CONFIG_READ, MGS_LAST_OPC -} mgs_cmd_t; +}; #define MGS_FIRST_OPC MGS_CONNECT #define MGS_PARAM_MAXLEN 1024 @@ -2918,13 +2784,13 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size); * Opcodes for multiple servers. */ -typedef enum { +enum obd_cmd { OBD_PING = 400, OBD_LOG_CANCEL, OBD_QC_CALLBACK, OBD_IDX_READ, OBD_LAST_OPC -} obd_cmd_t; +}; #define OBD_FIRST_OPC OBD_PING /* catalog of log objects */ @@ -2933,7 +2799,7 @@ typedef enum { struct llog_logid { struct ost_id lgl_oi; __u32 lgl_ogen; -} __attribute__((packed)); +} __packed; /** Records written to the CATALOGS list */ #define CATLIST "CATALOGS" @@ -2942,7 +2808,7 @@ struct llog_catid { __u32 lci_padding1; __u32 lci_padding2; __u32 lci_padding3; -} __attribute__((packed)); +} __packed; /* Log data record types - there is no specific reason that these need to * be related to the RPC opcodes, but no reason not to (may be handy later?) 
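The on-wire/on-disk structures in this header (llog_logid, llog_catid, the llog_*_rec records and so on) are tagged __packed, the kernel's shorthand for __attribute__((packed)), so the compiler cannot insert padding and the in-memory layout stays byte-identical to what is stored or sent over the network. Below is a minimal userspace sketch of the effect, not part of the patch: the demo_* names are hypothetical, and the attribute is spelled out directly because the kernel's __packed macro is not available outside the tree.

#include <stdint.h>
#include <stdio.h>

struct demo_unpacked {
	uint32_t gen;
	uint64_t oid;	/* compiler pads gen up to an 8-byte boundary */
};

struct demo_packed {
	uint32_t gen;
	uint64_t oid;	/* no padding: layout matches the serialized format */
} __attribute__((packed));	/* same effect as the kernel's __packed */

int main(void)
{
	/* typically prints "16 12" on x86_64 */
	printf("%zu %zu\n", sizeof(struct demo_unpacked),
	       sizeof(struct demo_packed));
	return 0;
}

The trade-off is that members of a packed struct may be misaligned, which is why such structures are normally accessed through helpers or converted field by field (as the lustre_swab_* routines in this header do for endianness) rather than aliased freely.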
@@ -2950,7 +2816,7 @@ struct llog_catid { #define LLOG_OP_MAGIC 0x10600000 #define LLOG_OP_MASK 0xfff00000 -typedef enum { +enum llog_op_type { LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000, OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00, /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */ @@ -2970,7 +2836,7 @@ typedef enum { HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000, LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539, LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b, -} llog_op_type; +}; #define LLOG_REC_HDR_NEEDS_SWABBING(r) \ (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC)) @@ -3006,7 +2872,7 @@ struct llog_logid_rec { __u64 lid_padding2; __u64 lid_padding3; struct llog_rec_tail lid_tail; -} __attribute__((packed)); +} __packed; struct llog_unlink_rec { struct llog_rec_hdr lur_hdr; @@ -3014,7 +2880,7 @@ struct llog_unlink_rec { __u32 lur_oseq; __u32 lur_count; struct llog_rec_tail lur_tail; -} __attribute__((packed)); +} __packed; struct llog_unlink64_rec { struct llog_rec_hdr lur_hdr; @@ -3024,7 +2890,7 @@ struct llog_unlink64_rec { __u64 lur_padding2; __u64 lur_padding3; struct llog_rec_tail lur_tail; -} __attribute__((packed)); +} __packed; struct llog_setattr64_rec { struct llog_rec_hdr lsr_hdr; @@ -3035,7 +2901,7 @@ struct llog_setattr64_rec { __u32 lsr_gid_h; __u64 lsr_padding; struct llog_rec_tail lsr_tail; -} __attribute__((packed)); +} __packed; struct llog_size_change_rec { struct llog_rec_hdr lsc_hdr; @@ -3045,16 +2911,7 @@ struct llog_size_change_rec { __u64 lsc_padding2; __u64 lsc_padding3; struct llog_rec_tail lsc_tail; -} __attribute__((packed)); - -#define CHANGELOG_MAGIC 0xca103000 - -/** \a changelog_rec_type's that can't be masked */ -#define CHANGELOG_MINMASK (1 << CL_MARK) -/** bits covering all \a changelog_rec_type's */ -#define CHANGELOG_ALLMASK 0XFFFFFFFF -/** default \a changelog_rec_type mask */ -#define CHANGELOG_DEFMASK CHANGELOG_ALLMASK & ~(1 << CL_ATIME | 1 << CL_CLOSE) +} __packed; /* changelog llog name, needed by client replicators */ #define CHANGELOG_CATALOG "changelog_catalog" @@ -3062,22 +2919,20 @@ struct llog_size_change_rec { struct changelog_setinfo { __u64 cs_recno; __u32 cs_id; -} __attribute__((packed)); +} __packed; /** changelog record */ struct llog_changelog_rec { struct llog_rec_hdr cr_hdr; struct changelog_rec cr; struct llog_rec_tail cr_tail; /**< for_sizezof_only */ -} __attribute__((packed)); +} __packed; struct llog_changelog_ext_rec { struct llog_rec_hdr cr_hdr; struct changelog_ext_rec cr; struct llog_rec_tail cr_tail; /**< for_sizezof_only */ -} __attribute__((packed)); - -#define CHANGELOG_USER_PREFIX "cl" +} __packed; struct llog_changelog_user_rec { struct llog_rec_hdr cur_hdr; @@ -3085,7 +2940,7 @@ struct llog_changelog_user_rec { __u32 cur_padding; __u64 cur_endrec; struct llog_rec_tail cur_tail; -} __attribute__((packed)); +} __packed; enum agent_req_status { ARS_WAITING, @@ -3123,21 +2978,22 @@ struct llog_agent_req_rec { struct llog_rec_hdr arr_hdr; /**< record header */ __u32 arr_status; /**< status of the request */ /* must match enum - * agent_req_status */ + * agent_req_status + */ __u32 arr_archive_id; /**< backend archive number */ __u64 arr_flags; /**< req flags */ - __u64 arr_compound_id; /**< compound cookie */ + __u64 arr_compound_id;/**< compound cookie */ __u64 arr_req_create; /**< req. creation time */ __u64 arr_req_change; /**< req. status change time */ struct hsm_action_item arr_hai; /**< req. 
to the agent */ - struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */ -} __attribute__((packed)); + struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */ +} __packed; /* Old llog gen for compatibility */ struct llog_gen { __u64 mnt_cnt; __u64 conn_cnt; -} __attribute__((packed)); +} __packed; struct llog_gen_rec { struct llog_rec_hdr lgr_hdr; @@ -3175,19 +3031,21 @@ struct llog_log_hdr { __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23]; __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)]; struct llog_rec_tail llh_tail; -} __attribute__((packed)); +} __packed; #define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \ llh->llh_bitmap_offset - \ sizeof(llh->llh_tail)) * 8) -/** log cookies are used to reference a specific log file and a record therein */ +/** log cookies are used to reference a specific log file and a record + * therein + */ struct llog_cookie { struct llog_logid lgc_lgl; __u32 lgc_subsys; __u32 lgc_index; __u32 lgc_padding; -} __attribute__((packed)); +} __packed; /** llog protocol */ enum llogd_rpc_ops { @@ -3196,7 +3054,7 @@ enum llogd_rpc_ops { LLOG_ORIGIN_HANDLE_READ_HEADER = 503, LLOG_ORIGIN_HANDLE_WRITE_REC = 504, LLOG_ORIGIN_HANDLE_CLOSE = 505, - LLOG_ORIGIN_CONNECT = 506, + LLOG_ORIGIN_CONNECT = 506, LLOG_CATINFO = 507, /* deprecated */ LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508, LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/ @@ -3212,13 +3070,13 @@ struct llogd_body { __u32 lgd_saved_index; __u32 lgd_len; __u64 lgd_cur_offset; -} __attribute__((packed)); +} __packed; struct llogd_conn_body { struct llog_gen lgdc_gen; struct llog_logid lgdc_logid; __u32 lgdc_ctxt_idx; -} __attribute__((packed)); +} __packed; /* Note: 64-bit types are 64-bit aligned in structure */ struct obdo { @@ -3245,17 +3103,18 @@ struct obdo { __u64 o_ioepoch; /* epoch in ost writes */ __u32 o_stripe_idx; /* holds stripe idx */ __u32 o_parent_ver; - struct lustre_handle o_handle; /* brw: lock handle to prolong - * locks */ - struct llog_cookie o_lcookie; /* destroy: unlink cookie from - * MDS */ + struct lustre_handle o_handle; /* brw: lock handle to prolong locks + */ + struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS + */ __u32 o_uid_h; __u32 o_gid_h; __u64 o_data_version; /* getattr: sum of iversion for * each stripe. 
* brw: grant space consumed on - * the client for the write */ + * the client for the write + */ __u64 o_padding_4; __u64 o_padding_5; __u64 o_padding_6; @@ -3273,13 +3132,14 @@ static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd, { *wobdo = *lobdo; wobdo->o_flags &= ~OBD_FL_LOCAL_MASK; - if (ocd == NULL) + if (!ocd) return; if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) { /* Currently OBD_FL_OSTID will only be used when 2.4 echo - * client communicate with pre-2.4 server */ + * client communicate with pre-2.4 server + */ wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid); wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid); } @@ -3292,7 +3152,7 @@ static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd, __u32 local_flags = 0; if (lobdo->o_valid & OBD_MD_FLFLAGS) - local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK; + local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK; *lobdo = *wobdo; if (local_flags != 0) { @@ -3300,7 +3160,7 @@ static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd, lobdo->o_flags &= ~OBD_FL_LOCAL_MASK; lobdo->o_flags |= local_flags; } - if (ocd == NULL) + if (!ocd) return; if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && @@ -3349,100 +3209,14 @@ void dump_ioo(struct obd_ioobj *nb); void dump_ost_body(struct ost_body *ob); void dump_rcs(__u32 *rc); -#define IDX_INFO_MAGIC 0x3D37CC37 - -/* Index file transfer through the network. The server serializes the index into - * a byte stream which is sent to the client via a bulk transfer */ -struct idx_info { - __u32 ii_magic; - - /* reply: see idx_info_flags below */ - __u32 ii_flags; - - /* request & reply: number of lu_idxpage (to be) transferred */ - __u16 ii_count; - __u16 ii_pad0; - - /* request: requested attributes passed down to the iterator API */ - __u32 ii_attrs; - - /* request & reply: index file identifier (FID) */ - struct lu_fid ii_fid; - - /* reply: version of the index file before starting to walk the index. - * Please note that the version can be modified at any time during the - * transfer */ - __u64 ii_version; - - /* request: hash to start with: - * reply: hash of the first entry of the first lu_idxpage and hash - * of the entry to read next if any */ - __u64 ii_hash_start; - __u64 ii_hash_end; - - /* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is - * set */ - __u16 ii_keysize; - - /* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC - * is set */ - __u16 ii_recsize; - - __u32 ii_pad1; - __u64 ii_pad2; - __u64 ii_pad3; -}; - -void lustre_swab_idx_info(struct idx_info *ii); - -#define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */ - -/* List of flags used in idx_info::ii_flags */ -enum idx_info_flags { - II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */ - II_FL_VARKEY = 1 << 1, /* keys can be of variable size */ - II_FL_VARREC = 1 << 2, /* records can be of variable size */ - II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */ -}; - -#define LIP_MAGIC 0x8A6D6B6C - -/* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */ -struct lu_idxpage { - /* 16-byte header */ - __u32 lip_magic; - __u16 lip_flags; - __u16 lip_nr; /* number of entries in the container */ - __u64 lip_pad0; /* additional padding for future use */ - - /* key/record pairs are stored in the remaining 4080 bytes. 
- * depending upon the flags in idx_info::ii_flags, each key/record - * pair might be preceded by: - * - a hash value - * - the key size (II_FL_VARKEY is set) - * - the record size (II_FL_VARREC is set) - * - * For the time being, we only support fixed-size key & record. */ - char lip_entries[0]; -}; - -#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries)) - -/* Gather all possible type associated with a 4KB container */ -union lu_page { - struct lu_dirpage lp_dir; /* for MDS_READPAGE */ - struct lu_idxpage lp_idx; /* for OBD_IDX_READ */ - char lp_array[LU_PAGE_SIZE]; -}; - /* security opcodes */ -typedef enum { +enum sec_cmd { SEC_CTX_INIT = 801, SEC_CTX_INIT_CONT = 802, SEC_CTX_FINI = 803, SEC_LAST_OPC, SEC_FIRST_OPC = SEC_CTX_INIT -} sec_cmd_t; +}; /* * capa related definitions @@ -3451,7 +3225,8 @@ typedef enum { #define CAPA_HMAC_KEY_MAX_LEN 56 /* NB take care when changing the sequence of elements this struct, - * because the offset info is used in find_capa() */ + * because the offset info is used in find_capa() + */ struct lustre_capa { struct lu_fid lc_fid; /** fid */ __u64 lc_opc; /** operations allowed */ @@ -3463,7 +3238,7 @@ struct lustre_capa { /* FIXME: y2038 time_t overflow: */ __u32 lc_expiry; /** expiry time (sec) */ __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */ -} __attribute__((packed)); +} __packed; void lustre_swab_lustre_capa(struct lustre_capa *c); @@ -3497,7 +3272,7 @@ struct lustre_capa_key { __u32 lk_keyid; /**< key# */ __u32 lk_padding; __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */ -} __attribute__((packed)); +} __packed; /** The link ea holds 1 \a link_ea_entry for each hardlink */ #define LINK_EA_MAGIC 0x11EAF1DFUL @@ -3518,7 +3293,7 @@ struct link_ea_entry { unsigned char lee_reclen[2]; unsigned char lee_parent_fid[sizeof(struct lu_fid)]; char lee_name[0]; -} __attribute__((packed)); +} __packed; /** fid2path request/reply structure */ struct getinfo_fid2path { @@ -3527,7 +3302,7 @@ struct getinfo_fid2path { __u32 gf_linkno; __u32 gf_pathlen; char gf_path[0]; -} __attribute__((packed)); +} __packed; void lustre_swab_fid2path (struct getinfo_fid2path *gf); @@ -3558,7 +3333,7 @@ void lustre_swab_layout_intent(struct layout_intent *li); */ struct hsm_progress_kernel { /* Field taken from struct hsm_progress */ - lustre_fid hpk_fid; + struct lu_fid hpk_fid; __u64 hpk_cookie; struct hsm_extent hpk_extent; __u16 hpk_flags; @@ -3567,7 +3342,7 @@ struct hsm_progress_kernel { /* Additional fields */ __u64 hpk_data_version; __u64 hpk_padding2; -} __attribute__((packed)); +} __packed; void lustre_swab_hsm_user_state(struct hsm_user_state *hus); void lustre_swab_hsm_current_action(struct hsm_current_action *action); @@ -3576,92 +3351,6 @@ void lustre_swab_hsm_user_state(struct hsm_user_state *hus); void lustre_swab_hsm_user_item(struct hsm_user_item *hui); void lustre_swab_hsm_request(struct hsm_request *hr); -/** - * These are object update opcode under UPDATE_OBJ, which is currently - * being used by cross-ref operations between MDT. - * - * During the cross-ref operation, the Master MDT, which the client send the - * request to, will disassembly the operation into object updates, then OSP - * will send these updates to the remote MDT to be executed. - * - * Update request format - * magic: UPDATE_BUFFER_MAGIC_V1 - * Count: How many updates in the req. - * bufs[0] : following are packets of object. - * update[0]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. 
- * update[1]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * .......... - * update[7]: type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * Current 8 maxim updates per object update request. - * - ******************************************************************* - * update reply format: - * - * ur_version: UPDATE_REPLY_V1 - * ur_count: The count of the reply, which is usually equal - * to the number of updates in the request. - * ur_lens: The reply lengths of each object update. - * - * replies: 1st update reply [4bytes_ret: other body] - * 2nd update reply [4bytes_ret: other body] - * ..... - * nth update reply [4bytes_ret: other body] - * - * For each reply of the update, the format would be - * result(4 bytes):Other stuff - */ - -#define UPDATE_MAX_OPS 10 -#define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001 -#define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1 -#define UPDATE_BUF_COUNT 8 -enum object_update_op { - OBJ_CREATE = 1, - OBJ_DESTROY = 2, - OBJ_REF_ADD = 3, - OBJ_REF_DEL = 4, - OBJ_ATTR_SET = 5, - OBJ_ATTR_GET = 6, - OBJ_XATTR_SET = 7, - OBJ_XATTR_GET = 8, - OBJ_INDEX_LOOKUP = 9, - OBJ_INDEX_INSERT = 10, - OBJ_INDEX_DELETE = 11, - OBJ_LAST -}; - -struct update { - __u32 u_type; - __u32 u_batchid; - struct lu_fid u_fid; - __u32 u_lens[UPDATE_BUF_COUNT]; - __u32 u_bufs[0]; -}; - -struct update_buf { - __u32 ub_magic; - __u32 ub_count; - __u32 ub_bufs[0]; -}; - -#define UPDATE_REPLY_V1 0x00BD0001 -struct update_reply { - __u32 ur_version; - __u32 ur_count; - __u32 ur_lens[0]; -}; - -void lustre_swab_update_buf(struct update_buf *ub); -void lustre_swab_update_reply_buf(struct update_reply *ur); - /** layout swap request structure * fid1 and fid2 are in mdt_body */ diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h index 2b4dd656d5f5..276906e646f5 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h @@ -85,9 +85,8 @@ struct obd_statfs { __u32 os_namelen; __u64 os_maxbytes; __u32 os_state; /**< obd_statfs_state OS_STATE_* flag */ - __u32 os_fprecreated; /* objs available now to the caller */ - /* used in QoS code to find preferred - * OSTs */ + __u32 os_fprecreated; /* objs available now to the caller */ + /* used in QoS code to find preferred OSTs */ __u32 os_spare2; __u32 os_spare3; __u32 os_spare4; @@ -135,8 +134,9 @@ struct filter_fid_old { /* Userspace should treat lu_fid as opaque, and only use the following methods * to print or parse them. Other functions (e.g. compare, swab) could be moved - * here from lustre_idl.h if needed. */ -typedef struct lu_fid lustre_fid; + * here from lustre_idl.h if needed. + */ +struct lu_fid; /** * Following struct for object attributes, that will be kept inode's EA. @@ -266,7 +266,8 @@ struct ost_id { /* Define O_LOV_DELAY_CREATE to be a mask that is not useful for regular * files, but are unlikely to be used in practice and are not harmful if * used incorrectly. O_NOCTTY and FASYNC are only meaningful for character - * devices and are safe for use on new files (See LU-812, LU-4209). */ + * devices and are safe for use on new files (See LU-812, LU-4209). 
+ */ #define O_LOV_DELAY_CREATE (O_NOCTTY | FASYNC) #define LL_FILE_IGNORE_LOCK 0x00000001 @@ -302,7 +303,8 @@ struct ost_id { * The limit of 12 pages is somewhat arbitrary, but is a reasonably large * allocation that is sufficient for the current generation of systems. * - * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) */ + * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) + */ #define LOV_MAX_STRIPE_COUNT 2000 /* ((12 * 4096 - 256) / 24) */ #define LOV_ALL_STRIPES 0xffff /* only valid for directories */ #define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */ @@ -323,9 +325,11 @@ struct lov_user_md_v1 { /* LOV EA user data (host-endian) */ __u16 lmm_stripe_count; /* num stripes in use for this object */ union { __u16 lmm_stripe_offset; /* starting stripe offset in - * lmm_objects, use when writing */ + * lmm_objects, use when writing + */ __u16 lmm_layout_gen; /* layout generation number - * used when reading */ + * used when reading + */ }; struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */ } __attribute__((packed, __may_alias__)); @@ -338,9 +342,11 @@ struct lov_user_md_v3 { /* LOV EA user data (host-endian) */ __u16 lmm_stripe_count; /* num stripes in use for this object */ union { __u16 lmm_stripe_offset; /* starting stripe offset in - * lmm_objects, use when writing */ + * lmm_objects, use when writing + */ __u16 lmm_layout_gen; /* layout generation number - * used when reading */ + * used when reading + */ }; char lmm_pool_name[LOV_MAXPOOLNAME]; /* pool name */ struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */ @@ -442,9 +448,13 @@ static inline void obd_str2uuid(struct obd_uuid *uuid, const char *tmp) /* For printf's only, make sure uuid is terminated */ static inline char *obd_uuid2str(const struct obd_uuid *uuid) { + if (!uuid) + return NULL; + if (uuid->uuid[sizeof(*uuid) - 1] != '\0') { /* Obviously not safe, but for printfs, no real harm done... - we're always null-terminated, even in a race. */ + * we're always null-terminated, even in a race. + */ static char temp[sizeof(*uuid)]; memcpy(temp, uuid->uuid, sizeof(*uuid) - 1); @@ -455,8 +465,9 @@ static inline char *obd_uuid2str(const struct obd_uuid *uuid) } /* Extract fsname from uuid (or target name) of a target - e.g. (myfs-OST0007_UUID -> myfs) - see also deuuidify. */ + * e.g. (myfs-OST0007_UUID -> myfs) + * see also deuuidify. + */ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) { char *p; @@ -465,11 +476,12 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) buf[buflen - 1] = '\0'; p = strrchr(buf, '-'); if (p) - *p = '\0'; + *p = '\0'; } /* printf display format - e.g. printf("file FID is "DFID"\n", PFID(fid)); */ + * e.g. printf("file FID is "DFID"\n", PFID(fid)); + */ #define FID_NOBRACE_LEN 40 #define FID_LEN (FID_NOBRACE_LEN + 2) #define DFID_NOBRACE "%#llx:0x%x:0x%x" @@ -480,7 +492,8 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) (fid)->f_ver /* scanf input parse format -- strip '[' first. - e.g. sscanf(fidstr, SFID, RFID(&fid)); */ + * e.g. 
sscanf(fidstr, SFID, RFID(&fid)); + */ #define SFID "0x%llx:0x%x:0x%x" #define RFID(fid) \ &((fid)->f_seq), \ @@ -542,22 +555,6 @@ enum { RMT_RGETFACL = 4 }; -#ifdef NEED_QUOTA_DEFS -#ifndef QIF_BLIMITS -#define QIF_BLIMITS 1 -#define QIF_SPACE 2 -#define QIF_ILIMITS 4 -#define QIF_INODES 8 -#define QIF_BTIME 16 -#define QIF_ITIME 32 -#define QIF_LIMITS (QIF_BLIMITS | QIF_ILIMITS) -#define QIF_USAGE (QIF_SPACE | QIF_INODES) -#define QIF_TIMES (QIF_BTIME | QIF_ITIME) -#define QIF_ALL (QIF_LIMITS | QIF_USAGE | QIF_TIMES) -#endif - -#endif /* !__KERNEL__ */ - /* lustre volatile file support * file name header: .^L^S^T^R:volatile" */ @@ -566,9 +563,9 @@ enum { /* hdr + MDT index */ #define LUSTRE_VOLATILE_IDX LUSTRE_VOLATILE_HDR":%.4X:" -typedef enum lustre_quota_version { +enum lustre_quota_version { LUSTRE_QUOTA_V2 = 1 -} lustre_quota_version_t; +}; /* XXX: same as if_dqinfo struct in kernel */ struct obd_dqinfo { @@ -698,7 +695,8 @@ static inline const char *changelog_type2str(int type) #define CLF_HSM_LAST 15 /* Remove bits higher than _h, then extract the value - * between _h and _l by shifting lower weigth to bit 0. */ + * between _h and _l by shifting lower weigth to bit 0. + */ #define CLF_GET_BITS(_b, _h, _l) (((_b << (CLF_HSM_LAST - _h)) & 0xFFFF) \ >> (CLF_HSM_LAST - _h + _l)) @@ -761,10 +759,10 @@ struct changelog_rec { __u64 cr_prev; /**< last index for this target fid */ __u64 cr_time; union { - lustre_fid cr_tfid; /**< target fid */ + struct lu_fid cr_tfid; /**< target fid */ __u32 cr_markerflags; /**< CL_MARK flags */ }; - lustre_fid cr_pfid; /**< parent fid */ + struct lu_fid cr_pfid; /**< parent fid */ char cr_name[0]; /**< last element */ } __packed; @@ -775,18 +773,19 @@ struct changelog_rec { struct changelog_ext_rec { __u16 cr_namelen; __u16 cr_flags; /**< (flags & CLF_FLAGMASK) | - CLF_EXT_VERSION */ + * CLF_EXT_VERSION + */ __u32 cr_type; /**< \a changelog_rec_type */ __u64 cr_index; /**< changelog record number */ __u64 cr_prev; /**< last index for this target fid */ __u64 cr_time; union { - lustre_fid cr_tfid; /**< target fid */ + struct lu_fid cr_tfid; /**< target fid */ __u32 cr_markerflags; /**< CL_MARK flags */ }; - lustre_fid cr_pfid; /**< target parent fid */ - lustre_fid cr_sfid; /**< source fid, or zero */ - lustre_fid cr_spfid; /**< source parent fid, or zero */ + struct lu_fid cr_pfid; /**< target parent fid */ + struct lu_fid cr_sfid; /**< source fid, or zero */ + struct lu_fid cr_spfid; /**< source parent fid, or zero */ char cr_name[0]; /**< last element */ } __packed; @@ -835,7 +834,8 @@ struct ioc_data_version { }; #define LL_DV_NOFLUSH 0x01 /* Do not take READ EXTENT LOCK before sampling - version. Dirty caches are left unchanged. */ + * version. Dirty caches are left unchanged. 
+ */ #ifndef offsetof # define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb))) @@ -976,8 +976,8 @@ struct hsm_request { }; struct hsm_user_item { - lustre_fid hui_fid; - struct hsm_extent hui_extent; + struct lu_fid hui_fid; + struct hsm_extent hui_extent; } __packed; struct hsm_user_request { @@ -1046,8 +1046,8 @@ static inline char *hsm_copytool_action2name(enum hsm_copytool_action a) struct hsm_action_item { __u32 hai_len; /* valid size of this struct */ __u32 hai_action; /* hsm_copytool_action, but use known size */ - lustre_fid hai_fid; /* Lustre FID to operated on */ - lustre_fid hai_dfid; /* fid used for data access */ + struct lu_fid hai_fid; /* Lustre FID to operated on */ + struct lu_fid hai_dfid; /* fid used for data access */ struct hsm_extent hai_extent; /* byte range to operate on */ __u64 hai_cookie; /* action cookie from coordinator */ __u64 hai_gid; /* grouplock id */ @@ -1095,7 +1095,8 @@ struct hsm_action_list { __u32 padding1; char hal_fsname[0]; /* null-terminated */ /* struct hsm_action_item[hal_count] follows, aligned on 8-byte - boundaries. See hai_zero */ + * boundaries. See hai_zero + */ } __packed; #ifndef HAVE_CFS_SIZE_ROUND @@ -1157,7 +1158,7 @@ struct hsm_user_import { #define HP_FLAG_RETRY 0x02 struct hsm_progress { - lustre_fid hp_fid; + struct lu_fid hp_fid; __u64 hp_cookie; struct hsm_extent hp_extent; __u16 hp_flags; diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h index eb6b292b7b25..bb16ae980b98 100644 --- a/drivers/staging/lustre/lustre/include/lustre_cfg.h +++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h @@ -50,12 +50,13 @@ #define LUSTRE_CFG_MAX_BUFCOUNT 8 #define LCFG_HDR_SIZE(count) \ - cfs_size_round(offsetof (struct lustre_cfg, lcfg_buflens[(count)])) + cfs_size_round(offsetof(struct lustre_cfg, lcfg_buflens[(count)])) /** If the LCFG_REQUIRED bit is set in a configuration command, * then the client is required to understand this parameter * in order to mount the filesystem. If it does not understand - * a REQUIRED command the client mount will fail. */ + * a REQUIRED command the client mount will fail. 
+ */ #define LCFG_REQUIRED 0x0001000 enum lcfg_command_type { @@ -87,9 +88,11 @@ enum lcfg_command_type { LCFG_POOL_DEL = 0x00ce023, /**< destroy an ost pool name */ LCFG_SET_LDLM_TIMEOUT = 0x00ce030, /**< set ldlm_timeout */ LCFG_PRE_CLEANUP = 0x00cf031, /**< call type-specific pre - * cleanup cleanup */ + * cleanup cleanup + */ LCFG_SET_PARAM = 0x00ce032, /**< use set_param syntax to set - *a proc parameters */ + * a proc parameters + */ }; struct lustre_cfg_bufs { @@ -128,7 +131,7 @@ static inline void lustre_cfg_bufs_set(struct lustre_cfg_bufs *bufs, { if (index >= LUSTRE_CFG_MAX_BUFCOUNT) return; - if (bufs == NULL) + if (!bufs) return; if (bufs->lcfg_bufcount <= index) @@ -158,7 +161,6 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index) int offset; int bufcount; - LASSERT (lcfg != NULL); LASSERT (index >= 0); bufcount = lcfg->lcfg_bufcount; @@ -191,7 +193,7 @@ static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index) return NULL; s = lustre_cfg_buf(lcfg, index); - if (s == NULL) + if (!s) return NULL; /* @@ -252,10 +254,6 @@ static inline struct lustre_cfg *lustre_cfg_new(int cmd, static inline void lustre_cfg_free(struct lustre_cfg *lcfg) { - int len; - - len = lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens); - kfree(lcfg); return; } diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h index 7c6933ffc9c1..95fd36063f55 100644 --- a/drivers/staging/lustre/lustre/include/lustre_disk.h +++ b/drivers/staging/lustre/lustre/include/lustre_disk.h @@ -65,7 +65,8 @@ /****************** mount command *********************/ /* The lmd is only used internally by Lustre; mount simply passes - everything as string options */ + * everything as string options + */ #define LMD_MAGIC 0xbdacbd03 #define LMD_PARAMS_MAXLEN 4096 @@ -79,23 +80,26 @@ struct lustre_mount_data { int lmd_recovery_time_soft; int lmd_recovery_time_hard; char *lmd_dev; /* device name */ - char *lmd_profile; /* client only */ + char *lmd_profile; /* client only */ char *lmd_mgssec; /* sptlrpc flavor to mgs */ - char *lmd_opts; /* lustre mount options (as opposed to - _device_ mount options) */ + char *lmd_opts; /* lustre mount options (as opposed to + * _device_ mount options) + */ char *lmd_params; /* lustre params */ - __u32 *lmd_exclude; /* array of OSTs to ignore */ - char *lmd_mgs; /* MGS nid */ - char *lmd_osd_type; /* OSD type */ + __u32 *lmd_exclude; /* array of OSTs to ignore */ + char *lmd_mgs; /* MGS nid */ + char *lmd_osd_type; /* OSD type */ }; #define LMD_FLG_SERVER 0x0001 /* Mounting a server */ #define LMD_FLG_CLIENT 0x0002 /* Mounting a client */ #define LMD_FLG_ABORT_RECOV 0x0008 /* Abort recovery */ #define LMD_FLG_NOSVC 0x0010 /* Only start MGS/MGC for servers, - no other services */ -#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, reusing - existing MGS services */ + * no other services + */ +#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, + * reusing existing MGS services + */ #define LMD_FLG_WRITECONF 0x0040 /* Rewrite config log */ #define LMD_FLG_NOIR 0x0080 /* NO imperative recovery */ #define LMD_FLG_NOSCRUB 0x0100 /* Do not trigger scrub automatically */ @@ -116,231 +120,6 @@ struct lustre_mount_data { #define LR_EXPIRE_INTERVALS 16 /**< number of intervals to track transno */ #define ENOENT_VERSION 1 /** 'virtual' version of non-existent object */ -#define LR_SERVER_SIZE 512 -#define LR_CLIENT_START 8192 -#define LR_CLIENT_SIZE 128 -#if LR_CLIENT_START < 
LR_SERVER_SIZE -#error "Can't have LR_CLIENT_START < LR_SERVER_SIZE" -#endif - -/* - * This limit is arbitrary (131072 clients on x86), but it is convenient to use - * 2^n * PAGE_CACHE_SIZE * 8 for the number of bits that fit an order-n allocation. - * If we need more than 131072 clients (order-2 allocation on x86) then this - * should become an array of single-page pointers that are allocated on demand. - */ -#if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8) -#define LR_MAX_CLIENTS (128 * 1024UL) -#else -#define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8) -#endif - -/** COMPAT_146: this is an OST (temporary) */ -#define OBD_COMPAT_OST 0x00000002 -/** COMPAT_146: this is an MDT (temporary) */ -#define OBD_COMPAT_MDT 0x00000004 -/** 2.0 server, interop flag to show server version is changed */ -#define OBD_COMPAT_20 0x00000008 - -/** MDS handles LOV_OBJID file */ -#define OBD_ROCOMPAT_LOVOBJID 0x00000001 - -/** OST handles group subdirs */ -#define OBD_INCOMPAT_GROUPS 0x00000001 -/** this is an OST */ -#define OBD_INCOMPAT_OST 0x00000002 -/** this is an MDT */ -#define OBD_INCOMPAT_MDT 0x00000004 -/** common last_rvcd format */ -#define OBD_INCOMPAT_COMMON_LR 0x00000008 -/** FID is enabled */ -#define OBD_INCOMPAT_FID 0x00000010 -/** Size-on-MDS is enabled */ -#define OBD_INCOMPAT_SOM 0x00000020 -/** filesystem using iam format to store directory entries */ -#define OBD_INCOMPAT_IAM_DIR 0x00000040 -/** LMA attribute contains per-inode incompatible flags */ -#define OBD_INCOMPAT_LMA 0x00000080 -/** lmm_stripe_count has been shrunk from __u32 to __u16 and the remaining 16 - * bits are now used to store a generation. Once we start changing the layout - * and bumping the generation, old versions expecting a 32-bit lmm_stripe_count - * will be confused by interpreting stripe_count | gen << 16 as the actual - * stripe count */ -#define OBD_INCOMPAT_LMM_VER 0x00000100 -/** multiple OI files for MDT */ -#define OBD_INCOMPAT_MULTI_OI 0x00000200 - -/* Data stored per server at the head of the last_rcvd file. In le32 order. - This should be common to filter_internal.h, lustre_mds.h */ -struct lr_server_data { - __u8 lsd_uuid[40]; /* server UUID */ - __u64 lsd_last_transno; /* last completed transaction ID */ - __u64 lsd_compat14; /* reserved - compat with old last_rcvd */ - __u64 lsd_mount_count; /* incarnation number */ - __u32 lsd_feature_compat; /* compatible feature flags */ - __u32 lsd_feature_rocompat;/* read-only compatible feature flags */ - __u32 lsd_feature_incompat;/* incompatible feature flags */ - __u32 lsd_server_size; /* size of server data area */ - __u32 lsd_client_start; /* start of per-client data area */ - __u16 lsd_client_size; /* size of per-client data area */ - __u16 lsd_subdir_count; /* number of subdirectories for objects */ - __u64 lsd_catalog_oid; /* recovery catalog object id */ - __u32 lsd_catalog_ogen; /* recovery catalog inode generation */ - __u8 lsd_peeruuid[40]; /* UUID of MDS associated with this OST */ - __u32 lsd_osd_index; /* index number of OST in LOV */ - __u32 lsd_padding1; /* was lsd_mdt_index, unused in 2.4.0 */ - __u32 lsd_start_epoch; /* VBR: start epoch from last boot */ - /** transaction values since lsd_trans_table_time */ - __u64 lsd_trans_table[LR_EXPIRE_INTERVALS]; - /** start point of transno table below */ - __u32 lsd_trans_table_time; /* time of first slot in table above */ - __u32 lsd_expire_intervals; /* LR_EXPIRE_INTERVALS */ - __u8 lsd_padding[LR_SERVER_SIZE - 288]; -}; - -/* Data stored per client in the last_rcvd file. In le32 order. 
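The block being removed here documented the on-disk layout of the last_rcvd file: one LR_SERVER_SIZE (512 byte) server slot at the head of the file, then fixed LR_CLIENT_SIZE (128 byte) per-client slots starting at LR_CLIENT_START (8192). A small sketch of the offset arithmetic that layout implies; the helper name is invented and the indexing rule (slot N at LR_CLIENT_START + N * LR_CLIENT_SIZE) is an assumption drawn from the constants, not code carried by this patch:

#include <stdio.h>

#define LR_SERVER_SIZE	512	/* server slot at the head of last_rcvd */
#define LR_CLIENT_START	8192	/* offset of the first per-client slot */
#define LR_CLIENT_SIZE	128	/* size of each per-client slot */

/* assumed byte offset of client slot 'idx' within the last_rcvd file */
static long lr_client_offset(int idx)
{
	return LR_CLIENT_START + (long)idx * LR_CLIENT_SIZE;
}

int main(void)
{
	int idx;

	for (idx = 0; idx < 4; idx++)
		printf("client %d -> offset %ld\n", idx, lr_client_offset(idx));
	return 0;
}

Both removed structures are padded out to exactly those slot sizes (lsd_padding[LR_SERVER_SIZE - 288], lcd_padding[LR_CLIENT_SIZE - 128]), which is what makes fixed-stride addressing of the records possible.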
*/ -struct lsd_client_data { - __u8 lcd_uuid[40]; /* client UUID */ - __u64 lcd_last_transno; /* last completed transaction ID */ - __u64 lcd_last_xid; /* xid for the last transaction */ - __u32 lcd_last_result; /* result from last RPC */ - __u32 lcd_last_data; /* per-op data (disposition for open &c.) */ - /* for MDS_CLOSE requests */ - __u64 lcd_last_close_transno; /* last completed transaction ID */ - __u64 lcd_last_close_xid; /* xid for the last transaction */ - __u32 lcd_last_close_result; /* result from last RPC */ - __u32 lcd_last_close_data; /* per-op data */ - /* VBR: last versions */ - __u64 lcd_pre_versions[4]; - __u32 lcd_last_epoch; - /** orphans handling for delayed export rely on that */ - __u32 lcd_first_epoch; - __u8 lcd_padding[LR_CLIENT_SIZE - 128]; -}; - -/* bug20354: the lcd_uuid for export of clients may be wrong */ -static inline void check_lcd(char *obd_name, int index, - struct lsd_client_data *lcd) -{ - int length = sizeof(lcd->lcd_uuid); - - if (strnlen((char *)lcd->lcd_uuid, length) == length) { - lcd->lcd_uuid[length - 1] = '\0'; - - LCONSOLE_ERROR("the client UUID (%s) on %s for exports stored in last_rcvd(index = %d) is bad!\n", - lcd->lcd_uuid, obd_name, index); - } -} - -/* last_rcvd handling */ -static inline void lsd_le_to_cpu(struct lr_server_data *buf, - struct lr_server_data *lsd) -{ - int i; - - memcpy(lsd->lsd_uuid, buf->lsd_uuid, sizeof(lsd->lsd_uuid)); - lsd->lsd_last_transno = le64_to_cpu(buf->lsd_last_transno); - lsd->lsd_compat14 = le64_to_cpu(buf->lsd_compat14); - lsd->lsd_mount_count = le64_to_cpu(buf->lsd_mount_count); - lsd->lsd_feature_compat = le32_to_cpu(buf->lsd_feature_compat); - lsd->lsd_feature_rocompat = le32_to_cpu(buf->lsd_feature_rocompat); - lsd->lsd_feature_incompat = le32_to_cpu(buf->lsd_feature_incompat); - lsd->lsd_server_size = le32_to_cpu(buf->lsd_server_size); - lsd->lsd_client_start = le32_to_cpu(buf->lsd_client_start); - lsd->lsd_client_size = le16_to_cpu(buf->lsd_client_size); - lsd->lsd_subdir_count = le16_to_cpu(buf->lsd_subdir_count); - lsd->lsd_catalog_oid = le64_to_cpu(buf->lsd_catalog_oid); - lsd->lsd_catalog_ogen = le32_to_cpu(buf->lsd_catalog_ogen); - memcpy(lsd->lsd_peeruuid, buf->lsd_peeruuid, sizeof(lsd->lsd_peeruuid)); - lsd->lsd_osd_index = le32_to_cpu(buf->lsd_osd_index); - lsd->lsd_padding1 = le32_to_cpu(buf->lsd_padding1); - lsd->lsd_start_epoch = le32_to_cpu(buf->lsd_start_epoch); - for (i = 0; i < LR_EXPIRE_INTERVALS; i++) - lsd->lsd_trans_table[i] = le64_to_cpu(buf->lsd_trans_table[i]); - lsd->lsd_trans_table_time = le32_to_cpu(buf->lsd_trans_table_time); - lsd->lsd_expire_intervals = le32_to_cpu(buf->lsd_expire_intervals); -} - -static inline void lsd_cpu_to_le(struct lr_server_data *lsd, - struct lr_server_data *buf) -{ - int i; - - memcpy(buf->lsd_uuid, lsd->lsd_uuid, sizeof(buf->lsd_uuid)); - buf->lsd_last_transno = cpu_to_le64(lsd->lsd_last_transno); - buf->lsd_compat14 = cpu_to_le64(lsd->lsd_compat14); - buf->lsd_mount_count = cpu_to_le64(lsd->lsd_mount_count); - buf->lsd_feature_compat = cpu_to_le32(lsd->lsd_feature_compat); - buf->lsd_feature_rocompat = cpu_to_le32(lsd->lsd_feature_rocompat); - buf->lsd_feature_incompat = cpu_to_le32(lsd->lsd_feature_incompat); - buf->lsd_server_size = cpu_to_le32(lsd->lsd_server_size); - buf->lsd_client_start = cpu_to_le32(lsd->lsd_client_start); - buf->lsd_client_size = cpu_to_le16(lsd->lsd_client_size); - buf->lsd_subdir_count = cpu_to_le16(lsd->lsd_subdir_count); - buf->lsd_catalog_oid = cpu_to_le64(lsd->lsd_catalog_oid); - buf->lsd_catalog_ogen = 
cpu_to_le32(lsd->lsd_catalog_ogen); - memcpy(buf->lsd_peeruuid, lsd->lsd_peeruuid, sizeof(buf->lsd_peeruuid)); - buf->lsd_osd_index = cpu_to_le32(lsd->lsd_osd_index); - buf->lsd_padding1 = cpu_to_le32(lsd->lsd_padding1); - buf->lsd_start_epoch = cpu_to_le32(lsd->lsd_start_epoch); - for (i = 0; i < LR_EXPIRE_INTERVALS; i++) - buf->lsd_trans_table[i] = cpu_to_le64(lsd->lsd_trans_table[i]); - buf->lsd_trans_table_time = cpu_to_le32(lsd->lsd_trans_table_time); - buf->lsd_expire_intervals = cpu_to_le32(lsd->lsd_expire_intervals); -} - -static inline void lcd_le_to_cpu(struct lsd_client_data *buf, - struct lsd_client_data *lcd) -{ - memcpy(lcd->lcd_uuid, buf->lcd_uuid, sizeof (lcd->lcd_uuid)); - lcd->lcd_last_transno = le64_to_cpu(buf->lcd_last_transno); - lcd->lcd_last_xid = le64_to_cpu(buf->lcd_last_xid); - lcd->lcd_last_result = le32_to_cpu(buf->lcd_last_result); - lcd->lcd_last_data = le32_to_cpu(buf->lcd_last_data); - lcd->lcd_last_close_transno = le64_to_cpu(buf->lcd_last_close_transno); - lcd->lcd_last_close_xid = le64_to_cpu(buf->lcd_last_close_xid); - lcd->lcd_last_close_result = le32_to_cpu(buf->lcd_last_close_result); - lcd->lcd_last_close_data = le32_to_cpu(buf->lcd_last_close_data); - lcd->lcd_pre_versions[0] = le64_to_cpu(buf->lcd_pre_versions[0]); - lcd->lcd_pre_versions[1] = le64_to_cpu(buf->lcd_pre_versions[1]); - lcd->lcd_pre_versions[2] = le64_to_cpu(buf->lcd_pre_versions[2]); - lcd->lcd_pre_versions[3] = le64_to_cpu(buf->lcd_pre_versions[3]); - lcd->lcd_last_epoch = le32_to_cpu(buf->lcd_last_epoch); - lcd->lcd_first_epoch = le32_to_cpu(buf->lcd_first_epoch); -} - -static inline void lcd_cpu_to_le(struct lsd_client_data *lcd, - struct lsd_client_data *buf) -{ - memcpy(buf->lcd_uuid, lcd->lcd_uuid, sizeof (lcd->lcd_uuid)); - buf->lcd_last_transno = cpu_to_le64(lcd->lcd_last_transno); - buf->lcd_last_xid = cpu_to_le64(lcd->lcd_last_xid); - buf->lcd_last_result = cpu_to_le32(lcd->lcd_last_result); - buf->lcd_last_data = cpu_to_le32(lcd->lcd_last_data); - buf->lcd_last_close_transno = cpu_to_le64(lcd->lcd_last_close_transno); - buf->lcd_last_close_xid = cpu_to_le64(lcd->lcd_last_close_xid); - buf->lcd_last_close_result = cpu_to_le32(lcd->lcd_last_close_result); - buf->lcd_last_close_data = cpu_to_le32(lcd->lcd_last_close_data); - buf->lcd_pre_versions[0] = cpu_to_le64(lcd->lcd_pre_versions[0]); - buf->lcd_pre_versions[1] = cpu_to_le64(lcd->lcd_pre_versions[1]); - buf->lcd_pre_versions[2] = cpu_to_le64(lcd->lcd_pre_versions[2]); - buf->lcd_pre_versions[3] = cpu_to_le64(lcd->lcd_pre_versions[3]); - buf->lcd_last_epoch = cpu_to_le32(lcd->lcd_last_epoch); - buf->lcd_first_epoch = cpu_to_le32(lcd->lcd_first_epoch); -} - -static inline __u64 lcd_last_transno(struct lsd_client_data *lcd) -{ - return (lcd->lcd_last_transno > lcd->lcd_last_close_transno ? - lcd->lcd_last_transno : lcd->lcd_last_close_transno); -} - -static inline __u64 lcd_last_xid(struct lsd_client_data *lcd) -{ - return (lcd->lcd_last_xid > lcd->lcd_last_close_xid ? 
- lcd->lcd_last_xid : lcd->lcd_last_close_xid); -} - /****************** superblock additional info *********************/ struct ll_sb_info; @@ -360,7 +139,8 @@ struct lustre_sb_info { char lsi_osd_type[16]; char lsi_fstype[16]; struct backing_dev_info lsi_bdi; /* each client mountpoint needs - own backing_dev_info */ + * own backing_dev_info + */ }; #define LSI_UMOUNT_FAILOVER 0x00200000 diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h index 9b319f1df025..8b0364f71129 100644 --- a/drivers/staging/lustre/lustre/include/lustre_dlm.h +++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h @@ -69,7 +69,7 @@ struct obd_device; /** * LDLM non-error return states */ -typedef enum { +enum ldlm_error { ELDLM_OK = 0, ELDLM_LOCK_CHANGED = 300, @@ -80,7 +80,7 @@ typedef enum { ELDLM_NAMESPACE_EXISTS = 400, ELDLM_BAD_NAMESPACE = 401 -} ldlm_error_t; +}; /** * LDLM namespace type. @@ -145,16 +145,17 @@ typedef enum { #define LCK_COMPAT_COS (LCK_COS) /** @} Lock Compatibility Matrix */ -extern ldlm_mode_t lck_compat_array[]; +extern enum ldlm_mode lck_compat_array[]; -static inline void lockmode_verify(ldlm_mode_t mode) +static inline void lockmode_verify(enum ldlm_mode mode) { - LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE); + LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE); } -static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode) +static inline int lockmode_compat(enum ldlm_mode exist_mode, + enum ldlm_mode new_mode) { - return (lck_compat_array[exist_mode] & new_mode); + return (lck_compat_array[exist_mode] & new_mode); } /* @@ -249,7 +250,8 @@ struct ldlm_pool { /** Current biggest client lock volume. Protected by pl_lock. */ __u64 pl_client_lock_volume; /** Lock volume factor. SLV on client is calculated as following: - * server_slv * lock_volume_factor. */ + * server_slv * lock_volume_factor. + */ atomic_t pl_lock_volume_factor; /** Time when last SLV from server was obtained. */ time64_t pl_recalc_time; @@ -295,10 +297,10 @@ struct ldlm_valblock_ops { * LDLM pools related, type of lock pool in the namespace. * Greedy means release cached locks aggressively */ -typedef enum { +enum ldlm_appetite { LDLM_NAMESPACE_GREEDY = 1 << 0, LDLM_NAMESPACE_MODEST = 1 << 1 -} ldlm_appetite_t; +}; struct ldlm_ns_bucket { /** back pointer to namespace */ @@ -317,7 +319,7 @@ enum { LDLM_NSS_LAST }; -typedef enum { +enum ldlm_ns_type { /** invalid type */ LDLM_NS_TYPE_UNKNOWN = 0, /** mdc namespace */ @@ -332,7 +334,7 @@ typedef enum { LDLM_NS_TYPE_MGC, /** mgs namespace */ LDLM_NS_TYPE_MGT, -} ldlm_ns_type_t; +}; /** * LDLM Namespace. @@ -373,7 +375,7 @@ struct ldlm_namespace { /** * Namespace connect flags supported by server (may be changed via - * /proc, LRU resize may be disabled/enabled). + * sysfs, LRU resize may be disabled/enabled). */ __u64 ns_connect_flags; @@ -439,7 +441,7 @@ struct ldlm_namespace { /** LDLM pool structure for this namespace */ struct ldlm_pool ns_pool; /** Definition of how eagerly unused locks will be released from LRU */ - ldlm_appetite_t ns_appetite; + enum ldlm_appetite ns_appetite; /** Limit of parallel AST RPC count. 
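lockmode_verify() and lockmode_compat() above rely on every lock mode being a distinct bit and on lck_compat_array[] holding, for each already-granted mode, the bitmask of request modes it tolerates, so a compatibility check is a single AND. A self-contained sketch of that mechanism with a reduced, made-up mode set (the real table covers more modes and different entries):

#include <stdio.h>

/* illustrative lock modes, each a distinct bit; not the full DLM set */
enum demo_mode {
	DEMO_LCK_EX = 1 << 0,	/* exclusive */
	DEMO_LCK_PW = 1 << 1,	/* protected write */
	DEMO_LCK_PR = 1 << 2,	/* protected read */
	DEMO_LCK_NL = 1 << 3,	/* null */
};

/* per granted mode, the bitmask of request modes that may coexist with it */
static const unsigned int demo_compat[] = {
	[DEMO_LCK_EX] = DEMO_LCK_NL,
	[DEMO_LCK_PW] = DEMO_LCK_NL,
	[DEMO_LCK_PR] = DEMO_LCK_PR | DEMO_LCK_NL,
	[DEMO_LCK_NL] = DEMO_LCK_EX | DEMO_LCK_PW | DEMO_LCK_PR | DEMO_LCK_NL,
};

static int demo_modes_compat(enum demo_mode granted, enum demo_mode req)
{
	return (demo_compat[granted] & req) != 0;
}

int main(void)
{
	printf("PR vs PR: %d\n", demo_modes_compat(DEMO_LCK_PR, DEMO_LCK_PR));
	printf("PR vs PW: %d\n", demo_modes_compat(DEMO_LCK_PR, DEMO_LCK_PW));
	return 0;
}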
*/ unsigned ns_max_parallel_ast; @@ -465,7 +467,6 @@ struct ldlm_namespace { */ static inline int ns_connect_cancelset(struct ldlm_namespace *ns) { - LASSERT(ns != NULL); return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET); } @@ -474,14 +475,12 @@ static inline int ns_connect_cancelset(struct ldlm_namespace *ns) */ static inline int ns_connect_lru_resize(struct ldlm_namespace *ns) { - LASSERT(ns != NULL); return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE); } static inline void ns_register_cancel(struct ldlm_namespace *ns, ldlm_cancel_for_recovery arg) { - LASSERT(ns != NULL); ns->ns_cancel_for_recovery = arg; } @@ -503,7 +502,8 @@ struct ldlm_glimpse_work { struct list_head gl_list; /* linkage to other gl work structs */ __u32 gl_flags;/* see LDLM_GL_WORK_* below */ union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in - * glimpse callback request */ + * glimpse callback request + */ }; /** The ldlm_glimpse_work is allocated on the stack and should not be freed. */ @@ -512,8 +512,9 @@ struct ldlm_glimpse_work { /** Interval node data for each LDLM_EXTENT lock. */ struct ldlm_interval { struct interval_node li_node; /* node for tree management */ - struct list_head li_group; /* the locks which have the same - * policy - group of the policy */ + struct list_head li_group; /* the locks which have the same + * policy - group of the policy + */ }; #define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node) @@ -527,7 +528,7 @@ struct ldlm_interval { struct ldlm_interval_tree { /** Tree size. */ int lit_size; - ldlm_mode_t lit_mode; /* lock mode */ + enum ldlm_mode lit_mode; /* lock mode */ struct interval_node *lit_root; /* actual ldlm_interval */ }; @@ -535,12 +536,13 @@ struct ldlm_interval_tree { #define LUSTRE_TRACKS_LOCK_EXP_REFS (0) /** Cancel flags. */ -typedef enum { +enum ldlm_cancel_flags { LCF_ASYNC = 0x1, /* Cancel locks asynchronously. */ LCF_LOCAL = 0x2, /* Cancel locks locally, not notifing server */ LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST - * in the same RPC */ -} ldlm_cancel_flags_t; + * in the same RPC + */ +}; struct ldlm_flock { __u64 start; @@ -559,7 +561,7 @@ typedef union { struct ldlm_inodebits l_inodebits; } ldlm_policy_data_t; -void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type, +void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, const ldlm_wire_policy_data_t *wpolicy, ldlm_policy_data_t *lpolicy); @@ -637,11 +639,11 @@ struct ldlm_lock { * Requested mode. * Protected by lr_lock. */ - ldlm_mode_t l_req_mode; + enum ldlm_mode l_req_mode; /** * Granted mode, also protected by lr_lock. */ - ldlm_mode_t l_granted_mode; + enum ldlm_mode l_granted_mode; /** Lock completion handler pointer. Called when lock is granted. */ ldlm_completion_callback l_completion_ast; /** @@ -841,20 +843,19 @@ struct ldlm_resource { /** * protected by lr_lock - * @{ */ + * @{ + */ /** List of locks in granted state */ struct list_head lr_granted; /** * List of locks that could not be granted due to conflicts and - * that are waiting for conflicts to go away */ + * that are waiting for conflicts to go away + */ struct list_head lr_waiting; /** @} */ - /* XXX No longer needed? Remove ASAP */ - ldlm_mode_t lr_most_restr; - /** Type of locks this resource can hold. Only one type per resource. 
*/ - ldlm_type_t lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */ + enum ldlm_type lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */ /** Resource name */ struct ldlm_res_id lr_name; @@ -921,7 +922,7 @@ static inline int ldlm_lvbo_init(struct ldlm_resource *res) { struct ldlm_namespace *ns = ldlm_res_to_ns(res); - if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_init != NULL) + if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) return ns->ns_lvbo->lvbo_init(res); return 0; @@ -931,7 +932,7 @@ static inline int ldlm_lvbo_size(struct ldlm_lock *lock) { struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_size != NULL) + if (ns->ns_lvbo && ns->ns_lvbo->lvbo_size) return ns->ns_lvbo->lvbo_size(lock); return 0; @@ -941,10 +942,9 @@ static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len) { struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - if (ns->ns_lvbo != NULL) { - LASSERT(ns->ns_lvbo->lvbo_fill != NULL); + if (ns->ns_lvbo) return ns->ns_lvbo->lvbo_fill(lock, buf, len); - } + return 0; } @@ -1015,7 +1015,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, /** Non-rate-limited lock printing function for debugging purposes. */ #define LDLM_DEBUG(lock, fmt, a...) do { \ - if (likely(lock != NULL)) { \ + if (likely(lock)) { \ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL); \ ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, \ "### " fmt, ##a); \ @@ -1025,7 +1025,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, } while (0) typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags, - int first_enq, ldlm_error_t *err, + int first_enq, enum ldlm_error *err, struct list_head *work_list); /** @@ -1042,7 +1042,8 @@ typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *); * * LDLM provides for a way to iterate through every lock on a resource or * namespace or every resource in a namespace. 
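The iteration interfaces described here follow the usual callback-plus-cookie shape: the walker hands every lock (or resource) and a caller-supplied opaque pointer to the callback, which can end the walk early. A user-space sketch of that pattern; the names and the continue/stop return convention are assumptions made for illustration, not the LDLM definitions:

#include <stdio.h>

/* assumed continue/stop convention for the iterator callback */
#define DEMO_ITER_CONTINUE	1
#define DEMO_ITER_STOP		0

struct demo_lock {
	int id;
	struct demo_lock *next;
};

typedef int (*demo_iterator_t)(struct demo_lock *lock, void *closure);

/* walk every lock, handing each one plus an opaque closure to the callback */
static void demo_iterate(struct demo_lock *head, demo_iterator_t iter,
			 void *closure)
{
	struct demo_lock *lock;

	for (lock = head; lock; lock = lock->next)
		if (iter(lock, closure) == DEMO_ITER_STOP)
			break;
}

/* example callback: count locks until a matching id is seen */
static int count_until(struct demo_lock *lock, void *closure)
{
	int *count = closure;

	(*count)++;
	return lock->id == 2 ? DEMO_ITER_STOP : DEMO_ITER_CONTINUE;
}

int main(void)
{
	struct demo_lock c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	int count = 0;

	demo_iterate(&a, count_until, &count);
	printf("visited %d locks\n", count);
	return 0;
}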
- * @{ */ + * @{ + */ int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *, ldlm_iterator_t iter, void *data); /** @} ldlm_iterator */ @@ -1091,7 +1092,7 @@ ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags) struct ldlm_lock *lock; lock = __ldlm_handle2lock(h, flags); - if (lock != NULL) + if (lock) LDLM_LOCK_REF_DEL(lock); return lock; } @@ -1111,7 +1112,7 @@ static inline int ldlm_res_lvbo_update(struct ldlm_resource *res, return 0; } -int ldlm_error2errno(ldlm_error_t error); +int ldlm_error2errno(enum ldlm_error error); #if LUSTRE_TRACKS_LOCK_EXP_REFS void ldlm_dump_export_locks(struct obd_export *exp); @@ -1168,12 +1169,13 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode); void ldlm_lock_fail_match_locked(struct ldlm_lock *lock); void ldlm_lock_allow_match(struct ldlm_lock *lock); void ldlm_lock_allow_match_locked(struct ldlm_lock *lock); -ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, - const struct ldlm_res_id *, ldlm_type_t type, - ldlm_policy_data_t *, ldlm_mode_t mode, - struct lustre_handle *, int unref); -ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, - __u64 *bits); +enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, + const struct ldlm_res_id *, + enum ldlm_type type, ldlm_policy_data_t *, + enum ldlm_mode mode, struct lustre_handle *, + int unref); +enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh, + __u64 *bits); void ldlm_lock_cancel(struct ldlm_lock *lock); void ldlm_lock_dump_handle(int level, struct lustre_handle *); void ldlm_unlink_lock_skiplist(struct ldlm_lock *req); @@ -1181,8 +1183,8 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req); /* resource.c */ struct ldlm_namespace * ldlm_namespace_new(struct obd_device *obd, char *name, - ldlm_side_t client, ldlm_appetite_t apt, - ldlm_ns_type_t ns_type); + ldlm_side_t client, enum ldlm_appetite apt, + enum ldlm_ns_type ns_type); int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags); void ldlm_namespace_get(struct ldlm_namespace *ns); void ldlm_namespace_put(struct ldlm_namespace *ns); @@ -1193,7 +1195,7 @@ void ldlm_debugfs_cleanup(void); struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, const struct ldlm_res_id *, - ldlm_type_t type, int create); + enum ldlm_type type, int create); int ldlm_resource_putref(struct ldlm_resource *res); void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head, @@ -1219,7 +1221,8 @@ int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *, * These AST handlers are typically used for server-side local locks and are * also used by client-side lock handlers to perform minimum level base * processing. - * @{ */ + * @{ + */ int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data); int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data); /** @} ldlm_local_ast */ @@ -1227,7 +1230,8 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data); /** \defgroup ldlm_cli_api API to operate on locks from actual LDLM users. * These are typically used by client and server (*_local versions) * to obtain and release locks. 
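ldlm_error2errno(), whose prototype changes above, exists because the ELDLM_* status codes (0, 300, 400, 401, ...) are protocol-level values rather than errno values, so they must be translated before being handed back to generic kernel code. A standalone sketch of such a translation; the mapping chosen below is purely illustrative and is not the table used by the real function:

#include <errno.h>
#include <stdio.h>

/* subset of the LDLM status codes shown in the hunks above */
enum demo_ldlm_error {
	DEMO_ELDLM_OK			= 0,
	DEMO_ELDLM_LOCK_CHANGED		= 300,
	DEMO_ELDLM_NAMESPACE_EXISTS	= 400,
	DEMO_ELDLM_BAD_NAMESPACE	= 401,
};

/* translate a protocol status into a negative errno (illustrative mapping) */
static int demo_ldlm_error2errno(enum demo_ldlm_error err)
{
	switch (err) {
	case DEMO_ELDLM_OK:
		return 0;
	case DEMO_ELDLM_LOCK_CHANGED:
		return -ESTALE;
	case DEMO_ELDLM_NAMESPACE_EXISTS:
		return -EEXIST;
	case DEMO_ELDLM_BAD_NAMESPACE:
		return -EBADF;
	}
	return -EINVAL;
}

int main(void)
{
	printf("%d\n", demo_ldlm_error2errno(DEMO_ELDLM_LOCK_CHANGED));
	return 0;
}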
- * @{ */ + * @{ + */ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, struct ldlm_enqueue_info *einfo, const struct ldlm_res_id *res_id, @@ -1244,29 +1248,32 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct list_head *cancels, int count); int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, - ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode, + enum ldlm_type type, __u8 with_policy, + enum ldlm_mode mode, __u64 *flags, void *lvb, __u32 lvb_len, struct lustre_handle *lockh, int rc); int ldlm_cli_update_pool(struct ptlrpc_request *req); int ldlm_cli_cancel(struct lustre_handle *lockh, - ldlm_cancel_flags_t cancel_flags); + enum ldlm_cancel_flags cancel_flags); int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *, - ldlm_cancel_flags_t flags, void *opaque); + enum ldlm_cancel_flags flags, void *opaque); int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque); int ldlm_cancel_resource_local(struct ldlm_resource *res, struct list_head *cancels, ldlm_policy_data_t *policy, - ldlm_mode_t mode, __u64 lock_flags, - ldlm_cancel_flags_t cancel_flags, void *opaque); + enum ldlm_mode mode, __u64 lock_flags, + enum ldlm_cancel_flags cancel_flags, + void *opaque); int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, - ldlm_cancel_flags_t flags); + enum ldlm_cancel_flags flags); int ldlm_cli_cancel_list(struct list_head *head, int count, - struct ptlrpc_request *req, ldlm_cancel_flags_t flags); + struct ptlrpc_request *req, + enum ldlm_cancel_flags flags); /** @} ldlm_cli_api */ /* mds/handler.c */ diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h index 0d3ed87d38e1..7f2ba2ffe0eb 100644 --- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h +++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h @@ -57,7 +57,8 @@ /** * Server placed lock on granted list, or a recovering client wants the - * lock added to the granted list, no questions asked. */ + * lock added to the granted list, no questions asked. + */ #define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL /* bit 1 */ #define ldlm_is_block_granted(_l) LDLM_TEST_FLAG((_l), 1ULL << 1) #define ldlm_set_block_granted(_l) LDLM_SET_FLAG((_l), 1ULL << 1) @@ -65,7 +66,8 @@ /** * Server placed lock on conv list, or a recovering client wants the lock - * added to the conv list, no questions asked. */ + * added to the conv list, no questions asked. + */ #define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL /* bit 2 */ #define ldlm_is_block_conv(_l) LDLM_TEST_FLAG((_l), 1ULL << 2) #define ldlm_set_block_conv(_l) LDLM_SET_FLAG((_l), 1ULL << 2) @@ -73,7 +75,8 @@ /** * Server placed lock on wait list, or a recovering client wants the lock - * added to the wait list, no questions asked. */ + * added to the wait list, no questions asked. + */ #define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL /* bit 3 */ #define ldlm_is_block_wait(_l) LDLM_TEST_FLAG((_l), 1ULL << 3) #define ldlm_set_block_wait(_l) LDLM_SET_FLAG((_l), 1ULL << 3) @@ -87,7 +90,8 @@ /** * Lock is being replayed. This could probably be implied by the fact that - * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */ + * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. 
+ */ #define LDLM_FL_REPLAY 0x0000000000000100ULL /* bit 8 */ #define ldlm_is_replay(_l) LDLM_TEST_FLAG((_l), 1ULL << 8) #define ldlm_set_replay(_l) LDLM_SET_FLAG((_l), 1ULL << 8) @@ -125,7 +129,8 @@ /** * Server told not to wait if blocked. For AGL, OST will not send glimpse - * callback. */ + * callback. + */ #define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL /* bit 18 */ #define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG((_l), 1ULL << 18) #define ldlm_set_block_nowait(_l) LDLM_SET_FLAG((_l), 1ULL << 18) @@ -141,7 +146,8 @@ * Immediately cancel such locks when they block some other locks. Send * cancel notification to original lock holder, but expect no reply. This * is for clients (like liblustre) that cannot be expected to reliably - * response to blocking AST. */ + * response to blocking AST. + */ #define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL /* bit 23 */ #define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG((_l), 1ULL << 23) #define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG((_l), 1ULL << 23) @@ -164,7 +170,8 @@ /** * Used for marking lock as a target for -EINTR while cp_ast sleep emulation - * + race with upcoming bl_ast. */ + * + race with upcoming bl_ast. + */ #define LDLM_FL_FAIL_LOC 0x0000000100000000ULL /* bit 32 */ #define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG((_l), 1ULL << 32) #define ldlm_set_fail_loc(_l) LDLM_SET_FLAG((_l), 1ULL << 32) @@ -172,7 +179,8 @@ /** * Used while processing the unused list to know that we have already - * handled this lock and decided to skip it. */ + * handled this lock and decided to skip it. + */ #define LDLM_FL_SKIPPED 0x0000000200000000ULL /* bit 33 */ #define ldlm_is_skipped(_l) LDLM_TEST_FLAG((_l), 1ULL << 33) #define ldlm_set_skipped(_l) LDLM_SET_FLAG((_l), 1ULL << 33) @@ -231,7 +239,8 @@ * The proper fix is to do the granting inside of the completion AST, * which can be replaced with a LVB-aware wrapping function for OSC locks. * That change is pretty high-risk, though, and would need a lot more - * testing. */ + * testing. + */ #define LDLM_FL_LVB_READY 0x0000020000000000ULL /* bit 41 */ #define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG((_l), 1ULL << 41) #define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG((_l), 1ULL << 41) @@ -243,7 +252,8 @@ * dirty pages. It can remain on the granted list during this whole time. * Threads racing to update the KMS after performing their writeback need * to know to exclude each other's locks from the calculation as they walk - * the granted list. */ + * the granted list. + */ #define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL /* bit 42 */ #define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG((_l), 1ULL << 42) #define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG((_l), 1ULL << 42) @@ -263,7 +273,8 @@ /** * optimization hint: LDLM can run blocking callback from current context - * w/o involving separate thread. in order to decrease cs rate */ + * w/o involving separate thread. in order to decrease cs rate + */ #define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL /* bit 45 */ #define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG((_l), 1ULL << 45) #define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG((_l), 1ULL << 45) @@ -280,7 +291,8 @@ * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is * dropped to let ldlm_callback_handler() return EINVAL to the server. It * is used when ELC RPC is already prepared and is waiting for rpc_lock, - * too late to send a separate CANCEL RPC. */ + * too late to send a separate CANCEL RPC. 
+ */ #define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */ #define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46) #define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46) @@ -295,7 +307,8 @@ /** * Don't put lock into the LRU list, so that it is not canceled due * to aging. Used by MGC locks, they are cancelled only at unmount or - * by callback. */ + * by callback. + */ #define LDLM_FL_NO_LRU 0x0001000000000000ULL /* bit 48 */ #define ldlm_is_no_lru(_l) LDLM_TEST_FLAG((_l), 1ULL << 48) #define ldlm_set_no_lru(_l) LDLM_SET_FLAG((_l), 1ULL << 48) @@ -304,7 +317,8 @@ /** * Set for locks that failed and where the server has been notified. * - * Protected by lock and resource locks. */ + * Protected by lock and resource locks. + */ #define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL /* bit 49 */ #define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG((_l), 1ULL << 49) #define ldlm_set_fail_notified(_l) LDLM_SET_FLAG((_l), 1ULL << 49) @@ -315,7 +329,8 @@ * be destroyed when last reference to them is released. Set by * ldlm_lock_destroy_internal(). * - * Protected by lock and resource locks. */ + * Protected by lock and resource locks. + */ #define LDLM_FL_DESTROYED 0x0004000000000000ULL /* bit 50 */ #define ldlm_is_destroyed(_l) LDLM_TEST_FLAG((_l), 1ULL << 50) #define ldlm_set_destroyed(_l) LDLM_SET_FLAG((_l), 1ULL << 50) @@ -333,7 +348,8 @@ * NB: compared with check_res_locked(), checking this bit is cheaper. * Also, spin_is_locked() is deprecated for kernel code; one reason is * because it works only for SMP so user needs to add extra macros like - * LASSERT_SPIN_LOCKED for uniprocessor kernels. */ + * LASSERT_SPIN_LOCKED for uniprocessor kernels. + */ #define LDLM_FL_RES_LOCKED 0x0010000000000000ULL /* bit 52 */ #define ldlm_is_res_locked(_l) LDLM_TEST_FLAG((_l), 1ULL << 52) #define ldlm_set_res_locked(_l) LDLM_SET_FLAG((_l), 1ULL << 52) @@ -343,7 +359,8 @@ * It's set once we call ldlm_add_waiting_lock_res_locked() to start the * lock-timeout timer and it will never be reset. * - * Protected by lock and resource locks. */ + * Protected by lock and resource locks. + */ #define LDLM_FL_WAITED 0x0020000000000000ULL /* bit 53 */ #define ldlm_is_waited(_l) LDLM_TEST_FLAG((_l), 1ULL << 53) #define ldlm_set_waited(_l) LDLM_SET_FLAG((_l), 1ULL << 53) @@ -365,10 +382,10 @@ #define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0) /** set a ldlm_lock flag bit */ -#define LDLM_SET_FLAG(_l, _b) (((_l)->l_flags |= (_b)) +#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b)) /** clear a ldlm_lock flag bit */ -#define LDLM_CLEAR_FLAG(_l, _b) (((_l)->l_flags &= ~(_b)) +#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b)) /** Mask of flags inherited from parent lock when doing intents. 
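This hunk also repairs LDLM_SET_FLAG() and LDLM_CLEAR_FLAG(), which previously opened three parentheses but closed only two and therefore could not have compiled anywhere they were expanded. The test/set/clear pattern, together with the per-flag wrapper style used throughout this header, in a standalone sketch that borrows LDLM_FL_LVB_READY (bit 41) from the hunks above as its example flag:

#include <inttypes.h>
#include <stdio.h>

struct demo_lock {
	uint64_t l_flags;
};

/* same shape as the LDLM_TEST/SET/CLEAR_FLAG macros */
#define DEMO_TEST_FLAG(_l, _b)		(((_l)->l_flags & (_b)) != 0)
#define DEMO_SET_FLAG(_l, _b)		((_l)->l_flags |= (_b))
#define DEMO_CLEAR_FLAG(_l, _b)		((_l)->l_flags &= ~(_b))

/* one flag bit and its accessor wrappers, mirroring the ldlm_is_*/ldlm_set_*
 * naming convention */
#define DEMO_FL_LVB_READY		(1ULL << 41)
#define demo_is_lvb_ready(_l)		DEMO_TEST_FLAG((_l), DEMO_FL_LVB_READY)
#define demo_set_lvb_ready(_l)		DEMO_SET_FLAG((_l), DEMO_FL_LVB_READY)
#define demo_clear_lvb_ready(_l)	DEMO_CLEAR_FLAG((_l), DEMO_FL_LVB_READY)

int main(void)
{
	struct demo_lock lock = { 0 };

	demo_set_lvb_ready(&lock);
	printf("lvb_ready=%d flags=%#" PRIx64 "\n",
	       demo_is_lvb_ready(&lock), lock.l_flags);
	demo_clear_lvb_ready(&lock);
	printf("lvb_ready=%d\n", demo_is_lvb_ready(&lock));
	return 0;
}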
*/ #define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h index 311e5aa9b0db..3014d27e6dc2 100644 --- a/drivers/staging/lustre/lustre/include/lustre_export.h +++ b/drivers/staging/lustre/lustre/include/lustre_export.h @@ -50,62 +50,6 @@ #include "lustre/lustre_idl.h" #include "lustre_dlm.h" -struct mds_client_data; -struct mdt_client_data; -struct mds_idmap_table; -struct mdt_idmap_table; - -/** - * Target-specific export data - */ -struct tg_export_data { - /** Protects led_lcd below */ - struct mutex ted_lcd_lock; - /** Per-client data for each export */ - struct lsd_client_data *ted_lcd; - /** Offset of record in last_rcvd file */ - loff_t ted_lr_off; - /** Client index in last_rcvd file */ - int ted_lr_idx; -}; - -/** - * MDT-specific export data - */ -struct mdt_export_data { - struct tg_export_data med_ted; - /** List of all files opened by client on this MDT */ - struct list_head med_open_head; - spinlock_t med_open_lock; /* med_open_head, mfd_list */ - /** Bitmask of all ibit locks this MDT understands */ - __u64 med_ibits_known; - struct mutex med_idmap_mutex; - struct lustre_idmap_table *med_idmap; -}; - -struct ec_export_data { /* echo client */ - struct list_head eced_locks; -}; - -/* In-memory access to client data from OST struct */ -/** Filter (oss-side) specific import data */ -struct filter_export_data { - struct tg_export_data fed_ted; - spinlock_t fed_lock; /**< protects fed_mod_list */ - long fed_dirty; /* in bytes */ - long fed_grant; /* in bytes */ - struct list_head fed_mod_list; /* files being modified */ - int fed_mod_count;/* items in fed_writing list */ - long fed_pending; /* bytes just being written */ - __u32 fed_group; - __u8 fed_pagesize; /* log2 of client page size */ -}; - -struct mgs_export_data { - struct list_head med_clients; /* mgc fs client via this exp */ - spinlock_t med_lock; /* protect med_clients */ -}; - enum obd_option { OBD_OPT_FORCE = 0x0001, OBD_OPT_FAILOVER = 0x0002, @@ -179,7 +123,8 @@ struct obd_export { */ spinlock_t exp_lock; /** Compatibility flags for this export are embedded into - * exp_connect_data */ + * exp_connect_data + */ struct obd_connect_data exp_connect_data; enum obd_option exp_flags; unsigned long exp_failed:1, @@ -200,22 +145,8 @@ struct obd_export { /** blocking dlm lock list, protected by exp_bl_list_lock */ struct list_head exp_bl_list; spinlock_t exp_bl_list_lock; - - /** Target specific data */ - union { - struct tg_export_data eu_target_data; - struct mdt_export_data eu_mdt_data; - struct filter_export_data eu_filter_data; - struct ec_export_data eu_ec_data; - struct mgs_export_data eu_mgs_data; - } u; }; -#define exp_target_data u.eu_target_data -#define exp_mdt_data u.eu_mdt_data -#define exp_filter_data u.eu_filter_data -#define exp_ec_data u.eu_ec_data - static inline __u64 *exp_connect_flags_ptr(struct obd_export *exp) { return &exp->exp_connect_data.ocd_connect_flags; @@ -228,7 +159,6 @@ static inline __u64 exp_connect_flags(struct obd_export *exp) static inline int exp_max_brw_size(struct obd_export *exp) { - LASSERT(exp != NULL); if (exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE) return exp->exp_connect_data.ocd_brw_size; @@ -242,19 +172,16 @@ static inline int exp_connect_multibulk(struct obd_export *exp) static inline int exp_connect_cancelset(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_CANCELSET); } static inline int 
exp_connect_lru_resize(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE); } static inline int exp_connect_rmtclient(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_RMT_CLIENT); } @@ -268,14 +195,11 @@ static inline int client_is_remote(struct obd_export *exp) static inline int exp_connect_vbr(struct obd_export *exp) { - LASSERT(exp != NULL); - LASSERT(exp->exp_connection); return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR); } static inline int exp_connect_som(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_SOM); } @@ -288,7 +212,6 @@ static inline int imp_connect_lru_resize(struct obd_import *imp) { struct obd_connect_data *ocd; - LASSERT(imp != NULL); ocd = &imp->imp_connect_data; return !!(ocd->ocd_connect_flags & OBD_CONNECT_LRU_RESIZE); } @@ -300,7 +223,6 @@ static inline int exp_connect_layout(struct obd_export *exp) static inline bool exp_connect_lvb_type(struct obd_export *exp) { - LASSERT(exp != NULL); if (exp_connect_flags(exp) & OBD_CONNECT_LVB_TYPE) return true; else @@ -311,7 +233,6 @@ static inline bool imp_connect_lvb_type(struct obd_import *imp) { struct obd_connect_data *ocd; - LASSERT(imp != NULL); ocd = &imp->imp_connect_data; if (ocd->ocd_connect_flags & OBD_CONNECT_LVB_TYPE) return true; @@ -331,13 +252,19 @@ static inline bool imp_connect_disp_stripe(struct obd_import *imp) { struct obd_connect_data *ocd; - LASSERT(imp != NULL); ocd = &imp->imp_connect_data; return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE; } struct obd_export *class_conn2export(struct lustre_handle *conn); +#define KKUC_CT_DATA_MAGIC 0x092013cea +struct kkuc_ct_data { + __u32 kcd_magic; + struct obd_uuid kcd_uuid; + __u32 kcd_archive; +}; + /** @} export */ #endif /* __EXPORT_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h index 9b1a9c695113..ab4a92390a43 100644 --- a/drivers/staging/lustre/lustre/include/lustre_fid.h +++ b/drivers/staging/lustre/lustre/include/lustre_fid.h @@ -251,7 +251,8 @@ static inline void lu_local_name_obj_fid(struct lu_fid *fid, __u32 oid) /* For new FS (>= 2.4), the root FID will be changed to * [FID_SEQ_ROOT:1:0], for existing FS, (upgraded to 2.4), - * the root FID will still be IGIF */ + * the root FID will still be IGIF + */ static inline int fid_is_root(const struct lu_fid *fid) { return unlikely((fid_seq(fid) == FID_SEQ_ROOT && @@ -294,7 +295,8 @@ static inline int fid_is_namespace_visible(const struct lu_fid *fid) const __u64 seq = fid_seq(fid); /* Here, we cannot distinguish whether the normal FID is for OST - * object or not. It is caller's duty to check more if needed. */ + * object or not. It is caller's duty to check more if needed. 
+ */ return (!fid_is_last_id(fid) && (fid_seq_is_norm(seq) || fid_seq_is_igif(seq))) || fid_is_root(fid) || fid_is_dot_lustre(fid); @@ -433,7 +435,7 @@ fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res) */ static inline struct ldlm_res_id * fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid, - struct ldlm_res_id *res) + struct ldlm_res_id *res) { fid_build_reg_res_name(glb_fid, res); res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid); @@ -516,7 +518,8 @@ static inline int ostid_res_name_eq(struct ost_id *oi, struct ldlm_res_id *name) { /* Note: it is just a trick here to save some effort, probably the - * correct way would be turn them into the FID and compare */ + * correct way would be turn them into the FID and compare + */ if (fid_seq_is_mdt0(ostid_seq(oi))) { return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_id(oi) && name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_seq(oi); @@ -589,12 +592,14 @@ static inline __u64 fid_flatten(const struct lu_fid *fid) static inline __u32 fid_hash(const struct lu_fid *f, int bits) { /* all objects with same id and different versions will belong to same - * collisions list. */ + * collisions list. + */ return hash_long(fid_flatten(f), bits); } /** - * map fid to 32 bit value for ino on 32bit systems. */ + * map fid to 32 bit value for ino on 32bit systems. + */ static inline __u32 fid_flatten32(const struct lu_fid *fid) { __u32 ino; @@ -611,7 +616,8 @@ static inline __u32 fid_flatten32(const struct lu_fid *fid) * that inodes generated at about the same time have a reduced chance * of collisions. This will give a period of 2^12 = 1024 unique clients * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects - * (from OID), or up to 128M inodes without collisions for new files. */ + * (from OID), or up to 128M inodes without collisions for new files. + */ ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) + (seq >> (64 - (40-8)) & 0xffffff00) + (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8); diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h index 551162624974..4cf2b0e61672 100644 --- a/drivers/staging/lustre/lustre/include/lustre_fld.h +++ b/drivers/staging/lustre/lustre/include/lustre_fld.h @@ -71,50 +71,41 @@ struct lu_fld_target { struct lu_server_fld { /** * super sequence controller export, needed to forward fld - * lookup request. */ + * lookup request. + */ struct obd_export *lsf_control_exp; - /** - * Client FLD cache. */ + /** Client FLD cache. */ struct fld_cache *lsf_cache; - /** - * Protect index modifications */ + /** Protect index modifications */ struct mutex lsf_lock; - /** - * Fld service name in form "fld-srv-lustre-MDTXXX" */ + /** Fld service name in form "fld-srv-lustre-MDTXXX" */ char lsf_name[LUSTRE_MDT_MAXNAMELEN]; }; struct lu_client_fld { - /** - * Client side debugfs entry. */ + /** Client side debugfs entry. */ struct dentry *lcf_debugfs_entry; - /** - * List of exports client FLD knows about. */ + /** List of exports client FLD knows about. */ struct list_head lcf_targets; - /** - * Current hash to be used to chose an export. */ + /** Current hash to be used to chose an export. */ struct lu_fld_hash *lcf_hash; - /** - * Exports count. */ + /** Exports count. */ int lcf_count; - /** - * Lock protecting exports list and fld_hash. */ + /** Lock protecting exports list and fld_hash. */ spinlock_t lcf_lock; - /** - * Client FLD cache. */ + /** Client FLD cache. 
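fid_flatten32() above squeezes a FID into a 32-bit inode number by mixing bits of the sequence and object id so that FIDs allocated close together rarely collide, as its comment explains. A user-space sketch of just that mixing arithmetic, assuming the conventional lu_fid layout of a 64-bit sequence (f_seq), 32-bit object id (f_oid) and 32-bit version; anything the kernel function does beyond the lines quoted in the hunk is not reproduced:

#include <inttypes.h>
#include <stdio.h>

/* assumed lu_fid layout; the real definition lives in lustre_idl.h */
struct demo_fid {
	uint64_t f_seq;
	uint32_t f_oid;
	uint32_t f_ver;
};

/* 32-bit flattening as in fid_flatten32(): spread SEQ and OID bits so that
 * nearby FIDs land on distinct 32-bit inode numbers */
static uint32_t demo_fid_flatten32(const struct demo_fid *fid)
{
	uint64_t seq = fid->f_seq;
	uint32_t ino;

	ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
	      ((seq >> (64 - (40 - 8))) & 0xffffff00) +
	      (fid->f_oid & 0xff000fff) + ((fid->f_oid & 0x00fff000) << 8);

	return ino;
}

int main(void)
{
	struct demo_fid fid = { .f_seq = 0x200000401ULL, .f_oid = 17, .f_ver = 0 };

	printf("ino=%" PRIu32 "\n", demo_fid_flatten32(&fid));
	return 0;
}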
*/ struct fld_cache *lcf_cache; - /** - * Client fld debugfs entry name. */ + /** Client fld debugfs entry name. */ char lcf_name[LUSTRE_MDT_MAXNAMELEN]; int lcf_flags; diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h index f39780ae4c8a..27f169d2ed34 100644 --- a/drivers/staging/lustre/lustre/include/lustre_handles.h +++ b/drivers/staging/lustre/lustre/include/lustre_handles.h @@ -65,7 +65,8 @@ struct portals_handle_ops { * * Now you're able to assign the results of cookie2handle directly to an * ldlm_lock. If it's not at the top, you'll want to use container_of() - * to compute the start of the structure based on the handle field. */ + * to compute the start of the structure based on the handle field. + */ struct portals_handle { struct list_head h_link; __u64 h_cookie; diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h index 4e4230e94c11..dac2d84d8266 100644 --- a/drivers/staging/lustre/lustre/include/lustre_import.h +++ b/drivers/staging/lustre/lustre/include/lustre_import.h @@ -292,7 +292,8 @@ struct obd_import { /* need IR MNE swab */ imp_need_mne_swab:1, /* import must be reconnected instead of - * chose new connection */ + * chosing new connection + */ imp_force_reconnect:1, /* import has tried to connect with server */ imp_connect_tried:1; diff --git a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h new file mode 100644 index 000000000000..970610b6de89 --- /dev/null +++ b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h @@ -0,0 +1,55 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2013 Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * + * Author: Nathan Rutman <nathan.rutman@sun.com> + * + * Kernel <-> userspace communication routines. + * The definitions below are used in the kernel and userspace. 
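This new header carries the kernel side of the KUC (kernel/userspace communication) interface: as the libcfs_kkuc_* prototypes just below suggest, userspace readers register themselves in a numbered group, and the kernel can later walk the group with a callback or push a payload to every registered reader. A rough user-space analogue of that register/broadcast shape, with entirely hypothetical names; only the calling pattern is modelled on the prototypes:

#include <stdio.h>
#include <stdlib.h>

#define DEMO_GRP_MAX 4

struct demo_reg {
	struct demo_reg *next;
	void *data;			/* per-reader registration data */
};

static struct demo_reg *demo_groups[DEMO_GRP_MAX];

typedef int (*demo_cb_t)(void *data, void *cb_arg);

/* register one reader's data under a group (cf. libcfs_kkuc_group_add) */
static int demo_group_add(unsigned int group, void *data)
{
	struct demo_reg *reg;

	if (group >= DEMO_GRP_MAX)
		return -1;
	reg = malloc(sizeof(*reg));
	if (!reg)
		return -1;
	reg->data = data;
	reg->next = demo_groups[group];
	demo_groups[group] = reg;
	return 0;
}

/* hand every registration plus a caller argument to the callback
 * (cf. libcfs_kkuc_group_foreach); registrations are never freed here */
static int demo_group_foreach(unsigned int group, demo_cb_t cb, void *cb_arg)
{
	struct demo_reg *reg;
	int rc = 0;

	for (reg = demo_groups[group]; reg; reg = reg->next)
		rc = cb(reg->data, cb_arg);
	return rc;
}

static int demo_print_cb(void *data, void *cb_arg)
{
	printf("%s gets \"%s\"\n", (char *)data, (char *)cb_arg);
	return 0;
}

int main(void)
{
	demo_group_add(1, "reader-a");
	demo_group_add(1, "reader-b");
	demo_group_foreach(1, demo_print_cb, "archive done");
	return 0;
}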
+ */ + +#ifndef __LUSTRE_KERNELCOMM_H__ +#define __LUSTRE_KERNELCOMM_H__ + +/* For declarations shared with userspace */ +#include "uapi_kernelcomm.h" + +/* prototype for callback function on kuc groups */ +typedef int (*libcfs_kkuc_cb_t)(void *data, void *cb_arg); + +/* Kernel methods */ +int libcfs_kkuc_msg_put(struct file *fp, void *payload); +int libcfs_kkuc_group_put(unsigned int group, void *payload); +int libcfs_kkuc_group_add(struct file *fp, int uid, unsigned int group, + void *data, size_t data_len); +int libcfs_kkuc_group_rem(int uid, unsigned int group); +int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func, + void *cb_arg); + +#endif /* __LUSTRE_KERNELCOMM_H__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h index 428469fec534..f2223d55850a 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lib.h +++ b/drivers/staging/lustre/lustre/include/lustre_lib.h @@ -153,9 +153,9 @@ struct obd_ioctl_data { /* buffers the kernel will treat as user pointers */ __u32 ioc_plen1; - char *ioc_pbuf1; + void __user *ioc_pbuf1; __u32 ioc_plen2; - char *ioc_pbuf2; + void __user *ioc_pbuf2; /* inline buffers for various arguments */ __u32 ioc_inllen1; @@ -252,8 +252,8 @@ static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data) #include "obd_support.h" /* function defined in lustre/obdclass/<platform>/<platform>-module.c */ -int obd_ioctl_getdata(char **buf, int *len, void *arg); -int obd_ioctl_popdata(void *arg, void *data, int len); +int obd_ioctl_getdata(char **buf, int *len, void __user *arg); +int obd_ioctl_popdata(void __user *arg, void *data, int len); static inline void obd_ioctl_freedata(char *buf, int len) { @@ -365,10 +365,10 @@ static inline void obd_ioctl_freedata(char *buf, int len) /* OBD_IOC_LLOG_CATINFO is deprecated */ #define OBD_IOC_LLOG_CATINFO _IOWR('f', 196, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) +/* #define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) */ +/* #define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) */ +/* #define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) */ +/* #define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) */ #define OBD_IOC_GET_OBJ_VERSION _IOR('f', 210, OBD_IOC_DATA_TYPE) @@ -387,7 +387,8 @@ static inline void obd_ioctl_freedata(char *buf, int len) */ /* Until such time as we get_info the per-stripe maximum from the OST, - * we define this to be 2T - 4k, which is the ext3 maxbytes. */ + * we define this to be 2T - 4k, which is the ext3 maxbytes. + */ #define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL /* Special values for remove LOV EA from disk */ @@ -540,7 +541,7 @@ do { \ l_add_wait(&wq, &__wait); \ \ /* Block all signals (just the non-fatal ones if no timeout). 
*/ \ - if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \ + if (info->lwi_on_signal && (__timeout == 0 || __allow_intr)) \ __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \ else \ __blocked = cfs_block_sigsinv(0); \ @@ -562,13 +563,13 @@ do { \ __timeout = cfs_time_sub(__timeout, \ cfs_time_sub(interval, remaining));\ if (__timeout == 0) { \ - if (info->lwi_on_timeout == NULL || \ + if (!info->lwi_on_timeout || \ info->lwi_on_timeout(info->lwi_cb_data)) { \ ret = -ETIMEDOUT; \ break; \ } \ /* Take signals after the timeout expires. */ \ - if (info->lwi_on_signal != NULL) \ + if (info->lwi_on_signal) \ (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\ } \ } \ @@ -578,7 +579,7 @@ do { \ if (condition) \ break; \ if (cfs_signal_pending()) { \ - if (info->lwi_on_signal != NULL && \ + if (info->lwi_on_signal && \ (__timeout == 0 || __allow_intr)) { \ if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \ info->lwi_on_signal(info->lwi_cb_data);\ diff --git a/drivers/staging/lustre/lustre/include/lustre_lite.h b/drivers/staging/lustre/lustre/include/lustre_lite.h index f6d7aae3a0b8..fcc5ebbceed8 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lite.h +++ b/drivers/staging/lustre/lustre/include/lustre_lite.h @@ -53,56 +53,8 @@ #define LL_MAX_BLKSIZE_BITS (22) #define LL_MAX_BLKSIZE (1UL<<LL_MAX_BLKSIZE_BITS) -#include "lustre/lustre_user.h" - -struct lustre_rw_params { - int lrp_lock_mode; - ldlm_policy_data_t lrp_policy; - u32 lrp_brw_flags; - int lrp_ast_flags; -}; - -/* - * XXX nikita: this function lives in the header because it is used by both - * llite kernel module and liblustre library, and there is no (?) better place - * to put it in. - */ -static inline void lustre_build_lock_params(int cmd, unsigned long open_flags, - __u64 connect_flags, - loff_t pos, ssize_t len, - struct lustre_rw_params *params) -{ - params->lrp_lock_mode = (cmd == OBD_BRW_READ) ? LCK_PR : LCK_PW; - params->lrp_brw_flags = 0; - - params->lrp_policy.l_extent.start = pos; - params->lrp_policy.l_extent.end = pos + len - 1; - /* - * for now O_APPEND always takes local locks. - */ - if (cmd == OBD_BRW_WRITE && (open_flags & O_APPEND)) { - params->lrp_policy.l_extent.start = 0; - params->lrp_policy.l_extent.end = OBD_OBJECT_EOF; - } else if (LIBLUSTRE_CLIENT && (connect_flags & OBD_CONNECT_SRVLOCK)) { - /* - * liblustre: OST-side locking for all non-O_APPEND - * reads/writes. - */ - params->lrp_lock_mode = LCK_NL; - params->lrp_brw_flags = OBD_BRW_SRVLOCK; - } else { - /* - * nothing special for the kernel. In the future llite may use - * OST-side locks for small writes into highly contended - * files. - */ - } - params->lrp_ast_flags = (open_flags & O_NONBLOCK) ? - LDLM_FL_BLOCK_NOWAIT : 0; -} - /* - * This is embedded into liblustre and llite super-blocks to keep track of + * This is embedded into llite super-blocks to keep track of * connect flags (capabilities) supported by all imports given mount is * connected to. */ diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h index e4fc8b5e1336..49618e186824 100644 --- a/drivers/staging/lustre/lustre/include/lustre_log.h +++ b/drivers/staging/lustre/lustre/include/lustre_log.h @@ -241,7 +241,8 @@ struct llog_ctxt { struct obd_llog_group *loc_olg; /* group containing that ctxt */ struct obd_export *loc_exp; /* parent "disk" export (e.g. 
MDS) */ struct obd_import *loc_imp; /* to use in RPC's: can be backward - pointing import */ + * pointing import + */ struct llog_operations *loc_logops; struct llog_handle *loc_handle; struct mutex loc_mutex; /* protect loc_imp */ @@ -255,7 +256,7 @@ struct llog_ctxt { static inline int llog_handle2ops(struct llog_handle *loghandle, struct llog_operations **lop) { - if (loghandle == NULL || loghandle->lgh_logops == NULL) + if (!loghandle || !loghandle->lgh_logops) return -EINVAL; *lop = loghandle->lgh_logops; @@ -272,7 +273,7 @@ static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt) static inline void llog_ctxt_put(struct llog_ctxt *ctxt) { - if (ctxt == NULL) + if (!ctxt) return; LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON); CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt, @@ -294,7 +295,7 @@ static inline int llog_group_set_ctxt(struct obd_llog_group *olg, LASSERT(index >= 0 && index < LLOG_MAX_CTXTS); spin_lock(&olg->olg_lock); - if (olg->olg_ctxts[index] != NULL) { + if (olg->olg_ctxts[index]) { spin_unlock(&olg->olg_lock); return -EEXIST; } @@ -311,7 +312,7 @@ static inline struct llog_ctxt *llog_group_get_ctxt(struct obd_llog_group *olg, LASSERT(index >= 0 && index < LLOG_MAX_CTXTS); spin_lock(&olg->olg_lock); - if (olg->olg_ctxts[index] == NULL) + if (!olg->olg_ctxts[index]) ctxt = NULL; else ctxt = llog_ctxt_get(olg->olg_ctxts[index]); @@ -335,7 +336,7 @@ static inline struct llog_ctxt *llog_get_context(struct obd_device *obd, static inline int llog_group_ctxt_null(struct obd_llog_group *olg, int index) { - return (olg->olg_ctxts[index] == NULL); + return (!olg->olg_ctxts[index]); } static inline int llog_ctxt_null(struct obd_device *obd, int index) @@ -354,7 +355,7 @@ static inline int llog_next_block(const struct lu_env *env, rc = llog_handle2ops(loghandle, &lop); if (rc) return rc; - if (lop->lop_next_block == NULL) + if (!lop->lop_next_block) return -EOPNOTSUPP; rc = lop->lop_next_block(env, loghandle, cur_idx, next_idx, diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h index 3da373315856..df94f9f3bef2 100644 --- a/drivers/staging/lustre/lustre/include/lustre_mdc.h +++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h @@ -81,8 +81,8 @@ static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck) static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, struct lookup_intent *it) { - if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || - it->it_op == IT_LAYOUT)) + if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || + it->it_op == IT_LAYOUT)) return; /* This would normally block until the existing request finishes. @@ -90,7 +90,8 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, * done, then set rpcl_it to MDC_FAKE_RPCL_IT. Once that is set * it will only be cleared when all fake requests are finished. * Only when all fake requests are finished can normal requests - * be sent, to ensure they are recoverable again. */ + * be sent, to ensure they are recoverable again. + */ again: mutex_lock(&lck->rpcl_mutex); @@ -105,22 +106,23 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, * just turned off but there are still requests in progress. * Wait until they finish. It doesn't need to be efficient * in this extremely rare case, just have low overhead in - * the common case when it isn't true. */ + * the common case when it isn't true. 
+ */ while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) { mutex_unlock(&lck->rpcl_mutex); schedule_timeout(cfs_time_seconds(1) / 4); goto again; } - LASSERT(lck->rpcl_it == NULL); + LASSERT(!lck->rpcl_it); lck->rpcl_it = it; } static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck, struct lookup_intent *it) { - if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || - it->it_op == IT_LAYOUT)) + if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || + it->it_op == IT_LAYOUT)) return; if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */ diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h index d834ddd8183b..4fa1a18b7d15 100644 --- a/drivers/staging/lustre/lustre/include/lustre_net.h +++ b/drivers/staging/lustre/lustre/include/lustre_net.h @@ -76,7 +76,8 @@ * In order for the client and server to properly negotiate the maximum * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two * value. The client is free to limit the actual RPC size for any bulk - * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */ + * transfer via cl_max_pages_per_rpc to some non-power-of-two value. + */ #define PTLRPC_BULK_OPS_BITS 2 #define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS) /** @@ -85,7 +86,8 @@ * protocol limitation on the maximum RPC size that can be used by any * RPC sent to that server in the future. Instead, the server should * use the negotiated per-client ocd_brw_size to determine the bulk - * RPC count. */ + * RPC count. + */ #define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1)) /** @@ -419,16 +421,18 @@ struct ptlrpc_reply_state { /** A spinlock to protect the reply state flags */ spinlock_t rs_lock; /** Reply state flags */ - unsigned long rs_difficult:1; /* ACK/commit stuff */ + unsigned long rs_difficult:1; /* ACK/commit stuff */ unsigned long rs_no_ack:1; /* no ACK, even for - difficult requests */ + * difficult requests + */ unsigned long rs_scheduled:1; /* being handled? */ unsigned long rs_scheduled_ever:1;/* any schedule attempts? */ unsigned long rs_handled:1; /* been handled yet? */ unsigned long rs_on_net:1; /* reply_out_callback pending? */ unsigned long rs_prealloc:1; /* rs from prealloc list */ unsigned long rs_committed:1;/* the transaction was committed - * and the rs was dispatched */ + * and the rs was dispatched + */ /** Size of the state */ int rs_size; /** opcode */ @@ -463,7 +467,7 @@ struct ptlrpc_reply_state { /** Handles of locks awaiting client reply ACK */ struct lustre_handle rs_locks[RS_MAX_LOCKS]; /** Lock modes of locks in \a rs_locks */ - ldlm_mode_t rs_modes[RS_MAX_LOCKS]; + enum ldlm_mode rs_modes[RS_MAX_LOCKS]; }; struct ptlrpc_thread; @@ -1181,7 +1185,7 @@ struct nrs_fifo_req { * purpose of this object is to hold references to the request's resources * for the lifetime of the request, and to hold properties that policies use * use for determining the request's scheduling priority. - * */ + */ struct ptlrpc_nrs_request { /** * The request's resource hierarchy. 
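PTLRPC_BULK_OPS_COUNT must be a power of two precisely so that PTLRPC_BULK_OPS_MASK, built as ~(count - 1), can trim a value down to a whole multiple of the per-RPC bulk operation count with a single AND, as the comments above require. The arithmetic in isolation (the constant names are local stand-ins):

#include <inttypes.h>
#include <stdio.h>

#define DEMO_BULK_OPS_BITS	2
#define DEMO_BULK_OPS_COUNT	(1U << DEMO_BULK_OPS_BITS)
#define DEMO_BULK_OPS_MASK	(~((uint64_t)DEMO_BULK_OPS_COUNT - 1))

int main(void)
{
	uint64_t sizes[] = { 13, 16, 258 };
	unsigned int i;

	/* AND with the mask rounds each value down to a multiple of 4 */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%" PRIu64 " -> %" PRIu64 "\n",
		       sizes[i], sizes[i] & DEMO_BULK_OPS_MASK);
	return 0;
}

If the count were not a power of two, ~(count - 1) would no longer be a contiguous mask and the AND would not perform this rounding.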
@@ -1321,15 +1325,17 @@ struct ptlrpc_request { /* do not resend request on -EINPROGRESS */ rq_no_retry_einprogress:1, /* allow the req to be sent if the import is in recovery - * status */ + * status + */ rq_allow_replay:1; unsigned int rq_nr_resend; enum rq_phase rq_phase; /* one of RQ_PHASE_* */ enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */ - atomic_t rq_refcount;/* client-side refcount for SENT race, - server-side refcount for multiple replies */ + atomic_t rq_refcount; /* client-side refcount for SENT race, + * server-side refcount for multiple replies + */ /** Portal to which this request would be sent */ short rq_request_portal; /* XXX FIXME bug 249 */ @@ -1363,7 +1369,8 @@ struct ptlrpc_request { /** * security and encryption data - * @{ */ + * @{ + */ struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */ struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */ struct list_head rq_ctx_chain; /**< link to waited ctx */ @@ -1477,7 +1484,8 @@ struct ptlrpc_request { /** when request must finish. volatile * so that servers' early reply updates to the deadline aren't - * kept in per-cpu cache */ + * kept in per-cpu cache + */ volatile time64_t rq_deadline; /** when req reply unlink must finish. */ time64_t rq_reply_deadline; @@ -1518,7 +1526,7 @@ struct ptlrpc_request { static inline int ptlrpc_req_interpret(const struct lu_env *env, struct ptlrpc_request *req, int rc) { - if (req->rq_interpret_reply != NULL) { + if (req->rq_interpret_reply) { req->rq_status = req->rq_interpret_reply(env, req, &req->rq_async_args, rc); @@ -1678,7 +1686,8 @@ do { \ /** * This is the debug print function you need to use to print request structure * content into lustre debug log. - * for most callers (level is a constant) this is resolved at compile time */ + * for most callers (level is a constant) this is resolved at compile time + */ #define DEBUG_REQ(level, req, fmt, args...) \ do { \ if ((level) & (D_ERROR | D_WARNING)) { \ @@ -1947,7 +1956,7 @@ struct ptlrpc_service_ops { * or general metadata service for MDS. 
*/ struct ptlrpc_service { - /** serialize /proc operations */ + /** serialize sysfs operations */ spinlock_t srv_lock; /** most often accessed fields */ /** chain thru all services */ @@ -2101,7 +2110,8 @@ struct ptlrpc_service_part { /** NRS head for regular requests */ struct ptlrpc_nrs scp_nrs_reg; /** NRS head for HP requests; this is only valid for services that can - * handle HP requests */ + * handle HP requests + */ struct ptlrpc_nrs *scp_nrs_hp; /** AT stuff */ @@ -2141,8 +2151,8 @@ struct ptlrpc_service_part { #define ptlrpc_service_for_each_part(part, i, svc) \ for (i = 0; \ i < (svc)->srv_ncpts && \ - (svc)->srv_parts != NULL && \ - ((part) = (svc)->srv_parts[i]) != NULL; i++) + (svc)->srv_parts && \ + ((part) = (svc)->srv_parts[i]); i++) /** * Declaration of ptlrpcd control structure @@ -2259,7 +2269,6 @@ static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc, static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc, const struct ptlrpc_nrs_pol_desc *desc) { - LASSERT(desc->pd_compat_svc_name != NULL); return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0; } @@ -2303,7 +2312,6 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req) struct ptlrpc_bulk_desc *desc; int rc; - LASSERT(req != NULL); desc = req->rq_bulk; if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) && @@ -2374,14 +2382,14 @@ void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req); struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp, const struct req_format *format); struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp, - struct ptlrpc_request_pool *, - const struct req_format *format); + struct ptlrpc_request_pool *, + const struct req_format *); void ptlrpc_request_free(struct ptlrpc_request *request); int ptlrpc_request_pack(struct ptlrpc_request *request, __u32 version, int opcode); -struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp, - const struct req_format *format, - __u32 version, int opcode); +struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *, + const struct req_format *, + __u32, int); int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, __u32 version, int opcode, char **bufs, struct ptlrpc_cli_ctx *ctx); @@ -2462,7 +2470,8 @@ struct ptlrpc_service_thr_conf { /* "soft" limit for total threads number */ unsigned int tc_nthrs_max; /* user specified threads number, it will be validated due to - * other members of this structure. */ + * other members of this structure. 
+ */ unsigned int tc_nthrs_user; /* set NUMA node affinity for service threads */ unsigned int tc_cpu_affinity; @@ -2500,14 +2509,12 @@ struct ptlrpc_service_conf { */ void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs); void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs); -struct ptlrpc_service *ptlrpc_register_service( - struct ptlrpc_service_conf *conf, - struct kset *parent, - struct dentry *debugfs_entry); +struct ptlrpc_service *ptlrpc_register_service(struct ptlrpc_service_conf *conf, + struct kset *parent, + struct dentry *debugfs_entry); int ptlrpc_start_threads(struct ptlrpc_service *svc); int ptlrpc_unregister_service(struct ptlrpc_service *service); -int liblustre_check_services(void *arg); int ptlrpc_hr_init(void); void ptlrpc_hr_fini(void); @@ -2536,7 +2543,7 @@ int ptlrpc_reconnect_import(struct obd_import *imp); int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout, int index); void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout, - int index); + int index); int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len); int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len); @@ -2726,7 +2733,7 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req) static inline void ptlrpc_client_wake_req(struct ptlrpc_request *req) { - if (req->rq_set == NULL) + if (!req->rq_set) wake_up(&req->rq_reply_waitq); else wake_up(&req->rq_set->set_waitq); @@ -2750,7 +2757,7 @@ ptlrpc_rs_decref(struct ptlrpc_reply_state *rs) /* Should only be called once per req */ static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req) { - if (req->rq_reply_state == NULL) + if (!req->rq_reply_state) return; /* shouldn't occur */ ptlrpc_rs_decref(req->rq_reply_state); req->rq_reply_state = NULL; @@ -2807,7 +2814,6 @@ ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt) static inline struct ptlrpc_service * ptlrpc_req2svc(struct ptlrpc_request *req) { - LASSERT(req->rq_rqbd != NULL); return req->rq_rqbd->rqbd_svcpt->scp_service; } diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h index 46a662f89322..b2e67fcf9ef1 100644 --- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h +++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h @@ -107,8 +107,8 @@ void req_capsule_set_size(struct req_capsule *pill, const struct req_msg_field *field, enum req_location loc, int size); int req_capsule_get_size(const struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc); + const struct req_msg_field *field, + enum req_location loc); int req_capsule_msg_size(struct req_capsule *pill, enum req_location loc); int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt, enum req_location loc); @@ -130,7 +130,6 @@ void req_layout_fini(void); extern struct req_format RQF_OBD_PING; extern struct req_format RQF_OBD_SET_INFO; extern struct req_format RQF_SEC_CTX; -extern struct req_format RQF_OBD_IDX_READ; /* MGS req_format */ extern struct req_format RQF_MGS_TARGET_REG; extern struct req_format RQF_MGS_SET_INFO; @@ -146,7 +145,6 @@ extern struct req_format RQF_MDS_GETSTATUS; extern struct req_format RQF_MDS_SYNC; extern struct req_format RQF_MDS_GETXATTR; extern struct req_format RQF_MDS_GETATTR; -extern struct req_format RQF_UPDATE_OBJ; /* * This is format of direct (non-intent) MDS_GETATTR_NAME request. 
@@ -177,7 +175,6 @@ extern struct req_format RQF_MDS_REINT_SETXATTR; extern struct req_format RQF_MDS_QUOTACHECK; extern struct req_format RQF_MDS_QUOTACTL; extern struct req_format RQF_QC_CALLBACK; -extern struct req_format RQF_QUOTA_DQACQ; extern struct req_format RQF_MDS_SWAP_LAYOUTS; /* MDS hsm formats */ extern struct req_format RQF_MDS_HSM_STATE_GET; @@ -220,7 +217,6 @@ extern struct req_format RQF_LDLM_INTENT_OPEN; extern struct req_format RQF_LDLM_INTENT_CREATE; extern struct req_format RQF_LDLM_INTENT_UNLINK; extern struct req_format RQF_LDLM_INTENT_GETXATTR; -extern struct req_format RQF_LDLM_INTENT_QUOTA; extern struct req_format RQF_LDLM_CANCEL; extern struct req_format RQF_LDLM_CALLBACK; extern struct req_format RQF_LDLM_CP_CALLBACK; @@ -252,7 +248,6 @@ extern struct req_msg_field RMF_SETINFO_KEY; extern struct req_msg_field RMF_GETINFO_VAL; extern struct req_msg_field RMF_GETINFO_VALLEN; extern struct req_msg_field RMF_GETINFO_KEY; -extern struct req_msg_field RMF_IDX_INFO; extern struct req_msg_field RMF_CLOSE_DATA; /* @@ -277,7 +272,6 @@ extern struct req_msg_field RMF_CAPA1; extern struct req_msg_field RMF_CAPA2; extern struct req_msg_field RMF_OBD_QUOTACHECK; extern struct req_msg_field RMF_OBD_QUOTACTL; -extern struct req_msg_field RMF_QUOTA_BODY; extern struct req_msg_field RMF_STRING; extern struct req_msg_field RMF_SWAP_LAYOUTS; extern struct req_msg_field RMF_MDS_HSM_PROGRESS; @@ -322,9 +316,6 @@ extern struct req_msg_field RMF_MGS_CONFIG_RES; /* generic uint32 */ extern struct req_msg_field RMF_U32; -/* OBJ update format */ -extern struct req_msg_field RMF_UPDATE; -extern struct req_msg_field RMF_UPDATE_REPLY; /** @} req_layout */ #endif /* _LUSTRE_REQ_LAYOUT_H__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h index dd1033be6bfa..01b4e6726a68 100644 --- a/drivers/staging/lustre/lustre/include/lustre_sec.h +++ b/drivers/staging/lustre/lustre/include/lustre_sec.h @@ -351,26 +351,23 @@ struct ptlrpc_ctx_ops { /** * To determine whether it's suitable to use the \a ctx for \a vcred. */ - int (*match) (struct ptlrpc_cli_ctx *ctx, - struct vfs_cred *vcred); + int (*match)(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred); /** * To bring the \a ctx uptodate. */ - int (*refresh) (struct ptlrpc_cli_ctx *ctx); + int (*refresh)(struct ptlrpc_cli_ctx *ctx); /** * Validate the \a ctx. */ - int (*validate) (struct ptlrpc_cli_ctx *ctx); + int (*validate)(struct ptlrpc_cli_ctx *ctx); /** * Force the \a ctx to die. */ - void (*force_die) (struct ptlrpc_cli_ctx *ctx, - int grace); - int (*display) (struct ptlrpc_cli_ctx *ctx, - char *buf, int bufsize); + void (*force_die)(struct ptlrpc_cli_ctx *ctx, int grace); + int (*display)(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize); /** * Sign the request message using \a ctx. @@ -382,8 +379,7 @@ struct ptlrpc_ctx_ops { * * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign(). */ - int (*sign) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*sign)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Verify the reply message using \a ctx. @@ -395,8 +391,7 @@ struct ptlrpc_ctx_ops { * * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify(). */ - int (*verify) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*verify)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Encrypt the request message using \a ctx. @@ -408,8 +403,7 @@ struct ptlrpc_ctx_ops { * * \see gss_cli_ctx_seal(). 
*/ - int (*seal) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*seal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Decrypt the reply message using \a ctx. @@ -421,8 +415,7 @@ struct ptlrpc_ctx_ops { * * \see gss_cli_ctx_unseal(). */ - int (*unseal) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*unseal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Wrap bulk request data. This is called before wrapping RPC @@ -444,9 +437,9 @@ struct ptlrpc_ctx_ops { * * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk(). */ - int (*wrap_bulk) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*wrap_bulk)(struct ptlrpc_cli_ctx *ctx, + struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); /** * Unwrap bulk reply data. This is called after wrapping RPC @@ -461,9 +454,9 @@ struct ptlrpc_ctx_ops { * * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk(). */ - int (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*unwrap_bulk)(struct ptlrpc_cli_ctx *ctx, + struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); }; #define PTLRPC_CTX_NEW_BIT (0) /* newly created */ @@ -515,9 +508,9 @@ struct ptlrpc_sec_cops { * * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr(). */ - struct ptlrpc_sec * (*create_sec) (struct obd_import *imp, - struct ptlrpc_svc_ctx *ctx, - struct sptlrpc_flavor *flavor); + struct ptlrpc_sec *(*create_sec)(struct obd_import *imp, + struct ptlrpc_svc_ctx *ctx, + struct sptlrpc_flavor *flavor); /** * Destructor of ptlrpc_sec. When called, refcount has been dropped @@ -525,7 +518,7 @@ struct ptlrpc_sec_cops { * * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr(). */ - void (*destroy_sec) (struct ptlrpc_sec *sec); + void (*destroy_sec)(struct ptlrpc_sec *sec); /** * Notify that this ptlrpc_sec is going to die. Optionally, policy @@ -534,7 +527,7 @@ struct ptlrpc_sec_cops { * * \see plain_kill_sec(), gss_sec_kill(). */ - void (*kill_sec) (struct ptlrpc_sec *sec); + void (*kill_sec)(struct ptlrpc_sec *sec); /** * Given \a vcred, lookup and/or create its context. The policy module @@ -544,10 +537,9 @@ struct ptlrpc_sec_cops { * * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr(). */ - struct ptlrpc_cli_ctx * (*lookup_ctx) (struct ptlrpc_sec *sec, - struct vfs_cred *vcred, - int create, - int remove_dead); + struct ptlrpc_cli_ctx *(*lookup_ctx)(struct ptlrpc_sec *sec, + struct vfs_cred *vcred, + int create, int remove_dead); /** * Called then the reference of \a ctx dropped to 0. The policy module @@ -559,9 +551,8 @@ struct ptlrpc_sec_cops { * * \see plain_release_ctx(), gss_sec_release_ctx_kr(). */ - void (*release_ctx) (struct ptlrpc_sec *sec, - struct ptlrpc_cli_ctx *ctx, - int sync); + void (*release_ctx)(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx, + int sync); /** * Flush the context cache. @@ -573,11 +564,8 @@ struct ptlrpc_sec_cops { * * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr(). */ - int (*flush_ctx_cache) - (struct ptlrpc_sec *sec, - uid_t uid, - int grace, - int force); + int (*flush_ctx_cache)(struct ptlrpc_sec *sec, uid_t uid, + int grace, int force); /** * Called periodically by garbage collector to remove dead contexts @@ -585,7 +573,7 @@ struct ptlrpc_sec_cops { * * \see gss_sec_gc_ctx_kr(). 
*/ - void (*gc_ctx) (struct ptlrpc_sec *sec); + void (*gc_ctx)(struct ptlrpc_sec *sec); /** * Given an context \a ctx, install a corresponding reverse service @@ -593,9 +581,8 @@ struct ptlrpc_sec_cops { * XXX currently it's only used by GSS module, maybe we should remove * this from general API. */ - int (*install_rctx)(struct obd_import *imp, - struct ptlrpc_sec *sec, - struct ptlrpc_cli_ctx *ctx); + int (*install_rctx)(struct obd_import *imp, struct ptlrpc_sec *sec, + struct ptlrpc_cli_ctx *ctx); /** * To allocate request buffer for \a req. @@ -608,9 +595,8 @@ struct ptlrpc_sec_cops { * * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf(). */ - int (*alloc_reqbuf)(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int lustre_msg_size); + int (*alloc_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req, + int lustre_msg_size); /** * To free request buffer for \a req. @@ -619,8 +605,7 @@ struct ptlrpc_sec_cops { * * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf(). */ - void (*free_reqbuf) (struct ptlrpc_sec *sec, - struct ptlrpc_request *req); + void (*free_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req); /** * To allocate reply buffer for \a req. @@ -632,9 +617,8 @@ struct ptlrpc_sec_cops { * * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf(). */ - int (*alloc_repbuf)(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int lustre_msg_size); + int (*alloc_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req, + int lustre_msg_size); /** * To free reply buffer for \a req. @@ -645,8 +629,7 @@ struct ptlrpc_sec_cops { * * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf(). */ - void (*free_repbuf) (struct ptlrpc_sec *sec, - struct ptlrpc_request *req); + void (*free_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req); /** * To expand the request buffer of \a req, thus the \a segment in @@ -658,15 +641,13 @@ struct ptlrpc_sec_cops { * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(), * gss_enlarge_reqbuf(). */ - int (*enlarge_reqbuf) - (struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int segment, int newsize); + int (*enlarge_reqbuf)(struct ptlrpc_sec *sec, + struct ptlrpc_request *req, + int segment, int newsize); /* * misc */ - int (*display) (struct ptlrpc_sec *sec, - struct seq_file *seq); + int (*display)(struct ptlrpc_sec *sec, struct seq_file *seq); }; /** @@ -690,7 +671,7 @@ struct ptlrpc_sec_sops { * * \see null_accept(), plain_accept(), gss_svc_accept_kr(). */ - int (*accept) (struct ptlrpc_request *req); + int (*accept)(struct ptlrpc_request *req); /** * Perform security transformation upon reply message. @@ -702,15 +683,14 @@ struct ptlrpc_sec_sops { * * \see null_authorize(), plain_authorize(), gss_svc_authorize(). */ - int (*authorize) (struct ptlrpc_request *req); + int (*authorize)(struct ptlrpc_request *req); /** * Invalidate server context \a ctx. * * \see gss_svc_invalidate_ctx(). */ - void (*invalidate_ctx) - (struct ptlrpc_svc_ctx *ctx); + void (*invalidate_ctx)(struct ptlrpc_svc_ctx *ctx); /** * Allocate a ptlrpc_reply_state. @@ -724,28 +704,26 @@ struct ptlrpc_sec_sops { * * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs(). */ - int (*alloc_rs) (struct ptlrpc_request *req, - int msgsize); + int (*alloc_rs)(struct ptlrpc_request *req, int msgsize); /** * Free a ptlrpc_reply_state. */ - void (*free_rs) (struct ptlrpc_reply_state *rs); + void (*free_rs)(struct ptlrpc_reply_state *rs); /** * Release the server context \a ctx. 
* * \see gss_svc_free_ctx(). */ - void (*free_ctx) (struct ptlrpc_svc_ctx *ctx); + void (*free_ctx)(struct ptlrpc_svc_ctx *ctx); /** * Install a reverse context based on the server context \a ctx. * * \see gss_svc_install_rctx_kr(). */ - int (*install_rctx)(struct obd_import *imp, - struct ptlrpc_svc_ctx *ctx); + int (*install_rctx)(struct obd_import *imp, struct ptlrpc_svc_ctx *ctx); /** * Prepare buffer for incoming bulk write. @@ -755,24 +733,24 @@ struct ptlrpc_sec_sops { * * \see gss_svc_prep_bulk(). */ - int (*prep_bulk) (struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*prep_bulk)(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); /** * Unwrap the bulk write data. * * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk(). */ - int (*unwrap_bulk) (struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*unwrap_bulk)(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); /** * Wrap the bulk read data. * * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk(). */ - int (*wrap_bulk) (struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*wrap_bulk)(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); }; struct ptlrpc_sec_policy { diff --git a/drivers/staging/lustre/lustre/include/lustre_ver.h b/drivers/staging/lustre/lustre/include/lustre_ver.h index caa4da12f37a..64559a16f4de 100644 --- a/drivers/staging/lustre/lustre/include/lustre_ver.h +++ b/drivers/staging/lustre/lustre/include/lustre_ver.h @@ -1,26 +1,20 @@ #ifndef _LUSTRE_VER_H_ #define _LUSTRE_VER_H_ -/* This file automatically generated from lustre/include/lustre_ver.h.in, - * based on parameters in lustre/autoconf/lustre-version.ac. - * Changes made directly to this file will be lost. */ #define LUSTRE_MAJOR 2 -#define LUSTRE_MINOR 3 -#define LUSTRE_PATCH 64 +#define LUSTRE_MINOR 4 +#define LUSTRE_PATCH 60 #define LUSTRE_FIX 0 -#define LUSTRE_VERSION_STRING "2.3.64" +#define LUSTRE_VERSION_STRING "2.4.60" #define LUSTRE_VERSION_CODE OBD_OCD_VERSION(LUSTRE_MAJOR, \ LUSTRE_MINOR, LUSTRE_PATCH, \ LUSTRE_FIX) -/* liblustre clients are only allowed to connect if their LUSTRE_FIX mismatches - * by this amount (set in lustre/autoconf/lustre-version.ac). */ -#define LUSTRE_VERSION_ALLOWED_OFFSET OBD_OCD_VERSION(0, 0, 1, 32) - -/* If lustre version of client and servers it connects to differs by more +/* + * If lustre version of client and servers it connects to differs by more * than this amount, client would issue a warning. - * (set in lustre/autoconf/lustre-version.ac) */ + */ #define LUSTRE_VERSION_OFFSET_WARN OBD_OCD_VERSION(0, 4, 0, 0) #endif diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h index bcbe61301713..4a0f2e8b19f6 100644 --- a/drivers/staging/lustre/lustre/include/obd.h +++ b/drivers/staging/lustre/lustre/include/obd.h @@ -90,7 +90,8 @@ struct lov_stripe_md { pid_t lsm_lock_owner; /* debugging */ /* maximum possible file size, might change as OSTs status changes, - * e.g. disconnected, deactivated */ + * e.g. disconnected, deactivated + */ __u64 lsm_maxbytes; struct { /* Public members. */ @@ -123,7 +124,7 @@ static inline bool lsm_is_released(struct lov_stripe_md *lsm) static inline bool lsm_has_objects(struct lov_stripe_md *lsm) { - if (lsm == NULL) + if (!lsm) return false; if (lsm_is_released(lsm)) return false; @@ -159,7 +160,8 @@ struct obd_info { /* An update callback which is called to update some data on upper * level. E.g. 
it is used for update lsm->lsm_oinfo at every received * request in osc level for enqueue requests. It is also possible to - * update some caller data from LOV layer if needed. */ + * update some caller data from LOV layer if needed. + */ obd_enqueue_update_f oi_cb_up; }; @@ -216,7 +218,6 @@ struct timeout_item { }; #define OSC_MAX_RIF_DEFAULT 8 -#define MDS_OSC_MAX_RIF_DEFAULT 50 #define OSC_MAX_RIF_MAX 256 #define OSC_MAX_DIRTY_DEFAULT (OSC_MAX_RIF_DEFAULT * 4) #define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */ @@ -241,7 +242,8 @@ struct client_obd { struct obd_import *cl_import; /* ptlrpc connection state */ int cl_conn_count; /* max_mds_easize is purely a performance thing so we don't have to - * call obd_size_diskmd() all the time. */ + * call obd_size_diskmd() all the time. + */ int cl_default_mds_easize; int cl_max_mds_easize; int cl_default_mds_cookiesize; @@ -261,7 +263,8 @@ struct client_obd { /* since we allocate grant by blocks, we don't know how many grant will * be used to add a page into cache. As a solution, we reserve maximum * grant before trying to dirty a page and unreserve the rest. - * See osc_{reserve|unreserve}_grant for details. */ + * See osc_{reserve|unreserve}_grant for details. + */ long cl_reserved_grant; struct list_head cl_cache_waiters; /* waiting for cache/grant */ unsigned long cl_next_shrink_grant; /* jiffies */ @@ -269,14 +272,16 @@ struct client_obd { int cl_grant_shrink_interval; /* seconds */ /* A chunk is an optimal size used by osc_extent to determine - * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */ + * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) + */ int cl_chunkbits; int cl_chunk; int cl_extent_tax; /* extent overhead, by bytes */ /* keep track of objects that have lois that contain pages which * have been queued for async brw. this lock also protects the - * lists of osc_client_pages that hang off of the loi */ + * lists of osc_client_pages that hang off of the loi + */ /* * ->cl_loi_list_lock protects consistency of * ->cl_loi_{ready,read,write}_list. ->ap_make_ready() and @@ -295,14 +300,14 @@ struct client_obd { * NB by Jinshan: though field names are still _loi_, but actually * osc_object{}s are in the list. 
*/ - client_obd_lock_t cl_loi_list_lock; + struct client_obd_lock cl_loi_list_lock; struct list_head cl_loi_ready_list; struct list_head cl_loi_hp_ready_list; struct list_head cl_loi_write_list; struct list_head cl_loi_read_list; int cl_r_in_flight; int cl_w_in_flight; - /* just a sum of the loi/lop pending numbers to be exported by /proc */ + /* just a sum of the loi/lop pending numbers to be exported by sysfs */ atomic_t cl_pending_w_pages; atomic_t cl_pending_r_pages; __u32 cl_max_pages_per_rpc; @@ -322,7 +327,7 @@ struct client_obd { atomic_t cl_lru_shrinkers; atomic_t cl_lru_in_list; struct list_head cl_lru_list; /* lru page list */ - client_obd_lock_t cl_lru_list_lock; /* page list protector */ + struct client_obd_lock cl_lru_list_lock; /* page list protector */ /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */ atomic_t cl_destroy_in_flight; @@ -340,7 +345,7 @@ struct client_obd { /* supported checksum types that are worked out at connect time */ __u32 cl_supp_cksum_types; /* checksum algorithm to be used */ - cksum_type_t cl_cksum_type; + enum cksum_type cl_cksum_type; /* also protected by the poorly named _loi_list_lock lock above */ struct osc_async_rc cl_ar; @@ -375,14 +380,12 @@ struct echo_client_obd { spinlock_t ec_lock; struct list_head ec_objects; struct list_head ec_locks; - int ec_nstripes; __u64 ec_unique; }; /* Generic subset of OSTs */ struct ost_pool { - __u32 *op_array; /* array of index of - lov_obd->lov_tgts */ + __u32 *op_array; /* array of index of lov_obd->lov_tgts */ unsigned int op_count; /* number of OSTs in the array */ unsigned int op_size; /* allocated size of lp_array */ struct rw_semaphore op_rw_sem; /* to protect ost_pool use */ @@ -415,14 +418,16 @@ struct lov_qos { struct lov_qos_rr lq_rr; /* round robin qos data */ unsigned long lq_dirty:1, /* recalc qos data */ lq_same_space:1,/* the ost's all have approx. 
- the same space avail */ + * the same space avail + */ lq_reset:1, /* zero current penalties */ lq_statfs_in_progress:1; /* statfs op in progress */ /* qos statfs data */ struct lov_statfs_data *lq_statfs_data; - wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs - * requests completion */ + wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs + * requests completion + */ }; struct lov_tgt_desc { @@ -450,16 +455,16 @@ struct pool_desc { struct lov_qos_rr pool_rr; /* round robin qos */ struct hlist_node pool_hash; /* access by poolname */ struct list_head pool_list; /* serial access */ - struct dentry *pool_debugfs_entry; /* file in /proc */ + struct dentry *pool_debugfs_entry; /* file in debugfs */ struct obd_device *pool_lobd; /* obd of the lov/lod to which - * this pool belongs */ + * this pool belongs + */ }; struct lov_obd { struct lov_desc desc; struct lov_tgt_desc **lov_tgts; /* sparse array */ - struct ost_pool lov_packed; /* all OSTs in a packed - array */ + struct ost_pool lov_packed; /* all OSTs in a packed array */ struct mutex lov_lock; struct obd_connect_data lov_ocd; atomic_t lov_refcount; @@ -596,34 +601,6 @@ struct obd_trans_info { struct obd_uuid *oti_ost_uuid; }; -static inline void oti_init(struct obd_trans_info *oti, - struct ptlrpc_request *req) -{ - if (oti == NULL) - return; - memset(oti, 0, sizeof(*oti)); - - if (req == NULL) - return; - - oti->oti_xid = req->rq_xid; - /** VBR: take versions from request */ - if (req->rq_reqmsg != NULL && - lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) { - __u64 *pre_version = lustre_msg_get_versions(req->rq_reqmsg); - - oti->oti_pre_version = pre_version ? pre_version[0] : 0; - oti->oti_transno = lustre_msg_get_transno(req->rq_reqmsg); - } - - /** called from mds_create_objects */ - if (req->rq_repmsg != NULL) - oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg); - oti->oti_thread = req->rq_svc_thread; - if (req->rq_reqmsg != NULL) - oti->oti_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg); -} - static inline void oti_alloc_cookies(struct obd_trans_info *oti, int num_cookies) { @@ -681,7 +658,7 @@ enum obd_notify_event { /* * Data structure used to pass obd_notify()-event to non-obd listeners (llite - * and liblustre being main examples). + * being main example). */ struct obd_notify_upcall { int (*onu_upcall)(struct obd_device *host, struct obd_device *watched, @@ -728,21 +705,23 @@ struct obd_device { unsigned long obd_attached:1, /* finished attach */ obd_set_up:1, /* finished setup */ obd_version_recov:1, /* obd uses version checking */ - obd_replayable:1, /* recovery is enabled; inform clients */ - obd_no_transno:1, /* no committed-transno notification */ + obd_replayable:1,/* recovery is enabled; inform clients */ + obd_no_transno:1, /* no committed-transno notification */ obd_no_recov:1, /* fail instead of retry messages */ obd_stopping:1, /* started cleanup */ obd_starting:1, /* started setup */ obd_force:1, /* cleanup with > 0 obd refcount */ - obd_fail:1, /* cleanup with failover */ - obd_async_recov:1, /* allow asynchronous orphan cleanup */ + obd_fail:1, /* cleanup with failover */ + obd_async_recov:1, /* allow asynchronous orphan cleanup */ obd_no_conn:1, /* deny new connections */ obd_inactive:1, /* device active/inactive - * (for /proc/status only!!) */ + * (for sysfs status only!!) + */ obd_no_ir:1, /* no imperative recovery. 
*/ obd_process_conf:1; /* device is processing mgs config */ /* use separate field as it is set in interrupt to don't mess with - * protection of other bits using _bh lock */ + * protection of other bits using _bh lock + */ unsigned long obd_recovery_expired:1; /* uuid-export hash body */ struct cfs_hash *obd_uuid_hash; @@ -935,7 +914,8 @@ struct md_op_data { __u32 op_npages; /* used to transfer info between the stacks of MD client - * see enum op_cli_flags */ + * see enum op_cli_flags + */ __u32 op_cli_flags; /* File object data version for HSM release, on client */ @@ -957,7 +937,7 @@ struct md_enqueue_info { struct lustre_handle mi_lockh; struct inode *mi_dir; int (*mi_cb)(struct ptlrpc_request *req, - struct md_enqueue_info *minfo, int rc); + struct md_enqueue_info *minfo, int rc); __u64 mi_cbdata; unsigned int mi_generation; }; @@ -965,7 +945,7 @@ struct md_enqueue_info { struct obd_ops { struct module *owner; int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg); + void *karg, void __user *uarg); int (*get_info)(const struct lu_env *env, struct obd_export *, __u32 keylen, void *key, __u32 *vallen, void *val, struct lov_stripe_md *lsm); @@ -987,7 +967,8 @@ struct obd_ops { /* connect to the target device with given connection * data. @ocd->ocd_connect_flags is modified to reflect flags actually * granted by the target, which are guaranteed to be a subset of flags - * asked for. If @ocd == NULL, use default parameters. */ + * asked for. If @ocd == NULL, use default parameters. + */ int (*connect)(const struct lu_env *env, struct obd_export **exp, struct obd_device *src, struct obd_uuid *cluuid, struct obd_connect_data *ocd, @@ -1083,7 +1064,8 @@ struct obd_ops { /* * NOTE: If adding ops, add another LPROCFS_OBD_OP_INIT() line * to lprocfs_alloc_obd_stats() in obdclass/lprocfs_status.c. - * Also, add a wrapper function in include/linux/obd_class.h. */ + * Also, add a wrapper function in include/linux/obd_class.h. 
+ */ }; enum { @@ -1189,14 +1171,14 @@ struct md_ops { struct obd_client_handle *); int (*set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *); - ldlm_mode_t (*lock_match)(struct obd_export *, __u64, - const struct lu_fid *, ldlm_type_t, - ldlm_policy_data_t *, ldlm_mode_t, - struct lustre_handle *); + enum ldlm_mode (*lock_match)(struct obd_export *, __u64, + const struct lu_fid *, enum ldlm_type, + ldlm_policy_data_t *, enum ldlm_mode, + struct lustre_handle *); int (*cancel_unused)(struct obd_export *, const struct lu_fid *, - ldlm_policy_data_t *, ldlm_mode_t, - ldlm_cancel_flags_t flags, void *opaque); + ldlm_policy_data_t *, enum ldlm_mode, + enum ldlm_cancel_flags flags, void *opaque); int (*get_remote_perm)(struct obd_export *, const struct lu_fid *, __u32, struct ptlrpc_request **); @@ -1224,9 +1206,9 @@ struct lsm_operations { void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, u64 *, u64 *); int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes, - __u16 *stripe_count); + __u16 *stripe_count); int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm, - struct lov_mds_md *lmm); + struct lov_mds_md *lmm); }; extern const struct lsm_operations lsm_v1_ops; @@ -1253,7 +1235,7 @@ static inline struct md_open_data *obd_mod_alloc(void) struct md_open_data *mod; mod = kzalloc(sizeof(*mod), GFP_NOFS); - if (mod == NULL) + if (!mod) return NULL; atomic_set(&mod->mod_refcount, 1); return mod; @@ -1300,7 +1282,7 @@ static inline bool filename_is_volatile(const char *name, int namelen, int *idx) return false; /* caller does not care of idx */ - if (idx == NULL) + if (!idx) return true; /* volatile file, the MDT can be set from name */ @@ -1327,7 +1309,8 @@ static inline bool filename_is_volatile(const char *name, int namelen, int *idx) return true; bad_format: /* bad format of mdt idx, we cannot return an error - * to caller so we use hash algo */ + * to caller so we use hash algo + */ CERROR("Bad volatile file name format: %s\n", name + LUSTRE_VOLATILE_HDR_LEN); return false; @@ -1335,7 +1318,6 @@ bad_format: static inline int cli_brw_size(struct obd_device *obd) { - LASSERT(obd != NULL); return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; } diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h index 01db60405393..637fa22110a4 100644 --- a/drivers/staging/lustre/lustre/include/obd_cksum.h +++ b/drivers/staging/lustre/lustre/include/obd_cksum.h @@ -37,7 +37,7 @@ #include "../../include/linux/libcfs/libcfs.h" #include "lustre/lustre_idl.h" -static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type) +static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type) { switch (cksum_type) { case OBD_CKSUM_CRC32: @@ -63,8 +63,9 @@ static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type) * In case of an unsupported types/flags we fall back to ADLER * because that is supported by all clients since 1.8 * - * In case multiple algorithms are supported the best one is used. */ -static inline u32 cksum_type_pack(cksum_type_t cksum_type) + * In case multiple algorithms are supported the best one is used. 
+ */ +static inline u32 cksum_type_pack(enum cksum_type cksum_type) { unsigned int performance = 0, tmp; u32 flag = OBD_FL_CKSUM_ADLER; @@ -98,7 +99,7 @@ static inline u32 cksum_type_pack(cksum_type_t cksum_type) return flag; } -static inline cksum_type_t cksum_type_unpack(u32 o_flags) +static inline enum cksum_type cksum_type_unpack(u32 o_flags) { switch (o_flags & OBD_FL_CKSUM_ALL) { case OBD_FL_CKSUM_CRC32C: @@ -116,9 +117,9 @@ static inline cksum_type_t cksum_type_unpack(u32 o_flags) * 1.8 supported ADLER it is base and not depend on hw * Client uses all available local algos */ -static inline cksum_type_t cksum_types_supported_client(void) +static inline enum cksum_type cksum_types_supported_client(void) { - cksum_type_t ret = OBD_CKSUM_ADLER; + enum cksum_type ret = OBD_CKSUM_ADLER; CDEBUG(D_INFO, "Crypto hash speed: crc %d, crc32c %d, adler %d\n", cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)), @@ -139,14 +140,16 @@ static inline cksum_type_t cksum_types_supported_client(void) * Currently, calling cksum_type_pack() with a mask will return the fastest * checksum type due to its benchmarking at libcfs module load. * Caution is advised, however, since what is fastest on a single client may - * not be the fastest or most efficient algorithm on the server. */ -static inline cksum_type_t cksum_type_select(cksum_type_t cksum_types) + * not be the fastest or most efficient algorithm on the server. + */ +static inline enum cksum_type cksum_type_select(enum cksum_type cksum_types) { return cksum_type_unpack(cksum_type_pack(cksum_types)); } /* Checksum algorithm names. Must be defined in the same order as the - * OBD_CKSUM_* flags. */ + * OBD_CKSUM_* flags. + */ #define DECLARE_CKSUM_NAME char *cksum_name[] = {"crc32", "adler", "crc32c"} #endif /* __OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h index 97d80397503c..706869f8c98f 100644 --- a/drivers/staging/lustre/lustre/include/obd_class.h +++ b/drivers/staging/lustre/lustre/include/obd_class.h @@ -45,18 +45,22 @@ #include "lprocfs_status.h" #define OBD_STATFS_NODELAY 0x0001 /* requests should be send without delay - * and resends for avoid deadlocks */ + * and resends for avoid deadlocks + */ #define OBD_STATFS_FROM_CACHE 0x0002 /* the statfs callback should not update - * obd_osfs_age */ + * obd_osfs_age + */ #define OBD_STATFS_PTLRPCD 0x0004 /* requests will be sent via ptlrpcd * instead of a specific set. This * means that we cannot rely on the set * interpret routine to be called. * lov_statfs_fini() must thus be called - * by the request interpret routine */ + * by the request interpret routine + */ #define OBD_STATFS_FOR_MDT0 0x0008 /* The statfs is only for retrieving - * information from MDT0. */ -#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */ + * information from MDT0. 
+ */ +#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */ /* OBD Device Declarations */ extern struct obd_device *obd_devs[MAX_OBD_DEVICES]; @@ -83,10 +87,10 @@ int class_name2dev(const char *name); struct obd_device *class_name2obd(const char *name); int class_uuid2dev(struct obd_uuid *uuid); struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid, - const char *typ_name, - struct obd_uuid *grp_uuid); + const char *typ_name, + struct obd_uuid *grp_uuid); struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, - int *next); + int *next); struct obd_device *class_num2obd(int num); int class_notify_sptlrpc_conf(const char *fsname, int namelen); @@ -160,8 +164,9 @@ struct config_llog_data { struct mutex cld_lock; int cld_type; unsigned int cld_stopping:1, /* we were told to stop - * watching */ - cld_lostlock:1; /* lock not requeued */ + * watching + */ + cld_lostlock:1; /* lock not requeued */ char cld_logname[0]; }; @@ -193,7 +198,7 @@ extern void (*class_export_dump_hook)(struct obd_export *); struct obd_export *class_export_get(struct obd_export *exp); void class_export_put(struct obd_export *exp); struct obd_export *class_new_export(struct obd_device *obddev, - struct obd_uuid *cluuid); + struct obd_uuid *cluuid); void class_unlink_export(struct obd_export *exp); struct obd_import *class_import_get(struct obd_import *); @@ -203,7 +208,7 @@ void class_destroy_import(struct obd_import *exp); void class_put_type(struct obd_type *type); int class_connect(struct lustre_handle *conn, struct obd_device *obd, - struct obd_uuid *cluuid); + struct obd_uuid *cluuid); int class_disconnect(struct obd_export *exp); void class_fail_export(struct obd_export *exp); int class_manual_cleanup(struct obd_device *obd); @@ -275,7 +280,8 @@ void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid); #define CTXTP(ctxt, op) (ctxt)->loc_logops->lop_##op /* Ensure obd_setup: used for cleanup which must be called - while obd is stopping */ + * while obd is stopping + */ static inline int obd_check_dev(struct obd_device *obd) { if (!obd) { @@ -306,7 +312,7 @@ static inline int obd_check_dev_active(struct obd_device *obd) / sizeof(((struct obd_ops *)(0))->iocontrol)) #define OBD_COUNTER_INCREMENT(obdx, op) \ - if ((obdx)->obd_stats != NULL) { \ + if ((obdx)->obd_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((obdx)->obd_cntr_base) + \ OBD_COUNTER_OFFSET(op); \ @@ -315,7 +321,7 @@ static inline int obd_check_dev_active(struct obd_device *obd) } #define EXP_COUNTER_INCREMENT(export, op) \ - if ((export)->exp_obd->obd_stats != NULL) { \ + if ((export)->exp_obd->obd_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((export)->exp_obd->obd_cntr_base) + \ OBD_COUNTER_OFFSET(op); \ @@ -329,7 +335,7 @@ static inline int obd_check_dev_active(struct obd_device *obd) / sizeof(((struct md_ops *)(0))->getstatus)) #define MD_COUNTER_INCREMENT(obdx, op) \ - if ((obd)->md_stats != NULL) { \ + if ((obd)->md_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((obdx)->md_cntr_base) + \ MD_COUNTER_OFFSET(op); \ @@ -338,24 +344,24 @@ static inline int obd_check_dev_active(struct obd_device *obd) } #define EXP_MD_COUNTER_INCREMENT(export, op) \ - if ((export)->exp_obd->obd_stats != NULL) { \ + if ((export)->exp_obd->obd_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((export)->exp_obd->md_cntr_base) + \ MD_COUNTER_OFFSET(op); \ LASSERT(coffset < (export)->exp_obd->md_stats->ls_num); \ lprocfs_counter_incr((export)->exp_obd->md_stats, 
coffset); \ - if ((export)->exp_md_stats != NULL) \ + if ((export)->exp_md_stats) \ lprocfs_counter_incr( \ (export)->exp_md_stats, coffset); \ } #define EXP_CHECK_MD_OP(exp, op) \ do { \ - if ((exp) == NULL) { \ + if (!(exp)) { \ CERROR("obd_" #op ": NULL export\n"); \ return -ENODEV; \ } \ - if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \ + if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \ CERROR("obd_" #op ": cleaned up obd\n"); \ return -EOPNOTSUPP; \ } \ @@ -379,11 +385,11 @@ do { \ #define EXP_CHECK_DT_OP(exp, op) \ do { \ - if ((exp) == NULL) { \ + if (!(exp)) { \ CERROR("obd_" #op ": NULL export\n"); \ return -ENODEV; \ } \ - if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \ + if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \ CERROR("obd_" #op ": cleaned up obd\n"); \ return -EOPNOTSUPP; \ } \ @@ -467,7 +473,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg) DECLARE_LU_VARS(ldt, d); ldt = obd->obd_type->typ_lu; - if (ldt != NULL) { + if (ldt) { struct lu_context session_ctx; struct lu_env env; @@ -509,7 +515,7 @@ static inline int obd_precleanup(struct obd_device *obd, return rc; ldt = obd->obd_type->typ_lu; d = obd->obd_lu_dev; - if (ldt != NULL && d != NULL) { + if (ldt && d) { if (cleanup_stage == OBD_CLEANUP_EXPORTS) { struct lu_env env; @@ -538,7 +544,7 @@ static inline int obd_cleanup(struct obd_device *obd) ldt = obd->obd_type->typ_lu; d = obd->obd_lu_dev; - if (ldt != NULL && d != NULL) { + if (ldt && d) { struct lu_env env; rc = lu_env_init(&env, ldt->ldt_ctx_tags); @@ -558,7 +564,8 @@ static inline int obd_cleanup(struct obd_device *obd) static inline void obd_cleanup_client_import(struct obd_device *obd) { /* If we set up but never connected, the - client import will not have been cleaned. */ + * client import will not have been cleaned. + */ down_write(&obd->u.cli.cl_sem); if (obd->u.cli.cl_import) { struct obd_import *imp; @@ -586,7 +593,7 @@ obd_process_config(struct obd_device *obd, int datalen, void *data) obd->obd_process_conf = 1; ldt = obd->obd_type->typ_lu; d = obd->obd_lu_dev; - if (ldt != NULL && d != NULL) { + if (ldt && d) { struct lu_env env; rc = lu_env_init(&env, ldt->ldt_ctx_tags); @@ -674,7 +681,7 @@ static inline int obd_alloc_memmd(struct obd_export *exp, struct lov_stripe_md **mem_tgt) { LASSERT(mem_tgt); - LASSERT(*mem_tgt == NULL); + LASSERT(!*mem_tgt); return obd_unpackmd(exp, mem_tgt, NULL, 0); } @@ -767,7 +774,7 @@ static inline int obd_setattr_rqset(struct obd_export *exp, EXP_COUNTER_INCREMENT(exp, setattr_async); set = ptlrpc_prep_set(); - if (set == NULL) + if (!set) return -ENOMEM; rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set); @@ -778,7 +785,8 @@ static inline int obd_setattr_rqset(struct obd_export *exp, } /* This adds all the requests into @set if @set != NULL, otherwise - all requests are sent asynchronously without waiting for response. */ + * all requests are sent asynchronously without waiting for response. + */ static inline int obd_setattr_async(struct obd_export *exp, struct obd_info *oinfo, struct obd_trans_info *oti, @@ -848,7 +856,8 @@ static inline int obd_connect(const struct lu_env *env, { int rc; __u64 ocf = data ? 
data->ocd_connect_flags : 0; /* for post-condition - * check */ + * check + */ rc = obd_check_dev_active(obd); if (rc) @@ -858,7 +867,7 @@ static inline int obd_connect(const struct lu_env *env, rc = OBP(obd, connect)(env, exp, obd, cluuid, data, localdata); /* check that only subset is granted */ - LASSERT(ergo(data != NULL, (data->ocd_connect_flags & ocf) == + LASSERT(ergo(data, (data->ocd_connect_flags & ocf) == data->ocd_connect_flags)); return rc; } @@ -871,8 +880,7 @@ static inline int obd_reconnect(const struct lu_env *env, void *localdata) { int rc; - __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition - * check */ + __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition check */ rc = obd_check_dev_active(obd); if (rc) @@ -882,8 +890,7 @@ static inline int obd_reconnect(const struct lu_env *env, rc = OBP(obd, reconnect)(env, exp, obd, cluuid, d, localdata); /* check that only subset is granted */ - LASSERT(ergo(d != NULL, - (d->ocd_connect_flags & ocf) == d->ocd_connect_flags)); + LASSERT(ergo(d, (d->ocd_connect_flags & ocf) == d->ocd_connect_flags)); return rc; } @@ -998,7 +1005,7 @@ static inline int obd_init_export(struct obd_export *exp) { int rc = 0; - if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) && + if ((exp)->exp_obd && OBT((exp)->exp_obd) && OBP((exp)->exp_obd, init_export)) rc = OBP(exp->exp_obd, init_export)(exp); return rc; @@ -1006,7 +1013,7 @@ static inline int obd_init_export(struct obd_export *exp) static inline int obd_destroy_export(struct obd_export *exp) { - if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) && + if ((exp)->exp_obd && OBT((exp)->exp_obd) && OBP((exp)->exp_obd, destroy_export)) OBP(exp->exp_obd, destroy_export)(exp); return 0; @@ -1014,7 +1021,8 @@ static inline int obd_destroy_export(struct obd_export *exp) /* @max_age is the oldest time in jiffies that we accept using a cached data. * If the cache is older than @max_age we will get a new value from the - * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */ + * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. + */ static inline int obd_statfs_async(struct obd_export *exp, struct obd_info *oinfo, __u64 max_age, @@ -1023,7 +1031,7 @@ static inline int obd_statfs_async(struct obd_export *exp, int rc = 0; struct obd_device *obd; - if (exp == NULL || exp->exp_obd == NULL) + if (!exp || !exp->exp_obd) return -EINVAL; obd = exp->exp_obd; @@ -1059,7 +1067,7 @@ static inline int obd_statfs_rqset(struct obd_export *exp, int rc = 0; set = ptlrpc_prep_set(); - if (set == NULL) + if (!set) return -ENOMEM; oinfo.oi_osfs = osfs; @@ -1073,7 +1081,8 @@ static inline int obd_statfs_rqset(struct obd_export *exp, /* @max_age is the oldest time in jiffies that we accept using a cached data. * If the cache is older than @max_age we will get a new value from the - * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */ + * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. 
+ */ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp, struct obd_statfs *osfs, __u64 max_age, __u32 flags) @@ -1081,7 +1090,7 @@ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp, int rc = 0; struct obd_device *obd = exp->exp_obd; - if (obd == NULL) + if (!obd) return -EINVAL; OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP); @@ -1155,7 +1164,7 @@ static inline int obd_adjust_kms(struct obd_export *exp, } static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp, - int len, void *karg, void *uarg) + int len, void *karg, void __user *uarg) { int rc; @@ -1205,9 +1214,10 @@ static inline int obd_notify(struct obd_device *obd, return rc; /* the check for async_recov is a complete hack - I'm hereby - overloading the meaning to also mean "this was called from - mds_postsetup". I know that my mds is able to handle notifies - by this point, and it needs to get them to execute mds_postrecov. */ + * overloading the meaning to also mean "this was called from + * mds_postsetup". I know that my mds is able to handle notifies + * by this point, and it needs to get them to execute mds_postrecov. + */ if (!obd->obd_set_up && !obd->obd_async_recov) { CDEBUG(D_HA, "obd %s not set up\n", obd->obd_name); return -EINVAL; @@ -1241,7 +1251,7 @@ static inline int obd_notify_observer(struct obd_device *observer, * Also, call non-obd listener, if any */ onu = &observer->obd_upcall; - if (onu->onu_upcall != NULL) + if (onu->onu_upcall) rc2 = onu->onu_upcall(observer, observed, ev, onu->onu_owner, NULL); else @@ -1287,7 +1297,7 @@ static inline int obd_health_check(const struct lu_env *env, int rc; /* don't use EXP_CHECK_DT_OP, because NULL method is normal here */ - if (obd == NULL || !OBT(obd)) { + if (!obd || !OBT(obd)) { CERROR("cleaned up obd\n"); return -EOPNOTSUPP; } @@ -1318,57 +1328,6 @@ static inline int obd_register_observer(struct obd_device *obd, return 0; } -#if 0 -static inline int obd_register_page_removal_cb(struct obd_export *exp, - obd_page_removal_cb_t cb, - obd_pin_extent_cb pin_cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, register_page_removal_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, register_page_removal_cb); - - rc = OBP(exp->exp_obd, register_page_removal_cb)(exp, cb, pin_cb); - return rc; -} - -static inline int obd_unregister_page_removal_cb(struct obd_export *exp, - obd_page_removal_cb_t cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, unregister_page_removal_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_page_removal_cb); - - rc = OBP(exp->exp_obd, unregister_page_removal_cb)(exp, cb); - return rc; -} - -static inline int obd_register_lock_cancel_cb(struct obd_export *exp, - obd_lock_cancel_cb cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, register_lock_cancel_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, register_lock_cancel_cb); - - rc = OBP(exp->exp_obd, register_lock_cancel_cb)(exp, cb); - return rc; -} - -static inline int obd_unregister_lock_cancel_cb(struct obd_export *exp, - obd_lock_cancel_cb cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, unregister_lock_cancel_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_lock_cancel_cb); - - rc = OBP(exp->exp_obd, unregister_lock_cancel_cb)(exp, cb); - return rc; -} -#endif - /* metadata helpers */ static inline int md_getstatus(struct obd_export *exp, struct lu_fid *fid) { @@ -1392,7 +1351,7 @@ static inline int md_getattr(struct obd_export *exp, struct md_op_data *op_data, } static inline int md_null_inode(struct obd_export *exp, - 
const struct lu_fid *fid) + const struct lu_fid *fid) { int rc; @@ -1657,8 +1616,8 @@ static inline int md_set_lock_data(struct obd_export *exp, static inline int md_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { int rc; @@ -1671,12 +1630,12 @@ static inline int md_cancel_unused(struct obd_export *exp, return rc; } -static inline ldlm_mode_t md_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, - ldlm_type_t type, - ldlm_policy_data_t *policy, - ldlm_mode_t mode, - struct lustre_handle *lockh) +static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, + enum ldlm_type type, + ldlm_policy_data_t *policy, + enum ldlm_mode mode, + struct lustre_handle *lockh) { EXP_CHECK_MD_OP(exp, lock_match); EXP_MD_COUNTER_INCREMENT(exp, lock_match); @@ -1759,7 +1718,8 @@ struct lwp_register_item { /* I'm as embarrassed about this as you are. * * <shaver> // XXX do not look into _superhack with remaining eye - * <shaver> // XXX if this were any uglier, I'd get my own show on MTV */ + * <shaver> // XXX if this were any uglier, I'd get my own show on MTV + */ extern int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c); /* obd_mount.c */ @@ -1774,7 +1734,7 @@ void class_uuid_unparse(class_uuid_t in, struct obd_uuid *out); /* lustre_peer.c */ int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index); int class_add_uuid(const char *uuid, __u64 nid); -int class_del_uuid (const char *uuid); +int class_del_uuid(const char *uuid); int class_check_uuid(struct obd_uuid *uuid, __u64 nid); void class_init_uuidlist(void); void class_exit_uuidlist(void); diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h index d031437c0528..225262fa67b6 100644 --- a/drivers/staging/lustre/lustre/include/obd_support.h +++ b/drivers/staging/lustre/lustre/include/obd_support.h @@ -47,7 +47,8 @@ extern unsigned int obd_debug_peer_on_timeout; extern unsigned int obd_dump_on_timeout; extern unsigned int obd_dump_on_eviction; /* obd_timeout should only be used for recovery, not for - networking / disk / timings affected by load (use Adaptive Timeouts) */ + * networking / disk / timings affected by load (use Adaptive Timeouts) + */ extern unsigned int obd_timeout; /* seconds */ extern unsigned int obd_timeout_set; extern unsigned int at_min; @@ -104,18 +105,21 @@ extern char obd_jobid_var[]; * failover targets the client only pings one server at a time, and pings * can be lost on a loaded network. Since eviction has serious consequences, * and there's no urgent need to evict a client just because it's idle, we - * should be very conservative here. */ + * should be very conservative here. 
+ */ #define PING_EVICT_TIMEOUT (PING_INTERVAL * 6) #define DISK_TIMEOUT 50 /* Beyond this we warn about disk speed */ #define CONNECTION_SWITCH_MIN 5U /* Connection switching rate limiter */ - /* Max connect interval for nonresponsive servers; ~50s to avoid building up - connect requests in the LND queues, but within obd_timeout so we don't - miss the recovery window */ +/* Max connect interval for nonresponsive servers; ~50s to avoid building up + * connect requests in the LND queues, but within obd_timeout so we don't + * miss the recovery window + */ #define CONNECTION_SWITCH_MAX min(50U, max(CONNECTION_SWITCH_MIN, obd_timeout)) #define CONNECTION_SWITCH_INC 5 /* Connection timeout backoff */ /* In general this should be low to have quick detection of a system - running on a backup server. (If it's too low, import_select_connection - will increase the timeout anyhow.) */ + * running on a backup server. (If it's too low, import_select_connection + * will increase the timeout anyhow.) + */ #define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout/20) /* The max delay between connects is SWITCH_MAX + SWITCH_INC + INITIAL */ #define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \ @@ -507,7 +511,6 @@ extern char obd_jobid_var[]; do { \ struct portals_handle *__h = (handle); \ \ - LASSERT(handle != NULL); \ __h->h_cookie = (unsigned long)(ptr); \ __h->h_size = (size); \ call_rcu(&__h->h_rcu, class_handle_free_cb); \ diff --git a/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h b/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h new file mode 100644 index 000000000000..5e998362e44b --- /dev/null +++ b/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h @@ -0,0 +1,94 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2013, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * + * Author: Nathan Rutman <nathan.rutman@sun.com> + * + * Kernel <-> userspace communication routines. + * The definitions below are used in the kernel and userspace. + */ + +#ifndef __UAPI_KERNELCOMM_H__ +#define __UAPI_KERNELCOMM_H__ + +#include <linux/types.h> + +/* KUC message header. + * All current and future KUC messages should use this header. + * To avoid having to include Lustre headers from libcfs, define this here. 
+ */ +struct kuc_hdr { + __u16 kuc_magic; + /* Each new Lustre feature should use a different transport */ + __u8 kuc_transport; + __u8 kuc_flags; + /* Message type or opcode, transport-specific */ + __u16 kuc_msgtype; + /* Including header */ + __u16 kuc_msglen; +} __aligned(sizeof(__u64)); + +#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr) + CR_MAXSIZE) + +#define KUC_MAGIC 0x191C /*Lustre9etLinC */ + +/* kuc_msgtype values are defined in each transport */ +enum kuc_transport_type { + KUC_TRANSPORT_GENERIC = 1, + KUC_TRANSPORT_HSM = 2, + KUC_TRANSPORT_CHANGELOG = 3, +}; + +enum kuc_generic_message_type { + KUC_MSG_SHUTDOWN = 1, +}; + +/* KUC Broadcast Groups. This determines which userspace process hears which + * messages. Multiple transports may be used within a group, or multiple + * groups may use the same transport. Broadcast + * groups need not be used if e.g. a UID is specified instead; + * use group 0 to signify unicast. + */ +#define KUC_GRP_HSM 0x02 +#define KUC_GRP_MAX KUC_GRP_HSM + +#define LK_FLG_STOP 0x01 +#define LK_NOFD -1U + +/* kernelcomm control structure, passed from userspace to kernel */ +struct lustre_kernelcomm { + __u32 lk_wfd; + __u32 lk_rfd; + __u32 lk_uid; + __u32 lk_group; + __u32 lk_data; + __u32 lk_flags; +} __packed; + +#endif /* __UAPI_KERNELCOMM_H__ */
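
Note (illustrative only, not part of the patch): the new uapi_kernelcomm.h hunk above defines the KUC wire structures shared between kernel and userspace. The following minimal userspace sketch assumes only the layout shown in that hunk; the struct is redeclared locally and the helper name build_kuc_shutdown() is hypothetical, not an existing Lustre API.

/* Illustrative sketch only -- assumes the kuc_hdr layout from the hunk
 * above; build_kuc_shutdown() is a hypothetical helper, not Lustre API.
 */
#include <stdio.h>
#include <string.h>
#include <linux/types.h>

struct kuc_hdr {
	__u16 kuc_magic;
	__u8  kuc_transport;	/* KUC_TRANSPORT_* value */
	__u8  kuc_flags;
	__u16 kuc_msgtype;	/* opcode, transport-specific */
	__u16 kuc_msglen;	/* length including this header */
} __attribute__((aligned(sizeof(__u64))));

#define KUC_MAGIC		0x191C
#define KUC_TRANSPORT_GENERIC	1
#define KUC_MSG_SHUTDOWN	1

/* Fill a generic-transport shutdown message. */
static void build_kuc_shutdown(struct kuc_hdr *h)
{
	memset(h, 0, sizeof(*h));
	h->kuc_magic     = KUC_MAGIC;
	h->kuc_transport = KUC_TRANSPORT_GENERIC;
	h->kuc_msgtype   = KUC_MSG_SHUTDOWN;
	h->kuc_msglen    = sizeof(*h);
}

int main(void)
{
	struct kuc_hdr h;

	build_kuc_shutdown(&h);
	printf("magic 0x%x transport %u msgtype %u len %u\n",
	       (unsigned int)h.kuc_magic, (unsigned int)h.kuc_transport,
	       (unsigned int)h.kuc_msgtype, (unsigned int)h.kuc_msglen);
	return 0;
}

The __aligned(sizeof(__u64)) on the real header keeps it 8-byte aligned, so any payload that follows the header in a KUC message stays naturally aligned.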