Diffstat (limited to 'drivers/staging/lustre/lustre/obdclass/cl_lock.c')
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/cl_lock.c | 98
1 file changed, 52 insertions(+), 46 deletions(-)
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index 1836dc01499a..aec644eb4db9 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -96,8 +96,8 @@ static int cl_lock_invariant(const struct lu_env *env,
result = atomic_read(&lock->cll_ref) > 0 &&
cl_lock_invariant_trusted(env, lock);
- if (!result && env != NULL)
- CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
+ if (!result && env)
+ CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n");
return result;
}
@@ -259,7 +259,7 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
struct cl_lock_slice *slice;
slice = list_entry(lock->cll_layers.next,
- struct cl_lock_slice, cls_linkage);
+ struct cl_lock_slice, cls_linkage);
list_del_init(lock->cll_layers.next);
slice->cls_ops->clo_fini(env, slice);
}
@@ -288,7 +288,7 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
LINVRNT(cl_lock_invariant(env, lock));
obj = lock->cll_descr.cld_obj;
- LINVRNT(obj != NULL);
+ LINVRNT(obj);
CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
atomic_read(&lock->cll_ref), lock, RETIP);
@@ -361,8 +361,8 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
struct cl_lock *lock;
struct lu_object_header *head;
- lock = kmem_cache_alloc(cl_lock_kmem, GFP_NOFS | __GFP_ZERO);
- if (lock != NULL) {
+ lock = kmem_cache_zalloc(cl_lock_kmem, GFP_NOFS);
+ if (lock) {
atomic_set(&lock->cll_ref, 1);
lock->cll_descr = *descr;
lock->cll_state = CLS_NEW;
@@ -382,8 +382,7 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
CS_LOCK_INC(obj, total);
CS_LOCK_INC(obj, create);
cl_lock_lockdep_init(lock);
- list_for_each_entry(obj, &head->loh_layers,
- co_lu.lo_linkage) {
+ list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
int err;
err = obj->co_ops->coo_lock_init(env, obj, lock, io);
@@ -461,7 +460,7 @@ static int cl_lock_fits_into(const struct lu_env *env,
LINVRNT(cl_lock_invariant_trusted(env, lock));
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_fits_into != NULL &&
+ if (slice->cls_ops->clo_fits_into &&
!slice->cls_ops->clo_fits_into(env, slice, need, io))
return 0;
}
@@ -524,17 +523,17 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
lock = cl_lock_lookup(env, obj, io, need);
spin_unlock(&head->coh_lock_guard);
- if (lock == NULL) {
+ if (!lock) {
lock = cl_lock_alloc(env, obj, io, need);
if (!IS_ERR(lock)) {
struct cl_lock *ghost;
spin_lock(&head->coh_lock_guard);
ghost = cl_lock_lookup(env, obj, io, need);
- if (ghost == NULL) {
+ if (!ghost) {
cl_lock_get_trust(lock);
list_add_tail(&lock->cll_linkage,
- &head->coh_locks);
+ &head->coh_locks);
spin_unlock(&head->coh_lock_guard);
CS_LOCK_INC(obj, busy);
} else {
@@ -572,7 +571,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
spin_lock(&head->coh_lock_guard);
lock = cl_lock_lookup(env, obj, io, need);
spin_unlock(&head->coh_lock_guard);
- if (lock == NULL)
+ if (!lock)
return NULL;
cl_lock_mutex_get(env, lock);
@@ -584,7 +583,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
cl_lock_put(env, lock);
lock = NULL;
}
- } while (lock == NULL);
+ } while (!lock);
cl_lock_hold_add(env, lock, scope, source);
cl_lock_user_add(env, lock);
@@ -774,8 +773,8 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
lock->cll_flags |= CLF_CANCELLED;
list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_cancel != NULL)
+ cls_linkage) {
+ if (slice->cls_ops->clo_cancel)
slice->cls_ops->clo_cancel(env, slice);
}
}
@@ -811,8 +810,8 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
* by cl_lock_lookup().
*/
list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_delete != NULL)
+ cls_linkage) {
+ if (slice->cls_ops->clo_delete)
slice->cls_ops->clo_delete(env, slice);
}
/*
@@ -935,7 +934,8 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
if (result == 0) {
/* To avoid being interrupted by the 'non-fatal' signals
* (SIGCHLD, for instance), we'd block them temporarily.
- * LU-305 */
+ * LU-305
+ */
blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
init_waitqueue_entry(&waiter, current);
@@ -946,7 +946,8 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
LASSERT(cl_lock_nr_mutexed(env) == 0);
/* Returning ERESTARTSYS instead of EINTR so syscalls
- * can be restarted if signals are pending here */
+ * can be restarted if signals are pending here
+ */
result = -ERESTARTSYS;
if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
schedule();
@@ -974,7 +975,7 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
LINVRNT(cl_lock_invariant(env, lock));
list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
- if (slice->cls_ops->clo_state != NULL)
+ if (slice->cls_ops->clo_state)
slice->cls_ops->clo_state(env, slice, state);
wake_up_all(&lock->cll_wq);
}
@@ -1038,8 +1039,8 @@ static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
result = -ENOSYS;
list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_unuse != NULL) {
+ cls_linkage) {
+ if (slice->cls_ops->clo_unuse) {
result = slice->cls_ops->clo_unuse(env, slice);
if (result != 0)
break;
@@ -1072,7 +1073,7 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
result = -ENOSYS;
state = cl_lock_intransit(env, lock);
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_use != NULL) {
+ if (slice->cls_ops->clo_use) {
result = slice->cls_ops->clo_use(env, slice);
if (result != 0)
break;
@@ -1125,7 +1126,7 @@ static int cl_enqueue_kick(const struct lu_env *env,
result = -ENOSYS;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_enqueue != NULL) {
+ if (slice->cls_ops->clo_enqueue) {
result = slice->cls_ops->clo_enqueue(env,
slice, io, flags);
if (result != 0)
@@ -1170,7 +1171,8 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
/* kick layers. */
result = cl_enqueue_kick(env, lock, io, flags);
/* For AGL case, the cl_lock::cll_state may
- * become CLS_HELD already. */
+ * become CLS_HELD already.
+ */
if (result == 0 && lock->cll_state == CLS_QUEUING)
cl_lock_state_set(env, lock, CLS_ENQUEUED);
break;
@@ -1215,7 +1217,7 @@ int cl_lock_enqueue_wait(const struct lu_env *env,
LASSERT(cl_lock_is_mutexed(lock));
LASSERT(lock->cll_state == CLS_QUEUING);
- LASSERT(lock->cll_conflict != NULL);
+ LASSERT(lock->cll_conflict);
conflict = lock->cll_conflict;
lock->cll_conflict = NULL;
@@ -1258,7 +1260,7 @@ static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
do {
result = cl_enqueue_try(env, lock, io, enqflags);
if (result == CLO_WAIT) {
- if (lock->cll_conflict != NULL)
+ if (lock->cll_conflict)
result = cl_lock_enqueue_wait(env, lock, 1);
else
result = cl_lock_state_wait(env, lock);
@@ -1300,7 +1302,8 @@ int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
}
/* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
- * underlying resources. */
+ * underlying resources.
+ */
if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
cl_lock_user_del(env, lock);
return 0;
@@ -1416,7 +1419,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
result = -ENOSYS;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_wait != NULL) {
+ if (slice->cls_ops->clo_wait) {
result = slice->cls_ops->clo_wait(env, slice);
if (result != 0)
break;
@@ -1449,7 +1452,7 @@ int cl_wait(const struct lu_env *env, struct cl_lock *lock)
LINVRNT(cl_lock_invariant(env, lock));
LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
- "Wrong state %d \n", lock->cll_state);
+ "Wrong state %d\n", lock->cll_state);
LASSERT(lock->cll_holds > 0);
do {
@@ -1487,7 +1490,7 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
pound = 0;
list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_weigh != NULL) {
+ if (slice->cls_ops->clo_weigh) {
ounce = slice->cls_ops->clo_weigh(env, slice);
pound += ounce;
if (pound < ounce) /* over-weight^Wflow */
@@ -1523,7 +1526,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
LINVRNT(cl_lock_invariant(env, lock));
list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_modify != NULL) {
+ if (slice->cls_ops->clo_modify) {
result = slice->cls_ops->clo_modify(env, slice, desc);
if (result != 0)
return result;
@@ -1584,7 +1587,7 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
result = cl_lock_enclosure(env, lock, closure);
if (result == 0) {
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_closure != NULL) {
+ if (slice->cls_ops->clo_closure) {
result = slice->cls_ops->clo_closure(env, slice,
closure);
if (result != 0)
@@ -1654,7 +1657,7 @@ void cl_lock_disclosure(const struct lu_env *env,
cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
list_for_each_entry_safe(scan, temp, &closure->clc_list,
- cll_inclosure){
+ cll_inclosure) {
list_del_init(&scan->cll_inclosure);
cl_lock_mutex_put(env, scan);
lu_ref_del(&scan->cll_reference, "closure", closure);
@@ -1777,13 +1780,15 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
lock = NULL;
need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
- * not PHANTOM */
+ * not PHANTOM
+ */
need->cld_start = need->cld_end = index;
need->cld_enq_flags = 0;
spin_lock(&head->coh_lock_guard);
/* It is fine to match any group lock since there could be only one
- * with a uniq gid and it conflicts with all other lock modes too */
+ * with a uniq gid and it conflicts with all other lock modes too
+ */
list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
if (scan != except &&
(scan->cll_descr.cld_mode == CLM_GROUP ||
@@ -1798,7 +1803,8 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
(canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
(pending || !(scan->cll_flags & CLF_CANCELPEND))) {
/* Don't increase cs_hit here since this
- * is just a helper function. */
+ * is just a helper function.
+ */
cl_lock_get_trust(scan);
lock = scan;
break;
@@ -1820,7 +1826,6 @@ static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
slice = cl_page_at(page, dtype);
- LASSERT(slice != NULL);
return slice->cpl_page->cp_index;
}
@@ -1839,12 +1844,13 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
/* refresh non-overlapped index */
tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
- lock, 1, 0);
- if (tmp != NULL) {
+ lock, 1, 0);
+ if (tmp) {
/* Cache the first-non-overlapped index so as to skip
* all pages within [index, clt_fn_index). This
* is safe because if tmp lock is canceled, it will
- * discard these pages. */
+ * discard these pages.
+ */
info->clt_fn_index = tmp->cll_descr.cld_end + 1;
if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
info->clt_fn_index = CL_PAGE_EOF;
@@ -1950,7 +1956,7 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
* already destroyed (as otherwise they will be left unprotected).
*/
LASSERT(ergo(!cancel,
- head->coh_tree.rnode == NULL && head->coh_pages == 0));
+ !head->coh_tree.rnode && head->coh_pages == 0));
spin_lock(&head->coh_lock_guard);
while (!list_empty(&head->coh_locks)) {
@@ -2166,8 +2172,8 @@ EXPORT_SYMBOL(cl_lock_mode_name);
* Prints human readable representation of a lock description.
*/
void cl_lock_descr_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_lock_descr *descr)
+ lu_printer_t printer,
+ const struct cl_lock_descr *descr)
{
const struct lu_fid *fid;
@@ -2194,7 +2200,7 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
(*printer)(env, cookie, " %s@%p: ",
slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
slice);
- if (slice->cls_ops->clo_print != NULL)
+ if (slice->cls_ops->clo_print)
slice->cls_ops->clo_print(env, cookie, printer, slice);
(*printer)(env, cookie, "\n");
}
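
A note on the cl_lock_alloc() hunk above: kmem_cache_zalloc() is the slab helper that behaves exactly like kmem_cache_alloc() with __GFP_ZERO or'ed into the gfp mask, which is why the patch can swap one form for the other without changing behavior. A minimal sketch of the idiom, using a hypothetical demo cache that is not part of this patch:

#include <linux/slab.h>

struct demo {
	int a;
};

static struct kmem_cache *demo_kmem;

static struct demo *demo_alloc(void)
{
	/* Equivalent to kmem_cache_alloc(demo_kmem, GFP_NOFS | __GFP_ZERO),
	 * the form this patch replaces: the returned object is zero-filled,
	 * so fields not set explicitly (like demo->a here) start at 0.
	 */
	return kmem_cache_zalloc(demo_kmem, GFP_NOFS);
}

The same reasoning applies to the atomic_set()/assignment sequence in cl_lock_alloc(): only the fields given non-zero initial values need explicit stores after a zalloc.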