Diffstat (limited to 'drivers/staging/lustre/lustre/ldlm/ldlm_lock.c')
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_lock.c  115
1 file changed, 61 insertions(+), 54 deletions(-)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index ecd65a7a3dc9..bff94ea12d6f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -185,7 +185,7 @@ void ldlm_lock_put(struct ldlm_lock *lock)
"final lock_put on destroyed lock, freeing it.");
res = lock->l_resource;
- LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
+ LASSERT(ldlm_is_destroyed(lock));
LASSERT(list_empty(&lock->l_res_link));
LASSERT(list_empty(&lock->l_pending_chain));
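Most of the hunks below make the same mechanical substitution: open-coded tests and updates of lock->l_flags become named accessors such as ldlm_is_destroyed() and ldlm_set_destroyed(). These wrappers come from lustre_dlm_flags.h; roughly (a sketch only — the generated header spells out the bit value for each flag), every flag gets a test/set/clear trio built on shared helpers:

/* Sketch of the accessor pattern in lustre_dlm_flags.h (bit values elided). */
#define LDLM_TEST_FLAG(_l, _b)  (((_l)->l_flags & (_b)) != 0)
#define LDLM_SET_FLAG(_l, _b)   ((_l)->l_flags |= (_b))
#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))

#define ldlm_is_destroyed(_l)   LDLM_TEST_FLAG((_l), LDLM_FL_DESTROYED)
#define ldlm_set_destroyed(_l)  LDLM_SET_FLAG((_l), LDLM_FL_DESTROYED)

The named forms read better in asserts and conditionals and leave a single place to instrument flag changes.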
@@ -229,15 +229,25 @@ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
/**
* Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
+ *
+ * If \a last_use is non-zero, it will remove the lock from LRU only if
+ * it matches lock's l_last_used.
+ *
+ * \retval 0 the lock was not removed: either \a last_use is non-zero and
+ * does not match the lock's l_last_used, or the lock was not on the
+ * LRU list.
+ * \retval 1 the lock was on the LRU list and has been removed.
*/
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use)
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- int rc;
+ int rc = 0;
spin_lock(&ns->ns_lock);
- rc = ldlm_lock_remove_from_lru_nolock(lock);
+ if (last_use == 0 || last_use == lock->l_last_used)
+ rc = ldlm_lock_remove_from_lru_nolock(lock);
spin_unlock(&ns->ns_lock);
+
return rc;
}
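The new last_use argument closes a race in lazy LRU cancellation: a caller can sample l_last_used, decide without holding ns_lock whether the lock is idle enough to cancel, and then remove it from the LRU only if nothing re-used it in between (re-use refreshes l_last_used). A hedged usage sketch — not code from this patch, and cancel_idle_lock() is a hypothetical stand-in for the caller's cancel path:

time_t last_use = lock->l_last_used;    /* sample before deciding */

/* ... the lock may be re-used concurrently at this point ... */

if (ldlm_lock_remove_from_lru_check(lock, last_use))
        cancel_idle_lock(lock);         /* hypothetical cancel path */
/* else: not on the LRU, or l_last_used moved, i.e. the lock was re-used */

Passing last_use == 0 keeps the old unconditional behaviour; the unchanged ldlm_lock_remove_from_lru() calls later in this diff suggest that name survives as a zero-passing wrapper.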
@@ -252,8 +262,7 @@ static void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
LASSERT(list_empty(&lock->l_lru));
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
list_add_tail(&lock->l_lru, &ns->ns_unused_list);
- if (lock->l_flags & LDLM_FL_SKIPPED)
- lock->l_flags &= ~LDLM_FL_SKIPPED;
+ ldlm_clear_skipped(lock);
LASSERT(ns->ns_nr_unused >= 0);
ns->ns_nr_unused++;
}
@@ -318,11 +327,11 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
LBUG();
}
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
LASSERT(list_empty(&lock->l_lru));
return 0;
}
- lock->l_flags |= LDLM_FL_DESTROYED;
+ ldlm_set_destroyed(lock);
if (lock->l_export && lock->l_export->exp_lock_hash) {
/* NB: it's safe to call cfs_hash_del() even if the lock isn't
@@ -544,7 +553,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it
*/
- if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
+ if (flags == 0 && !ldlm_is_destroyed(lock)) {
lu_ref_add(&lock->l_reference, "handle", current);
return lock;
}
@@ -554,21 +563,22 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
LASSERT(lock->l_resource);
lu_ref_add_atomic(&lock->l_reference, "handle", current);
- if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
+ if (unlikely(ldlm_is_destroyed(lock))) {
unlock_res_and_lock(lock);
CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
LDLM_LOCK_PUT(lock);
return NULL;
}
- if (flags && (lock->l_flags & flags)) {
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- return NULL;
- }
+ if (flags) {
+ if (lock->l_flags & flags) {
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_PUT(lock);
+ return NULL;
+ }
- if (flags)
lock->l_flags |= flags;
+ }
unlock_res_and_lock(lock);
return lock;
@@ -599,14 +609,14 @@ EXPORT_SYMBOL(ldlm_lock2desc);
static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
struct list_head *work_list)
{
- if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
+ if (!ldlm_is_ast_sent(lock)) {
LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
- lock->l_flags |= LDLM_FL_AST_SENT;
+ ldlm_set_ast_sent(lock);
/* If the enqueuing client said so, tell the AST recipient to
* discard dirty data, rather than writing back.
*/
- if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
- lock->l_flags |= LDLM_FL_DISCARD_DATA;
+ if (ldlm_is_ast_discard_data(new))
+ ldlm_set_discard_data(lock);
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, work_list);
LDLM_LOCK_GET(lock);
@@ -621,8 +631,8 @@ static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
static void ldlm_add_cp_work_item(struct ldlm_lock *lock,
struct list_head *work_list)
{
- if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
- lock->l_flags |= LDLM_FL_CP_REQD;
+ if (!ldlm_is_cp_reqd(lock)) {
+ ldlm_set_cp_reqd(lock);
LDLM_DEBUG(lock, "lock granted; sending completion AST.");
LASSERT(list_empty(&lock->l_cp_ast));
list_add(&lock->l_cp_ast, work_list);
@@ -657,7 +667,7 @@ void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
struct ldlm_lock *lock;
lock = ldlm_handle2lock(lockh);
- LASSERT(lock);
+ LASSERTF(lock, "Non-existing lock: %llx\n", lockh->cookie);
ldlm_lock_addref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
}
@@ -704,7 +714,7 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
if (lock) {
lock_res_and_lock(lock);
if (lock->l_readers != 0 || lock->l_writers != 0 ||
- !(lock->l_flags & LDLM_FL_CBPENDING)) {
+ !ldlm_is_cbpending(lock)) {
ldlm_lock_addref_internal_nolock(lock, mode);
result = 0;
}
@@ -770,17 +780,17 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
ldlm_lock_decref_internal_nolock(lock, mode);
- if (lock->l_flags & LDLM_FL_LOCAL &&
+ if (ldlm_is_local(lock) &&
!lock->l_readers && !lock->l_writers) {
/* If this is a local lock on a server namespace and this was
* the last reference, cancel the lock.
*/
CDEBUG(D_INFO, "forcing cancel of local lock\n");
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
}
if (!lock->l_readers && !lock->l_writers &&
- (lock->l_flags & LDLM_FL_CBPENDING)) {
+ ldlm_is_cbpending(lock)) {
/* If we received a blocking AST and this was the last reference,
* run the callback.
*/
@@ -791,16 +801,14 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
ldlm_lock_remove_from_lru(lock);
unlock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ if (ldlm_is_fail_loc(lock))
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
- if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
+ if (ldlm_is_atomic_cb(lock) ||
ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
ldlm_handle_bl_callback(ns, NULL, lock);
} else if (!lock->l_readers && !lock->l_writers &&
- !(lock->l_flags & LDLM_FL_NO_LRU) &&
- !(lock->l_flags & LDLM_FL_BL_AST)) {
-
+ !ldlm_is_no_lru(lock) && !ldlm_is_bl_ast(lock)) {
LDLM_DEBUG(lock, "add lock into lru list");
/* If this is a client-side namespace and this was the last
@@ -809,7 +817,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
ldlm_lock_add_to_lru(lock);
unlock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ if (ldlm_is_fail_loc(lock))
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
/* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
@@ -853,7 +861,7 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
unlock_res_and_lock(lock);
ldlm_lock_decref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
@@ -971,7 +979,7 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
ldlm_resource_dump(D_INFO, res);
LDLM_DEBUG(lock, "About to add lock:");
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
}
@@ -1073,10 +1081,9 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
* whose parents already hold a lock so forward progress
* can still happen.
*/
- if (lock->l_flags & LDLM_FL_CBPENDING &&
- !(flags & LDLM_FL_CBPENDING))
+ if (ldlm_is_cbpending(lock) && !(flags & LDLM_FL_CBPENDING))
continue;
- if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
+ if (!unref && ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
continue;
@@ -1092,6 +1099,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
if (unlikely(match == LCK_GROUP) &&
lock->l_resource->lr_type == LDLM_EXTENT &&
+ policy->l_extent.gid != LDLM_GID_ANY &&
lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
continue;
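This added gid check is a behavioural change in search_queue(): for LCK_GROUP matches on extent locks, a request whose gid is LDLM_GID_ANY now acts as a wildcard and can match a group lock under any gid, while a concrete gid must still compare equal. A hypothetical helper, for illustration only, capturing the new rule:

static bool group_gid_matches(const struct ldlm_lock *lock,
                              const ldlm_policy_data_t *policy)
{
        /* LDLM_GID_ANY is a wildcard; otherwise the gids must be equal */
        return policy->l_extent.gid == LDLM_GID_ANY ||
               policy->l_extent.gid == lock->l_policy_data.l_extent.gid;
}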
@@ -1104,11 +1112,10 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
policy->l_inodebits.bits))
continue;
- if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
+ if (!unref && LDLM_HAVE_MASK(lock, GONE))
continue;
- if ((flags & LDLM_FL_LOCAL_ONLY) &&
- !(lock->l_flags & LDLM_FL_LOCAL))
+ if ((flags & LDLM_FL_LOCAL_ONLY) && !ldlm_is_local(lock))
continue;
if (flags & LDLM_FL_TEST_LOCK) {
@@ -1142,7 +1149,7 @@ EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
*/
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
- lock->l_flags |= LDLM_FL_LVB_READY;
+ ldlm_set_lvb_ready(lock);
wake_up_all(&lock->l_waitq);
}
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
@@ -1243,8 +1250,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
if (lock) {
ldlm_lock2handle(lock, lockh);
- if ((flags & LDLM_FL_LVB_READY) &&
- (!(lock->l_flags & LDLM_FL_LVB_READY))) {
+ if ((flags & LDLM_FL_LVB_READY) && !ldlm_is_lvb_ready(lock)) {
__u64 wait_flags = LDLM_FL_LVB_READY |
LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
struct l_wait_info lwi;
@@ -1271,7 +1277,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
l_wait_event(lock->l_waitq,
lock->l_flags & wait_flags,
&lwi);
- if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
+ if (!ldlm_is_lvb_ready(lock)) {
if (flags & LDLM_FL_TEST_LOCK)
LDLM_LOCK_RELEASE(lock);
else
@@ -1325,10 +1331,10 @@ enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
lock = ldlm_handle2lock(lockh);
if (lock) {
lock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_GONE_MASK)
+ if (LDLM_HAVE_MASK(lock, GONE))
goto out;
- if (lock->l_flags & LDLM_FL_CBPENDING &&
+ if (ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
goto out;
@@ -1542,7 +1548,8 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
/* Some flags from the enqueue want to make it into the AST, via the
* lock's l_flags.
*/
- lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
+ if (*flags & LDLM_FL_AST_DISCARD_DATA)
+ ldlm_set_ast_discard_data(lock);
/*
* This distinction between local lock trees is very important; a client
@@ -1581,7 +1588,7 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
lock_res_and_lock(lock);
list_del_init(&lock->l_bl_ast);
- LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
+ LASSERT(ldlm_is_ast_sent(lock));
LASSERT(lock->l_bl_ast_run == 0);
LASSERT(lock->l_blocking_lock);
lock->l_bl_ast_run++;
@@ -1628,12 +1635,12 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
/* nobody should touch l_cp_ast */
lock_res_and_lock(lock);
list_del_init(&lock->l_cp_ast);
- LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
+ LASSERT(ldlm_is_cp_reqd(lock));
/* save l_completion_ast since it can be changed by
* mds_intent_policy(), see bug 14225
*/
completion_callback = lock->l_completion_ast;
- lock->l_flags &= ~LDLM_FL_CP_REQD;
+ ldlm_clear_cp_reqd(lock);
unlock_res_and_lock(lock);
if (completion_callback)
@@ -1778,8 +1785,8 @@ out:
void ldlm_cancel_callback(struct ldlm_lock *lock)
{
check_res_locked(lock->l_resource);
- if (!(lock->l_flags & LDLM_FL_CANCEL)) {
- lock->l_flags |= LDLM_FL_CANCEL;
+ if (!ldlm_is_cancel(lock)) {
+ ldlm_set_cancel(lock);
if (lock->l_blocking_ast) {
unlock_res_and_lock(lock);
lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
@@ -1789,7 +1796,7 @@ void ldlm_cancel_callback(struct ldlm_lock *lock)
LDLM_DEBUG(lock, "no blocking ast");
}
}
- lock->l_flags |= LDLM_FL_BL_DONE;
+ ldlm_set_bl_done(lock);
}
/**