author     Sunil Mushran <sunil.mushran@oracle.com>  2007-03-22 17:08:32 -0700
committer  Mark Fasheh <mark.fasheh@oracle.com>      2007-03-26 16:51:15 -0700
commit     2f5bf1f2d061dea5146aa283685ce2b00cea2f3d (patch)
tree       c957d5e3567146470e1b439dd56ca87b74624424 /fs
parent     ocfs2_dlm: Fix lockres ref counting bug (diff)
download   linux-dev-2f5bf1f2d061dea5146aa283685ce2b00cea2f3d.tar.xz
           linux-dev-2f5bf1f2d061dea5146aa283685ce2b00cea2f3d.zip
ocfs2_dlm: Check for migrateable lockres in dlm_empty_lockres()
In dlm_migrate_lockres(), we check upfront whether the lockres is a candidate
for migration. This patch encapsulates that check in a separate function,
dlm_is_lockres_migrateable(), so that dlm_empty_lockres() can also use it
during umount. This addresses the problem of the umount process spinning.

Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
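For context on the check that the patch factors out: with res->spinlock held, it verifies that this node is the known master of the lockres, then walks the granted, converting and blocked queues, counting locks and refusing migration if any lock is still held by the local node; a count of zero lets dlm_empty_lockres() skip migration entirely. Below is a minimal, hypothetical userspace sketch of that logic using simplified stand-in types (fake_lockres, fake_lock) rather than the kernel's structures or locking.

#include <stdio.h>

#define NUM_QUEUES 3
#define MAX_LOCKS  8

struct fake_lock {
        int node;                       /* node id that holds this lock */
};

struct fake_lockres {
        int owner;                      /* current master of the resource */
        int nlocks[NUM_QUEUES];         /* locks per queue */
        struct fake_lock locks[NUM_QUEUES][MAX_LOCKS];
};

/* Returns 0 and sets *numlocks if the resource looks migrateable,
 * -1 otherwise (roughly what dlm_is_lockres_migrateable() reports). */
static int is_migrateable(const struct fake_lockres *res, int this_node,
                          int *numlocks)
{
        static const char *qname[NUM_QUEUES] = {
                "granted", "converting", "blocked"
        };
        int count = 0;

        if (res->owner != this_node)
                return -1;              /* only the master may migrate */

        for (int q = 0; q < NUM_QUEUES; q++) {
                for (int i = 0; i < res->nlocks[q]; i++) {
                        count++;
                        if (res->locks[q][i].node == this_node) {
                                printf("local lock on %s queue, not migrateable\n",
                                       qname[q]);
                                return -1;
                        }
                }
        }

        *numlocks = count;
        return 0;
}

int main(void)
{
        struct fake_lockres res = { .owner = 1 };
        int numlocks;

        res.nlocks[0] = 1;              /* one lock on the granted queue... */
        res.locks[0][0].node = 2;       /* ...held by a remote node */

        if (is_migrateable(&res, 1, &numlocks) == 0)
                printf("migrateable, %d lock(s)\n", numlocks);
        return 0;
}

Built as-is, the sketch prints "migrateable, 1 lock(s)" because the only lock on the resource belongs to a remote node.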
Diffstat (limited to 'fs')
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c |  2
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 99
2 files changed, 67 insertions, 34 deletions
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index dbbac184c261..c558442a0b44 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -657,6 +657,8 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
 		dlm_kick_thread(dlm, NULL);
 
 		while (dlm_migrate_all_locks(dlm)) {
+			/* Give dlm_thread time to purge the lockres' */
+			msleep(500);
 			mlog(0, "%s: more migration to do\n", dlm->name);
 		}
 		dlm_mark_domain_leaving(dlm);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9229e04362f6..6edffca99d98 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2424,6 +2424,57 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
 	dlm_lockres_put(res);
 }
 
+/* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
+ * if not. If 0, numlocks is set to the number of locks in the lockres.
+ */
+static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
+				      struct dlm_lock_resource *res,
+				      int *numlocks)
+{
+	int ret;
+	int i;
+	int count = 0;
+	struct list_head *queue, *iter;
+	struct dlm_lock *lock;
+
+	assert_spin_locked(&res->spinlock);
+
+	ret = -EINVAL;
+	if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
+		mlog(0, "cannot migrate lockres with unknown owner!\n");
+		goto leave;
+	}
+
+	if (res->owner != dlm->node_num) {
+		mlog(0, "cannot migrate lockres this node doesn't own!\n");
+		goto leave;
+	}
+
+	ret = 0;
+	queue = &res->granted;
+	for (i = 0; i < 3; i++) {
+		list_for_each(iter, queue) {
+			lock = list_entry(iter, struct dlm_lock, list);
+			++count;
+			if (lock->ml.node == dlm->node_num) {
+				mlog(0, "found a lock owned by this node still "
+				     "on the %s queue! will not migrate this "
+				     "lockres\n", (i == 0 ? "granted" :
+						   (i == 1 ? "converting" :
+						    "blocked")));
+				ret = -ENOTEMPTY;
+				goto leave;
+			}
+		}
+		queue++;
+	}
+
+	*numlocks = count;
+	mlog(0, "migrateable lockres having %d locks\n", *numlocks);
+
+leave:
+	return ret;
+}
+
 /*
  * DLM_MIGRATE_LOCKRES
@@ -2437,14 +2488,12 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
 	struct dlm_master_list_entry *mle = NULL;
 	struct dlm_master_list_entry *oldmle = NULL;
 	struct dlm_migratable_lockres *mres = NULL;
-	int ret = -EINVAL;
+	int ret = 0;
 	const char *name;
 	unsigned int namelen;
 	int mle_added = 0;
-	struct list_head *queue, *iter;
-	int i;
-	struct dlm_lock *lock;
-	int empty = 1, wake = 0;
+	int numlocks;
+	int wake = 0;
 
 	if (!dlm_grab(dlm))
 		return -EINVAL;
@@ -2458,42 +2507,16 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
 	 * ensure this lockres is a proper candidate for migration
 	 */
 	spin_lock(&res->spinlock);
-	if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
-		mlog(0, "cannot migrate lockres with unknown owner!\n");
-		spin_unlock(&res->spinlock);
-		goto leave;
-	}
-	if (res->owner != dlm->node_num) {
-		mlog(0, "cannot migrate lockres this node doesn't own!\n");
+	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
+	if (ret < 0) {
 		spin_unlock(&res->spinlock);
 		goto leave;
 	}
-	mlog(0, "checking queues...\n");
-	queue = &res->granted;
-	for (i=0; i<3; i++) {
-		list_for_each(iter, queue) {
-			lock = list_entry (iter, struct dlm_lock, list);
-			empty = 0;
-			if (lock->ml.node == dlm->node_num) {
-				mlog(0, "found a lock owned by this node "
-				     "still on the %s queue! will not "
-				     "migrate this lockres\n",
-				     i==0 ? "granted" :
-				     (i==1 ? "converting" : "blocked"));
-				spin_unlock(&res->spinlock);
-				ret = -ENOTEMPTY;
-				goto leave;
-			}
-		}
-		queue++;
-	}
-	mlog(0, "all locks on this lockres are nonlocal. continuing\n");
 	spin_unlock(&res->spinlock);
 
 	/* no work to do */
-	if (empty) {
+	if (numlocks == 0) {
 		mlog(0, "no locks were found on this lockres! done!\n");
-		ret = 0;
 		goto leave;
 	}
 
@@ -2729,6 +2752,7 @@ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
 	int ret;
 	int lock_dropped = 0;
+	int numlocks;
 
 	spin_lock(&res->spinlock);
 	if (res->owner != dlm->node_num) {
@@ -2740,6 +2764,13 @@ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 		spin_unlock(&res->spinlock);
 		goto leave;
 	}
+
+	/* No need to migrate a lockres having no locks */
+	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
+	if (ret >= 0 && numlocks == 0) {
+		spin_unlock(&res->spinlock);
+		goto leave;
+	}
 	spin_unlock(&res->spinlock);
 
 	/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
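As a closing note on the dlmdomain.c hunk above: dlm_unregister_domain() keeps calling dlm_migrate_all_locks() while it reports work left to do, and the added msleep(500) gives dlm_thread a chance to purge lockres' between passes instead of the umount path spinning. A rough userspace sketch of that retry-with-sleep pattern, using hypothetical stand-in functions rather than the real DLM calls:

#include <stdio.h>
#include <unistd.h>

/* Stand-in for dlm_migrate_all_locks(): pretend three passes of
 * migration/purge work remain and report how much is left. */
static int migrate_all_locks(void)
{
        static int remaining = 3;
        return remaining ? remaining-- : 0;
}

int main(void)
{
        while (migrate_all_locks()) {
                /* As in the patch: sleep so the background worker can
                 * purge lock resources instead of this loop spinning. */
                usleep(500 * 1000);     /* roughly msleep(500) */
                printf("more migration to do\n");
        }
        printf("no lock resources left; domain can be torn down\n");
        return 0;
}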