author	Sunil Mushran <sunil.mushran@oracle.com>	2011-05-19 14:34:12 -0700
committer	Joel Becker <jlbec@evilplan.org>	2011-05-25 21:05:22 -0700
commit	66effd3c681256874a81436493a933edb1701798
tree	ce0b9047ee7be10b6e7e8848f731e1d6a7579731 /fs/ocfs2/dlm/dlmdomain.c
parent	ocfs2/dlm: Add new dlm message DLM_BEGIN_EXIT_DOMAIN_MSG
ocfs2/dlm: Do not migrate resource to a node that is leaving the domain
During dlm domain shutdown, o2dlm has to free all the lock resources. Those with neither locks nor references are freed outright. Those with locks and/or references are migrated to another node.

The first task in migration is finding a target. Currently we scan the lock resource and pick one node that holds either a lock or a reference. This is not very efficient in a parallel umount case, as we may end up migrating the lock resource to a node that is itself leaving and will in turn have to migrate it to a third node.

The patch scans dlm->exit_domain_map to ensure the target node is not leaving the domain. If no valid target node is found, o2dlm does not migrate the resource; instead it waits for the unlock and deref messages that will allow it to free the resource.

Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Signed-off-by: Joel Becker <jlbec@evilplan.org>
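For context, the target-selection scan the message describes lives in dlmmaster.c and is filtered out of this view (the diffstat below is limited to dlmdomain.c). What follows is a minimal sketch of that selection logic, not the commit's code: the helper name pick_migration_target() is hypothetical, while the dlm_ctxt fields (node_num, exit_domain_map), the lock resource queues (granted, converting, blocked), and refmap are the real o2dlm structures.

/*
 * Sketch only: pick a migration target that is not leaving the domain.
 * Prefers a node already holding a lock, falls back to a node holding
 * only a reference, and skips anything set in dlm->exit_domain_map.
 */
static u8 pick_migration_target(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res)
{
	struct list_head *queues[] = { &res->granted, &res->converting,
				       &res->blocked };
	struct dlm_lock *lock;
	int i, node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* Prefer a node that already holds a lock on this resource. */
	for (i = 0; i < ARRAY_SIZE(queues); i++) {
		list_for_each_entry(lock, queues[i], list) {
			if (lock->ml.node == dlm->node_num)
				continue;
			/* Skip nodes that are leaving the domain. */
			if (test_bit(lock->ml.node, dlm->exit_domain_map))
				continue;
			return lock->ml.node;
		}
	}

	/* Otherwise fall back to a node that only holds a reference. */
	for (node = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
	     node < O2NM_MAX_NODES;
	     node = find_next_bit(res->refmap, O2NM_MAX_NODES, node + 1)) {
		if (node == dlm->node_num)
			continue;
		if (test_bit(node, dlm->exit_domain_map))
			continue;
		return node;
	}

	/* No valid target: caller should not migrate, just wait. */
	return O2NM_MAX_NODES;
}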
Diffstat (limited to 'fs/ocfs2/dlm/dlmdomain.c')
-rw-r--r--	fs/ocfs2/dlm/dlmdomain.c | 10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 3aff23feefdc..6ed6b95dcf93 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -451,14 +451,18 @@ redo_bucket:
 			dropped = dlm_empty_lockres(dlm, res);
 
 			spin_lock(&res->spinlock);
-			__dlm_lockres_calc_usage(dlm, res);
-			iter = res->hash_node.next;
+			if (dropped)
+				__dlm_lockres_calc_usage(dlm, res);
+			else
+				iter = res->hash_node.next;
 			spin_unlock(&res->spinlock);
 
 			dlm_lockres_put(res);
 
-			if (dropped)
+			if (dropped) {
+				cond_resched_lock(&dlm->spinlock);
 				goto redo_bucket;
+			}
 		}
 		cond_resched_lock(&dlm->spinlock);
 		num += n;
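One detail worth noting in the hunk: dlm->spinlock is held across the whole hash walk, and a successful dlm_empty_lockres() now forces a rescan of the entire bucket via goto redo_bucket. The added cond_resched_lock() call in that redo path is the stock kernel helper that briefly drops the lock and yields the CPU if a reschedule is pending, then reacquires it, so repeated bucket rescans cannot monopolize the CPU. An illustrative pattern of its use (not code from this file):

	/*
	 * Illustrative only: keep a long walk under a spinlock
	 * preemptible by dropping and retaking the lock whenever a
	 * reschedule is due. The loop body is a stand-in.
	 */
	spin_lock(&dlm->spinlock);
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		/* ... walk and process one hash bucket ... */
		cond_resched_lock(&dlm->spinlock);
	}
	spin_unlock(&dlm->spinlock);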