author     Jinshan Xiong <jinshan.xiong@intel.com>          2016-03-30 19:48:40 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2016-03-30 21:38:13 -0700
commit     06563b5606da7635b1fd5d121e7bd6d221e23db4
tree       204b4a98c04f9301e1606f9bedc93fb1bdc04463
parent     staging/lustre/clio: generalize cl_sync_io
staging/lustre/clio: cl_lock simplification
In this patch, the cl_lock cache is eliminated. cl_lock is turned into a cacheless data container that describes the locks an I/O requires: it is created before the I/O starts and destroyed once the I/O is complete. cl_lock relies on the LDLM lock to fulfill lock semantics; the LDLM lock is attached to the cl_lock at the OSC layer and remains cacheable.

Two major methods are supported for cl_lock: clo_enqueue and clo_cancel. A cl_lock is enqueued by cl_lock_request(), which calls the clo_enqueue() method of each layer to enqueue the lock. At the LOV layer, if a cl_lock consists of multiple sub cl_locks, each sub lock is enqueued correspondingly. At the OSC layer, the enqueue request tends to reuse a cached LDLM lock; otherwise a new LDLM lock has to be requested from the OST side.

cl_lock_cancel() must be called to release a cl_lock after use. The clo_cancel() method is called for each layer to release the resources held by the lock. At the OSC layer, the reference on the LDLM lock taken at clo_enqueue time is released. An LDLM lock can only be canceled once no cl_lock is using it.

Signed-off-by: Bobi Jam <bobijam.xu@intel.com>
Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Reviewed-on: http://review.whamcloud.com/10858
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-3259
Reviewed-by: John L. Hammond <john.hammond@intel.com>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
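For orientation, the enqueue/cancel flow described above can be sketched roughly as follows. This is only an illustrative fragment, not code from this patch: cl_lock_request() and cl_lock_cancel() are the entry points named above, but their exact signatures, the demo_locked_io() wrapper, and the surrounding cl_io setup are assumed for the example.

/*
 * Minimal sketch of the cacheless cl_lock life cycle described in this
 * commit message. The signatures of cl_lock_request() and cl_lock_cancel()
 * are assumed, and demo_locked_io() itself is a hypothetical caller.
 */
static int demo_locked_io(const struct lu_env *env, struct cl_io *io,
                          struct cl_lock *lock)
{
        int rc;

        /* Enqueue: each layer's clo_enqueue() runs; at the OSC layer a
         * cached LDLM lock is reused when possible, otherwise a new one
         * is requested from the OST side.
         */
        rc = cl_lock_request(env, io, lock);
        if (rc != 0)
                return rc;

        /* ... perform the I/O covered by this lock ... */

        /* Cancel: each layer's clo_cancel() releases the resources it
         * holds; at the OSC layer the LDLM lock reference taken at
         * clo_enqueue time is dropped, after which the LDLM lock may be
         * canceled once no cl_lock uses it.
         */
        cl_lock_cancel(env, lock);
        return 0;
}

The net effect is that caching lives only at the LDLM layer, so the clio layers no longer maintain a cl_lock cache of their own.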
Diffstat (limited to 'drivers/staging/lustre/lustre/ldlm/ldlm_request.c')
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_request.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index d5968e01edd8..42925ac3331c 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -347,7 +347,6 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
         struct ldlm_lock *lock;
         struct ldlm_reply *reply;
         int cleanup_phase = 1;
-        int size = 0;

         lock = ldlm_handle2lock(lockh);
         /* ldlm_cli_enqueue is holding a reference on this lock. */
@@ -375,8 +374,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                 goto cleanup;
         }

-        if (lvb_len != 0) {
-                LASSERT(lvb);
+        if (lvb_len > 0) {
+                int size = 0;

                 size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
                                             RCL_SERVER);
@@ -390,12 +389,13 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                         rc = -EINVAL;
                         goto cleanup;
                 }
+                lvb_len = size;
         }

         if (rc == ELDLM_LOCK_ABORTED) {
-                if (lvb_len != 0)
+                if (lvb_len > 0 && lvb)
                         rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
-                                           lvb, size);
+                                           lvb, lvb_len);
                 if (rc == 0)
                         rc = ELDLM_LOCK_ABORTED;
                 goto cleanup;
@@ -489,7 +489,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
         /* If the lock has already been granted by a completion AST, don't
          * clobber the LVB with an older one.
          */
-        if (lvb_len != 0) {
+        if (lvb_len > 0) {
                 /* We must lock or a racing completion might update lvb without
                  * letting us know and we'll clobber the correct value.
                  * Cannot unlock after the check either, as that still leaves
@@ -498,7 +498,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                 lock_res_and_lock(lock);
                 if (lock->l_req_mode != lock->l_granted_mode)
                         rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
-                                           lock->l_lvb_data, size);
+                                           lock->l_lvb_data, lvb_len);
                 unlock_res_and_lock(lock);
                 if (rc < 0) {
                         cleanup_phase = 1;
@@ -518,7 +518,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                 }
         }

-        if (lvb_len && lvb) {
+        if (lvb_len > 0 && lvb) {
                 /* Copy the LVB here, and not earlier, because the completion
                  * AST (if any) can override what we got in the reply
                  */