author     Alexander Aring <aahringo@redhat.com>    2022-08-15 15:43:24 -0400
committer  David Teigland <teigland@redhat.com>     2022-08-23 14:54:04 -0500
commit     7a3de7324c2b1299a4f595bb6aa503c878ad7d75 (patch)
tree       bcdab8f6ae49a0352c1220a635a8654cb0173298
parent     fs: dlm: change ls_clear_proc_locks to spinlock (diff)
fs: dlm: trace user space callbacks
This patch adds trace callbacks for user locks. Unfortunately, user locks are handled differently from kernel locks in some cases. User locks never go through the dlm_lock()/dlm_unlock() kernel API; instead they use the next lower-level internal dlm API. Adding these trace calls at the user API entry points makes lock handling for user locks visible to the dlm trace system as well.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
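An illustrative sketch, not part of the patch: any tracepoint probe attached to dlm_lock_end now has to accept the extra kernel_lock argument added below. The probe and init function names here are hypothetical, and the sketch assumes it is compiled somewhere the DLM trace header and internal structures are reachable (for example alongside fs/dlm), since the dlm tracepoints are not exported to arbitrary modules.

    #include <linux/init.h>
    #include <linux/printk.h>
    #include <trace/events/dlm.h>	/* declares the dlm tracepoints */

    /*
     * Hypothetical probe for the updated dlm_lock_end tracepoint.  The
     * prototype mirrors TP_PROTO(): the tracepoint's private data pointer
     * comes first and the new kernel_lock flag comes last.
     */
    static void dlm_lock_end_probe(void *data, struct dlm_ls *ls,
    				   struct dlm_lkb *lkb, void *name,
    				   unsigned int namelen, int mode,
    				   __u32 flags, int error, bool kernel_lock)
    {
    	pr_debug("dlm: %s lock end, error %d\n",
    		 kernel_lock ? "kernel" : "user", error);
    }

    /* Attach the probe; TRACE_EVENT() generates register_trace_dlm_lock_end(). */
    static int __init dlm_lock_end_probe_init(void)
    {
    	return register_trace_dlm_lock_end(dlm_lock_end_probe, NULL);
    }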
-rw-r--r--	fs/dlm/lock.c	24
-rw-r--r--	fs/dlm/user.c	7
-rw-r--r--	include/trace/events/dlm.h	22
3 files changed, 38 insertions(+), 15 deletions(-)
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 65a7a0631ec8..cef25f8ac82e 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -3466,7 +3466,7 @@ int dlm_lock(dlm_lockspace_t *lockspace,
if (error == -EINPROGRESS)
error = 0;
out_put:
- trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error);
+ trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, true);
if (convert || error)
__put_lkb(ls, lkb);
@@ -5842,13 +5842,15 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
goto out;
}
+ trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);
+
if (flags & DLM_LKF_VALBLK) {
ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
if (!ua->lksb.sb_lvbptr) {
kfree(ua);
__put_lkb(ls, lkb);
error = -ENOMEM;
- goto out;
+ goto out_trace_end;
}
}
#ifdef CONFIG_DLM_DEPRECATED_API
@@ -5863,7 +5865,7 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
ua->lksb.sb_lvbptr = NULL;
kfree(ua);
__put_lkb(ls, lkb);
- goto out;
+ goto out_trace_end;
}
/* After ua is attached to lkb it will be freed by dlm_free_lkb().
@@ -5883,7 +5885,7 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
fallthrough;
default:
__put_lkb(ls, lkb);
- goto out;
+ goto out_trace_end;
}
/* add this new lkb to the per-process list of locks */
@@ -5891,6 +5893,8 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
hold_lkb(lkb);
list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
spin_unlock(&ua->proc->locks_spin);
+ out_trace_end:
+ trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false);
out:
dlm_unlock_recovery(ls);
return error;
@@ -5916,6 +5920,8 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error)
goto out;
+ trace_dlm_lock_start(ls, lkb, NULL, 0, mode, flags);
+
/* user can change the params on its lock when it converts it, or
add an lvb that didn't exist before */
@@ -5953,6 +5959,7 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
error = 0;
out_put:
+ trace_dlm_lock_end(ls, lkb, NULL, 0, mode, flags, error, false);
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
@@ -6045,6 +6052,8 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error)
goto out;
+ trace_dlm_unlock_start(ls, lkb, flags);
+
ua = lkb->lkb_ua;
if (lvb_in && ua->lksb.sb_lvbptr)
@@ -6073,6 +6082,7 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
spin_unlock(&ua->proc->locks_spin);
out_put:
+ trace_dlm_unlock_end(ls, lkb, flags, error);
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
@@ -6094,6 +6104,8 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error)
goto out;
+ trace_dlm_unlock_start(ls, lkb, flags);
+
ua = lkb->lkb_ua;
if (ua_tmp->castparam)
ua->castparam = ua_tmp->castparam;
@@ -6111,6 +6123,7 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error == -EBUSY)
error = 0;
out_put:
+ trace_dlm_unlock_end(ls, lkb, flags, error);
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
@@ -6132,6 +6145,8 @@ int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
if (error)
goto out;
+ trace_dlm_unlock_start(ls, lkb, flags);
+
ua = lkb->lkb_ua;
error = set_unlock_args(flags, ua, &args);
@@ -6160,6 +6175,7 @@ int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
if (error == -EBUSY)
error = 0;
out_put:
+ trace_dlm_unlock_end(ls, lkb, flags, error);
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index df6215c73239..ca27f276a3f5 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -16,6 +16,8 @@
#include <linux/slab.h>
#include <linux/sched/signal.h>
+#include <trace/events/dlm.h>
+
#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
@@ -882,7 +884,9 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
goto try_another;
}
- if (cb.flags & DLM_CB_CAST) {
+ if (cb.flags & DLM_CB_BAST) {
+ trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb.mode);
+ } else if (cb.flags & DLM_CB_CAST) {
new_mode = cb.mode;
if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
@@ -891,6 +895,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
lkb->lkb_lksb->sb_status = cb.sb_status;
lkb->lkb_lksb->sb_flags = cb.sb_flags;
+ trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
}
rv = copy_result_to_user(lkb->lkb_ua,
diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h
index bad21222130e..18575206295f 100644
--- a/include/trace/events/dlm.h
+++ b/include/trace/events/dlm.h
@@ -92,9 +92,10 @@ TRACE_EVENT(dlm_lock_start,
TRACE_EVENT(dlm_lock_end,
TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, void *name,
- unsigned int namelen, int mode, __u32 flags, int error),
+ unsigned int namelen, int mode, __u32 flags, int error,
+ bool kernel_lock),
- TP_ARGS(ls, lkb, name, namelen, mode, flags, error),
+ TP_ARGS(ls, lkb, name, namelen, mode, flags, error, kernel_lock),
TP_STRUCT__entry(
__field(__u32, ls_id)
@@ -113,6 +114,7 @@ TRACE_EVENT(dlm_lock_end,
__entry->lkb_id = lkb->lkb_id;
__entry->mode = mode;
__entry->flags = flags;
+ __entry->error = error;
r = lkb->lkb_resource;
if (r)
@@ -122,14 +124,14 @@ TRACE_EVENT(dlm_lock_end,
memcpy(__get_dynamic_array(res_name), name,
__get_dynamic_array_len(res_name));
- /* return value will be zeroed in those cases by dlm_lock()
- * we do it here again to not introduce more overhead if
- * trace isn't running and error reflects the return value.
- */
- if (error == -EAGAIN || error == -EDEADLK)
- __entry->error = 0;
- else
- __entry->error = error;
+ if (kernel_lock) {
+ /* return value will be zeroed in those cases by dlm_lock()
+ * we do it here again to not introduce more overhead if
+ * trace isn't running and error reflects the return value.
+ */
+ if (error == -EAGAIN || error == -EDEADLK)
+ __entry->error = 0;
+ }
),