if (error == -EINPROGRESS)
error = 0;
out_put:
- trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error);
+ trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, true);
if (convert || error)
__put_lkb(ls, lkb);
goto out;
}
+ trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);
+
if (flags & DLM_LKF_VALBLK) {
ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
if (!ua->lksb.sb_lvbptr) {
kfree(ua);
__put_lkb(ls, lkb);
error = -ENOMEM;
- goto out;
+ goto out_trace_end;
}
}
#ifdef CONFIG_DLM_DEPRECATED_API
ua->lksb.sb_lvbptr = NULL;
kfree(ua);
__put_lkb(ls, lkb);
- goto out;
+ goto out_trace_end;
}
/* After ua is attached to lkb it will be freed by dlm_free_lkb().
fallthrough;
default:
__put_lkb(ls, lkb);
- goto out;
+ goto out_trace_end;
}
/* add this new lkb to the per-process list of locks */
hold_lkb(lkb);
list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
spin_unlock(&ua->proc->locks_spin);
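+ /* success falls through and the error paths above jump here, so every
+  * trace_dlm_lock_start() in this function is paired with a lock_end event
+  */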
+ out_trace_end:
+ trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false);
out:
dlm_unlock_recovery(ls);
return error;
if (error)
goto out;
+ trace_dlm_lock_start(ls, lkb, NULL, 0, mode, flags);
+
/* user can change the params on its lock when it converts it, or
add an lvb that didn't exist before */
if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
error = 0;
out_put:
+ trace_dlm_lock_end(ls, lkb, NULL, 0, mode, flags, error, false);
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
if (error)
goto out;
+ trace_dlm_unlock_start(ls, lkb, flags);
+
ua = lkb->lkb_ua;
if (lvb_in && ua->lksb.sb_lvbptr)
list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
spin_unlock(&ua->proc->locks_spin);
out_put:
+ trace_dlm_unlock_end(ls, lkb, flags, error);
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
if (error)
goto out;
+ trace_dlm_unlock_start(ls, lkb, flags);
+
ua = lkb->lkb_ua;
if (ua_tmp->castparam)
ua->castparam = ua_tmp->castparam;
if (error == -EBUSY)
error = 0;
out_put:
+ trace_dlm_unlock_end(ls, lkb, flags, error);
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
if (error)
goto out;
+ trace_dlm_unlock_start(ls, lkb, flags);
+
ua = lkb->lkb_ua;
error = set_unlock_args(flags, ua, &args);
if (error == -EBUSY)
error = 0;
out_put:
+ trace_dlm_unlock_end(ls, lkb, flags, error);
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
#include <linux/slab.h>
#include <linux/sched/signal.h>
+#include <trace/events/dlm.h>
+
#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
goto try_another;
}
- if (cb.flags & DLM_CB_CAST) {
+ if (cb.flags & DLM_CB_BAST) {
+ trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb.mode);
+ } else if (cb.flags & DLM_CB_CAST) {
new_mode = cb.mode;
if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
lkb->lkb_lksb->sb_status = cb.sb_status;
lkb->lkb_lksb->sb_flags = cb.sb_flags;
+ trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
}
rv = copy_result_to_user(lkb->lkb_ua,
TRACE_EVENT(dlm_lock_end,
TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, void *name,
- unsigned int namelen, int mode, __u32 flags, int error),
+ unsigned int namelen, int mode, __u32 flags, int error,
+ bool kernel_lock),
- TP_ARGS(ls, lkb, name, namelen, mode, flags, error),
+ TP_ARGS(ls, lkb, name, namelen, mode, flags, error, kernel_lock),
TP_STRUCT__entry(
__field(__u32, ls_id)
__entry->lkb_id = lkb->lkb_id;
__entry->mode = mode;
__entry->flags = flags;
+ __entry->error = error;
r = lkb->lkb_resource;
if (r)
memcpy(__get_dynamic_array(res_name), name,
__get_dynamic_array_len(res_name));
- /* return value will be zeroed in those cases by dlm_lock()
- * we do it here again to not introduce more overhead if
- * trace isn't running and error reflects the return value.
- */
- if (error == -EAGAIN || error == -EDEADLK)
- __entry->error = 0;
- else
- __entry->error = error;
+ if (kernel_lock) {
+ /* dlm_lock() zeroes the return value in these cases. Repeat the
+ * zeroing here so that no extra overhead is added when the
+ * tracepoint is not enabled and the recorded error still reflects
+ * the actual return value.
+ */
+ if (error == -EAGAIN || error == -EDEADLK)
+ __entry->error = 0;
+ }
),