{
unsigned long flags;
+ trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
if (unlikely(current->lockdep_recursion)) {
/* XXX allow trylock from NMI ?!? */
if (lockdep_nmi() && !trylock) {
check_flags(flags);
current->lockdep_recursion++;
- trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
lockdep_recursion_finish();
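The hunk above hoists the lock_acquire tracepoint ahead of the lockdep_recursion check, so the event is emitted even when lockdep itself declines to do any checking (for instance while recursing, or in the NMI path shown). A minimal userspace sketch of that ordering, with hypothetical names (emit_trace, lock_acquire_sketch) standing in for the kernel pieces, not the kernel code itself:

	#include <stdio.h>

	static int lockdep_recursion;			/* stand-in for current->lockdep_recursion */

	static void emit_trace(const char *event)	/* stand-in for trace_lock_acquire() */
	{
		printf("trace: %s\n", event);
	}

	static void lock_acquire_sketch(void)
	{
		emit_trace("lock_acquire");		/* fires unconditionally, as after this change */

		if (lockdep_recursion)			/* guard that previously hid the tracepoint */
			return;

		lockdep_recursion++;
		/* ... the real checking work (__lock_acquire) would run here ... */
		lockdep_recursion--;
	}

	int main(void)
	{
		lock_acquire_sketch();			/* checked and traced */

		lockdep_recursion = 1;
		lock_acquire_sketch();			/* checking skipped, but still traced */
		return 0;
	}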
{
unsigned long flags;
+ trace_lock_release(lock, ip);
+
if (unlikely(current->lockdep_recursion))
return;
raw_local_irq_save(flags);
check_flags(flags);
+
current->lockdep_recursion++;
- trace_lock_release(lock, ip);
if (__lock_release(lock, ip))
check_chain_key(current);
lockdep_recursion_finish();
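Assembled from the hunk above, the release-side wrapper now looks roughly as follows; the function signature and the trailing raw_local_irq_restore() are not shown in the diff and are assumed here:

	void lock_release(struct lockdep_map *lock, unsigned long ip)	/* signature assumed */
	{
		unsigned long flags;

		trace_lock_release(lock, ip);	/* now emitted before the recursion check */

		if (unlikely(current->lockdep_recursion))
			return;			/* early return no longer swallows the event */

		raw_local_irq_save(flags);
		check_flags(flags);

		current->lockdep_recursion++;
		if (__lock_release(lock, ip))
			check_chain_key(current);
		lockdep_recursion_finish();
		raw_local_irq_restore(flags);	/* assumed; below the lines shown in the hunk */
	}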
hlock->holdtime_stamp = now;
}
- trace_lock_acquired(lock, ip);
-
stats = get_lock_stats(hlock_class(hlock));
if (waittime) {
if (hlock->read)
{
unsigned long flags;
+ trace_lock_contended(lock, ip);
+
if (unlikely(!lock_stat || !debug_locks))
return;
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion++;
- trace_lock_contended(lock, ip);
__lock_contended(lock, ip);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
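The same assembly for lock_contended(), with the hoisted tracepoint matching the call removed lower in the hunk; the signature is assumed and any context lines the diff does not show are elided:

	void lock_contended(struct lockdep_map *lock, unsigned long ip)	/* signature assumed */
	{
		unsigned long flags;

		trace_lock_contended(lock, ip);	/* hoisted above the early returns */

		if (unlikely(!lock_stat || !debug_locks))
			return;

		/* ... any further early-return checks elided by the diff ... */

		raw_local_irq_save(flags);
		check_flags(flags);
		current->lockdep_recursion++;
		__lock_contended(lock, ip);
		lockdep_recursion_finish();
		raw_local_irq_restore(flags);
	}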
{
unsigned long flags;
+ trace_lock_acquired(lock, ip);
+
if (unlikely(!lock_stat || !debug_locks))
return;