Merge tag 'locking-core-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 12 Oct 2020 20:06:20 +0000 (13:06 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 12 Oct 2020 20:06:20 +0000 (13:06 -0700)
Pull locking updates from Ingo Molnar:
 "These are the locking updates for v5.10:

   - Add deadlock detection for recursive read-locks.

     The rationale is outlined in commit 224ec489d3cd ("lockdep/
     Documention: Recursive read lock detection reasoning")

     The main deadlock pattern we want to detect is (see the C sketch after
     this summary):

           TASK A:                 TASK B:

           read_lock(X);
                                   write_lock(X);
           read_lock_2(X);

   - Add "latch sequence counters" (seqcount_latch_t):

     A sequence counter variant where the counter even/odd value is used
     to switch between two copies of protected data. This allows the
     read path, typically NMIs, to safely interrupt the write side
     critical section.

     We utilize this new variant for sched-clock, and to make x86 TSC
     handling safer (the latch read/write pattern is sketched after this
     summary).

   - Other seqlock cleanups, fixes and enhancements

   - KCSAN updates

   - LKMM updates

   - Misc updates, cleanups and fixes"
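
Below, the recursive-read deadlock pattern from the first item is written
out as kernel code. This is a minimal sketch, not taken from this merge:
x_lock, task_a() and task_b() are hypothetical names, and the hang assumes
a fair (queued) rwlock, where task A's second read_lock() queues behind
task B's pending write_lock(), which in turn waits for task A's first
read_lock() to be released.

      #include <linux/spinlock.h>

      static DEFINE_RWLOCK(x_lock);          /* hypothetical shared lock "X" */

      static void task_a(void)               /* TASK A */
      {
              read_lock(&x_lock);            /* read_lock(X) */
              /* ... */
              read_lock(&x_lock);            /* read_lock_2(X): queues behind
                                              * B's pending write_lock() and
                                              * never returns */
              /* ... */
              read_unlock(&x_lock);
              read_unlock(&x_lock);
      }

      static void task_b(void)               /* TASK B */
      {
              write_lock(&x_lock);           /* write_lock(X): waits for A's
                                              * first read_lock() to drop */
              /* ... */
              write_unlock(&x_lock);
      }

With the recursive read-lock support added in this merge, lockdep can
report this X -> X dependency as a deadlock scenario instead of relying on
hitting the actual hang.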
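
The latch read/write pattern behind seqcount_latch_t, described in the
second item above and used by the timekeeping conversion in the diff
further down, looks roughly like this. A minimal sketch, assuming the
seqcount_latch_t API from this series (SEQCNT_LATCH_ZERO(),
raw_write_seqcount_latch(), raw_read_seqcount_latch(),
read_seqcount_latch_retry()); the struct and helpers (latched_data,
latch_write(), latch_read()) are made-up names, and writers are assumed
to be serialized externally (e.g. by a spinlock).

      #include <linux/seqlock.h>
      #include <linux/types.h>

      struct latched_data {
              seqcount_latch_t seq;
              u64              copy[2];       /* two copies of the data */
      };

      static struct latched_data ld = {
              .seq = SEQCNT_LATCH_ZERO(ld.seq),
      };

      /* Writer: always update the copy readers are NOT directed to. */
      static void latch_write(u64 val)
      {
              raw_write_seqcount_latch(&ld.seq);  /* odd: readers use copy[1] */
              ld.copy[0] = val;
              raw_write_seqcount_latch(&ld.seq);  /* even: readers use copy[0] */
              ld.copy[1] = val;
      }

      /* Reader: lockless and NMI-safe, may interrupt latch_write() anywhere. */
      static u64 latch_read(void)
      {
              unsigned int seq;
              u64 val;

              do {
                      seq = raw_read_seqcount_latch(&ld.seq);
                      val = ld.copy[seq & 0x01];
              } while (read_seqcount_latch_retry(&ld.seq, seq));

              return val;
      }

The timekeeping fast paths in the diff below follow the same shape: the
reader selects tkf->base + (seq & 0x01) and retries via
read_seqcount_latch_retry().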

* tag 'locking-core-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
  lockdep: Revert "lockdep: Use raw_cpu_*() for per-cpu variables"
  lockdep: Fix lockdep recursion
  lockdep: Fix usage_traceoverflow
  locking/atomics: Check atomic-arch-fallback.h too
  locking/seqlock: Tweak DEFINE_SEQLOCK() kernel doc
  lockdep: Optimize the memory usage of circular queue
  seqlock: Unbreak lockdep
  seqlock: PREEMPT_RT: Do not starve seqlock_t writers
  seqlock: seqcount_LOCKNAME_t: Introduce PREEMPT_RT support
  seqlock: seqcount_t: Implement all read APIs as statement expressions
  seqlock: Use unique prefix for seqcount_t property accessors
  seqlock: seqcount_LOCKNAME_t: Standardize naming convention
  seqlock: seqcount latch APIs: Only allow seqcount_latch_t
  rbtree_latch: Use seqcount_latch_t
  x86/tsc: Use seqcount_latch_t
  timekeeping: Use seqcount_latch_t
  time/sched_clock: Use seqcount_latch_t
  seqlock: Introduce seqcount_latch_t
  mm/swap: Do not abuse the seqcount_t latching API
  time/sched_clock: Use raw_read_seqcount_latch() during suspend
  ...

kernel/time/timekeeping.c
tools/objtool/check.c

diff --cc kernel/time/timekeeping.c
@@@ -85,33 -80,21 +85,33 @@@ static struct clocksource dummy_clock 
        .read = dummy_clock_read,
  };
  
 +/*
 + * Boot time initialization which allows local_clock() to be utilized
 + * during early boot when clocksources are not available. local_clock()
 + * returns nanoseconds already so no conversion is required, hence mult=1
 + * and shift=0. When the first proper clocksource is installed then
 + * the fast time keepers are updated with the correct values.
 + */
 +#define FAST_TK_INIT                                          \
 +      {                                                       \
 +              .clock          = &dummy_clock,                 \
 +              .mask           = CLOCKSOURCE_MASK(64),         \
 +              .mult           = 1,                            \
 +              .shift          = 0,                            \
 +      }
 +
  static struct tk_fast tk_fast_mono ____cacheline_aligned = {
-       .seq     = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_mono.seq, &timekeeper_lock),
+       .seq     = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
 -      .base[0] = { .clock = &dummy_clock, },
 -      .base[1] = { .clock = &dummy_clock, },
 +      .base[0] = FAST_TK_INIT,
 +      .base[1] = FAST_TK_INIT,
  };
  
  static struct tk_fast tk_fast_raw  ____cacheline_aligned = {
-       .seq     = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_raw.seq, &timekeeper_lock),
+       .seq     = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
 -      .base[0] = { .clock = &dummy_clock, },
 -      .base[1] = { .clock = &dummy_clock, },
 +      .base[0] = FAST_TK_INIT,
 +      .base[1] = FAST_TK_INIT,
  };
  
 -/* flag for if timekeeping is suspended */
 -int __read_mostly timekeeping_suspended;
 -
  static inline void tk_normalize_xtime(struct timekeeper *tk)
  {
        while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
@@@ -542,17 -526,16 +542,17 @@@ static __always_inline u64 __ktime_get_
        do {
                seq = raw_read_seqcount_latch(&tkf->seq);
                tkr = tkf->base + (seq & 0x01);
 -              now = ktime_to_ns(tkr->base_real);
 +              basem = ktime_to_ns(tkr->base);
 +              baser = ktime_to_ns(tkr->base_real);
  
 -              now += timekeeping_delta_to_ns(tkr,
 -                              clocksource_delta(
 -                                      tk_clock_read(tkr),
 -                                      tkr->cycle_last,
 -                                      tkr->mask));
 +              delta = timekeeping_delta_to_ns(tkr,
 +                              clocksource_delta(tk_clock_read(tkr),
 +                              tkr->cycle_last, tkr->mask));
-       } while (read_seqcount_retry(&tkf->seq, seq));
+       } while (read_seqcount_latch_retry(&tkf->seq, seq));
  
 -      return now;
 +      if (mono)
 +              *mono = basem + delta;
 +      return baser + delta;
  }
  
  /**
tools/objtool/check.c: Simple merge