Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author  Linus Torvalds <torvalds@linux-foundation.org>
Sat, 23 Feb 2013 03:25:09 +0000 (19:25 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
Sat, 23 Feb 2013 03:25:09 +0000 (19:25 -0800)
Pull core locking changes from Ingo Molnar:
 "The biggest change is the rwsem lock-steal improvements, both to the
  assembly optimized and the spinlock based variants.

  The other notable change is the cleanup of the seqlock implementation
  to be based on the seqcount infrastructure.

  The rest is assorted smaller debuggability, cleanup and continued -rt
  locking changes."
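
On the rwsem lock-stealing mentioned above: conceptually, a writer that finds
the semaphore momentarily free takes it with one atomic transition instead of
parking behind already-queued waiters. The sketch below is a minimal userspace
model of that fast path, using an invented sketch_rwsem type and C11 atomics
rather than the kernel's actual rwsem code:

/*
 * Minimal sketch of writer lock-stealing (simplified model, not the kernel
 * implementation): the writer's fast path is one compare-and-swap that
 * succeeds only if nobody currently holds the lock.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RWSEM_FREE        0     /* no readers, no writer */
#define RWSEM_WRITER_HELD (-1)  /* a writer owns the lock */

struct sketch_rwsem {
	atomic_int count;       /* >0: reader count, 0: free, -1: writer */
};

/* Writer fast path: steal the lock if it is free right now, even if
 * other tasks are already queued waiting for it. */
static bool writer_try_steal(struct sketch_rwsem *sem)
{
	int expected = RWSEM_FREE;

	return atomic_compare_exchange_strong(&sem->count, &expected,
					      RWSEM_WRITER_HELD);
}

int main(void)
{
	struct sketch_rwsem sem = { .count = RWSEM_FREE };

	printf("first steal:  %s\n", writer_try_steal(&sem) ? "ok" : "blocked");
	printf("second steal: %s\n", writer_try_steal(&sem) ? "ok" : "blocked");
	return 0;
}

When the steal fails, the real code still queues and sleeps; the win is that a
free lock is never left idle merely because the wait list is non-empty.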

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  rwsem-spinlock: Implement writer lock-stealing for better scalability
  futex: Revert "futex: Mark get_robust_list as deprecated"
  generic: Use raw local irq variant for generic cmpxchg
  lockdep: Selftest: convert spinlock to raw spinlock
  seqlock: Use seqcount infrastructure
  seqlock: Remove unused functions
  ntp: Make ntp_lock raw
  intel_idle: Convert i7300_idle_lock to raw_spinlock
  locking: Various static lock initializer fixes
  lockdep: Print more info when MAX_LOCK_DEPTH is exceeded
  rwsem: Implement writer lock-stealing for better scalability
  lockdep: Silence warning if CONFIG_LOCKDEP isn't set
  watchdog: Use local_clock for get_timestamp()
  lockdep: Rename print_unlock_inbalance_bug() to print_unlock_imbalance_bug()
  locking/stat: Fix a typo
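
On the seqlock-over-seqcount rework in the list above: the read/retry protocol
itself is unchanged. The sketch below is a simplified, single-writer userspace
model of that protocol (plain C11 atomics, not the kernel's <linux/seqlock.h>):
the writer makes the sequence odd while it updates, and readers retry if the
sequence was odd or changed under them.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;                /* even: stable, odd: writer active */
static atomic_int shared_a, shared_b;  /* data guarded by the sequence     */

/* Reader entry: wait until no write is in progress, return the sequence. */
static unsigned read_seqbegin_sketch(void)
{
	unsigned s;

	while ((s = atomic_load(&seq)) & 1)
		;                          /* writer active, spin */
	return s;
}

/* Reader exit: non-zero means the snapshot raced with a writer, retry. */
static int read_seqretry_sketch(unsigned start)
{
	return atomic_load(&seq) != start;
}

/* Single writer: bump to odd, update, bump back to even. */
static void write_pair(int a, int b)
{
	atomic_fetch_add(&seq, 1);
	atomic_store(&shared_a, a);
	atomic_store(&shared_b, b);
	atomic_fetch_add(&seq, 1);
}

int main(void)
{
	unsigned s;
	int a, b;

	write_pair(1, 2);
	do {
		s = read_seqbegin_sketch();
		a = atomic_load(&shared_a);
		b = atomic_load(&shared_b);
	} while (read_seqretry_sketch(s));

	printf("a=%d b=%d\n", a, b);
	return 0;
}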

include/linux/lockdep.h
kernel/futex.c
kernel/time/ntp.c
kernel/watchdog.c

diff --combined include/linux/lockdep.h
@@@ -359,9 -359,7 +359,9 @@@ extern void lockdep_trace_alloc(gfp_t m
  
  #define lockdep_depth(tsk)    (debug_locks ? (tsk)->lockdep_depth : 0)
  
 -#define lockdep_assert_held(l)        WARN_ON(debug_locks && !lockdep_is_held(l))
 +#define lockdep_assert_held(l)        do {                            \
 +              WARN_ON(debug_locks && !lockdep_is_held(l));    \
 +      } while (0)
  
  #define lockdep_recursing(tsk)        ((tsk)->lockdep_recursion)
  
@@@ -412,7 -410,7 +412,7 @@@ struct lock_class_key { }
  
  #define lockdep_depth(tsk)    (0)
  
- #define lockdep_assert_held(l)                        do { } while (0)
+ #define lockdep_assert_held(l)                        do { (void)(l); } while (0)
  
  #define lockdep_recursing(tsk)                        (0)
  
diff --combined kernel/futex.c
@@@ -60,7 -60,6 +60,7 @@@
  #include <linux/pid.h>
  #include <linux/nsproxy.h>
  #include <linux/ptrace.h>
 +#include <linux/sched/rt.h>
  
  #include <asm/futex.h>
  
@@@ -2472,8 -2471,6 +2472,6 @@@ SYSCALL_DEFINE3(get_robust_list, int, p
        if (!futex_cmpxchg_enabled)
                return -ENOSYS;
  
-       WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
        rcu_read_lock();
  
        ret = -ESRCH;
diff --combined kernel/time/ntp.c
@@@ -15,7 -15,6 +15,7 @@@
  #include <linux/time.h>
  #include <linux/mm.h>
  #include <linux/module.h>
 +#include <linux/rtc.h>
  
  #include "tick-internal.h"
  
@@@ -23,7 -22,7 +23,7 @@@
   * NTP timekeeping variables:
   */
  
- DEFINE_SPINLOCK(ntp_lock);
+ DEFINE_RAW_SPINLOCK(ntp_lock);
  
  
  /* USER_HZ period (usecs): */
@@@ -348,7 -347,7 +348,7 @@@ void ntp_clear(void
  {
        unsigned long flags;
  
-       spin_lock_irqsave(&ntp_lock, flags);
+       raw_spin_lock_irqsave(&ntp_lock, flags);
  
        time_adjust     = 0;            /* stop active adjtime() */
        time_status     |= STA_UNSYNC;
  
        /* Clear PPS state variables */
        pps_clear();
-       spin_unlock_irqrestore(&ntp_lock, flags);
+       raw_spin_unlock_irqrestore(&ntp_lock, flags);
  
  }
  
@@@ -372,9 -371,9 +372,9 @@@ u64 ntp_tick_length(void
        unsigned long flags;
        s64 ret;
  
-       spin_lock_irqsave(&ntp_lock, flags);
+       raw_spin_lock_irqsave(&ntp_lock, flags);
        ret = tick_length;
-       spin_unlock_irqrestore(&ntp_lock, flags);
+       raw_spin_unlock_irqrestore(&ntp_lock, flags);
        return ret;
  }
  
@@@ -395,7 -394,7 +395,7 @@@ int second_overflow(unsigned long secs
        int leap = 0;
        unsigned long flags;
  
-       spin_lock_irqsave(&ntp_lock, flags);
+       raw_spin_lock_irqsave(&ntp_lock, flags);
  
        /*
         * Leap second processing. If in leap-insert state at the end of the
        time_adjust = 0;
  
  out:
-       spin_unlock_irqrestore(&ntp_lock, flags);
+       raw_spin_unlock_irqrestore(&ntp_lock, flags);
  
        return leap;
  }
  
 -#ifdef CONFIG_GENERIC_CMOS_UPDATE
 -
 +#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
  static void sync_cmos_clock(struct work_struct *work);
  
  static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
@@@ -510,26 -510,14 +510,26 @@@ static void sync_cmos_clock(struct work
        }
  
        getnstimeofday(&now);
 -      if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
 -              fail = update_persistent_clock(now);
 +      if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) {
 +              struct timespec adjust = now;
 +
 +              fail = -ENODEV;
 +              if (persistent_clock_is_local)
 +                      adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
 +#ifdef CONFIG_GENERIC_CMOS_UPDATE
 +              fail = update_persistent_clock(adjust);
 +#endif
 +#ifdef CONFIG_RTC_SYSTOHC
 +              if (fail == -ENODEV)
 +                      fail = rtc_set_ntp_time(adjust);
 +#endif
 +      }
  
        next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
        if (next.tv_nsec <= 0)
                next.tv_nsec += NSEC_PER_SEC;
  
 -      if (!fail)
 +      if (!fail || fail == -ENODEV)
                next.tv_sec = 659;
        else
                next.tv_sec = 0;
@@@ -672,7 -660,7 +672,7 @@@ int do_adjtimex(struct timex *txc
  
        getnstimeofday(&ts);
  
-       spin_lock_irq(&ntp_lock);
+       raw_spin_lock_irq(&ntp_lock);
  
        if (txc->modes & ADJ_ADJTIME) {
                long save_adjust = time_adjust;
        /* fill PPS status fields */
        pps_fill_timex(txc);
  
-       spin_unlock_irq(&ntp_lock);
+       raw_spin_unlock_irq(&ntp_lock);
  
        txc->time.tv_sec = ts.tv_sec;
        txc->time.tv_usec = ts.tv_nsec;
@@@ -912,7 -900,7 +912,7 @@@ void hardpps(const struct timespec *pha
  
        pts_norm = pps_normalize_ts(*phase_ts);
  
-       spin_lock_irqsave(&ntp_lock, flags);
+       raw_spin_lock_irqsave(&ntp_lock, flags);
  
        /* clear the error bits, they will be set again if needed */
        time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
         * just start the frequency interval */
        if (unlikely(pps_fbase.tv_sec == 0)) {
                pps_fbase = *raw_ts;
-               spin_unlock_irqrestore(&ntp_lock, flags);
+               raw_spin_unlock_irqrestore(&ntp_lock, flags);
                return;
        }
  
                time_status |= STA_PPSJITTER;
                /* restart the frequency calibration interval */
                pps_fbase = *raw_ts;
-               spin_unlock_irqrestore(&ntp_lock, flags);
+               raw_spin_unlock_irqrestore(&ntp_lock, flags);
                pr_err("hardpps: PPSJITTER: bad pulse\n");
                return;
        }
  
        hardpps_update_phase(pts_norm.nsec);
  
-       spin_unlock_irqrestore(&ntp_lock, flags);
+       raw_spin_unlock_irqrestore(&ntp_lock, flags);
  }
  EXPORT_SYMBOL(hardpps);
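
On the ntp_lock conversion above, the usual -rt reasoning applies: a plain
spinlock_t becomes a sleeping lock under PREEMPT_RT, while ntp_lock is taken
from the timer-tick / hard-interrupt path (e.g. via second_overflow()), so it
must stay a truly spinning raw_spinlock_t. A sketch of the pattern with a
hypothetical lock; the locking APIs are the real kernel ones, the surrounding
code is not:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical, for illustration */
static u64 example_state;

/* Safe from hard-IRQ context even on PREEMPT_RT: raw spinlocks never
 * turn into sleeping locks. */
static void example_update_from_irq(u64 val)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	example_state = val;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}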
  
diff --combined kernel/watchdog.c
@@@ -23,7 -23,6 +23,7 @@@
  #include <linux/module.h>
  #include <linux/sysctl.h>
  #include <linux/smpboot.h>
 +#include <linux/sched/rt.h>
  
  #include <asm/irq_regs.h>
  #include <linux/kvm_para.h>
@@@ -113,9 -112,9 +113,9 @@@ static int get_softlockup_thresh(void
   * resolution, and we don't need to waste time with a big divide when
   * 2^30ns == 1.074s.
   */
- static unsigned long get_timestamp(int this_cpu)
+ static unsigned long get_timestamp(void)
  {
-       return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
+       return local_clock() >> 30LL;  /* 2^30 ~= 10^9 */
  }
  
  static void set_sample_period(void)
  /* Commands for resetting the watchdog */
  static void __touch_watchdog(void)
  {
-       int this_cpu = smp_processor_id();
-       __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
+       __this_cpu_write(watchdog_touch_ts, get_timestamp());
  }
  
  void touch_softlockup_watchdog(void)
@@@ -196,7 -193,7 +194,7 @@@ static int is_hardlockup(void
  
  static int is_softlockup(unsigned long touch_ts)
  {
-       unsigned long now = get_timestamp(smp_processor_id());
+       unsigned long now = get_timestamp();
  
        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + get_softlockup_thresh()))
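
The get_timestamp() change above keeps the shift-instead-of-divide trick the
comment describes: since 2^30 ns is about 1.074 s, "ns >> 30" yields an
approximate seconds count without a 64-bit division, which is coarse but
plenty for multi-second softlockup thresholds. A tiny standalone check of the
approximation (ordinary userspace C, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 90 seconds worth of nanoseconds */
	uint64_t ns = 90ULL * 1000000000ULL;

	printf("exact: %llu s, shifted: %llu\n",
	       (unsigned long long)(ns / 1000000000ULL),
	       (unsigned long long)(ns >> 30));	/* prints 90 vs 83 */
	return 0;
}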