rtmutex: Detect changes in the pi lock chain
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a7959e0..cfe2f26 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -121,7 +121,7 @@ static inline void task_group_account_field(struct task_struct *p, int index,
         * is the only cgroup, then nothing else should be necessary.
         *
         */
-       __get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
+       __this_cpu_add(kernel_cpustat.cpustat[index], tmp);
 
        cpuacct_account_field(p, index, tmp);
 }
@@ -326,50 +326,50 @@ out:
  * softirq as those do not count in task exec_runtime any more.
  */
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq)
+                                        struct rq *rq, int ticks)
 {
-       cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+       cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
+       u64 cputime = (__force u64) cputime_one_jiffy;
        u64 *cpustat = kcpustat_this_cpu->cpustat;
 
        if (steal_account_process_tick())
                return;
 
+       cputime *= ticks;
+       scaled *= ticks;
+
        if (irqtime_account_hi_update()) {
-               cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
+               cpustat[CPUTIME_IRQ] += cputime;
        } else if (irqtime_account_si_update()) {
-               cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
+               cpustat[CPUTIME_SOFTIRQ] += cputime;
        } else if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time do not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
-               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                                       CPUTIME_SOFTIRQ);
+               __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
-               account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_user_time(p, cputime, scaled);
        } else if (p == rq->idle) {
-               account_idle_time(cputime_one_jiffy);
+               account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
-               account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_guest_time(p, cputime, scaled);
        } else {
-               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                                       CPUTIME_SYSTEM);
+               __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
        }
 }
 
 static void irqtime_account_idle_ticks(int ticks)
 {
-       int i;
        struct rq *rq = this_rq();
 
-       for (i = 0; i < ticks; i++)
-               irqtime_account_process_tick(current, 0, rq);
+       irqtime_account_process_tick(current, 0, rq, ticks);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 static inline void irqtime_account_idle_ticks(int ticks) {}
 static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq) {}
+                                               struct rq *rq, int nr_ticks) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 /*
@@ -378,11 +378,8 @@ static inline void irqtime_account_process_tick(struct task_struct *p, int user_
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 
 #ifndef __ARCH_HAS_VTIME_TASK_SWITCH
-void vtime_task_switch(struct task_struct *prev)
+void vtime_common_task_switch(struct task_struct *prev)
 {
-       if (!vtime_accounting_enabled())
-               return;
-
        if (is_idle_task(prev))
                vtime_account_idle(prev);
        else
@@ -404,11 +401,8 @@ void vtime_task_switch(struct task_struct *prev)
  * vtime_account().
  */
 #ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_account_irq_enter(struct task_struct *tsk)
+void vtime_common_account_irq_enter(struct task_struct *tsk)
 {
-       if (!vtime_accounting_enabled())
-               return;
-
        if (!in_interrupt()) {
                /*
                 * If we interrupted user, context_tracking_in_user()
@@ -428,7 +422,7 @@ void vtime_account_irq_enter(struct task_struct *tsk)
        }
        vtime_account_system(tsk);
 }
-EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
+EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 
@@ -464,7 +458,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
                return;
 
        if (sched_clock_irqtime) {
-               irqtime_account_process_tick(p, user_tick, rq);
+               irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }
 
@@ -557,16 +551,7 @@ static void cputime_adjust(struct task_cputime *curr,
                           struct cputime *prev,
                           cputime_t *ut, cputime_t *st)
 {
-       cputime_t rtime, stime, utime, total;
-
-       if (vtime_accounting_enabled()) {
-               *ut = curr->utime;
-               *st = curr->stime;
-               return;
-       }
-
-       stime = curr->stime;
-       total = stime + curr->utime;
+       cputime_t rtime, stime, utime;
 
        /*
         * Tick based cputime accounting depend on random scheduling
@@ -588,13 +573,19 @@ static void cputime_adjust(struct task_cputime *curr,
        if (prev->stime + prev->utime >= rtime)
                goto out;
 
-       if (total) {
+       stime = curr->stime;
+       utime = curr->utime;
+
+       if (utime == 0) {
+               stime = rtime;
+       } else if (stime == 0) {
+               utime = rtime;
+       } else {
+               cputime_t total = stime + utime;
+
                stime = scale_stime((__force u64)stime,
                                    (__force u64)rtime, (__force u64)total);
                utime = rtime - stime;
-       } else {
-               stime = rtime;
-               utime = 0;
        }
 
        /*
@@ -664,23 +655,17 @@ static void __vtime_account_system(struct task_struct *tsk)
 
 void vtime_account_system(struct task_struct *tsk)
 {
-       if (!vtime_accounting_enabled())
-               return;
-
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        write_sequnlock(&tsk->vtime_seqlock);
 }
 
-void vtime_account_irq_exit(struct task_struct *tsk)
+void vtime_gen_account_irq_exit(struct task_struct *tsk)
 {
-       if (!vtime_accounting_enabled())
-               return;
-
        write_seqlock(&tsk->vtime_seqlock);
+       __vtime_account_system(tsk);
        if (context_tracking_in_user())
                tsk->vtime_snap_whence = VTIME_USER;
-       __vtime_account_system(tsk);
        write_sequnlock(&tsk->vtime_seqlock);
 }
 
@@ -688,12 +673,8 @@ void vtime_account_user(struct task_struct *tsk)
 {
        cputime_t delta_cpu;
 
-       if (!vtime_accounting_enabled())
-               return;
-
-       delta_cpu = get_vtime_delta(tsk);
-
        write_seqlock(&tsk->vtime_seqlock);
+       delta_cpu = get_vtime_delta(tsk);
        tsk->vtime_snap_whence = VTIME_SYS;
        account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
        write_sequnlock(&tsk->vtime_seqlock);
@@ -701,22 +682,27 @@ void vtime_account_user(struct task_struct *tsk)
 
 void vtime_user_enter(struct task_struct *tsk)
 {
-       if (!vtime_accounting_enabled())
-               return;
-
        write_seqlock(&tsk->vtime_seqlock);
-       tsk->vtime_snap_whence = VTIME_USER;
        __vtime_account_system(tsk);
+       tsk->vtime_snap_whence = VTIME_USER;
        write_sequnlock(&tsk->vtime_seqlock);
 }
 
 void vtime_guest_enter(struct task_struct *tsk)
 {
+       /*
+        * The flags must be updated under the lock with
+        * the vtime_snap flush and update.
+        * That enforces a right ordering and update sequence
+        * synchronization against the reader (task_gtime())
+        * that can thus safely catch up with a tickless delta.
+        */
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        current->flags |= PF_VCPU;
        write_sequnlock(&tsk->vtime_seqlock);
 }
+EXPORT_SYMBOL_GPL(vtime_guest_enter);
 
 void vtime_guest_exit(struct task_struct *tsk)
 {
@@ -725,6 +711,7 @@ void vtime_guest_exit(struct task_struct *tsk)
        current->flags &= ~PF_VCPU;
        write_sequnlock(&tsk->vtime_seqlock);
 }
+EXPORT_SYMBOL_GPL(vtime_guest_exit);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
@@ -733,11 +720,6 @@ void vtime_account_idle(struct task_struct *tsk)
        account_idle_time(delta_cpu);
 }
 
-bool vtime_accounting_enabled(void)
-{
-       return context_tracking_active();
-}
-
 void arch_vtime_task_switch(struct task_struct *prev)
 {
        write_seqlock(&prev->vtime_seqlock);