workqueue: allow work_on_cpu() to be called recursively
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f02c4a4..55f5f0a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2817,6 +2817,19 @@ already_gone:
        return false;
 }
 
+static bool __flush_work(struct work_struct *work)
+{
+       struct wq_barrier barr;
+
+       if (start_flush_work(work, &barr)) {
+               wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+               return true;
+       } else {
+               return false;
+       }
+}
+
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2830,18 +2843,10 @@ already_gone:
  */
 bool flush_work(struct work_struct *work)
 {
-       struct wq_barrier barr;
-
        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);
 
-       if (start_flush_work(work, &barr)) {
-               wait_for_completion(&barr.done);
-               destroy_work_on_stack(&barr.work);
-               return true;
-       } else {
-               return false;
-       }
+       return __flush_work(work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -4756,7 +4761,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
        INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
        schedule_work_on(cpu, &wfc.work);
-       flush_work(&wfc.work);
+
+       /*
+        * The work item is on-stack and can't lead to deadlock through
+        * flushing.  Use __flush_work() to avoid spurious lockdep warnings
+        * when work_on_cpu()s are nested.
+        */
+       __flush_work(&wfc.work);
+
        return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
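
A minimal sketch of the call pattern this change permits (not part of the
patch; the helper names below are hypothetical).  Because work_on_cpu() now
flushes through __flush_work(), which skips the lock_map_acquire()/
lock_map_release() annotation kept in flush_work(), the nested call below no
longer looks like recursive locking to lockdep:

#include <linux/workqueue.h>

/* Hypothetical: read some per-CPU state on a sibling CPU. */
static long query_sibling_cpu(void *arg)
{
        return 0;
}

/* Hypothetical: runs on one CPU, then nests a second work_on_cpu(). */
static long probe_on_cpu(void *arg)
{
        int sibling = (unsigned long)arg;

        /* Nested call: executes query_sibling_cpu() on @sibling. */
        return work_on_cpu(sibling, query_sibling_cpu, NULL);
}

static long do_probe(int cpu, int sibling)
{
        /* Outer call: executes probe_on_cpu() on @cpu. */
        return work_on_cpu(cpu, probe_on_cpu, (void *)(unsigned long)sibling);
}

The on-stack work items are still waited for and destroyed exactly as before;
only the lockdep annotation around the inner flush is omitted.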