Merge branch 'locking/core' into x86/mm, to resolve conflict
author     Ingo Molnar <mingo@kernel.org>
Sat, 6 Mar 2021 12:00:58 +0000 (13:00 +0100)
committer  Ingo Molnar <mingo@kernel.org>
Sat, 6 Mar 2021 12:00:58 +0000 (13:00 +0100)
There's a non-trivial conflict between the parallel TLB flush
framework and the IPI flush debugging code - merge them
manually.

Conflicts:
kernel/smp.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/smp.c
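
For orientation before the diff, here is a simplified, self-contained C sketch of the merged control flow in smp_call_function_many_cond(): the parallel-flush side queues one request per remote CPU and then chooses between a single IPI and a mask IPI, while the IPI debugging side records a sequence event at each step. This is an illustrative sketch only, not the kernel implementation; NR_CPUS, seq_store(), send_single_ipi(), send_mask_ipi(), call_many_cond() and demo_cb() are hypothetical stand-ins for cfd_seq_store(), send_call_function_single_ipi(), arch_send_call_function_ipi_mask() and friends.

/*
 * Illustrative sketch only -- NOT the kernel code. It models the merged
 * control flow: queue a request per remote CPU (recording the debug
 * sequence events added by the IPI-flush debugging work), pick the
 * cheapest IPI strategy, then run the callback locally if asked to.
 * Every type and helper below is a hypothetical stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);

/* Stand-in for the kernel's cfd_seq_store() trace points. */
static void seq_store(const char *event, int src, int dst)
{
	printf("seq: %-6s src=%d dst=%d\n", event, src, dst);
}

/* Stand-ins for the kernel's IPI primitives. */
static void send_single_ipi(int cpu)
{
	printf("single IPI -> cpu %d\n", cpu);
}

static void send_mask_ipi(const bool *mask)
{
	printf("mask IPI ->");
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask[cpu])
			printf(" %d", cpu);
	printf("\n");
}

static void call_many_cond(const bool *mask, smp_call_func_t func, void *info,
			   smp_cond_func_t cond, int this_cpu, bool run_local)
{
	bool ipi_mask[NR_CPUS] = { false };
	int nr_cpus = 0, last_cpu = -1;

	/* Queue a request for every remote CPU that passes the predicate. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu == this_cpu || !mask[cpu])
			continue;
		if (cond && !cond(cpu, info))
			continue;
		seq_store("queue", this_cpu, cpu);
		ipi_mask[cpu] = true;	/* pretend the target's queue was empty */
		nr_cpus++;
		last_cpu = cpu;
		seq_store("ipi", this_cpu, cpu);
	}

	/* Cheapest way to kick the remote CPUs: one IPI or a mask IPI. */
	seq_store("ping", this_cpu, -1);
	if (nr_cpus == 1)
		send_single_ipi(last_cpu);
	else if (nr_cpus > 1)
		send_mask_ipi(ipi_mask);
	seq_store("pinged", this_cpu, -1);

	/* Run the callback on the local CPU if the caller asked for it. */
	if (run_local && (!cond || cond(this_cpu, info)))
		func(info);
}

static void demo_cb(void *info)
{
	printf("callback ran locally (info=%s)\n", (const char *)info);
}

int main(void)
{
	bool mask[NR_CPUS] = { [0] = true, [1] = true, [3] = true, [5] = true };
	static char tag[] = "demo";

	call_many_cond(mask, demo_cb, tag, NULL, /*this_cpu=*/0, /*run_local=*/true);
	return 0;
}

As in the resolved hunk below, the sketch keeps both sides of the conflict: the nr_cpus == 1 single-IPI fast path versus the mask IPI from the parallel TLB flush work, and the ping/pinged sequence bookkeeping around it from the IPI flush debugging code.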

diff --cc kernel/smp.c
index af0d51da84a2246fa1293cfffa92ba3f1840989b,f472ef6239566bbd0e3f62b647efa9f8e54c4871..e210749000068bcb79e5cf6e0a098b52be7e0121
@@@ -657,57 -878,66 +899,67 @@@ static void smp_call_function_many_cond
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
 +      if (cpu < nr_cpu_ids)
 +              run_remote = true;
  
 -      /* No online cpus?  We're done. */
 -      if (cpu >= nr_cpu_ids)
 -              return;
 +      if (run_remote) {
 +              cfd = this_cpu_ptr(&cfd_data);
 +              cpumask_and(cfd->cpumask, mask, cpu_online_mask);
 +              __cpumask_clear_cpu(this_cpu, cfd->cpumask);
  
 -      /* Do we have another CPU which isn't us? */
 -      next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
 -      if (next_cpu == this_cpu)
 -              next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
 -
 -      /* Fastpath: do that cpu by itself. */
 -      if (next_cpu >= nr_cpu_ids) {
 -              if (!cond_func || cond_func(cpu, info))
 -                      smp_call_function_single(cpu, func, info, wait);
 -              return;
 -      }
 +              cpumask_clear(cfd->cpumask_ipi);
 +              for_each_cpu(cpu, cfd->cpumask) {
-                       call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
++                      struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
++                      call_single_data_t *csd = &pcpu->csd;
  
 -      cfd = this_cpu_ptr(&cfd_data);
 +                      if (cond_func && !cond_func(cpu, info))
 +                              continue;
  
 -      cpumask_and(cfd->cpumask, mask, cpu_online_mask);
 -      __cpumask_clear_cpu(this_cpu, cfd->cpumask);
 +                      csd_lock(csd);
 +                      if (wait)
 +                              csd->node.u_flags |= CSD_TYPE_SYNC;
 +                      csd->func = func;
 +                      csd->info = info;
 +#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
 +                      csd->node.src = smp_processor_id();
 +                      csd->node.dst = cpu;
 +#endif
++                      cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
 +                      if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
 +                              __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 +                              nr_cpus++;
 +                              last_cpu = cpu;
 -      /* Some callers race with other cpus changing the passed mask */
 -      if (unlikely(!cpumask_weight(cfd->cpumask)))
 -              return;
++                              cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
++                      } else {
++                              cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
 +                      }
 +              }
  
 -      cpumask_clear(cfd->cpumask_ipi);
 -      for_each_cpu(cpu, cfd->cpumask) {
 -              struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
 -              call_single_data_t *csd = &pcpu->csd;
++              cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);
 -              if (cond_func && !cond_func(cpu, info))
 -                      continue;
 +              /*
 +               * Choose the most efficient way to send an IPI. Note that the
 +               * number of CPUs might be zero due to concurrent changes to the
 +               * provided mask.
 +               */
 +              if (nr_cpus == 1)
 +                      send_call_function_single_ipi(last_cpu);
 +              else if (likely(nr_cpus > 1))
 +                      arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
 -              csd_lock(csd);
 -              if (wait)
 -                      csd->node.u_flags |= CSD_TYPE_SYNC;
 -              csd->func = func;
 -              csd->info = info;
 -#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
 -              csd->node.src = smp_processor_id();
 -              csd->node.dst = cpu;
 -#endif
 -              cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
 -              if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
 -                      __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 -                      cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
 -              } else {
 -                      cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
 -              }
++              cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
        }
  
 -      /* Send a message to all CPUs in the map */
 -      cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu,
 -                    CFD_SEQ_NOCPU, CFD_SEQ_PING);
 -      arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
 -      cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu,
 -                    CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
 +      if (run_local && (!cond_func || cond_func(this_cpu, info))) {
 +              unsigned long flags;
  
 -      if (wait) {
 +              local_irq_save(flags);
 +              func(info);
 +              local_irq_restore(flags);
 +      }
 +
 +      if (run_remote && wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        call_single_data_t *csd;