OSDN Git Service

x86/cpu: Avoid cpuinfo-induced IPIing of idle CPUs
Author: Paul E. McKenney <paulmck@kernel.org>
Thu, 3 Sep 2020 22:23:29 +0000 (15:23 -0700)
Committer: Paul E. McKenney <paulmck@kernel.org>
Sat, 7 Nov 2020 00:59:11 +0000 (16:59 -0800)
Currently, accessing /proc/cpuinfo sends IPIs to idle CPUs in order to
learn their clock frequency.  Which is a bit strange, given that waking
them from idle likely significantly changes their clock frequency.
This commit therefore avoids sending /proc/cpuinfo-induced IPIs to
idle CPUs.

[ paulmck: Also check for idle in arch_freq_prepare_all(). ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <x86@kernel.org>
arch/x86/kernel/cpu/aperfmperf.c
include/linux/rcutiny.h
include/linux/rcutree.h
kernel/rcu/tree.c

index dd3261d..22911de 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/cpufreq.h>
 #include <linux/smp.h>
 #include <linux/sched/isolation.h>
+#include <linux/rcupdate.h>
 
 #include "cpu.h"
 
@@ -93,6 +94,9 @@ unsigned int aperfmperf_get_khz(int cpu)
        if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
                return 0;
 
+       if (rcu_is_idle_cpu(cpu))
+               return 0; /* Idle CPUs are completely uninteresting. */
+
        aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
        return per_cpu(samples.khz, cpu);
 }
@@ -112,6 +116,8 @@ void arch_freq_prepare_all(void)
        for_each_online_cpu(cpu) {
                if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
                        continue;
+               if (rcu_is_idle_cpu(cpu))
+                       continue; /* Idle CPUs are completely uninteresting. */
                if (!aperfmperf_snapshot_cpu(cpu, now, false))
                        wait = true;
        }
index 7c1ecdb..2a97334 100644 (file)
@@ -89,6 +89,8 @@ static inline void rcu_irq_enter_irqson(void) { }
 static inline void rcu_irq_exit(void) { }
 static inline void rcu_irq_exit_preempt(void) { }
 static inline void rcu_irq_exit_check_preempt(void) { }
+#define rcu_is_idle_cpu(cpu) \
+       (is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq())
 static inline void exit_rcu(void) { }
 static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
index 59eb5cd..df578b7 100644 (file)
@@ -50,6 +50,7 @@ void rcu_irq_exit(void);
 void rcu_irq_exit_preempt(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
+bool rcu_is_idle_cpu(int cpu);
 
 #ifdef CONFIG_PROVE_RCU
 void rcu_irq_exit_check_preempt(void);
index 06895ef..1d84c0b 100644 (file)
@@ -341,6 +341,14 @@ static bool rcu_dynticks_in_eqs(int snap)
        return !(snap & RCU_DYNTICK_CTRL_CTR);
 }
 
+/* Return true if the specified CPU is currently idle from an RCU viewpoint.  */
+bool rcu_is_idle_cpu(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
+       return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
+}
+
 /*
  * Return true if the CPU corresponding to the specified rcu_data
  * structure has spent some time in an extended quiescent state since