
Merge branch 'pm-cpufreq'
author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Tue, 22 Dec 2020 16:59:11 +0000 (17:59 +0100)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Tue, 22 Dec 2020 16:59:11 +0000 (17:59 +0100)
* pm-cpufreq:
  cpufreq: intel_pstate: Use most recent guaranteed performance values
  cpufreq: intel_pstate: Implement the ->adjust_perf() callback
  cpufreq: Add special-purpose fast-switching callback for drivers
  cpufreq: schedutil: Add util to struct sg_cpu
  cppc_cpufreq: replace per-cpu data array with a list
  cppc_cpufreq: expose information on frequency domains
  cppc_cpufreq: clarify support for coordination types
  cppc_cpufreq: use policy->cpu as driver of frequency setting
  ACPI: processor: fix NONE coordination for domain mapping failure
  ACPI: processor: Drop duplicate setting of shared_cpu_map

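The centerpiece of this branch is the new ->adjust_perf() fast-switch path: instead of asking a driver for a target frequency, schedutil can hand it a minimum performance hint (derived from the deadline bandwidth), a target performance level (derived from the utilization) and the CPU capacity both values are scaled against. As a rough, hypothetical sketch of the driver side (the hardware helper and the 0..255 range are invented for illustration; intel_pstate is the real user wired up by this series):

    #include <linux/cpufreq.h>
    #include <linux/math64.h>
    #include <linux/minmax.h>

    /* Illustrative stand-in for a real hardware interface. */
    static void example_hw_write_perf(unsigned int cpu, unsigned long perf) { }

    /* min_perf/target_perf are on the 0..capacity scale used by schedutil. */
    static void example_adjust_perf(unsigned int cpu, unsigned long min_perf,
                                    unsigned long target_perf,
                                    unsigned long capacity)
    {
            unsigned long hw_max = 255;     /* assumed highest HW perf level */
            unsigned long perf = div_u64((u64)target_perf * hw_max, capacity);
            unsigned long floor = div_u64((u64)min_perf * hw_max, capacity);

            example_hw_write_perf(cpu, clamp(perf, floor, hw_max));
    }

    static struct cpufreq_driver example_driver = {
            .name           = "example-perf",
            .adjust_perf    = example_adjust_perf,
            /* .init, .verify, .fast_switch, ... omitted */
    };
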
drivers/acpi/cppc_acpi.c
kernel/sched/cpufreq_schedutil.c

diff --combined drivers/acpi/cppc_acpi.c
@@@ -39,7 -39,6 +39,7 @@@
  #include <linux/ktime.h>
  #include <linux/rwsem.h>
  #include <linux/wait.h>
 +#include <linux/topology.h>
  
  #include <acpi/cppc_acpi.h>
  
@@@ -414,109 -413,88 +414,88 @@@ end
        return result;
  }
  
+ bool acpi_cpc_valid(void)
+ {
+       struct cpc_desc *cpc_ptr;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+               if (!cpc_ptr)
+                       return false;
+       }
+
+       return true;
+ }
+ EXPORT_SYMBOL_GPL(acpi_cpc_valid);
+
  /**
-  * acpi_get_psd_map - Map the CPUs in a common freq domain.
-  * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
+  * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
+  * @cpu: Find all CPUs that share a domain with cpu.
+  * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
   *
   *    Return: 0 for success or negative value for err.
   */
- int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
+ int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
  {
-       int count_target;
-       int retval = 0;
-       unsigned int i, j;
-       cpumask_var_t covered_cpus;
-       struct cppc_cpudata *pr, *match_pr;
-       struct acpi_psd_package *pdomain;
-       struct acpi_psd_package *match_pdomain;
        struct cpc_desc *cpc_ptr, *match_cpc_ptr;
-       if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
-               return -ENOMEM;
+       struct acpi_psd_package *match_pdomain;
+       struct acpi_psd_package *pdomain;
+       int count_target, i;
  
        /*
         * Now that we have _PSD data from all CPUs, let's setup P-state
         * domain info.
         */
-       for_each_possible_cpu(i) {
-               if (cpumask_test_cpu(i, covered_cpus))
-                       continue;
-               pr = all_cpu_data[i];
-               cpc_ptr = per_cpu(cpc_desc_ptr, i);
-               if (!cpc_ptr) {
-                       retval = -EFAULT;
-                       goto err_ret;
-               }
+       cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+       if (!cpc_ptr)
+               return -EFAULT;
  
-               pdomain = &(cpc_ptr->domain_info);
-               cpumask_set_cpu(i, pr->shared_cpu_map);
-               cpumask_set_cpu(i, covered_cpus);
-               if (pdomain->num_processors <= 1)
-                       continue;
+       pdomain = &(cpc_ptr->domain_info);
+       cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
+       if (pdomain->num_processors <= 1)
+               return 0;
  
-               /* Validate the Domain info */
-               count_target = pdomain->num_processors;
-               if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
-                       pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-               else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
-                       pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
-               else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
-                       pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
-               for_each_possible_cpu(j) {
-                       if (i == j)
-                               continue;
-                       match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
-                       if (!match_cpc_ptr) {
-                               retval = -EFAULT;
-                               goto err_ret;
-                       }
+       /* Validate the Domain info */
+       count_target = pdomain->num_processors;
+       if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
+               cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+       else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
+               cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
+       else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
+               cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
  
-                       match_pdomain = &(match_cpc_ptr->domain_info);
-                       if (match_pdomain->domain != pdomain->domain)
-                               continue;
+       for_each_possible_cpu(i) {
+               if (i == cpu)
+                       continue;
  
-                       /* Here i and j are in the same domain */
-                       if (match_pdomain->num_processors != count_target) {
-                               retval = -EFAULT;
-                               goto err_ret;
-                       }
+               match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
+               if (!match_cpc_ptr)
+                       goto err_fault;
  
-                       if (pdomain->coord_type != match_pdomain->coord_type) {
-                               retval = -EFAULT;
-                               goto err_ret;
-                       }
+               match_pdomain = &(match_cpc_ptr->domain_info);
+               if (match_pdomain->domain != pdomain->domain)
+                       continue;
  
-                       cpumask_set_cpu(j, covered_cpus);
-                       cpumask_set_cpu(j, pr->shared_cpu_map);
-               }
+               /* Here i and cpu are in the same domain */
+               if (match_pdomain->num_processors != count_target)
+                       goto err_fault;
  
-               for_each_cpu(j, pr->shared_cpu_map) {
-                       if (i == j)
-                               continue;
+               if (pdomain->coord_type != match_pdomain->coord_type)
+                       goto err_fault;
  
-                       match_pr = all_cpu_data[j];
-                       match_pr->shared_type = pr->shared_type;
-                       cpumask_copy(match_pr->shared_cpu_map,
-                                    pr->shared_cpu_map);
-               }
+               cpumask_set_cpu(i, cpu_data->shared_cpu_map);
        }
-       goto out;
  
- err_ret:
-       for_each_possible_cpu(i) {
-               pr = all_cpu_data[i];
+       return 0;
  
-               /* Assume no coordination on any error parsing domain info */
-               cpumask_clear(pr->shared_cpu_map);
-               cpumask_set_cpu(i, pr->shared_cpu_map);
-               pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-       }
- out:
-       free_cpumask_var(covered_cpus);
-       return retval;
+ err_fault:
+       /* Assume no coordination on any error parsing domain info */
+       cpumask_clear(cpu_data->shared_cpu_map);
+       cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
+       cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
+       return -EFAULT;
  }
  EXPORT_SYMBOL_GPL(acpi_get_psd_map);
  
@@@ -689,10 -667,6 +668,10 @@@ static bool is_cppc_supported(int revis
   *    }
   */
  
 +#ifndef init_freq_invariance_cppc
 +static inline void init_freq_invariance_cppc(void) { }
 +#endif
 +
  /**
   * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
   * @pr: Ptr to acpi_processor containing this CPU's logical ID.
@@@ -855,8 -829,6 +834,8 @@@ int acpi_cppc_processor_probe(struct ac
                goto out_free;
        }
  
 +      init_freq_invariance_cppc();
 +
        kfree(output.pointer);
        return 0;
  
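With the reworked interface above, acpi_get_psd_map() no longer walks every CPU up front: a consumer such as cppc_cpufreq validates the _CPC tables once with acpi_cpc_valid() and then builds the sharing mask for one CPU at a time. A sketch of that calling pattern (not the actual cppc_cpufreq code) could look like:

    #include <acpi/cppc_acpi.h>
    #include <linux/cpumask.h>
    #include <linux/slab.h>

    /* Sketch: allocate and fill CPPC data for one policy CPU. */
    static struct cppc_cpudata *example_get_cpu_data(unsigned int cpu)
    {
            struct cppc_cpudata *cpu_data;

            if (!acpi_cpc_valid())  /* every possible CPU must expose _CPC */
                    return NULL;

            cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
            if (!cpu_data)
                    return NULL;

            if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
                    goto free_data;

            /* Fills shared_cpu_map and shared_type for this CPU's domain. */
            if (acpi_get_psd_map(cpu, cpu_data)) {
                    free_cpumask_var(cpu_data->shared_cpu_map);
                    goto free_data;
            }

            return cpu_data;

    free_data:
            kfree(cpu_data);
            return NULL;
    }
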
diff --combined kernel/sched/cpufreq_schedutil.c
@@@ -53,6 -53,7 +53,7 @@@ struct sugov_cpu 
        unsigned int            iowait_boost;
        u64                     last_update;
  
+       unsigned long           util;
        unsigned long           bw_dl;
        unsigned long           max;
  
@@@ -276,16 -277,15 +277,15 @@@ unsigned long schedutil_cpu_util(int cp
        return min(max, util);
  }
  
- static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
+ static void sugov_get_util(struct sugov_cpu *sg_cpu)
  {
        struct rq *rq = cpu_rq(sg_cpu->cpu);
-       unsigned long util = cpu_util_cfs(rq);
        unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
  
        sg_cpu->max = max;
        sg_cpu->bw_dl = cpu_bw_dl(rq);
-       return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
+       sg_cpu->util = schedutil_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
+                                         FREQUENCY_UTIL, NULL);
  }
  
  /**
@@@ -362,8 -362,6 +362,6 @@@ static void sugov_iowait_boost(struct s
   * sugov_iowait_apply() - Apply the IO boost to a CPU.
   * @sg_cpu: the sugov data for the cpu to boost
   * @time: the update time from the caller
-  * @util: the utilization to (eventually) boost
-  * @max: the maximum value the utilization can be boosted to
   *
   * A CPU running a task which woken up after an IO operation can have its
   * utilization boosted to speed up the completion of those IO operations.
   * This mechanism is designed to boost high frequently IO waiting tasks, while
   * being more conservative on tasks which does sporadic IO operations.
   */
- static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
-                                       unsigned long util, unsigned long max)
+ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
  {
        unsigned long boost;
  
        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
-               return util;
+               return;
  
        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
-               return util;
+               return;
  
        if (!sg_cpu->iowait_boost_pending) {
                /*
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
                        sg_cpu->iowait_boost = 0;
-                       return util;
+                       return;
                }
        }
  
        sg_cpu->iowait_boost_pending = false;
  
        /*
-        * @util is already in capacity scale; convert iowait_boost
+        * sg_cpu->util is already in capacity scale; convert iowait_boost
         * into the same scale so we can compare.
         */
-       boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
-       return max(boost, util);
+       boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
+       if (sg_cpu->util < boost)
+               sg_cpu->util = boost;
  }
  
  #ifdef CONFIG_NO_HZ_COMMON
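The conversion in sugov_iowait_apply() above is the only arithmetic in the boost path: iowait_boost lives on the 0..SCHED_CAPACITY_SCALE scale, so it has to be multiplied by the CPU's capacity and shifted back down before it can be compared with sg_cpu->util. A small self-contained demonstration of that rescaling (the numbers are made up for illustration):

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT    10
    #define SCHED_CAPACITY_SCALE    (1UL << SCHED_CAPACITY_SHIFT)

    int main(void)
    {
            unsigned long iowait_boost = SCHED_CAPACITY_SCALE / 2; /* 512 */
            unsigned long max = 768;        /* example CPU capacity */
            unsigned long util = 300;       /* example utilization */

            /* Same conversion as sugov_iowait_apply(): boost -> capacity scale. */
            unsigned long boost = (iowait_boost * max) >> SCHED_CAPACITY_SHIFT;

            if (util < boost)
                    util = boost;

            /* 512 * 768 >> 10 = 384, so util is raised from 300 to 384. */
            printf("boost=%lu util=%lu\n", boost, util);
            return 0;
    }
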
@@@ -434,14 -432,10 +432,10 @@@ static inline void ignore_dl_rate_limit
                sg_policy->limits_changed = true;
  }
  
- static void sugov_update_single(struct update_util_data *hook, u64 time,
-                               unsigned int flags)
+ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+                                             u64 time, unsigned int flags)
  {
-       struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
-       unsigned long util, max;
-       unsigned int next_f;
-       unsigned int cached_freq = sg_policy->cached_raw_freq;
  
        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;
        ignore_dl_rate_limit(sg_cpu, sg_policy);
  
        if (!sugov_should_update_freq(sg_policy, time))
+               return false;
+
+       sugov_get_util(sg_cpu);
+       sugov_iowait_apply(sg_cpu, time);
+
+       return true;
+ }
+
+ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
+                                    unsigned int flags)
+ {
+       struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+       struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+       unsigned int cached_freq = sg_policy->cached_raw_freq;
+       unsigned int next_f;
+
+       if (!sugov_update_single_common(sg_cpu, time, flags))
                return;
  
-       util = sugov_get_util(sg_cpu);
-       max = sg_cpu->max;
-       util = sugov_iowait_apply(sg_cpu, time, util, max);
-       next_f = get_next_freq(sg_policy, util, max);
+       next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
        }
  }
  
+ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
+                                    unsigned int flags)
+ {
+       struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+       unsigned long prev_util = sg_cpu->util;
+
+       /*
+        * Fall back to the "frequency" path if frequency invariance is not
+        * supported, because the direct mapping between the utilization and
+        * the performance levels depends on the frequency invariance.
+        */
+       if (!arch_scale_freq_invariant()) {
+               sugov_update_single_freq(hook, time, flags);
+               return;
+       }
+
+       if (!sugov_update_single_common(sg_cpu, time, flags))
+               return;
+
+       /*
+        * Do not reduce the target performance level if the CPU has not been
+        * idle recently, as the reduction is likely to be premature then.
+        */
+       if (sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
+               sg_cpu->util = prev_util;
+
+       cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
+                                  map_util_perf(sg_cpu->util), sg_cpu->max);
+
+       sg_cpu->sg_policy->last_freq_update_time = time;
+ }
+
  static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
  {
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;
  
-               j_util = sugov_get_util(j_sg_cpu);
+               sugov_get_util(j_sg_cpu);
+               sugov_iowait_apply(j_sg_cpu, time);
+               j_util = j_sg_cpu->util;
                j_max = j_sg_cpu->max;
-               j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
  
                if (j_util * max > j_max * util) {
                        util = j_util;
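The cross-multiplied comparison above selects the CPU with the largest util/max ratio without resorting to integer division: for example, j_util = 300 on a CPU with j_max = 512 beats util = 400 on a CPU with max = 1024, because 300 * 1024 = 307200 exceeds 512 * 400 = 204800, which matches 300/512 ≈ 0.59 versus 400/1024 ≈ 0.39.
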
@@@ -817,6 -858,7 +858,7 @@@ static void sugov_exit(struct cpufreq_p
  static int sugov_start(struct cpufreq_policy *policy)
  {
        struct sugov_policy *sg_policy = policy->governor_data;
+       void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
        unsigned int cpu;
  
        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
                sg_cpu->sg_policy               = sg_policy;
        }
  
+       if (policy_is_shared(policy))
+               uu = sugov_update_shared;
+       else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
+               uu = sugov_update_single_perf;
+       else
+               uu = sugov_update_single_freq;
+
        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
  
-               cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
-                                            policy_is_shared(policy) ?
-                                                       sugov_update_shared :
-                                                       sugov_update_single);
+               cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
        }
        return 0;
  }
@@@ -897,9 -943,16 +943,9 @@@ struct cpufreq_governor *cpufreq_defaul
  cpufreq_governor_init(schedutil_gov);
  
  #ifdef CONFIG_ENERGY_MODEL
 -extern bool sched_energy_update;
 -extern struct mutex sched_energy_mutex;
 -
  static void rebuild_sd_workfn(struct work_struct *work)
  {
 -      mutex_lock(&sched_energy_mutex);
 -      sched_energy_update = true;
 -      rebuild_sched_domains();
 -      sched_energy_update = false;
 -      mutex_unlock(&sched_energy_mutex);
 +      rebuild_sched_domains_energy();
  }
  static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
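
The ENERGY_MODEL hunk on the schedutil side is purely mechanical: the open-coded locking around rebuild_sched_domains() moves behind a scheduler helper, so the work function shrinks to a single call. Judging only from the removed lines, the helper presumably centralizes the same sequence, roughly:

    /*
     * Sketch of what rebuild_sched_domains_energy() is expected to wrap
     * (see the '-' lines above); the real helper lives with the scheduler
     * topology code, not in this file.
     */
    void rebuild_sched_domains_energy(void)
    {
            mutex_lock(&sched_energy_mutex);
            sched_energy_update = true;
            rebuild_sched_domains();
            sched_energy_update = false;
            mutex_unlock(&sched_energy_mutex);
    }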