
ANDROID: cpufreq: times: add /proc/uid_concurrent_{active,policy}_time
Author:     Connor O'Brien <connoro@google.com>
AuthorDate: Sat, 6 Oct 2018 02:20:54 +0000 (19:20 -0700)
Commit:     Connor O'Brien <connoro@google.com>
CommitDate: Mon, 4 Nov 2019 22:00:22 +0000 (22:00 +0000)

In order to model the energy used by the cpu, we need to expose cpu
time information to userspace.  We can use this information to blame
uids for the energy they are using.

This patch adds 2 files:

/proc/uid_concurrent_active_time outputs the time each uid spent
running while 1 to num_possible_cpus CPUs were online and not idle.

For instance, if 4 cores are online and active for 50ms and the uid is
running on one of the cores, the uid should be blamed for 1/4 of the
base energy used by the cpu when 1 or more cores are active.
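
For illustration only (not part of this patch), here is a minimal
userspace-style sketch of that attribution. The proc file reports times
in USER_HZ clock ticks (see cputime_to_clock_t() in the seq_show code
below); the sketch assumes the values have already been converted to
milliseconds, and uid_active_energy_mj() and base_power_mw are
hypothetical names/constants, not anything this patch defines.

#include <stdio.h>

/*
 * Hypothetical helper: apportion "base active" energy for one uid from a
 * parsed /proc/uid_concurrent_active_time row. times_ms[i] is the time (ms)
 * the uid ran while exactly i + 1 CPUs were active; base_power_mw is an
 * assumed constant power draw whenever one or more CPUs are active.
 */
static double uid_active_energy_mj(const unsigned long long *times_ms,
				   int num_cpus, double base_power_mw)
{
	double energy_mj = 0.0;
	int i;

	for (i = 0; i < num_cpus; i++)
		/* Split the base energy evenly among the i + 1 active CPUs. */
		energy_mj += base_power_mw * times_ms[i] / (i + 1) / 1000.0;

	return energy_mj;
}

int main(void)
{
	/* The example above: 4 CPUs active, the uid runs on one for 50ms. */
	unsigned long long times_ms[4] = { 0, 0, 0, 50 };

	printf("blamed energy: %.3f mJ\n",
	       uid_active_energy_mj(times_ms, 4, 1000.0));
	return 0;
}

With an assumed 1W base power, the uid is blamed for
1000mW * 50ms / 4 = 12.5mJ, i.e. 1/4 of the base energy for that interval.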

/proc/uid_concurrent_policy_time outputs the time each uid spent
running on a group of CPUs sharing the same cpufreq policy, with 1 to
num_related_cpus of those CPUs online and not idle.

For instance, if 2 cores that are part of the same policy are online
and active for 50ms and the uid is running on one of the cores, the
uid should be blamed for 1/2 of the energy that is used every time one
or more cpus in the same policy are active.
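
Again for illustration only (the names, power figure and policy layout
below are assumptions, not part of this patch): in the policy-time row,
the buckets for a policy start at the index of the policy's first
related CPU and cover 1..num_related_cpus concurrently active CPUs,
matching cpufreq_acct_update_power() below. Times are again assumed to
have been converted from clock ticks to milliseconds.

#include <stdio.h>

/* Hypothetical description of one cpufreq policy in the policy-time row. */
struct policy_desc {
	int first_cpu;		/* cpumask_first(policy->related_cpus) */
	int num_related_cpus;	/* number of CPUs sharing this policy */
};

/*
 * Hypothetical helper: apportion a policy's energy for one uid from a parsed
 * /proc/uid_concurrent_policy_time row. Bucket first_cpu + i holds the time
 * the uid ran while exactly i + 1 CPUs of this policy were active.
 */
static double uid_policy_energy_mj(const unsigned long long *times_ms,
				   const struct policy_desc *p,
				   double policy_power_mw)
{
	double energy_mj = 0.0;
	int i;

	for (i = 0; i < p->num_related_cpus; i++)
		energy_mj += policy_power_mw *
			     times_ms[p->first_cpu + i] / (i + 1) / 1000.0;

	return energy_mj;
}

int main(void)
{
	/* The example above: a 2-CPU policy, both CPUs active for 50ms. */
	struct policy_desc little = { .first_cpu = 0, .num_related_cpus = 2 };
	unsigned long long times_ms[8] = { 0, 50 };

	printf("blamed energy: %.3f mJ\n",
	       uid_policy_energy_mj(times_ms, &little, 500.0));
	return 0;
}

With an assumed 500mW per-policy power figure, the uid is blamed for
500mW * 50ms / 2 = 12.5mJ, i.e. half of that policy's energy for the
interval.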

This patch is based on commit c89e69136fec ("ANDROID: cpufreq:
uid_concurrent_active_time") and commit 989212536842 ("ANDROID:
cpufreq: uid_concurrent_policy_time") in the android-msm-wahoo-4.4
kernel by Marissa Wall.

Bug: 111216804
Bug: 140846192
Test: cat files on hikey960 and confirm output is reasonable
Change-Id: I075fef030c261b47831ae8d4ddb286ee9d8b9347
Signed-off-by: Connor O'Brien <connoro@google.com>
(cherry picked from commit 605e2ec6d67919b995c7b822b38b9ffea33c2338)
Signed-off-by: JeiFeng Lee <linger.lee@mediatek.com>
Documentation/ABI/testing/procfs-concurrent_time [new file with mode: 0644]
drivers/cpufreq/cpufreq_times.c

diff --git a/Documentation/ABI/testing/procfs-concurrent_time b/Documentation/ABI/testing/procfs-concurrent_time
new file mode 100644
index 0000000..55b4142
--- /dev/null
+++ b/Documentation/ABI/testing/procfs-concurrent_time
@@ -0,0 +1,16 @@
+What:          /proc/uid_concurrent_active_time
+Date:          December 2018
+Contact:       Connor O'Brien <connoro@google.com>
+Description:
+       The /proc/uid_concurrent_active_time file displays aggregated cputime
+       numbers for each uid, broken down by the total number of cores that were
+       active while the uid's task was running.
+
+What:          /proc/uid_concurrent_policy_time
+Date:          December 2018
+Contact:       Connor O'Brien <connoro@google.com>
+Description:
+       The /proc/uid_concurrent_policy_time file displays aggregated cputime
+       numbers for each uid, broken down based on the cpufreq policy
+       of the core used by the uid's task and the number of cores associated
+       with that policy that were active while the uid's task was running.
diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
index e7a8b63..2917fd3 100644
--- a/drivers/cpufreq/cpufreq_times.c
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -32,11 +32,17 @@ DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
 static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
 static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
 
+struct concurrent_times {
+       atomic64_t active[NR_CPUS];
+       atomic64_t policy[NR_CPUS];
+};
+
 struct uid_entry {
        uid_t uid;
        unsigned int max_state;
        struct hlist_node hash;
        struct rcu_head rcu;
+       struct concurrent_times *concurrent_times;
        u64 time_in_state[0];
 };
 
@@ -87,6 +93,7 @@ static struct uid_entry *find_uid_entry_locked(uid_t uid)
 static struct uid_entry *find_or_register_uid_locked(uid_t uid)
 {
        struct uid_entry *uid_entry, *temp;
+       struct concurrent_times *times;
        unsigned int max_state = READ_ONCE(next_offset);
        size_t alloc_size = sizeof(*uid_entry) + max_state *
                sizeof(uid_entry->time_in_state[0]);
@@ -115,9 +122,15 @@ static struct uid_entry *find_or_register_uid_locked(uid_t uid)
        uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
        if (!uid_entry)
                return NULL;
+       times = kzalloc(sizeof(*times), GFP_ATOMIC);
+       if (!times) {
+               kfree(uid_entry);
+               return NULL;
+       }
 
        uid_entry->uid = uid;
        uid_entry->max_state = max_state;
+       uid_entry->concurrent_times = times;
 
        hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);
 
@@ -232,6 +245,86 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
        return 0;
 }
 
+static int concurrent_time_seq_show(struct seq_file *m, void *v,
+       atomic64_t *(*get_times)(struct concurrent_times *))
+{
+       struct uid_entry *uid_entry;
+       int i, num_possible_cpus = num_possible_cpus();
+
+       rcu_read_lock();
+
+       hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
+               atomic64_t *times = get_times(uid_entry->concurrent_times);
+
+               seq_put_decimal_ull(m, "", (u64)uid_entry->uid);
+               seq_putc(m, ':');
+
+               for (i = 0; i < num_possible_cpus; ++i) {
+                       u64 time = cputime_to_clock_t(atomic64_read(&times[i]));
+
+                       seq_put_decimal_ull(m, " ", time);
+               }
+               seq_putc(m, '\n');
+       }
+
+       rcu_read_unlock();
+
+       return 0;
+}
+
+static inline atomic64_t *get_active_times(struct concurrent_times *times)
+{
+       return times->active;
+}
+
+static int concurrent_active_time_seq_show(struct seq_file *m, void *v)
+{
+       if (v == uid_hash_table) {
+               seq_put_decimal_ull(m, "cpus: ", num_possible_cpus());
+               seq_putc(m, '\n');
+       }
+
+       return concurrent_time_seq_show(m, v, get_active_times);
+}
+
+static inline atomic64_t *get_policy_times(struct concurrent_times *times)
+{
+       return times->policy;
+}
+
+static int concurrent_policy_time_seq_show(struct seq_file *m, void *v)
+{
+       int i;
+       struct cpu_freqs *freqs, *last_freqs = NULL;
+
+       if (v == uid_hash_table) {
+               int cnt = 0;
+
+               for_each_possible_cpu(i) {
+                       freqs = all_freqs[i];
+                       if (!freqs)
+                               continue;
+                       if (freqs != last_freqs) {
+                               if (last_freqs) {
+                                       seq_put_decimal_ull(m, ": ", cnt);
+                                       seq_putc(m, ' ');
+                                       cnt = 0;
+                               }
+                               seq_put_decimal_ull(m, "policy", i);
+
+                               last_freqs = freqs;
+                       }
+                       cnt++;
+               }
+               if (last_freqs) {
+                       seq_put_decimal_ull(m, ": ", cnt);
+                       seq_putc(m, '\n');
+               }
+       }
+
+       return concurrent_time_seq_show(m, v, get_policy_times);
+}
+
 void cpufreq_task_times_init(struct task_struct *p)
 {
        unsigned long flags;
@@ -326,11 +419,16 @@ void cpufreq_acct_update_power(struct task_struct *p, cputime_t cputime)
 {
        unsigned long flags;
        unsigned int state;
+       unsigned int active_cpu_cnt = 0;
+       unsigned int policy_cpu_cnt = 0;
+       unsigned int policy_first_cpu;
        struct uid_entry *uid_entry;
        struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
+       struct cpufreq_policy *policy;
        uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
+       int cpu = 0;
 
-       if (!freqs || p->flags & PF_EXITING)
+       if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
                return;
 
        state = freqs->offset + READ_ONCE(freqs->last_index);
@@ -346,6 +444,42 @@ void cpufreq_acct_update_power(struct task_struct *p, cputime_t cputime)
        if (uid_entry && state < uid_entry->max_state)
                uid_entry->time_in_state[state] += cputime;
        spin_unlock_irqrestore(&uid_lock, flags);
+
+       rcu_read_lock();
+       uid_entry = find_uid_entry_rcu(uid);
+       if (!uid_entry) {
+               rcu_read_unlock();
+               return;
+       }
+
+       for_each_possible_cpu(cpu)
+               if (!idle_cpu(cpu))
+                       ++active_cpu_cnt;
+
+       atomic64_add(cputime,
+                    &uid_entry->concurrent_times->active[active_cpu_cnt - 1]);
+
+       policy = cpufreq_cpu_get(task_cpu(p));
+       if (!policy) {
+               /*
+                * This CPU may have just come up and not have a cpufreq policy
+                * yet.
+                */
+               rcu_read_unlock();
+               return;
+       }
+
+       for_each_cpu(cpu, policy->related_cpus)
+               if (!idle_cpu(cpu))
+                       ++policy_cpu_cnt;
+
+       policy_first_cpu = cpumask_first(policy->related_cpus);
+       cpufreq_cpu_put(policy);
+
+       atomic64_add(cputime,
+                    &uid_entry->concurrent_times->policy[policy_first_cpu +
+                                                         policy_cpu_cnt - 1]);
+       rcu_read_unlock();
 }
 
 void cpufreq_times_create_policy(struct cpufreq_policy *policy)
@@ -387,6 +521,14 @@ void cpufreq_times_create_policy(struct cpufreq_policy *policy)
                all_freqs[cpu] = freqs;
 }
 
+static void uid_entry_reclaim(struct rcu_head *rcu)
+{
+       struct uid_entry *uid_entry = container_of(rcu, struct uid_entry, rcu);
+
+       kfree(uid_entry->concurrent_times);
+       kfree(uid_entry);
+}
+
 void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
 {
        struct uid_entry *uid_entry;
@@ -400,7 +542,7 @@ void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
                        hash, uid_start) {
                        if (uid_start == uid_entry->uid) {
                                hash_del_rcu(&uid_entry->hash);
-                               kfree_rcu(uid_entry, rcu);
+                               call_rcu(&uid_entry->rcu, uid_entry_reclaim);
                        }
                }
        }
@@ -453,11 +595,55 @@ static const struct file_operations uid_time_in_state_fops = {
        .release        = seq_release,
 };
 
+static const struct seq_operations concurrent_active_time_seq_ops = {
+       .start = uid_seq_start,
+       .next = uid_seq_next,
+       .stop = uid_seq_stop,
+       .show = concurrent_active_time_seq_show,
+};
+
+static int concurrent_active_time_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &concurrent_active_time_seq_ops);
+}
+
+static const struct file_operations concurrent_active_time_fops = {
+       .open           = concurrent_active_time_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+
+static const struct seq_operations concurrent_policy_time_seq_ops = {
+       .start = uid_seq_start,
+       .next = uid_seq_next,
+       .stop = uid_seq_stop,
+       .show = concurrent_policy_time_seq_show,
+};
+
+static int concurrent_policy_time_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &concurrent_policy_time_seq_ops);
+}
+
+static const struct file_operations concurrent_policy_time_fops = {
+       .open           = concurrent_policy_time_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+
 static int __init cpufreq_times_init(void)
 {
        proc_create_data("uid_time_in_state", 0444, NULL,
                         &uid_time_in_state_fops, NULL);
 
+       proc_create_data("uid_concurrent_active_time", 0444, NULL,
+                        &concurrent_active_time_fops, NULL);
+
+       proc_create_data("uid_concurrent_policy_time", 0444, NULL,
+                        &concurrent_policy_time_fops, NULL);
+
        return 0;
 }