ANDROID: block/cfq-iosched: make group_idle per io cgroup tunable
author Rick Yiu <rickyiu@google.com>
Wed, 26 Sep 2018 08:45:50 +0000 (16:45 +0800)
committer 0ranko0P <ranko0p@outlook.com>
Wed, 4 Dec 2019 13:47:56 +0000 (21:47 +0800)
Making group_idle a per-io-cgroup tunable gives more flexibility in
tuning the performance of each group. If no value is set for a group,
the original default value is used.

Bug: 117857342
Bug: 132282125
Test: values can be set for each group correctly
Signed-off-by: Rick Yiu <rickyiu@google.com>
Change-Id: I9aba172419f1819f459e8305b909630fa8305978
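
The new knob is exposed as a blkcg legacy file (the "group_idle" cftype
added below), which the blkio controller surfaces as blkio.group_idle
under a cgroup-v1 hierarchy. A minimal usage sketch from userspace,
assuming CFQ is the active scheduler, a blkio mount at
/sys/fs/cgroup/blkio, and a hypothetical child cgroup "mygroup"; the
value is in microseconds, per the NSEC_PER_USEC conversion in
cfq_set_group_idle():

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; "mygroup" must already exist. */
	const char *path = "/sys/fs/cgroup/blkio/mygroup/blkio.group_idle";
	const char *val = "8\n";	/* 8 us of group idling */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}

Reading the file back goes through cfq_print_group_idle(), which
converts the stored nanoseconds back to microseconds for display.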

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index af90749..e52157f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -222,6 +222,7 @@ struct cfq_group_data {
 
        unsigned int weight;
        unsigned int leaf_weight;
+       u64 group_idle;
 };
 
 /* This is per cgroup per device grouping structure */
@@ -307,6 +308,7 @@ struct cfq_group {
        struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
        struct cfq_queue *async_idle_cfqq;
 
+       u64 group_idle;
 };
 
 struct cfq_io_cq {
@@ -795,6 +797,17 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 
 #endif /* CONFIG_CFQ_GROUP_IOSCHED */
 
+static inline u64 get_group_idle(struct cfq_data *cfqd)
+{
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       struct cfq_queue *cfqq = cfqd->active_queue;
+
+       if (cfqq && cfqq->cfqg)
+               return cfqq->cfqg->group_idle;
+#endif
+       return cfqd->cfq_group_idle;
+}
+
 #define cfq_log(cfqd, fmt, args...)    \
        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
@@ -815,7 +828,7 @@ static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
        if (!sample_valid(ttime->ttime_samples))
                return false;
        if (group_idle)
-               slice = cfqd->cfq_group_idle;
+               slice = get_group_idle(cfqd);
        else
                slice = cfqd->cfq_slice_idle;
        return ttime->ttime_mean > slice;
@@ -1609,6 +1622,7 @@ static void cfq_cpd_init(struct blkcg_policy_data *cpd)
 
        cgd->weight = weight;
        cgd->leaf_weight = weight;
+       cgd->group_idle = cfq_group_idle;
 }
 
 static void cfq_cpd_free(struct blkcg_policy_data *cpd)
@@ -1653,6 +1667,7 @@ static void cfq_pd_init(struct blkg_policy_data *pd)
 
        cfqg->weight = cgd->weight;
        cfqg->leaf_weight = cgd->leaf_weight;
+       cfqg->group_idle = cgd->group_idle;
 }
 
 static void cfq_pd_offline(struct blkg_policy_data *pd)
@@ -1774,6 +1789,19 @@ static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
        return 0;
 }
 
+static int cfq_print_group_idle(struct seq_file *sf, void *v)
+{
+       struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+       struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
+       u64 val = 0;
+
+       if (cgd)
+               val = cgd->group_idle;
+
+       seq_printf(sf, "%llu\n", div_u64(val, NSEC_PER_USEC));
+       return 0;
+}
+
 static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
                                        char *buf, size_t nbytes, loff_t off,
                                        bool on_dfl, bool is_leaf_weight)
@@ -1895,6 +1923,37 @@ static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
        return __cfq_set_weight(css, val, false, false, true);
 }
 
+static int cfq_set_group_idle(struct cgroup_subsys_state *css,
+                              struct cftype *cft, u64 val)
+{
+       struct blkcg *blkcg = css_to_blkcg(css);
+       struct cfq_group_data *cfqgd;
+       struct blkcg_gq *blkg;
+       int ret = 0;
+
+       spin_lock_irq(&blkcg->lock);
+       cfqgd = blkcg_to_cfqgd(blkcg);
+       if (!cfqgd) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       cfqgd->group_idle = val * NSEC_PER_USEC;
+
+       hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+               struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+               if (!cfqg)
+                       continue;
+
+               cfqg->group_idle = cfqgd->group_idle;
+       }
+
+out:
+       spin_unlock_irq(&blkcg->lock);
+       return ret;
+}
+
 static int cfqg_print_stat(struct seq_file *sf, void *v)
 {
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
@@ -2040,6 +2099,11 @@ static struct cftype cfq_blkcg_legacy_files[] = {
                .seq_show = cfq_print_leaf_weight,
                .write_u64 = cfq_set_leaf_weight,
        },
+       {
+               .name = "group_idle",
+               .seq_show = cfq_print_group_idle,
+               .write_u64 = cfq_set_group_idle,
+       },
 
        /* statistics, covers only the tasks in the cfqg */
        {
@@ -2930,7 +2994,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
         * otherwise we still have a problem with sync vs async workloads.
         */
        if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
-               !cfqd->cfq_group_idle)
+               !get_group_idle(cfqd))
                return;
 
        WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
@@ -2941,9 +3005,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
         */
        if (!cfq_should_idle(cfqd, cfqq)) {
                /* no queue idling. Check for group idling */
-               if (cfqd->cfq_group_idle)
-                       group_idle = cfqd->cfq_group_idle;
-               else
+               group_idle = get_group_idle(cfqd);
+               if (!group_idle)
                        return;
        }
 
@@ -2979,7 +3042,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        cfq_mark_cfqq_wait_request(cfqq);
 
        if (group_idle)
-               sl = cfqd->cfq_group_idle;
+               sl = group_idle;
        else
                sl = cfqd->cfq_slice_idle;
 
@@ -3328,7 +3391,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
         * this group, wait for requests to complete.
         */
 check_group_idle:
-       if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
+       if (get_group_idle(cfqd) && cfqq->cfqg->nr_cfqq == 1 &&
            cfqq->cfqg->dispatched &&
            !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
                cfqq = NULL;
@@ -3889,7 +3952,7 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        cfqd->cfq_slice_idle);
        }
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-       __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
+       __cfq_update_io_thinktime(&cfqq->cfqg->ttime, get_group_idle(cfqd));
 #endif
 }
 
@@ -4282,7 +4345,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                if (cfq_should_wait_busy(cfqd, cfqq)) {
                        u64 extend_sl = cfqd->cfq_slice_idle;
                        if (!cfqd->cfq_slice_idle)
-                               extend_sl = cfqd->cfq_group_idle;
+                               extend_sl = get_group_idle(cfqd);
                        cfqq->slice_end = now + extend_sl;
                        cfq_mark_cfqq_wait_busy(cfqq);
                        cfq_log_cfqq(cfqd, cfqq, "will busy wait");
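
As a worked example of the unit handling in the patch:
cfq_set_group_idle() stores the written value in nanoseconds, and
cfq_print_group_idle() divides it back down for display, so userspace
always sees microseconds. A standalone sketch of that round trip:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
	uint64_t written_us = 8;			 /* value written by userspace */
	uint64_t stored_ns = written_us * NSEC_PER_USEC; /* cfq_set_group_idle() */
	uint64_t shown_us = stored_ns / NSEC_PER_USEC;	 /* cfq_print_group_idle() */

	printf("stored %llu ns, shown %llu us\n",
	       (unsigned long long)stored_ns,
	       (unsigned long long)shown_us);
	return 0;
}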