memcg: remove mem_cgroup_flush_stats_atomic()
author    Yosry Ahmed <yosryahmed@google.com>        Fri, 21 Apr 2023 17:40:19 +0000 (17:40 +0000)
committer Andrew Morton <akpm@linux-foundation.org>  Fri, 9 Jun 2023 23:25:15 +0000 (16:25 -0700)
Previous patches removed all callers of mem_cgroup_flush_stats_atomic().
Remove the function and simplify the code.
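
With the atomic variant gone, include/linux/memcontrol.h is left with
exactly two flush entry points, as the first hunk of the diff below shows:

	void mem_cgroup_flush_stats(void);
	void mem_cgroup_flush_stats_ratelimited(void);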

Link: https://lkml.kernel.org/r/20230421174020.2994750-5-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Shakeel Butt <shakeelb@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 222d737..00a88cf 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1038,7 +1038,6 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 }
 
 void mem_cgroup_flush_stats(void);
-void mem_cgroup_flush_stats_atomic(void);
 void mem_cgroup_flush_stats_ratelimited(void);
 
 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
@@ -1537,10 +1536,6 @@ static inline void mem_cgroup_flush_stats(void)
 {
 }
 
-static inline void mem_cgroup_flush_stats_atomic(void)
-{
-}
-
 static inline void mem_cgroup_flush_stats_ratelimited(void)
 {
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7474aa8..2184a9c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -639,7 +639,7 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
        }
 }
 
-static void do_flush_stats(bool atomic)
+static void do_flush_stats(void)
 {
        /*
         * We always flush the entire tree, so concurrent flushers can just
@@ -652,30 +652,16 @@ static void do_flush_stats(bool atomic)
 
        WRITE_ONCE(flush_next_time, jiffies_64 + 2*FLUSH_TIME);
 
-       if (atomic)
-               cgroup_rstat_flush_atomic(root_mem_cgroup->css.cgroup);
-       else
-               cgroup_rstat_flush(root_mem_cgroup->css.cgroup);
+       cgroup_rstat_flush(root_mem_cgroup->css.cgroup);
 
        atomic_set(&stats_flush_threshold, 0);
        atomic_set(&stats_flush_ongoing, 0);
 }
 
-static bool should_flush_stats(void)
-{
-       return atomic_read(&stats_flush_threshold) > num_online_cpus();
-}
-
 void mem_cgroup_flush_stats(void)
 {
-       if (should_flush_stats())
-               do_flush_stats(false);
-}
-
-void mem_cgroup_flush_stats_atomic(void)
-{
-       if (should_flush_stats())
-               do_flush_stats(true);
+       if (atomic_read(&stats_flush_threshold) > num_online_cpus())
+               do_flush_stats();
 }
 
 void mem_cgroup_flush_stats_ratelimited(void)
@@ -690,7 +676,7 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
        /*
         * Always flush here so that flushing in latency-sensitive paths is
         * as cheap as possible.
         */
-       do_flush_stats(false);
+       do_flush_stats();
        queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 }
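
For readers outside the kernel tree, the coalesced-flush pattern this cleanup
leaves behind can be sketched as a small self-contained C11 analogue: one
atomic flag lets exactly one caller flush on behalf of all concurrent callers,
and a threshold counter keeps mem_cgroup_flush_stats() cheap when few updates
are pending. The names mirror the kernel's for readability, but this is a
simplified sketch, not kernel code; the real do_flush_stats() also updates
flush_next_time and flushes via cgroup_rstat_flush().

	/*
	 * Userspace C11 analogue of the coalesced-flush pattern above.
	 * Nothing here is kernel API; every name is a stand-in.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int stats_flush_ongoing;    /* nonzero while a flush runs */
	static atomic_int stats_flush_threshold;  /* pending-update counter */
	static const int online_cpus = 4;         /* num_online_cpus() stand-in */

	static void do_flush_stats(void)
	{
		/* Concurrent flushers just skip; the winner flushes for everyone. */
		if (atomic_exchange(&stats_flush_ongoing, 1))
			return;

		puts("flushing entire stats tree");  /* cgroup_rstat_flush() stand-in */

		atomic_store(&stats_flush_threshold, 0);
		atomic_store(&stats_flush_ongoing, 0);
	}

	static void mem_cgroup_flush_stats(void)
	{
		/* Only flush once enough updates have accumulated to be worth it. */
		if (atomic_load(&stats_flush_threshold) > online_cpus)
			do_flush_stats();
	}

	int main(void)
	{
		atomic_store(&stats_flush_threshold, 8);  /* simulate pending updates */
		mem_cgroup_flush_stats();  /* above threshold: flushes and resets */
		mem_cgroup_flush_stats();  /* threshold now zero: does nothing */
		return 0;
	}

Because every flush is tree-wide, a caller that loses the race can simply
skip; that is the "concurrent flushers can just skip" behavior noted in the
comment in do_flush_stats(), and it is unchanged by this cleanup.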