mm: memcontrol: implement lruvec stat functions on top of each other
author Johannes Weiner <hannes@cmpxchg.org>
Thu, 1 Feb 2018 00:16:41 +0000 (16:16 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 1 Feb 2018 01:18:36 +0000 (17:18 -0800)
The implementations of the lruvec stat functions and of their variants for
accounting through a page, or for accounting from a preemptible context, are
mostly identical and needlessly repetitive.

Implement the lruvec_page functions by looking up the page's lruvec and
then using the lruvec function.
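
In miniature, the page variant reduces to a lookup followed by a call into
the lruvec variant. The sketch below is schematic, condensed from the diff
further down; the real function also special-cases pages with no memcg:

    static inline void __mod_lruvec_page_state(struct page *page,
                                               enum node_stat_item idx, int val)
    {
            struct lruvec *lruvec;

            /* Resolve the page to the lruvec of its node and memcg... */
            lruvec = mem_cgroup_lruvec(page_pgdat(page), page->mem_cgroup);
            /* ...then reuse the lruvec function instead of open-coding it. */
            __mod_lruvec_state(lruvec, idx, val);
    }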

Implement the functions for preemptible contexts by disabling preemption
before calling the atomic context functions.
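
For callers, the split means picking a variant by context: the plain
functions may be called with preemption enabled, while the double-underscored
ones assume the caller is already in an atomic context. These call sites are
hypothetical, for illustration only:

    /* Hypothetical call sites, for illustration only. */
    mod_lruvec_page_state(page, NR_FILE_PAGES, 1);    /* may be preempted */
    __mod_lruvec_page_state(page, NR_FILE_PAGES, -1); /* already atomic */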

Link: http://lkml.kernel.org/r/20171103153336.24044-2-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h

index 2c80b69..1ffc54a 100644
@@ -569,51 +569,51 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec,
 {
        struct mem_cgroup_per_node *pn;
 
+       /* Update node */
        __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+
        if (mem_cgroup_disabled())
                return;
+
        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+
+       /* Update memcg */
        __mod_memcg_state(pn->memcg, idx, val);
+
+       /* Update lruvec */
        __this_cpu_add(pn->lruvec_stat->count[idx], val);
 }
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
 {
-       struct mem_cgroup_per_node *pn;
-
-       mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-       if (mem_cgroup_disabled())
-               return;
-       pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-       mod_memcg_state(pn->memcg, idx, val);
-       this_cpu_add(pn->lruvec_stat->count[idx], val);
+       preempt_disable();
+       __mod_lruvec_state(lruvec, idx, val);
+       preempt_enable();
 }
 
 static inline void __mod_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx, int val)
 {
-       struct mem_cgroup_per_node *pn;
+       pg_data_t *pgdat = page_pgdat(page);
+       struct lruvec *lruvec;
 
-       __mod_node_page_state(page_pgdat(page), idx, val);
-       if (mem_cgroup_disabled() || !page->mem_cgroup)
+       /* Untracked pages have no memcg, no lruvec. Update only the node */
+       if (!page->mem_cgroup) {
+               __mod_node_page_state(pgdat, idx, val);
                return;
-       __mod_memcg_state(page->mem_cgroup, idx, val);
-       pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
-       __this_cpu_add(pn->lruvec_stat->count[idx], val);
+       }
+
+       lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
+       __mod_lruvec_state(lruvec, idx, val);
 }
 
 static inline void mod_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx, int val)
 {
-       struct mem_cgroup_per_node *pn;
-
-       mod_node_page_state(page_pgdat(page), idx, val);
-       if (mem_cgroup_disabled() || !page->mem_cgroup)
-               return;
-       mod_memcg_state(page->mem_cgroup, idx, val);
-       pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
-       this_cpu_add(pn->lruvec_stat->count[idx], val);
+       preempt_disable();
+       __mod_lruvec_page_state(page, idx, val);
+       preempt_enable();
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,