
mm/page_alloc.c: use list_{first,last}_entry instead of list_entry
author     Geliang Tang <geliangtang@163.com>
           Thu, 14 Jan 2016 23:20:30 +0000 (15:20 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 15 Jan 2016 00:00:49 +0000 (16:00 -0800)
To make the intention clearer, use list_{first,last}_entry instead of
list_entry.

Signed-off-by: Geliang Tang <geliangtang@163.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
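
For context: list_first_entry() and list_last_entry() are thin wrappers around
list_entry() that make explicit which end of the list is being dereferenced, and
list_first_entry_or_null() additionally folds in the emptiness check.  The
definitions below are paraphrased from include/linux/list.h of this era and are
shown only as a sketch, not as part of the patch:

    #define list_entry(ptr, type, member) \
            container_of(ptr, type, member)

    /* first entry: the node right after the list head */
    #define list_first_entry(ptr, type, member) \
            list_entry((ptr)->next, type, member)

    /* last entry: the node right before the list head */
    #define list_last_entry(ptr, type, member) \
            list_entry((ptr)->prev, type, member)

    /* like list_first_entry(), but returns NULL for an empty list */
    #define list_first_entry_or_null(ptr, type, member) \
            (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)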
mm/page_alloc.c

index fbff97d..b9747aa 100644
@@ -805,7 +805,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                do {
                        int mt; /* migratetype of the to-be-freed page */
 
-                       page = list_entry(list->prev, struct page, lru);
+                       page = list_last_entry(list, struct page, lru);
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
 
@@ -1410,11 +1410,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
        /* Find a page of the appropriate size in the preferred list */
        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
                area = &(zone->free_area[current_order]);
-               if (list_empty(&area->free_list[migratetype]))
-                       continue;
-
-               page = list_entry(area->free_list[migratetype].next,
+               page = list_first_entry_or_null(&area->free_list[migratetype],
                                                        struct page, lru);
+               if (!page)
+                       continue;
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
@@ -1693,12 +1692,12 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
                for (order = 0; order < MAX_ORDER; order++) {
                        struct free_area *area = &(zone->free_area[order]);
 
-                       if (list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
+                       page = list_first_entry_or_null(
+                                       &area->free_list[MIGRATE_HIGHATOMIC],
+                                       struct page, lru);
+                       if (!page)
                                continue;
 
-                       page = list_entry(area->free_list[MIGRATE_HIGHATOMIC].next,
-                                               struct page, lru);
-
                        /*
                         * It should never happen but changes to locking could
                         * inadvertently allow a per-cpu drain to add pages
@@ -1746,7 +1745,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
                if (fallback_mt == -1)
                        continue;
 
-               page = list_entry(area->free_list[fallback_mt].next,
+               page = list_first_entry(&area->free_list[fallback_mt],
                                                struct page, lru);
                if (can_steal)
                        steal_suitable_fallback(zone, page, start_migratetype);
@@ -2205,9 +2204,9 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                }
 
                if (cold)
-                       page = list_entry(list->prev, struct page, lru);
+                       page = list_last_entry(list, struct page, lru);
                else
-                       page = list_entry(list->next, struct page, lru);
+                       page = list_first_entry(list, struct page, lru);
 
                list_del(&page->lru);
                pcp->count--;
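
In short, where the old code open-coded an emptiness check followed by
list_entry() on ->next, the new code lets list_first_entry_or_null() do both.
Restated outside diff form (the pattern from the __rmqueue_smallest() hunk
above, shown only as an illustration):

    /* before: explicit list_empty() check, then list_entry() on ->next */
    if (list_empty(&area->free_list[migratetype]))
            continue;
    page = list_entry(area->free_list[migratetype].next, struct page, lru);

    /* after: one call that yields NULL when the list is empty */
    page = list_first_entry_or_null(&area->free_list[migratetype],
                                            struct page, lru);
    if (!page)
            continue;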