
mm/memory_hotplug: drop local variables in shrink_zone_span()
author David Hildenbrand <david@redhat.com>
Tue, 4 Feb 2020 01:34:19 +0000 (17:34 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 4 Feb 2020 03:05:23 +0000 (03:05 +0000)
Get rid of the unnecessary local variables.

Link: http://lkml.kernel.org/r/20191006085646.5768-10-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pankaj Gupta <pagupta@redhat.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 61bd62d..a2b6ca2 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -392,14 +392,11 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                             unsigned long end_pfn)
 {
-       unsigned long zone_start_pfn = zone->zone_start_pfn;
-       unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
-       unsigned long zone_end_pfn = z;
        unsigned long pfn;
        int nid = zone_to_nid(zone);
 
        zone_span_writelock(zone);
-       if (zone_start_pfn == start_pfn) {
+       if (zone->zone_start_pfn == start_pfn) {
                /*
                 * If the section is smallest section in the zone, it need
                 * shrink zone->zone_start_pfn and zone->zone_spanned_pages.
@@ -407,25 +404,25 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                 * for shrinking zone.
                 */
                pfn = find_smallest_section_pfn(nid, zone, end_pfn,
-                                               zone_end_pfn);
+                                               zone_end_pfn(zone));
                if (pfn) {
+                       zone->spanned_pages = zone_end_pfn(zone) - pfn;
                        zone->zone_start_pfn = pfn;
-                       zone->spanned_pages = zone_end_pfn - pfn;
                } else {
                        zone->zone_start_pfn = 0;
                        zone->spanned_pages = 0;
                }
-       } else if (zone_end_pfn == end_pfn) {
+       } else if (zone_end_pfn(zone) == end_pfn) {
                /*
                 * If the section is biggest section in the zone, it need
                 * shrink zone->spanned_pages.
                 * In this case, we find second biggest valid mem_section for
                 * shrinking zone.
                 */
-               pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
+               pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
                                               start_pfn);
                if (pfn)
-                       zone->spanned_pages = pfn - zone_start_pfn + 1;
+                       zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
                else {
                        zone->zone_start_pfn = 0;
                        zone->spanned_pages = 0;