
mm: vmalloc: use trace_purge_vmap_area_lazy event
author    Uladzislau Rezki (Sony) <urezki@gmail.com>  Tue, 18 Oct 2022 18:10:51 +0000 (20:10 +0200)
committer Andrew Morton <akpm@linux-foundation.org>   Wed, 9 Nov 2022 01:37:17 +0000 (17:37 -0800)
This is for debug purposes: the event fires when all outstanding lazily
freed areas have been returned back to the vmap space.  It reports some
extra information:

- the start:end range within which the set of vmap areas was freed;
- the number of areas that were purged (see the sketch of the event
  declaration below).

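For reference, the trace event itself is declared earlier in this
series.  A minimal sketch of what that declaration looks like, assuming
the standard TRACE_EVENT() boilerplate; the field names here are
illustrative, the real definition lives in include/trace/events/vmalloc.h:

/*
 * Sketch only: an illustrative TRACE_EVENT() declaration matching the
 * trace_purge_vmap_area_lazy(start, end, num_purged_areas) call site
 * added by this patch.
 */
TRACE_EVENT(purge_vmap_area_lazy,

	TP_PROTO(unsigned long start, unsigned long end,
		 unsigned int npurged),

	TP_ARGS(start, end, npurged),

	TP_STRUCT__entry(
		__field(unsigned long, start)
		__field(unsigned long, end)
		__field(unsigned int, npurged)
	),

	TP_fast_assign(
		__entry->start = start;
		__entry->end = end;
		__entry->npurged = npurged;
	),

	TP_printk("start=0x%lx end=0x%lx num_purged=%u",
		  __entry->start, __entry->end, __entry->npurged)
);

Once the event is enabled (typically via the vmalloc group under
/sys/kernel/tracing/events/), every invocation of
__purge_vmap_area_lazy() emits one record, including the empty-list
case where num_purged_areas is 0.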
[urezki@gmail.com: simplify return boolean expression]
Link: https://lkml.kernel.org/r/20221020125247.5053-1-urezki@gmail.com
Link: https://lkml.kernel.org/r/20221018181053.434508-6-urezki@gmail.com
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmalloc.c

index 849563d..1b1205a 100644
@@ -1730,6 +1730,7 @@ static void purge_fragmented_blocks_allcpus(void);
 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 {
        unsigned long resched_threshold;
+       unsigned int num_purged_areas = 0;
        struct list_head local_purge_list;
        struct vmap_area *va, *n_va;
 
@@ -1741,7 +1742,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
        spin_unlock(&purge_vmap_area_lock);
 
        if (unlikely(list_empty(&local_purge_list)))
-               return false;
+               goto out;
 
        start = min(start,
                list_first_entry(&local_purge_list,
@@ -1776,12 +1777,16 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
                                              va->va_start, va->va_end);
 
                atomic_long_sub(nr, &vmap_lazy_nr);
+               num_purged_areas++;
 
                if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
                        cond_resched_lock(&free_vmap_area_lock);
        }
        spin_unlock(&free_vmap_area_lock);
-       return true;
+
+out:
+       trace_purge_vmap_area_lazy(start, end, num_purged_areas);
+       return num_purged_areas > 0;
 }
 
 /*
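For context (not part of this diff), the min()/max() clamping above
exists because the main caller passes sentinel values and lets
__purge_vmap_area_lazy() derive the real range from the purge list
itself.  A rough sketch of that caller as it looked in mm/vmalloc.c at
the time of this commit:

/*
 * Sketch of the caller (not part of this diff): passing ULONG_MAX/0
 * lets __purge_vmap_area_lazy() narrow start:end to the actual range
 * covered by the purge list.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}

Note that with the new "goto out" path, such a caller still gets a
trace record when the purge list turns out to be empty, just with
num_purged_areas == 0 and its original start/end values untouched.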