OSDN Git Service

kunit: make test->lock irq safe
author: Vlastimil Babka <vbabka@suse.cz>
Tue, 29 Jun 2021 02:34:30 +0000 (19:34 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Tue, 29 Jun 2021 17:53:46 +0000 (10:53 -0700)
The upcoming SLUB kunit test will be calling kunit_find_named_resource()
from a context with interrupts disabled.  That means kunit's test->lock
needs to be IRQ safe to avoid potential deadlocks and lockdep splats.

This patch therefore changes the test->lock usage to spin_lock_irqsave()
and spin_unlock_irqrestore().

Link: https://lkml.kernel.org/r/20210511150734.3492-1-glittao@gmail.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Oliver Glitta <glittao@gmail.com>
Reviewed-by: Brendan Higgins <brendanhiggins@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Daniel Latypov <dlatypov@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Marco Elver <elver@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/kunit/test.h
lib/kunit/test.c

index 49601c4..524d478 100644 (file)
@@ -515,8 +515,9 @@ kunit_find_resource(struct kunit *test,
                    void *match_data)
 {
        struct kunit_resource *res, *found = NULL;
+       unsigned long flags;
 
-       spin_lock(&test->lock);
+       spin_lock_irqsave(&test->lock, flags);
 
        list_for_each_entry_reverse(res, &test->resources, node) {
                if (match(test, res, (void *)match_data)) {
@@ -526,7 +527,7 @@ kunit_find_resource(struct kunit *test,
                }
        }
 
-       spin_unlock(&test->lock);
+       spin_unlock_irqrestore(&test->lock, flags);
 
        return found;
 }
index 2f6cc01..45f0688 100644 (file)
@@ -475,6 +475,7 @@ int kunit_add_resource(struct kunit *test,
                       void *data)
 {
        int ret = 0;
+       unsigned long flags;
 
        res->free = free;
        kref_init(&res->refcount);
@@ -487,10 +488,10 @@ int kunit_add_resource(struct kunit *test,
                res->data = data;
        }
 
-       spin_lock(&test->lock);
+       spin_lock_irqsave(&test->lock, flags);
        list_add_tail(&res->node, &test->resources);
        /* refcount for list is established by kref_init() */
-       spin_unlock(&test->lock);
+       spin_unlock_irqrestore(&test->lock, flags);
 
        return ret;
 }
@@ -548,9 +549,11 @@ EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource);
 
 void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
 {
-       spin_lock(&test->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&test->lock, flags);
        list_del(&res->node);
-       spin_unlock(&test->lock);
+       spin_unlock_irqrestore(&test->lock, flags);
        kunit_put_resource(res);
 }
 EXPORT_SYMBOL_GPL(kunit_remove_resource);
@@ -630,6 +633,7 @@ EXPORT_SYMBOL_GPL(kunit_kfree);
 void kunit_cleanup(struct kunit *test)
 {
        struct kunit_resource *res;
+       unsigned long flags;
 
        /*
         * test->resources is a stack - each allocation must be freed in the
@@ -641,9 +645,9 @@ void kunit_cleanup(struct kunit *test)
         * protect against the current node being deleted, not the next.
         */
        while (true) {
-               spin_lock(&test->lock);
+               spin_lock_irqsave(&test->lock, flags);
                if (list_empty(&test->resources)) {
-                       spin_unlock(&test->lock);
+                       spin_unlock_irqrestore(&test->lock, flags);
                        break;
                }
                res = list_last_entry(&test->resources,
@@ -654,7 +658,7 @@ void kunit_cleanup(struct kunit *test)
                 * resource, and this can't happen if the test->lock
                 * is held.
                 */
-               spin_unlock(&test->lock);
+               spin_unlock_irqrestore(&test->lock, flags);
                kunit_remove_resource(test, res);
        }
        current->kunit_test = NULL;