
kthread: Convert worker lock to raw spinlock
Author:     Julia Cartwright <julia@ni.com>
AuthorDate: Tue, 12 Feb 2019 16:25:53 +0000 (17:25 +0100)
Commit:     Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 28 Feb 2019 10:18:38 +0000 (11:18 +0100)
In order to enable the queuing of kthread work items from hardirq context
even when PREEMPT_RT_FULL is enabled, convert the worker spin_lock to a
raw_spin_lock. On PREEMPT_RT_FULL a spinlock_t becomes a sleeping lock and
therefore must not be taken from hardirq context, whereas a raw_spinlock_t
remains a true spinning lock.

This is acceptable only because the work performed under the lock is
well-bounded and minimal.

Reported-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reported-by: Tim Sander <tim@krieglstein.org>
Signed-off-by: Julia Cartwright <julia@ni.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Guenter Roeck <linux@roeck-us.net>
Link: https://lkml.kernel.org/r/20190212162554.19779-1-bigeasy@linutronix.de
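
As an illustration of what this conversion enables, here is a minimal
driver-style sketch (not part of the patch) that queues kthread work from a
hardirq handler. The handler, worker, and work item names are hypothetical;
kthread_queue_work() and the setup helpers named in the comments are the
real kernel APIs whose locking this patch converts.

/*
 * Hypothetical sketch: with worker->lock converted to a raw_spinlock_t,
 * kthread_queue_work() may be called from hardirq context even on
 * PREEMPT_RT_FULL.
 */
#include <linux/interrupt.h>
#include <linux/kthread.h>

static struct kthread_worker *example_worker;	/* created elsewhere via kthread_create_worker() */
static struct kthread_work example_work;	/* initialized elsewhere via kthread_init_work() */

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	/*
	 * Takes worker->lock with raw_spin_lock_irqsave(); safe here
	 * because the critical section is well-bounded and minimal.
	 */
	kthread_queue_work(example_worker, &example_work);
	return IRQ_HANDLED;
}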
include/linux/kthread.h
kernel/kthread.c

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c196176..6b8c064 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -85,7 +85,7 @@ enum {
 
 struct kthread_worker {
        unsigned int            flags;
-       spinlock_t              lock;
+       raw_spinlock_t          lock;
        struct list_head        work_list;
        struct list_head        delayed_work_list;
        struct task_struct      *task;
@@ -106,7 +106,7 @@ struct kthread_delayed_work {
 };
 
 #define KTHREAD_WORKER_INIT(worker)    {                               \
-       .lock = __SPIN_LOCK_UNLOCKED((worker).lock),                    \
+       .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),                \
        .work_list = LIST_HEAD_INIT((worker).work_list),                \
        .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
        }
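
For context on the KTHREAD_WORKER_INIT hunk above, a one-line sketch of
where that initializer is consumed; DEFINE_KTHREAD_WORKER is the existing
wrapper in the same header, and the worker name is hypothetical.

#include <linux/kthread.h>

/*
 * Statically defined worker: after this patch its .lock field is
 * initialized with __RAW_SPIN_LOCK_UNLOCKED rather than
 * __SPIN_LOCK_UNLOCKED.
 */
static DEFINE_KTHREAD_WORKER(example_static_worker);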
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d..5641b55 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
                                struct lock_class_key *key)
 {
        memset(worker, 0, sizeof(struct kthread_worker));
-       spin_lock_init(&worker->lock);
+       raw_spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        INIT_LIST_HEAD(&worker->delayed_work_list);
@@ -641,21 +641,21 @@ repeat:
 
        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
-               spin_lock_irq(&worker->lock);
+               raw_spin_lock_irq(&worker->lock);
                worker->task = NULL;
-               spin_unlock_irq(&worker->lock);
+               raw_spin_unlock_irq(&worker->lock);
                return 0;
        }
 
        work = NULL;
-       spin_lock_irq(&worker->lock);
+       raw_spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
-       spin_unlock_irq(&worker->lock);
+       raw_spin_unlock_irq(&worker->lock);
 
        if (work) {
                __set_current_state(TASK_RUNNING);
@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
        bool ret = false;
        unsigned long flags;
 
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
        if (!queuing_blocked(worker, work)) {
                kthread_insert_work(worker, work, &worker->work_list);
                ret = true;
        }
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
        if (WARN_ON_ONCE(!worker))
                return;
 
-       spin_lock(&worker->lock);
+       raw_spin_lock(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);
 
@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
        list_del_init(&work->node);
        kthread_insert_work(worker, work, &worker->work_list);
 
-       spin_unlock(&worker->lock);
+       raw_spin_unlock(&worker->lock);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 
@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
        unsigned long flags;
        bool ret = false;
 
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
 
        if (!queuing_blocked(worker, work)) {
                __kthread_queue_delayed_work(worker, dwork, delay);
                ret = true;
        }
 
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_work *work)
        if (!worker)
                return;
 
-       spin_lock_irq(&worker->lock);
+       raw_spin_lock_irq(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);
 
@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_work *work)
        else
                noop = true;
 
-       spin_unlock_irq(&worker->lock);
+       raw_spin_unlock_irq(&worker->lock);
 
        if (!noop)
                wait_for_completion(&fwork.done);
@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
                 * any queuing is blocked by setting the canceling counter.
                 */
                work->canceling++;
-               spin_unlock_irqrestore(&worker->lock, *flags);
+               raw_spin_unlock_irqrestore(&worker->lock, *flags);
                del_timer_sync(&dwork->timer);
-               spin_lock_irqsave(&worker->lock, *flags);
+               raw_spin_lock_irqsave(&worker->lock, *flags);
                work->canceling--;
        }
 
@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
        unsigned long flags;
        int ret = false;
 
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
 
        /* Do not bother with canceling when never queued. */
        if (!work->worker)
@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 fast_queue:
        __kthread_queue_delayed_work(worker, dwork, delay);
 out:
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
        if (!worker)
                goto out;
 
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);
 
@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
         * In the meantime, block any queuing by setting the canceling counter.
         */
        work->canceling++;
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
        kthread_flush_work(work);
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
        work->canceling--;
 
 out_fast:
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
 out:
        return ret;
 }
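
Finally, a hedged usage sketch tying the delayed-work hunks together. The
worker, work item, and call sites are hypothetical; kthread_mod_delayed_work()
and kthread_cancel_delayed_work_sync() are the real functions whose locking
is converted above.

#include <linux/jiffies.h>
#include <linux/kthread.h>

static struct kthread_worker *example_worker;		/* from kthread_create_worker() */
static struct kthread_delayed_work example_dwork;	/* set up with kthread_init_delayed_work() */

static void example_rearm(void)
{
	/* Re-arms the timer; takes worker->lock via raw_spin_lock_irqsave(). */
	kthread_mod_delayed_work(example_worker, &example_dwork,
				 msecs_to_jiffies(10));
}

static void example_teardown(void)
{
	/*
	 * Waits for a pending callback; internally performs the
	 * unlock/del_timer_sync()/relock sequence shown in
	 * __kthread_cancel_work() above.
	 */
	kthread_cancel_delayed_work_sync(&example_dwork);
}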