md/raid5: make use of spin_lock_irq over local_irq_disable + spin_lock
author Julia Cartwright <julia@ni.com>
Fri, 28 Apr 2017 17:41:02 +0000 (12:41 -0500)
committer Shaohua Li <shli@fb.com>
Thu, 4 May 2017 20:44:23 +0000 (13:44 -0700)
On mainline, there is no functional difference, just less code, and
symmetric lock/unlock paths.

On PREEMPT_RT builds, where spinlocks are sleeping locks, this fixes the
following warning seen by Alexander GQ Gerasiov:

   BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:993
   in_atomic(): 0, irqs_disabled(): 1, pid: 58, name: kworker/u12:1
   CPU: 5 PID: 58 Comm: kworker/u12:1 Tainted: G        W       4.9.20-rt16-stand6-686 #1
   Hardware name: Supermicro SYS-5027R-WRF/X9SRW-F, BIOS 3.2a 10/28/2015
   Workqueue: writeback wb_workfn (flush-253:0)
   Call Trace:
    dump_stack+0x47/0x68
    ? migrate_enable+0x4a/0xf0
    ___might_sleep+0x101/0x180
    rt_spin_lock+0x17/0x40
    add_stripe_bio+0x4e3/0x6c0 [raid456]
    ? preempt_count_add+0x42/0xb0
    raid5_make_request+0x737/0xdd0 [raid456]

Reported-by: Alexander GQ Gerasiov <gq@redlab-i.ru>
Tested-by: Alexander GQ Gerasiov <gq@redlab-i.ru>
Signed-off-by: Julia Cartwright <julia@ni.com>
Signed-off-by: Shaohua Li <shli@fb.com>
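
For context, a minimal, self-contained sketch of the locking pattern this
commit replaces and the one it adopts. This is illustrative only; the
demo_lock and demo_*() names are hypothetical and not part of the raid5
code below.

/*
 * Illustrative sketch only -- demo_lock and the demo_*() helpers are
 * hypothetical, not the raid5 code changed by this patch.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Old pattern: open-coded IRQ disable around a plain spin_lock().
 * On PREEMPT_RT, spin_lock() may sleep, which is invalid with
 * interrupts hard-disabled and triggers the might_sleep() splat
 * shown above. */
static void demo_lock_old(void)
{
	local_irq_disable();
	spin_lock(&demo_lock);
}

static void demo_unlock_old(void)
{
	spin_unlock(&demo_lock);
	local_irq_enable();
}

/* New pattern: let the spinlock API manage the interrupt state.
 * The lock/unlock paths are symmetric, and PREEMPT_RT can map
 * spin_lock_irq() onto its sleeping-lock implementation without
 * hard-disabling interrupts. */
static void demo_lock_new(void)
{
	spin_lock_irq(&demo_lock);
}

static void demo_unlock_new(void)
{
	spin_unlock_irq(&demo_lock);
}
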
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2e38cfa..3809a21 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -103,8 +103,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
 {
        int i;
-       local_irq_disable();
-       spin_lock(conf->hash_locks);
+       spin_lock_irq(conf->hash_locks);
        for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
                spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
        spin_lock(&conf->device_lock);
@@ -114,9 +113,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
 {
        int i;
        spin_unlock(&conf->device_lock);
-       for (i = NR_STRIPE_HASH_LOCKS; i; i--)
-               spin_unlock(conf->hash_locks + i - 1);
-       local_irq_enable();
+       for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
+               spin_unlock(conf->hash_locks + i);
+       spin_unlock_irq(conf->hash_locks);
 }
 
 /* Find first data disk in a raid6 stripe */
@@ -714,12 +713,11 @@ static bool is_full_stripe_write(struct stripe_head *sh)
 
 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 {
-       local_irq_disable();
        if (sh1 > sh2) {
-               spin_lock(&sh2->stripe_lock);
+               spin_lock_irq(&sh2->stripe_lock);
                spin_lock_nested(&sh1->stripe_lock, 1);
        } else {
-               spin_lock(&sh1->stripe_lock);
+               spin_lock_irq(&sh1->stripe_lock);
                spin_lock_nested(&sh2->stripe_lock, 1);
        }
 }
@@ -727,8 +725,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 {
        spin_unlock(&sh1->stripe_lock);
-       spin_unlock(&sh2->stripe_lock);
-       local_irq_enable();
+       spin_unlock_irq(&sh2->stripe_lock);
 }
 
 /* Only freshly new full stripe normal write stripe can be added to a batch list */