
block: queue work on power efficient wq
author     Viresh Kumar <viresh.kumar@linaro.org>
           Wed, 24 Apr 2013 11:42:56 +0000 (17:12 +0530)
committer  Tejun Heo <tj@kernel.org>
           Tue, 14 May 2013 17:50:07 +0000 (10:50 -0700)
The block layer uses workqueues for multiple purposes. None of this work has any
real dependency on running on the CPU that queued it.

On an otherwise idle system, an idle CPU is observed waking up many times just to
service this work. It would be better to run it on a CPU that the scheduler
considers the most appropriate one.

This patch replaces the normal workqueues with their power-efficient variants.

Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
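
For context, a minimal sketch of the two patterns the patch switches to, assuming a
hypothetical driver with a work item and workqueue named example_* (none of these
names appear in the patch): work with no CPU affinity requirement is queued on
system_power_efficient_wq instead of the default system workqueue, and dedicated
workqueues opt in by passing WQ_POWER_EFFICIENT to alloc_workqueue().

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/workqueue.h>

    /* Hypothetical deferred-work handler, for illustration only. */
    static void example_work_fn(struct work_struct *work)
    {
            /* ... service the deferred work ... */
    }

    static DECLARE_WORK(example_work, example_work_fn);

    static struct workqueue_struct *example_wq;

    static int __init example_init(void)
    {
            /*
             * Pattern 1: instead of schedule_work(&example_work), queue on
             * the shared power-efficient system workqueue.  When
             * power-efficient workqueues are enabled, this queue is unbound
             * and the scheduler picks a suitable CPU rather than the local
             * one.
             */
            queue_work(system_power_efficient_wq, &example_work);

            /* Pattern 2: a dedicated workqueue opts in with WQ_POWER_EFFICIENT. */
            example_wq = alloc_workqueue("example", WQ_POWER_EFFICIENT, 0);
            if (!example_wq)
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_exit(void)
    {
            flush_work(&example_work);
            destroy_workqueue(example_wq);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

Note that unless CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y or the workqueue.power_efficient
boot parameter is set, the power-efficient flag and queues fall back to the normal
per-cpu behaviour, so this change is a no-op on configurations that do not ask for
power efficiency.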
block/blk-core.c
block/blk-ioc.c
block/genhd.c

block/blk-core.c
index 33c33bc..f0deb8b 100644
@@ -3180,7 +3180,8 @@ int __init blk_dev_init(void)
 
        /* used for unplugging and affects IO latency/throughput - HIGHPRI */
        kblockd_workqueue = alloc_workqueue("kblockd",
-                                           WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+                                           WQ_MEM_RECLAIM | WQ_HIGHPRI |
+                                           WQ_POWER_EFFICIENT, 0);
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");
 
block/blk-ioc.c
index 9c4bb82..4464c82 100644
@@ -144,7 +144,8 @@ void put_io_context(struct io_context *ioc)
        if (atomic_long_dec_and_test(&ioc->refcount)) {
                spin_lock_irqsave(&ioc->lock, flags);
                if (!hlist_empty(&ioc->icq_list))
-                       schedule_work(&ioc->release_work);
+                       queue_work(system_power_efficient_wq,
+                                       &ioc->release_work);
                else
                        free_ioc = true;
                spin_unlock_irqrestore(&ioc->lock, flags);
block/genhd.c
index 20625ee..e9094b3 100644
@@ -1489,9 +1489,11 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
        intv = disk_events_poll_jiffies(disk);
        set_timer_slack(&ev->dwork.timer, intv / 4);
        if (check_now)
-               queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
+               queue_delayed_work(system_freezable_power_efficient_wq,
+                               &ev->dwork, 0);
        else if (intv)
-               queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
+               queue_delayed_work(system_freezable_power_efficient_wq,
+                               &ev->dwork, intv);
 out_unlock:
        spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1534,7 +1536,8 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
        spin_lock_irq(&ev->lock);
        ev->clearing |= mask;
        if (!ev->block)
-               mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
+               mod_delayed_work(system_freezable_power_efficient_wq,
+                               &ev->dwork, 0);
        spin_unlock_irq(&ev->lock);
 }
 
@@ -1627,7 +1630,8 @@ static void disk_check_events(struct disk_events *ev,
 
        intv = disk_events_poll_jiffies(disk);
        if (!ev->block && intv)
-               queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
+               queue_delayed_work(system_freezable_power_efficient_wq,
+                               &ev->dwork, intv);
 
        spin_unlock_irq(&ev->lock);