dm: impose configurable deadline for dm_request_fn's merge heuristic
[sagit-ice-cold/kernel_xiaomi_msm8998.git] drivers/md/dm.c
index 6f85428..5294e01 100644
@@ -21,6 +21,8 @@
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/elevator.h> /* for rq_end_sector() */
 
 #include <trace/events/block.h>
 
@@ -216,6 +218,12 @@ struct mapped_device {
 
        struct kthread_worker kworker;
        struct task_struct *kworker_task;
+
+       /* for request-based merge heuristic in dm_request_fn() */
+       unsigned seq_rq_merge_deadline_usecs;
+       int last_rq_rw;
+       sector_t last_rq_pos;
+       ktime_t last_rq_start_time;
 };
 
 /*
@@ -1930,6 +1938,12 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
        blk_start_request(orig);
        atomic_inc(&md->pending[rq_data_dir(orig)]);
 
+       if (md->seq_rq_merge_deadline_usecs) {
+               md->last_rq_pos = rq_end_sector(orig);
+               md->last_rq_rw = rq_data_dir(orig);
+               md->last_rq_start_time = ktime_get();
+       }
+
        /*
         * Hold the md reference here for the in-flight I/O.
         * We can't rely on the reference count by device opener,
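
The three fields snapshotted above are the whole state the heuristic keeps: the end position, direction, and start time of the last request handed to the target. rq_end_sector() comes from <linux/elevator.h> (included at the top of this patch) and expands to blk_rq_pos(rq) + blk_rq_sectors(rq): the first sector a perfectly sequential successor would have to start at. A minimal userspace sketch of that arithmetic, with a stand-in sector_t:

    /*
     * Sketch only: sector_t is modeled as uint64_t, and rq_end_sector() is
     * assumed to expand to blk_rq_pos(rq) + blk_rq_sectors(rq) as in
     * <linux/elevator.h>.
     */
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* A request covering sectors [pos, pos + nr_sectors) ends here; a
     * back-to-back successor must start at exactly this sector. */
    static sector_t rq_end_sector_model(sector_t pos, sector_t nr_sectors)
    {
            return pos + nr_sectors;
    }
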
@@ -1940,6 +1954,45 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
        dm_get(md);
 }
 
+#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
+{
+       return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
+}
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+                                                    const char *buf, size_t count)
+{
+       unsigned deadline;
+
+       if (!dm_request_based(md))
+               return count;
+
+       if (kstrtouint(buf, 10, &deadline))
+               return -EINVAL;
+
+       if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
+               deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
+
+       md->seq_rq_merge_deadline_usecs = deadline;
+
+       return count;
+}
+
+static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
+{
+       ktime_t kt_deadline;
+
+       if (!md->seq_rq_merge_deadline_usecs)
+               return false;
+
+       kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
+       kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
+
+       return !ktime_after(ktime_get(), kt_deadline);
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
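
The show/store pair above is meant to be wired up as a sysfs attribute in dm-sysfs.c (not part of this file's diff); in mainline the knob surfaces as /sys/block/dm-X/dm/rq_based_seq_io_merge_deadline, so writing 100 to that file arms a 100-microsecond window. Two quiet design choices are worth noting: a store against a non-request-based device returns count without doing anything, and an over-large value is silently capped at MAX_SEQ_RQ_MERGE_DEADLINE_USECS (100ms) rather than rejected with -EINVAL. The predicate itself answers a single question: has the configured deadline elapsed since the last request was started? A self-contained model with worked numbers, using plain u64 nanoseconds where the kernel uses ktime_t (the saturating ktime_add_safe() is elided for clarity):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_USEC 1000ULL

    /* Models dm_request_peeked_before_merge_deadline(): true while the peek
     * falls at or before last_start + deadline, i.e. !ktime_after(). */
    static bool peeked_before_deadline(uint64_t last_start_ns, uint64_t now_ns,
                                       unsigned deadline_usecs)
    {
            if (!deadline_usecs)    /* 0 disables the heuristic entirely */
                    return false;
            return now_ns <= last_start_ns +
                             (uint64_t)deadline_usecs * NSEC_PER_USEC;
    }

    int main(void)
    {
            /* Deadline of 100us: a request peeked 40us after the previous
             * dispatch is still inside the merge window; 150us is not. */
            printf("%d\n", peeked_before_deadline(0, 40 * NSEC_PER_USEC, 100));  /* 1 */
            printf("%d\n", peeked_before_deadline(0, 150 * NSEC_PER_USEC, 100)); /* 0 */
            return 0;
    }
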
@@ -1982,6 +2035,11 @@ static void dm_request_fn(struct request_queue *q)
                        continue;
                }
 
+               if (dm_request_peeked_before_merge_deadline(md) &&
+                   md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+                   md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
+                       goto delay_and_out;
+
                if (ti->type->busy && ti->type->busy(ti))
                        goto delay_and_out;
 
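
This is where the heuristic fires. Every clause must hold before dispatch is deferred: the merge window is still open, other I/O is in flight (so parking this request cannot leave the device idle), the request carries a single-segment bio (small sequential I/O, the case where merging pays off), and it is positionally and directionally contiguous with the last dispatched request. A flat restatement of the gate under hypothetical stand-in types (md_state and peeked_rq are illustrative, not kernel structures):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    struct md_state {
            unsigned deadline_usecs;  /* seq_rq_merge_deadline_usecs */
            unsigned in_flight;       /* what md_in_flight() would report */
            sector_t last_rq_pos;     /* end sector of the last started request */
            int      last_rq_rw;      /* its direction */
            uint64_t last_start_ns;   /* last_rq_start_time as nanoseconds */
    };

    struct peeked_rq {
            bool     has_bio;         /* rq->bio != NULL */
            unsigned bi_vcnt;         /* segments in rq->bio */
            sector_t pos;             /* blk_rq_pos(rq) */
            int      rw;              /* rq_data_dir(rq) */
    };

    /* True when dm_request_fn should take the delay_and_out path instead of
     * starting the request now. */
    static bool should_delay_for_merge(const struct md_state *md,
                                       const struct peeked_rq *rq,
                                       uint64_t now_ns)
    {
            if (!md->deadline_usecs ||
                now_ns > md->last_start_ns + md->deadline_usecs * 1000ULL)
                    return false;   /* heuristic off, or window closed */
            if (!md->in_flight)
                    return false;   /* nothing else running: never stall the device */
            return rq->has_bio && rq->bi_vcnt == 1 &&
                   rq->pos == md->last_rq_pos && rq->rw == md->last_rq_rw;
    }
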
@@ -1997,7 +2055,7 @@ static void dm_request_fn(struct request_queue *q)
        goto out;
 
 delay_and_out:
-       blk_delay_queue(q, HZ / 10);
+       blk_delay_queue(q, HZ / 100);
 out:
        dm_put_live_table(md, srcu_idx);
 }
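
The companion tweak shortens the requeue delay taken on the delay_and_out path from HZ/10 to HZ/100 jiffies, roughly 100ms down to 10ms, bounding the latency added when the heuristic (or a busy target) defers a request. A trivial check of that arithmetic (HZ is a kernel config value; 1000 is assumed here purely for illustration):

    #include <assert.h>

    #define HZ 1000                 /* assumed; config-dependent in a real kernel */
    #define JIFFY_MS (1000 / HZ)    /* milliseconds per jiffy */

    int main(void)
    {
            assert(HZ / 10  * JIFFY_MS == 100);  /* old delay: ~100 ms */
            assert(HZ / 100 * JIFFY_MS == 10);   /* new delay:  ~10 ms */
            return 0;
    }
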
@@ -2520,6 +2578,9 @@ static int dm_init_request_based_queue(struct mapped_device *md)
        if (!q)
                return 0;
 
+       /* disable dm_request_fn's merge heuristic by default */
+       md->seq_rq_merge_deadline_usecs = 0;
+
        md->queue = q;
        dm_init_md_queue(md);
        blk_queue_softirq_done(md->queue, dm_softirq_done);