Fix backport of "tcp: detect malicious patterns in tcp_collapse_ofo_queue()"
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / block / blk-core.c
index 25f2527..aac0184 100644
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
  **/
 void blk_start_queue(struct request_queue *q)
 {
-       WARN_ON(!irqs_disabled());
+       WARN_ON(!in_interrupt() && !irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        __blk_run_queue(q);
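
Note: the relaxed assertion above only warns when blk_start_queue() is entered from process context with local interrupts enabled; a caller already running in interrupt context no longer trips the warning. A minimal user-space model of the new predicate, with in_interrupt() and irqs_disabled() mocked as plain flags (purely illustrative, not kernel code):

    #include <stdio.h>
    #include <stdbool.h>

    static bool in_interrupt;   /* mock: currently in IRQ context */
    static bool irqs_disabled;  /* mock: local interrupts are off */

    static void check_start_queue(void)
    {
            /* old check: warn whenever interrupts are enabled;
             * new check: interrupt context is also accepted */
            if (!in_interrupt && !irqs_disabled)
                    printf("WARN: queue started from process context with interrupts enabled\n");
    }

    int main(void)
    {
            in_interrupt = true;  irqs_disabled = false; check_start_queue(); /* silent now */
            in_interrupt = false; irqs_disabled = true;  check_start_queue(); /* silent */
            in_interrupt = false; irqs_disabled = false; check_start_queue(); /* warns */
            return 0;
    }
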
@@ -528,8 +528,8 @@ void blk_set_queue_dying(struct request_queue *q)
 
                blk_queue_for_each_rl(rl, q) {
                        if (rl->rq_pool) {
-                               wake_up(&rl->wait[BLK_RW_SYNC]);
-                               wake_up(&rl->wait[BLK_RW_ASYNC]);
+                               wake_up_all(&rl->wait[BLK_RW_SYNC]);
+                               wake_up_all(&rl->wait[BLK_RW_ASYNC]);
                        }
                }
        }
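
wake_up() wakes only one exclusive waiter per call, so once the queue is marked dying, tasks still sleeping in get_request() on the request-list wait queues might never be woken again; wake_up_all() lets every waiter run, observe the dying queue, and back out. A user-space analogy using pthreads rather than the kernel wait-queue API (all names here are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER;
    static bool queue_dying;

    static void *waiter(void *arg)
    {
            pthread_mutex_lock(&lock);
            while (!queue_dying)              /* would normally wait for a free request */
                    pthread_cond_wait(&wait_q, &lock);
            pthread_mutex_unlock(&lock);
            printf("waiter %ld saw the dying flag, backing out\n", (long)arg);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[3];

            for (long i = 0; i < 3; i++)
                    pthread_create(&t[i], NULL, waiter, (void *)i);
            sleep(1);                         /* let the waiters block */

            pthread_mutex_lock(&lock);
            queue_dying = true;
            pthread_cond_broadcast(&wait_q);  /* the wake_up_all() analogue; a single
                                               * signal could leave waiters stranded */
            pthread_mutex_unlock(&lock);

            for (int i = 0; i < 3; i++)
                    pthread_join(t[i], NULL);
            return 0;
    }
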
@@ -2023,7 +2023,14 @@ end_io:
  */
 blk_qc_t generic_make_request(struct bio *bio)
 {
-       struct bio_list bio_list_on_stack;
+       /*
+        * bio_list_on_stack[0] contains bios submitted by the current
+        * make_request_fn.
+        * bio_list_on_stack[1] contains bios that were submitted before
+        * the current make_request_fn, but that haven't been processed
+        * yet.
+        */
+       struct bio_list bio_list_on_stack[2];
        blk_qc_t ret = BLK_QC_T_NONE;
 
        if (!generic_make_request_checks(bio))
@@ -2040,7 +2047,7 @@ blk_qc_t generic_make_request(struct bio *bio)
         * should be added at the tail
         */
        if (current->bio_list) {
-               bio_list_add(current->bio_list, bio);
+               bio_list_add(&current->bio_list[0], bio);
                goto out;
        }
 
@@ -2059,24 +2066,39 @@ blk_qc_t generic_make_request(struct bio *bio)
         * bio_list, and call into ->make_request() again.
         */
        BUG_ON(bio->bi_next);
-       bio_list_init(&bio_list_on_stack);
-       current->bio_list = &bio_list_on_stack;
+       bio_list_init(&bio_list_on_stack[0]);
+       current->bio_list = bio_list_on_stack;
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
                if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) {
+                       struct bio_list lower, same;
+
+                       /* Create a fresh bio_list for all subordinate requests */
+                       bio_list_on_stack[1] = bio_list_on_stack[0];
+                       bio_list_init(&bio_list_on_stack[0]);
 
                        ret = q->make_request_fn(q, bio);
 
                        blk_queue_exit(q);
-
-                       bio = bio_list_pop(current->bio_list);
+                       /* sort new bios into those for a lower level
+                        * and those for the same level
+                        */
+                       bio_list_init(&lower);
+                       bio_list_init(&same);
+                       while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
+                               if (q == bdev_get_queue(bio->bi_bdev))
+                                       bio_list_add(&same, bio);
+                               else
+                                       bio_list_add(&lower, bio);
+                       /* now assemble so we handle the lowest level first */
+                       bio_list_merge(&bio_list_on_stack[0], &lower);
+                       bio_list_merge(&bio_list_on_stack[0], &same);
+                       bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
                } else {
-                       struct bio *bio_next = bio_list_pop(current->bio_list);
-
                        bio_io_error(bio);
-                       bio = bio_next;
                }
+               bio = bio_list_pop(&bio_list_on_stack[0]);
        } while (bio);
        current->bio_list = NULL; /* deactivate */
 
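
The reworked loop above no longer processes bios strictly in submission order: bios generated by the current make_request_fn are split into those aimed at a lower-level (stacked) device and those re-queued for the same queue, and the lower-level ones are drained first, ahead of anything that was already pending. This ordering was introduced upstream to avoid deadlocks on stacked block devices. A stand-alone model of just the reordering step, using plain integers as queue ids instead of struct bio / struct request_queue (names and types here are illustrative, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int qid; struct node *next; };
    struct list { struct node *head, *tail; };

    static void list_init(struct list *l) { l->head = l->tail = NULL; }

    static void list_add(struct list *l, struct node *n)
    {
            n->next = NULL;
            if (l->tail)
                    l->tail->next = n;
            else
                    l->head = n;
            l->tail = n;
    }

    static struct node *list_pop(struct list *l)
    {
            struct node *n = l->head;
            if (n) {
                    l->head = n->next;
                    if (!l->head)
                            l->tail = NULL;
            }
            return n;
    }

    static void list_merge(struct list *dst, struct list *src)
    {
            struct node *n;
            while ((n = list_pop(src)) != NULL)
                    list_add(dst, n);
    }

    int main(void)
    {
            /* pretend make_request_fn for queue 1 emitted bios for queue 1
             * (same level) and queue 2 (a lower-level device), interleaved */
            int generated[] = { 1, 2, 1, 2 };
            int cur_q = 1;
            struct list on_stack[2], lower, same;
            struct node *n;

            list_init(&on_stack[0]);
            list_init(&on_stack[1]);          /* previously pending bios (empty here) */
            for (unsigned int i = 0; i < 4; i++) {
                    n = malloc(sizeof(*n));
                    n->qid = generated[i];
                    list_add(&on_stack[0], n);
            }

            /* sort into lower vs. same, then reassemble lowest level first,
             * mirroring the loop body in the hunk above */
            list_init(&lower);
            list_init(&same);
            while ((n = list_pop(&on_stack[0])) != NULL)
                    list_add(n->qid == cur_q ? &same : &lower, n);
            list_merge(&on_stack[0], &lower);
            list_merge(&on_stack[0], &same);
            list_merge(&on_stack[0], &on_stack[1]);

            while ((n = list_pop(&on_stack[0])) != NULL) {
                    printf("submit bio for queue %d\n", n->qid);
                    free(n);
            }
            return 0;
    }

Running the model prints the queue-2 bios before the queue-1 bios, i.e. the lower-level device is serviced before more work is queued up for the level above it.
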
@@ -3552,76 +3574,43 @@ int __init blk_dev_init(void)
  * TODO : If necessary, we can make the histograms per-cpu and aggregate
  * them when printing them out.
  */
-void
-blk_zero_latency_hist(struct io_latency_state *s)
-{
-       memset(s->latency_y_axis_read, 0,
-              sizeof(s->latency_y_axis_read));
-       memset(s->latency_y_axis_write, 0,
-              sizeof(s->latency_y_axis_write));
-       s->latency_reads_elems = 0;
-       s->latency_writes_elems = 0;
-}
-EXPORT_SYMBOL(blk_zero_latency_hist);
-
 ssize_t
-blk_latency_hist_show(struct io_latency_state *s, char *buf)
+blk_latency_hist_show(char* name, struct io_latency_state *s, char *buf,
+               int buf_size)
 {
        int i;
        int bytes_written = 0;
        u_int64_t num_elem, elem;
        int pct;
-
-       num_elem = s->latency_reads_elems;
-       if (num_elem > 0) {
-               bytes_written += scnprintf(buf + bytes_written,
-                          PAGE_SIZE - bytes_written,
-                          "IO svc_time Read Latency Histogram (n = %llu):\n",
-                          num_elem);
-               for (i = 0;
-                    i < ARRAY_SIZE(latency_x_axis_us);
-                    i++) {
-                       elem = s->latency_y_axis_read[i];
-                       pct = div64_u64(elem * 100, num_elem);
-                       bytes_written += scnprintf(buf + bytes_written,
-                                                  PAGE_SIZE - bytes_written,
-                                                  "\t< %5lluus%15llu%15d%%\n",
-                                                  latency_x_axis_us[i],
-                                                  elem, pct);
-               }
-               /* Last element in y-axis table is overflow */
-               elem = s->latency_y_axis_read[i];
-               pct = div64_u64(elem * 100, num_elem);
-               bytes_written += scnprintf(buf + bytes_written,
-                                          PAGE_SIZE - bytes_written,
-                                          "\t> %5dms%15llu%15d%%\n", 10,
-                                          elem, pct);
-       }
-       num_elem = s->latency_writes_elems;
-       if (num_elem > 0) {
-               bytes_written += scnprintf(buf + bytes_written,
-                          PAGE_SIZE - bytes_written,
-                          "IO svc_time Write Latency Histogram (n = %llu):\n",
-                          num_elem);
-               for (i = 0;
-                    i < ARRAY_SIZE(latency_x_axis_us);
-                    i++) {
-                       elem = s->latency_y_axis_write[i];
-                       pct = div64_u64(elem * 100, num_elem);
-                       bytes_written += scnprintf(buf + bytes_written,
-                                                  PAGE_SIZE - bytes_written,
-                                                  "\t< %5lluus%15llu%15d%%\n",
-                                                  latency_x_axis_us[i],
-                                                  elem, pct);
-               }
-               /* Last element in y-axis table is overflow */
-               elem = s->latency_y_axis_write[i];
-               pct = div64_u64(elem * 100, num_elem);
-               bytes_written += scnprintf(buf + bytes_written,
-                                          PAGE_SIZE - bytes_written,
-                                          "\t> %5dms%15llu%15d%%\n", 10,
-                                          elem, pct);
+       u_int64_t average;
+
+       num_elem = s->latency_elems;
+       if (num_elem > 0) {
+              average = div64_u64(s->latency_sum, s->latency_elems);
+              bytes_written += scnprintf(buf + bytes_written,
+                              buf_size - bytes_written,
+                              "IO svc_time %s Latency Histogram (n = %llu,"
+                              " average = %llu):\n", name, num_elem, average);
+              for (i = 0;
+                   i < ARRAY_SIZE(latency_x_axis_us);
+                   i++) {
+                      elem = s->latency_y_axis[i];
+                      pct = div64_u64(elem * 100, num_elem);
+                      bytes_written += scnprintf(buf + bytes_written,
+                                      PAGE_SIZE - bytes_written,
+                                      "\t< %6lluus%15llu%15d%%\n",
+                                      latency_x_axis_us[i],
+                                      elem, pct);
+              }
+              /* Last element in y-axis table is overflow */
+              elem = s->latency_y_axis[i];
+              pct = div64_u64(elem * 100, num_elem);
+              bytes_written += scnprintf(buf + bytes_written,
+                              PAGE_SIZE - bytes_written,
+                              "\t>=%6lluus%15llu%15d%%\n",
+                              latency_x_axis_us[i - 1], elem, pct);
        }
+
        return bytes_written;
 }
 EXPORT_SYMBOL(blk_latency_hist_show);
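
The rewritten helper collapses the separate read and write dumps into one routine that takes a direction name, an explicit buffer size and a single io_latency_state, and it now also reports the average latency. A user-space sketch of the same formatting logic, with snprintf standing in for scnprintf and arbitrarily chosen bucket boundaries (field names mirror the diff, but the surrounding types and values are illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const uint64_t latency_x_axis_us[] = { 100, 1000, 10000, 100000 };

    struct io_latency_state {
            uint64_t latency_y_axis[ARRAY_SIZE(latency_x_axis_us) + 1]; /* last = overflow */
            uint64_t latency_elems;
            uint64_t latency_sum;
    };

    static int latency_hist_show(const char *name, const struct io_latency_state *s,
                                 char *buf, int buf_size)
    {
            uint64_t n = s->latency_elems;
            int written = 0;
            unsigned int i;

            if (!n)
                    return 0;
            written += snprintf(buf + written, buf_size - written,
                                "IO svc_time %s Latency Histogram (n = %" PRIu64
                                ", average = %" PRIu64 "):\n",
                                name, n, s->latency_sum / n);
            for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++)
                    written += snprintf(buf + written, buf_size - written,
                                        "\t< %6" PRIu64 "us%15" PRIu64 "%15" PRIu64 "%%\n",
                                        latency_x_axis_us[i], s->latency_y_axis[i],
                                        s->latency_y_axis[i] * 100 / n);
            /* the last y-axis bucket counts samples past the largest boundary */
            written += snprintf(buf + written, buf_size - written,
                                "\t>=%6" PRIu64 "us%15" PRIu64 "%15" PRIu64 "%%\n",
                                latency_x_axis_us[i - 1], s->latency_y_axis[i],
                                s->latency_y_axis[i] * 100 / n);
            return written;
    }

    int main(void)
    {
            char buf[512];
            struct io_latency_state rd = {
                    .latency_y_axis = { 40, 30, 20, 8, 2 },
                    .latency_elems  = 100,
                    .latency_sum    = 250000,
            };

            latency_hist_show("Read", &rd, buf, sizeof(buf));
            fputs(buf, stdout);
            return 0;
    }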