OSDN Git Service

Revert "usb: dwc3: turn off VBUS when leaving host mode"
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / block / test-iosched.c
1 /* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * The test scheduler allows to test the block device by dispatching
13  * specific requests according to the test case and declare PASS/FAIL
14  * according to the requests completion error code.
15  * Each test is exposed via debugfs and can be triggered by writing to
16  * the debugfs file.
17  *
18  */
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt"\n"
20
21 /* elevator test iosched */
22 #include <linux/blkdev.h>
23 #include <linux/elevator.h>
24 #include <linux/bio.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27 #include <linux/init.h>
28 #include <linux/debugfs.h>
29 #include <linux/test-iosched.h>
30 #include <linux/delay.h>
31 #include "blk.h"
32
33 #define MODULE_NAME "test-iosched"
34 #define WR_RD_START_REQ_ID 1234
35 #define UNIQUE_START_REQ_ID 5678
36 #define TIMEOUT_TIMER_MS 40000
37 #define TEST_MAX_TESTCASE_ROUNDS 15
38
39
40 static DEFINE_MUTEX(blk_dev_test_list_lock);
41 static LIST_HEAD(blk_dev_test_list);
42
43
44 /**
45  * test_iosched_mark_test_completion() - Wakeup the debugfs
46  * thread, waiting on the test completion
47  */
48 void test_iosched_mark_test_completion(struct test_iosched *tios)
49 {
50         if (!tios)
51                 return;
52
53         pr_info("%s: mark test is completed, test_count=%d, ", __func__,
54                 tios->test_count);
55         pr_info("%s: urgent_count=%d, reinsert_count=%d,", __func__,
56                 tios->urgent_count, tios->reinsert_count);
57
58         tios->test_state = TEST_COMPLETED;
59         wake_up(&tios->wait_q);
60 }
61 EXPORT_SYMBOL(test_iosched_mark_test_completion);
62
63 /**
64  *  check_test_completion() - Check if all the queued test
65  *  requests were completed
66  */
67 void check_test_completion(struct test_iosched *tios)
68 {
69         struct test_request *test_rq;
70
71         if (!tios)
72                 goto exit;
73
74         if (tios->test_info.check_test_completion_fn &&
75                 !tios->test_info.check_test_completion_fn(tios))
76                 goto exit;
77
78         list_for_each_entry(test_rq, &tios->dispatched_queue, queuelist)
79                 if (!test_rq->req_completed)
80                         goto exit;
81
82         if (!list_empty(&tios->test_queue)
83                         || !list_empty(&tios->reinsert_queue)
84                         || !list_empty(&tios->urgent_queue)) {
85                 pr_info("%s: Test still not completed,", __func__);
86                 pr_info("%s: test_count=%d, reinsert_count=%d", __func__,
87                         tios->test_count, tios->reinsert_count);
88                 pr_info("%s: dispatched_count=%d, urgent_count=%d", __func__,
89                         tios->dispatched_count,
90                         tios->urgent_count);
91                 goto exit;
92         }
93
94         tios->test_info.test_duration = ktime_sub(ktime_get(),
95                 tios->test_info.test_duration);
96
97         test_iosched_mark_test_completion(tios);
98
99 exit:
100         return;
101 }
102 EXPORT_SYMBOL(check_test_completion);
103
/*
 * A callback to be called per bio completion.
 * Drops one reference on the bio, freeing it once the last
 * reference is gone.
 */
static void end_test_bio(struct bio *bio)
{
	bio_put(bio);
}
112
113 void test_iosched_free_test_req_data_buffer(struct test_request *test_rq)
114 {
115         int i;
116
117         if (!test_rq)
118                 return;
119
120         for (i = 0; i < BLK_MAX_SEGMENTS; i++)
121                 if (test_rq->bios_buffer[i]) {
122                         free_page((unsigned long)test_rq->bios_buffer[i]);
123                         test_rq->bios_buffer[i] = NULL;
124                 }
125 }
126 EXPORT_SYMBOL(test_iosched_free_test_req_data_buffer);
127
128 /*
129  * A callback to be called per request completion.
130  * the request memory is not freed here, will be freed later after the test
131  * results checking.
132  */
133 static void end_test_req(struct request *rq, int err)
134 {
135         struct test_request *test_rq;
136         struct test_iosched *tios = rq->q->elevator->elevator_data;
137         test_rq = (struct test_request *)rq->elv.priv[0];
138         BUG_ON(!test_rq);
139
140         pr_debug("%s: request %d completed, err=%d",
141                __func__, test_rq->req_id, err);
142
143         test_rq->req_completed = true;
144         test_rq->req_result = err;
145
146         check_test_completion(tios);
147 }
148
149 /**
150  * test_iosched_add_unique_test_req() - Create and queue a non
151  * read/write request (such as FLUSH/DISCRAD/SANITIZE).
152  * @is_err_expcted:     A flag to indicate if this request
153  *                      should succeed or not
154  * @req_unique:         The type of request to add
155  * @start_sec:          start address of the first bio
156  * @nr_sects:           number of sectors in the request
157  * @end_req_io:         specific completion callback. When not
158  *                      set, the defaulcallback will be used
159  */
160 int test_iosched_add_unique_test_req(struct test_iosched *tios,
161         int is_err_expcted, enum req_unique_type req_unique,
162         int start_sec, int nr_sects, rq_end_io_fn *end_req_io)
163 {
164         struct bio *bio;
165         struct request *rq;
166         int rw_flags;
167         struct test_request *test_rq;
168         unsigned long flags;
169
170         if (!tios)
171                 return -ENODEV;
172
173         bio = bio_alloc(GFP_KERNEL, 0);
174         if (!bio) {
175                 pr_err("%s: Failed to allocate a bio", __func__);
176                 return -ENODEV;
177         }
178         bio_get(bio);
179         bio->bi_end_io = end_test_bio;
180
181         switch (req_unique) {
182         case REQ_UNIQUE_FLUSH:
183                 bio->bi_rw = WRITE_FLUSH;
184                 break;
185         case REQ_UNIQUE_DISCARD:
186                 bio->bi_rw = REQ_WRITE | REQ_DISCARD;
187                 bio->bi_iter.bi_size = nr_sects << 9;
188                 bio->bi_iter.bi_sector = start_sec;
189                 break;
190         default:
191                 pr_err("%s: Invalid request type %d", __func__,
192                             req_unique);
193                 bio_put(bio);
194                 return -ENODEV;
195         }
196
197         rw_flags = bio_data_dir(bio);
198         if (bio->bi_rw & REQ_SYNC)
199                 rw_flags |= REQ_SYNC;
200
201         rq = blk_get_request(tios->req_q, rw_flags, GFP_KERNEL);
202         if (!rq) {
203                 pr_err("%s: Failed to allocate a request", __func__);
204                 bio_put(bio);
205                 return -ENODEV;
206         }
207
208         init_request_from_bio(rq, bio);
209         if (end_req_io)
210                 rq->end_io = end_req_io;
211         else
212                 rq->end_io = end_test_req;
213
214         test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
215         if (!test_rq) {
216                 pr_err("%s: Failed to allocate a test request", __func__);
217                 bio_put(bio);
218                 blk_put_request(rq);
219                 return -ENODEV;
220         }
221         test_rq->req_completed = false;
222         test_rq->req_result = -EINVAL;
223         test_rq->rq = rq;
224         test_rq->is_err_expected = is_err_expcted;
225         rq->elv.priv[0] = (void *)test_rq;
226         test_rq->req_id = tios->unique_next_req_id++;
227
228         pr_debug(
229                 "%s: added request %d to the test requests list, type = %d",
230                 __func__, test_rq->req_id, req_unique);
231
232         spin_lock_irqsave(tios->req_q->queue_lock, flags);
233         list_add_tail(&test_rq->queuelist, &tios->test_queue);
234         tios->test_count++;
235         spin_unlock_irqrestore(tios->req_q->queue_lock, flags);
236
237         return 0;
238 }
239 EXPORT_SYMBOL(test_iosched_add_unique_test_req);
240
241 /*
242  * Get a pattern to be filled in the request data buffer.
243  * If the pattern used is (-1) the buffer will be filled with sequential
244  * numbers
245  */
246 static void fill_buf_with_pattern(int *buf, int num_bytes, int pattern)
247 {
248         int i = 0;
249         int num_of_dwords = num_bytes/sizeof(int);
250
251         if (pattern == TEST_NO_PATTERN)
252                 return;
253
254         /* num_bytes should be aligned to sizeof(int) */
255         BUG_ON((num_bytes % sizeof(int)) != 0);
256
257         if (pattern == TEST_PATTERN_SEQUENTIAL) {
258                 for (i = 0; i < num_of_dwords; i++)
259                         buf[i] = i;
260         } else {
261                 for (i = 0; i < num_of_dwords; i++)
262                         buf[i] = pattern;
263         }
264 }
265
266 /**
267  * test_iosched_create_test_req() - Create a read/write request.
268  * @is_err_expcted:     A flag to indicate if this request
269  *                      should succeed or not
270  * @direction:          READ/WRITE
271  * @start_sec:          start address of the first bio
272  * @num_bios:           number of BIOs to be allocated for the
273  *                      request
274  * @pattern:            A pattern, to be written into the write
275  *                      requests data buffer. In case of READ
276  *                      request, the given pattern is kept as
277  *                      the expected pattern. The expected
278  *                      pattern will be compared in the test
279  *                      check result function. If no comparisson
280  *                      is required, set pattern to
281  *                      TEST_NO_PATTERN.
282  * @end_req_io:         specific completion callback. When not
283  *                      set,the default callback will be used
284  *
285  * This function allocates the test request and the block
286  * request and calls blk_rq_map_kern which allocates the
287  * required BIO. The allocated test request and the block
288  * request memory is freed at the end of the test and the
289  * allocated BIO memory is freed by end_test_bio.
290  */
291 struct test_request *test_iosched_create_test_req(
292         struct test_iosched *tios, int is_err_expcted,
293         int direction, int start_sec, int num_bios, int pattern,
294         rq_end_io_fn *end_req_io)
295 {
296         struct request *rq;
297         struct test_request *test_rq;
298         struct bio *bio = NULL;
299         int i;
300         int ret;
301
302         if (!tios)
303                 return NULL;
304
305         rq = blk_get_request(tios->req_q, direction, GFP_KERNEL);
306         if (!rq) {
307                 pr_err("%s: Failed to allocate a request", __func__);
308                 return NULL;
309         }
310
311         test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
312         if (!test_rq) {
313                 pr_err("%s: Failed to allocate test request", __func__);
314                 goto err;
315         }
316
317         test_rq->buf_size = TEST_BIO_SIZE * num_bios;
318         test_rq->wr_rd_data_pattern = pattern;
319
320         for (i = 0; i < num_bios; i++) {
321                 test_rq->bios_buffer[i] = (void *)get_zeroed_page(GFP_KERNEL);
322                 if (!test_rq->bios_buffer[i]) {
323                         pr_err("%s: failed to kmap page for bio #%d/%d\n",
324                                 __func__, i, num_bios);
325                         goto free_bios;
326                 }
327                 ret = blk_rq_map_kern(tios->req_q, rq, test_rq->bios_buffer[i],
328                         TEST_BIO_SIZE, GFP_KERNEL);
329                 if (ret) {
330                         pr_err("%s: blk_rq_map_kern returned error %d",
331                                 __func__, ret);
332                         goto free_bios;
333                 }
334                 if (direction == WRITE)
335                         fill_buf_with_pattern(test_rq->bios_buffer[i],
336                                 TEST_BIO_SIZE, pattern);
337         }
338
339         if (end_req_io)
340                 rq->end_io = end_req_io;
341         else
342                 rq->end_io = end_test_req;
343         rq->__sector = start_sec;
344         rq->cmd_type |= REQ_TYPE_FS;
345         rq->cmd_flags |= REQ_SORTED;
346         rq->cmd_flags &= ~REQ_IO_STAT;
347
348         if (rq->bio) {
349                 rq->bio->bi_iter.bi_sector = start_sec;
350                 rq->bio->bi_end_io = end_test_bio;
351                 bio = rq->bio;
352                 while ((bio = bio->bi_next) != NULL)
353                         bio->bi_end_io = end_test_bio;
354         }
355
356         tios->num_of_write_bios += num_bios;
357         test_rq->req_id = tios->wr_rd_next_req_id++;
358
359         test_rq->req_completed = false;
360         test_rq->req_result = -EINVAL;
361         test_rq->rq = rq;
362         if (tios->test_info.get_rq_disk_fn)
363                 test_rq->rq->rq_disk = tios->test_info.get_rq_disk_fn(tios);
364         test_rq->is_err_expected = is_err_expcted;
365         rq->elv.priv[0] = (void *)test_rq;
366         return test_rq;
367
368 free_bios:
369         test_iosched_free_test_req_data_buffer(test_rq);
370         kfree(test_rq);
371 err:
372         blk_put_request(rq);
373         return NULL;
374 }
375 EXPORT_SYMBOL(test_iosched_create_test_req);
376
377
378 /**
379  * test_iosched_add_wr_rd_test_req() - Create and queue a
380  * read/write request.
381  * @is_err_expcted:     A flag to indicate if this request
382  *                      should succeed or not
383  * @direction:          READ/WRITE
384  * @start_sec:          start address of the first bio
385  * @num_bios:           number of BIOs to be allocated for the
386  *                      request
387  * @pattern:            A pattern, to be written into the write
388  *                      requests data buffer. In case of READ
389  *                      request, the given pattern is kept as
390  *                      the expected pattern. The expected
391  *                      pattern will be compared in the test
392  *                      check result function. If no comparisson
393  *                      is required, set pattern to
394  *                      TEST_NO_PATTERN.
395  * @end_req_io:         specific completion callback. When not
396  *                      set,the default callback will be used
397  *
398  * This function allocates the test request and the block
399  * request and calls blk_rq_map_kern which allocates the
400  * required BIO. Upon success the new request is added to the
401  * test_queue. The allocated test request and the block request
402  * memory is freed at the end of the test and the allocated BIO
403  * memory is freed by end_test_bio.
404  */
405 int test_iosched_add_wr_rd_test_req(struct test_iosched *tios,
406         int is_err_expcted, int direction, int start_sec, int num_bios,
407         int pattern, rq_end_io_fn *end_req_io)
408 {
409         struct test_request *test_rq = NULL;
410         unsigned long flags;
411
412         test_rq = test_iosched_create_test_req(tios, is_err_expcted, direction,
413                 start_sec, num_bios, pattern, end_req_io);
414         if (test_rq) {
415                 spin_lock_irqsave(tios->req_q->queue_lock, flags);
416                 list_add_tail(&test_rq->queuelist, &tios->test_queue);
417                 tios->test_count++;
418                 spin_unlock_irqrestore(tios->req_q->queue_lock, flags);
419                 return 0;
420         }
421         return -ENODEV;
422 }
423 EXPORT_SYMBOL(test_iosched_add_wr_rd_test_req);
424
425 /* Converts the testcase number into a string */
426 static char *get_test_case_str(struct test_iosched *tios)
427 {
428         if (tios->test_info.get_test_case_str_fn)
429                 return tios->test_info.get_test_case_str_fn(
430                         tios->test_info.testcase);
431
432         return "Unknown testcase";
433 }
434
435 /*
436  * Verify that the test request data buffer includes the expected
437  * pattern
438  */
439 int compare_buffer_to_pattern(struct test_request *test_rq)
440 {
441         int i;
442         int j;
443         unsigned int *buf;
444
445         /* num_bytes should be aligned to sizeof(int) */
446         BUG_ON((test_rq->buf_size % sizeof(int)) != 0);
447         BUG_ON(test_rq->bios_buffer == NULL);
448
449         if (test_rq->wr_rd_data_pattern == TEST_NO_PATTERN)
450                 return 0;
451
452         for (i = 0; i < test_rq->buf_size / TEST_BIO_SIZE; i++) {
453                 buf = test_rq->bios_buffer[i];
454                 for (j = 0; j < TEST_BIO_SIZE / sizeof(int); j++)
455                         if ((test_rq->wr_rd_data_pattern ==
456                                 TEST_PATTERN_SEQUENTIAL && buf[j] != j) ||
457                                 (test_rq->wr_rd_data_pattern !=
458                                 TEST_PATTERN_SEQUENTIAL &&
459                                 buf[j] != test_rq->wr_rd_data_pattern)) {
460                                 pr_err("%s: wrong pattern 0x%x in index %d",
461                                         __func__, buf[j], j);
462                                 return -EINVAL;
463                         }
464         }
465
466         return 0;
467 }
468 EXPORT_SYMBOL(compare_buffer_to_pattern);
469
470 /*
471  * Determine if the test passed or failed.
472  * The function checks the test request completion value and calls
473  * check_testcase_result for result checking that are specific
474  * to a test case.
475  */
476 static int check_test_result(struct test_iosched *tios)
477 {
478         struct test_request *trq;
479         int res = 0;
480         static int run;
481
482         list_for_each_entry(trq, &tios->dispatched_queue, queuelist) {
483                 if (!trq->rq) {
484                         pr_info("%s: req_id %d is contains empty req",
485                                         __func__, trq->req_id);
486                         continue;
487                 }
488                 if (!trq->req_completed) {
489                         pr_err("%s: rq %d not completed", __func__,
490                                     trq->req_id);
491                         res = -EINVAL;
492                         goto err;
493                 }
494
495                 if ((trq->req_result < 0) && !trq->is_err_expected) {
496                         pr_err(
497                                 "%s: rq %d completed with err, not as expected",
498                                 __func__, trq->req_id);
499                         res = -EINVAL;
500                         goto err;
501                 }
502                 if ((trq->req_result == 0) && trq->is_err_expected) {
503                         pr_err("%s: rq %d succeeded, not as expected",
504                                     __func__, trq->req_id);
505                         res = -EINVAL;
506                         goto err;
507                 }
508                 if (rq_data_dir(trq->rq) == READ) {
509                         res = compare_buffer_to_pattern(trq);
510                         if (res) {
511                                 pr_err("%s: read pattern not as expected",
512                                             __func__);
513                                 res = -EINVAL;
514                                 goto err;
515                         }
516                 }
517         }
518
519         if (tios->test_info.check_test_result_fn) {
520                 res = tios->test_info.check_test_result_fn(
521                         tios);
522                 if (res)
523                         goto err;
524         }
525
526         pr_info("%s: %s, run# %03d, PASSED",
527                 __func__, get_test_case_str(tios), ++run);
528         tios->test_result = TEST_PASSED;
529
530         return 0;
531 err:
532         pr_err("%s: %s, run# %03d, FAILED",
533                     __func__, get_test_case_str(tios), ++run);
534         tios->test_result = TEST_FAILED;
535         return res;
536 }
537
538 /* Create and queue the required requests according to the test case */
539 static int prepare_test(struct test_iosched *tios)
540 {
541         int ret = 0;
542
543         if (tios->test_info.prepare_test_fn) {
544                 ret = tios->test_info.prepare_test_fn(tios);
545                 return ret;
546         }
547
548         return 0;
549 }
550
551 /* Run the test */
552 static int run_test(struct test_iosched *tios)
553 {
554         int ret = 0;
555
556         if (tios->test_info.run_test_fn) {
557                 ret = tios->test_info.run_test_fn(tios);
558                 return ret;
559         }
560
561         blk_run_queue(tios->req_q);
562
563         return 0;
564 }
565
566 /*
567  * free_test_queue() - Free all allocated test requests in the given test_queue:
568  * free their requests and BIOs buffer
569  * @test_queue          the test queue to be freed
570  */
571 static void free_test_queue(struct list_head *test_queue)
572 {
573         struct test_request *test_rq;
574         struct bio *bio;
575
576         while (!list_empty(test_queue)) {
577                 test_rq = list_entry(test_queue->next, struct test_request,
578                                 queuelist);
579
580                 list_del_init(&test_rq->queuelist);
581                 /*
582                  * If the request was not completed we need to free its BIOs
583                  * and remove it from the packed list
584                  */
585                 if (!test_rq->req_completed) {
586                         pr_info(
587                                 "%s: Freeing memory of an uncompleted request",
588                                         __func__);
589                         list_del_init(&test_rq->rq->queuelist);
590                         while ((bio = test_rq->rq->bio) != NULL) {
591                                 test_rq->rq->bio = bio->bi_next;
592                                 bio_put(bio);
593                         }
594                 }
595                 blk_put_request(test_rq->rq);
596                 test_iosched_free_test_req_data_buffer(test_rq);
597                 kfree(test_rq);
598         }
599 }
600
/*
 * free_test_requests() - Free all allocated test requests in
 * all test queues in the given instance.
 * @tios	The test_iosched instance whose test requests will be
 *		freed; NULL is silently ignored.
 */
static void free_test_requests(struct test_iosched *tios)
{
	if (!tios)
		return;

	/*
	 * Each per-queue counter guards its queue; it is reset to zero
	 * once the corresponding queue has been drained.
	 */
	if (tios->urgent_count) {
		free_test_queue(&tios->urgent_queue);
		tios->urgent_count = 0;
	}
	if (tios->test_count) {
		free_test_queue(&tios->test_queue);
		tios->test_count = 0;
	}
	if (tios->dispatched_count) {
		free_test_queue(&tios->dispatched_queue);
		tios->dispatched_count = 0;
	}
	if (tios->reinsert_count) {
		free_test_queue(&tios->reinsert_queue);
		tios->reinsert_count = 0;
	}
}
629
630 /*
631  * post_test() - Do post test operations. Free the allocated
632  * test requests, their requests and BIOs buffer.
633  * @td          The test_data struct for the test that has
634  *              ended.
635  */
636 static int post_test(struct test_iosched *tios)
637 {
638         int ret = 0;
639
640         if (tios->test_info.post_test_fn)
641                 ret = tios->test_info.post_test_fn(tios);
642
643         tios->test_info.testcase = 0;
644         tios->test_state = TEST_IDLE;
645
646         free_test_requests(tios);
647
648         return ret;
649 }
650
651 static unsigned int get_timeout_msec(struct test_iosched *tios)
652 {
653         if (tios->test_info.timeout_msec)
654                 return tios->test_info.timeout_msec;
655         return TIMEOUT_TIMER_MS;
656 }
657
/**
 * test_iosched_start_test() - Prepares and runs the test.
 * The members test_duration and test_byte_count of the input
 * parameter t_info are modified by this function.
 * @tios:	the test-iosched instance to run on
 * @t_info:	the current test testcase and callbacks
 *		functions
 *
 * The function also checks the test result upon test completion.
 * A round may be re-run (up to TEST_MAX_TESTCASE_ROUNDS times) when
 * ignore_round was set, i.e. FS write requests interfered with it.
 *
 * Return: 0 when the test passed; -ENODEV/-EINVAL/-EBUSY on setup
 * failures; otherwise the failing step's error code.
 */
int test_iosched_start_test(struct test_iosched *tios,
	struct test_info *t_info)
{
	int ret = 0;
	unsigned long timeout;
	int counter = 0;
	char *test_name = NULL;

	if (!tios)
		return -ENODEV;

	if (!t_info) {
		tios->test_result = TEST_FAILED;
		return -EINVAL;
	}

	timeout = msecs_to_jiffies(get_timeout_msec(tios));

	do {
		if (tios->ignore_round)
			/*
			 * We ignored the last run due to FS write requests.
			 * Sleep to allow those requests to be issued
			 */
			msleep(2000);

		/* The lock protects the state/start_sector checks below. */
		spin_lock(&tios->lock);

		if (tios->test_state != TEST_IDLE) {
			pr_info(
				"%s: Another test is running, try again later",
				__func__);
			spin_unlock(&tios->lock);
			return -EBUSY;
		}

		if (tios->start_sector == 0) {
			pr_err("%s: Invalid start sector", __func__);
			tios->test_result = TEST_FAILED;
			spin_unlock(&tios->lock);
			return -EINVAL;
		}

		/* Snapshot the caller's test description into the instance. */
		memcpy(&tios->test_info, t_info, sizeof(*t_info));

		tios->test_result = TEST_NO_RESULT;
		tios->num_of_write_bios = 0;

		tios->unique_next_req_id = UNIQUE_START_REQ_ID;
		tios->wr_rd_next_req_id = WR_RD_START_REQ_ID;

		tios->ignore_round = false;
		tios->fs_wr_reqs_during_test = false;

		tios->test_state = TEST_RUNNING;

		spin_unlock(&tios->lock);
		/*
		 * Give an already dispatched request from
		 * FS a chance to complete
		 */
		msleep(2000);

		if (tios->test_info.get_test_case_str_fn)
			test_name =
				tios->test_info.get_test_case_str_fn(
					tios->test_info.testcase);
		else
			test_name = "Unknown testcase";
		pr_info("%s: Starting test %s", __func__, test_name);

		ret = prepare_test(tios);
		if (ret) {
			pr_err("%s: failed to prepare the test",
				    __func__);
			goto error;
		}

		/* test_duration holds the start time until completion. */
		tios->test_info.test_duration = ktime_get();
		ret = run_test(tios);
		if (ret) {
			pr_err("%s: failed to run the test", __func__);
			goto error;
		}

		pr_info("%s: Waiting for the test completion", __func__);

		/* 0 = timed out, <0 = interrupted, >0 = completed in time. */
		ret = wait_event_interruptible_timeout(tios->wait_q,
			(tios->test_state == TEST_COMPLETED), timeout);
		if (ret <= 0) {
			tios->test_state = TEST_COMPLETED;
			if (!ret)
				pr_info("%s: Test timeout\n", __func__);
			else
				pr_err("%s: Test error=%d\n", __func__, ret);
			goto error;
		}

		/* Copy results (duration, byte count) back to the caller. */
		memcpy(t_info, &tios->test_info, sizeof(*t_info));

		ret = check_test_result(tios);
		if (ret) {
			pr_err("%s: check_test_result failed", __func__);
			goto error;
		}

		ret = post_test(tios);
		if (ret) {
			pr_err("%s: post_test failed", __func__);
			goto error;
		}

		/*
		 * Wakeup the queue thread to fetch FS requests that might got
		 * postponed due to the test
		 */
		blk_run_queue(tios->req_q);

		if (tios->ignore_round)
			pr_info(
			"%s: Round canceled (Got wr reqs in the middle)",
			__func__);

		if (++counter == TEST_MAX_TESTCASE_ROUNDS) {
			pr_info("%s: Too many rounds, did not succeed...",
			     __func__);
			tios->test_result = TEST_FAILED;
		}

	} while ((tios->ignore_round) &&
		(counter < TEST_MAX_TESTCASE_ROUNDS));

	if (tios->test_result == TEST_PASSED)
		return 0;
	else
		return -EINVAL;

error:
	post_test(tios);
	tios->test_result = TEST_FAILED;
	return ret;
}
EXPORT_SYMBOL(test_iosched_start_test);
810
811 /**
812  * test_iosched_register() - register a block device test
813  * utility.
814  * @bdt:        the block device test type to register
815  */
816 void test_iosched_register(struct blk_dev_test_type *bdt)
817 {
818         if (!bdt)
819                 return;
820
821         mutex_lock(&blk_dev_test_list_lock);
822         list_add_tail(&bdt->list, &blk_dev_test_list);
823         mutex_unlock(&blk_dev_test_list_lock);
824
825 }
826 EXPORT_SYMBOL(test_iosched_register);
827
828 /**
829  * test_iosched_unregister() - unregister a block device test
830  * utility.
831  * @bdt:        the block device test type to unregister
832  */
833 void test_iosched_unregister(struct blk_dev_test_type *bdt)
834 {
835         if (!bdt)
836                 return;
837
838         mutex_lock(&blk_dev_test_list_lock);
839         list_del_init(&bdt->list);
840         mutex_unlock(&blk_dev_test_list_lock);
841 }
842 EXPORT_SYMBOL(test_iosched_unregister);
843
844 /**
845  * test_iosched_set_test_result() - Set the test
846  * result(PASS/FAIL)
847  * @test_result:        the test result
848  */
849 void test_iosched_set_test_result(struct test_iosched *tios,
850         int test_result)
851 {
852         if (!tios)
853                 return;
854
855         tios->test_result = test_result;
856 }
857 EXPORT_SYMBOL(test_iosched_set_test_result);
858
859
860 /**
861  * test_iosched_set_ignore_round() - Set the ignore_round flag
862  * @ignore_round:       A flag to indicate if this test round
863  * should be ignored and re-run
864  */
865 void test_iosched_set_ignore_round(struct test_iosched *tios,
866         bool ignore_round)
867 {
868         if (!tios)
869                 return;
870
871         tios->ignore_round = ignore_round;
872 }
873 EXPORT_SYMBOL(test_iosched_set_ignore_round);
874
875 static int test_debugfs_init(struct test_iosched *tios)
876 {
877         char name[2*BDEVNAME_SIZE];
878
879
880         snprintf(name, 2*BDEVNAME_SIZE - 1, "%s-%s", "test-iosched",
881                 tios->req_q->kobj.parent->name);
882         pr_debug("%s: creating test-iosched instance %s\n", __func__, name);
883
884         tios->debug.debug_root = debugfs_create_dir(name, NULL);
885         if (!tios->debug.debug_root)
886                 return -ENOENT;
887
888         tios->debug.debug_tests_root = debugfs_create_dir("tests",
889                 tios->debug.debug_root);
890         if (!tios->debug.debug_tests_root)
891                 goto err;
892
893         tios->debug.debug_utils_root = debugfs_create_dir("utils",
894                 tios->debug.debug_root);
895         if (!tios->debug.debug_utils_root)
896                 goto err;
897
898         tios->debug.debug_test_result = debugfs_create_u32(
899                                         "test_result",
900                                         S_IRUGO | S_IWUGO,
901                                         tios->debug.debug_utils_root,
902                                         &tios->test_result);
903         if (!tios->debug.debug_test_result)
904                 goto err;
905
906         tios->debug.start_sector = debugfs_create_u32(
907                                         "start_sector",
908                                         S_IRUGO | S_IWUGO,
909                                         tios->debug.debug_utils_root,
910                                         &tios->start_sector);
911         if (!tios->debug.start_sector)
912                 goto err;
913
914         tios->debug.sector_range = debugfs_create_u32(
915                                                 "sector_range",
916                                                 S_IRUGO | S_IWUGO,
917                                                 tios->debug.debug_utils_root,
918                                                 &tios->sector_range);
919         if (!tios->debug.sector_range)
920                 goto err;
921
922         return 0;
923
924 err:
925         debugfs_remove_recursive(tios->debug.debug_root);
926         return -ENOENT;
927 }
928
/* Tear down the whole debugfs tree created by test_debugfs_init(). */
static void test_debugfs_cleanup(struct test_iosched *tios)
{
	debugfs_remove_recursive(tios->debug.debug_root);
}
933
934 static void print_req(struct request *req)
935 {
936         struct bio *bio;
937         struct test_request *test_rq;
938
939         if (!req)
940                 return;
941
942         test_rq = (struct test_request *)req->elv.priv[0];
943
944         if (test_rq) {
945                 pr_debug("%s: Dispatch request %d: __sector=0x%lx",
946                        __func__, test_rq->req_id, (unsigned long)req->__sector);
947                 pr_debug("%s: nr_phys_segments=%d, num_of_sectors=%d",
948                        __func__, req->nr_phys_segments, blk_rq_sectors(req));
949                 bio = req->bio;
950                 pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
951                               __func__, bio->bi_iter.bi_size,
952                               (unsigned long)bio->bi_iter.bi_sector);
953                 while ((bio = bio->bi_next) != NULL) {
954                         pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
955                                       __func__, bio->bi_iter.bi_size,
956                                       (unsigned long)bio->bi_iter.bi_sector);
957                 }
958         }
959 }
960
/*
 * Elevator merge hook: no real merging is performed for the test
 * scheduler — simply drop @next from its queue.
 */
static void test_merged_requests(struct request_queue *q,
			 struct request *rq, struct request *next)
{
	list_del_init(&next->queuelist);
}
966 /*
967  * test_dispatch_from(): Dispatch request from @queue to the @dispatched_queue.
968  * Also update the dispatched_count counter.
969  */
970 static int test_dispatch_from(struct request_queue *q,
971                 struct list_head *queue, unsigned int *count)
972 {
973         struct test_request *test_rq;
974         struct request *rq;
975         int ret = 0;
976         struct test_iosched *tios = q->elevator->elevator_data;
977         unsigned long flags;
978
979         if (!tios)
980                 goto err;
981
982         spin_lock_irqsave(&tios->lock, flags);
983         if (!list_empty(queue)) {
984                 test_rq = list_entry(queue->next, struct test_request,
985                                 queuelist);
986                 rq = test_rq->rq;
987                 if (!rq) {
988                         pr_err("%s: null request,return", __func__);
989                         spin_unlock_irqrestore(&tios->lock, flags);
990                         goto err;
991                 }
992                 list_move_tail(&test_rq->queuelist,
993                         &tios->dispatched_queue);
994                 tios->dispatched_count++;
995                 (*count)--;
996                 spin_unlock_irqrestore(&tios->lock, flags);
997
998                 print_req(rq);
999                 elv_dispatch_sort(q, rq);
1000                 tios->test_info.test_byte_count += test_rq->buf_size;
1001                 ret = 1;
1002                 goto err;
1003         }
1004         spin_unlock_irqrestore(&tios->lock, flags);
1005
1006 err:
1007         return ret;
1008 }
1009
1010 /*
1011  * Dispatch a test request in case there is a running test Otherwise, dispatch
1012  * a request that was queued by the FS to keep the card functional.
1013  */
1014 static int test_dispatch_requests(struct request_queue *q, int force)
1015 {
1016         struct test_iosched *tios = q->elevator->elevator_data;
1017         struct request *rq = NULL;
1018         int ret = 0;
1019
1020         switch (tios->test_state) {
1021         case TEST_IDLE:
1022                 if (!list_empty(&tios->queue)) {
1023                         rq = list_entry(tios->queue.next,
1024                                 struct request, queuelist);
1025                         list_del_init(&rq->queuelist);
1026                         elv_dispatch_sort(q, rq);
1027                         ret = 1;
1028                         goto exit;
1029                 }
1030                 break;
1031         case TEST_RUNNING:
1032                 if (test_dispatch_from(q, &tios->urgent_queue,
1033                                        &tios->urgent_count)) {
1034                         pr_debug("%s: Dispatched from urgent_count=%d",
1035                                         __func__, tios->urgent_count);
1036                         ret = 1;
1037                         goto exit;
1038                 }
1039                 if (test_dispatch_from(q, &tios->reinsert_queue,
1040                                        &tios->reinsert_count)) {
1041                         pr_debug("%s: Dispatched from reinsert_count=%d",
1042                                         __func__, tios->reinsert_count);
1043                         ret = 1;
1044                         goto exit;
1045                 }
1046                 if (test_dispatch_from(q, &tios->test_queue,
1047                         &tios->test_count)) {
1048                         pr_debug("%s: Dispatched from test_count=%d",
1049                                         __func__, tios->test_count);
1050                         ret = 1;
1051                         goto exit;
1052                 }
1053                 break;
1054         case TEST_COMPLETED:
1055         default:
1056                 break;
1057         }
1058
1059 exit:
1060         return ret;
1061 }
1062
1063 static void test_add_request(struct request_queue *q, struct request *rq)
1064 {
1065         struct test_iosched *tios = q->elevator->elevator_data;
1066
1067         list_add_tail(&rq->queuelist, &tios->queue);
1068
1069         /*
1070          * The write requests can be followed by a FLUSH request that might
1071          * cause unexpected results of the test.
1072          */
1073         if (rq_data_dir(rq) == WRITE &&
1074                 tios->test_state == TEST_RUNNING) {
1075                 pr_debug("%s: got WRITE req in the middle of the test",
1076                         __func__);
1077                 tios->fs_wr_reqs_during_test = true;
1078         }
1079 }
1080
1081 static struct request *
1082 test_former_request(struct request_queue *q, struct request *rq)
1083 {
1084         struct test_iosched *tios = q->elevator->elevator_data;
1085
1086         if (rq->queuelist.prev == &tios->queue)
1087                 return NULL;
1088         return list_entry(rq->queuelist.prev, struct request, queuelist);
1089 }
1090
1091 static struct request *
1092 test_latter_request(struct request_queue *q, struct request *rq)
1093 {
1094         struct test_iosched *tios = q->elevator->elevator_data;
1095
1096         if (rq->queuelist.next == &tios->queue)
1097                 return NULL;
1098         return list_entry(rq->queuelist.next, struct request, queuelist);
1099 }
1100
/*
 * Elevator init hook: allocate the per-queue test_iosched instance,
 * create its debugfs tree, bind the matching block device test
 * utility (by device-name prefix), and publish the elevator on @q.
 *
 * Returns 0 on success; -ENOMEM, -ENOENT (debugfs) or -ENODEV
 * (no matching test utility) on failure, with all partially
 * acquired resources released via the goto-cleanup chain.
 */
static int test_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct blk_dev_test_type *__bdt;
	struct elevator_queue *eq;
	struct test_iosched *tios;
	const char *blk_dev_name;
	int ret;
	bool found = false;
	unsigned long flags;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	/* Zeroed allocation on the queue's NUMA node. */
	tios = kzalloc_node(sizeof(*tios), GFP_KERNEL, q->node);
	if (!tios) {
		pr_err("%s: failed to allocate test iosched\n", __func__);
		ret = -ENOMEM;
		goto free_kobj;
	}
	eq->elevator_data = tios;

	/* All bookkeeping queues start empty. */
	INIT_LIST_HEAD(&tios->queue);
	INIT_LIST_HEAD(&tios->test_queue);
	INIT_LIST_HEAD(&tios->dispatched_queue);
	INIT_LIST_HEAD(&tios->reinsert_queue);
	INIT_LIST_HEAD(&tios->urgent_queue);
	init_waitqueue_head(&tios->wait_q);
	tios->req_q = q;

	spin_lock_init(&tios->lock);

	ret = test_debugfs_init(tios);
	if (ret) {
		pr_err("%s: Failed to create debugfs files, ret=%d",
			__func__, ret);
		goto free_mem;
	}
	/* Device name (kobject parent) is matched against type_prefix below. */
	blk_dev_name = q->kobj.parent->name;

	/* Traverse the block device test list and init matches */
	mutex_lock(&blk_dev_test_list_lock);

	list_for_each_entry(__bdt, &blk_dev_test_list, list) {
		pr_debug("%s: checking if %s is a match to device %s\n",
			__func__, __bdt->type_prefix, blk_dev_name);
		if (!strnstr(blk_dev_name, __bdt->type_prefix,
			strlen(__bdt->type_prefix)))
			continue;

		pr_debug("%s: found the match!\n", __func__);
		found = true;
		break;
	}
	mutex_unlock(&blk_dev_test_list_lock);

	/* No match found */
	if (!found) {
		pr_err("%s: No matching block device test utility found\n",
			__func__);
		ret = -ENODEV;
		goto free_debugfs;
	} else {
		/*
		 * NOTE(review): __bdt is dereferenced after the list mutex
		 * was dropped; a concurrent test_iosched_unregister() of
		 * this entry would race with init_fn — TODO confirm the
		 * registration lifetime rules make this safe.
		 */
		ret = __bdt->init_fn(tios);
		if (ret) {
			pr_err("%s: failed to init block device test, ret=%d\n",
				__func__, ret);
			goto free_debugfs;
		}
	}

	/* Publish the fully initialized elevator under the queue lock. */
	spin_lock_irqsave(q->queue_lock, flags);
	q->elevator = eq;
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;

free_debugfs:
	test_debugfs_cleanup(tios);
free_mem:
	kfree(tios);
free_kobj:
	kobject_put(&eq->kobj);
	return ret;
}
1186
/*
 * Elevator exit hook: notify test utilities, remove the debugfs tree
 * and free the per-queue instance.  The scheduler queue must already
 * be drained (BUG otherwise).
 */
static void test_exit_queue(struct elevator_queue *e)
{
	struct test_iosched *tios = e->elevator_data;
	struct blk_dev_test_type *__bdt;

	BUG_ON(!list_empty(&tios->queue));

	/*
	 * NOTE(review): exit_fn is invoked for EVERY registered utility,
	 * not only the one whose init_fn matched in test_init_queue, and
	 * the list is walked without blk_dev_test_list_lock — verify both
	 * against the registration/unregistration paths.
	 */
	list_for_each_entry(__bdt, &blk_dev_test_list, list)
		__bdt->exit_fn(tios);

	test_debugfs_cleanup(tios);

	kfree(tios);
}
1201
1202 /**
1203  * test_iosched_add_urgent_req() - Add an urgent test_request.
1204  * First mark the request as urgent, then add it to the
1205  * urgent_queue test queue.
1206  * @test_rq:            pointer to the urgent test_request to be
1207  *                      added.
1208  *
1209  */
1210 void test_iosched_add_urgent_req(struct test_iosched *tios,
1211         struct test_request *test_rq)
1212 {
1213         unsigned long flags;
1214
1215         if (!tios)
1216                 return;
1217
1218         spin_lock_irqsave(&tios->lock, flags);
1219         test_rq->rq->cmd_flags |= REQ_URGENT;
1220         list_add_tail(&test_rq->queuelist, &tios->urgent_queue);
1221         tios->urgent_count++;
1222         spin_unlock_irqrestore(&tios->lock, flags);
1223 }
1224 EXPORT_SYMBOL(test_iosched_add_urgent_req);
1225
/*
 * Elevator type descriptor: wires the test scheduler's hooks into the
 * block layer.  Note there is no elevator_merge_fn, so bio merging is
 * effectively disabled; the merge_req hook only drops the victim.
 */
static struct elevator_type elevator_test_iosched = {

	.ops = {
		.elevator_merge_req_fn = test_merged_requests,
		.elevator_dispatch_fn = test_dispatch_requests,
		.elevator_add_req_fn = test_add_request,
		.elevator_former_req_fn = test_former_request,
		.elevator_latter_req_fn = test_latter_request,
		.elevator_init_fn = test_init_queue,
		.elevator_exit_fn = test_exit_queue,
	},
	.elevator_name = "test-iosched",
	.elevator_owner = THIS_MODULE,
};
1240
1241 static int __init test_init(void)
1242 {
1243         elv_register(&elevator_test_iosched);
1244
1245         return 0;
1246 }
1247
/* Module exit: unregister the test elevator from the block layer. */
static void __exit test_exit(void)
{
	elv_unregister(&elevator_test_iosched);
}

module_init(test_init);
module_exit(test_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Test IO scheduler");