1 /* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * The test scheduler allows to test the block device by dispatching
13 * specific requests according to the test case and declare PASS/FAIL
14 * according to the requests completion error code.
15 * Each test is exposed via debugfs and can be triggered by writing to
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt"\n"
21 /* elevator test iosched */
22 #include <linux/blkdev.h>
23 #include <linux/elevator.h>
24 #include <linux/bio.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27 #include <linux/init.h>
28 #include <linux/debugfs.h>
29 #include <linux/test-iosched.h>
30 #include <linux/delay.h>
33 #define MODULE_NAME "test-iosched"
34 #define WR_RD_START_REQ_ID 1234
35 #define UNIQUE_START_REQ_ID 5678
36 #define TIMEOUT_TIMER_MS 40000
37 #define TEST_MAX_TESTCASE_ROUNDS 15
40 static DEFINE_MUTEX(blk_dev_test_list_lock);
41 static LIST_HEAD(blk_dev_test_list);
45 * test_iosched_mark_test_completion() - Wakeup the debugfs
46 * thread, waiting on the test completion
48 void test_iosched_mark_test_completion(struct test_iosched *tios)
/* Log the final per-queue counters before signalling completion. */
53 pr_info("%s: mark test is completed, test_count=%d, ", __func__,
55 pr_info("%s: urgent_count=%d, reinsert_count=%d,", __func__,
56 tios->urgent_count, tios->reinsert_count);
/*
 * Flip the state to TEST_COMPLETED and wake the waiter blocked on
 * tios->wait_q (the thread in test_iosched_start_test()).
 */
58 tios->test_state = TEST_COMPLETED;
59 wake_up(&tios->wait_q);
61 EXPORT_SYMBOL(test_iosched_mark_test_completion);
64 * check_test_completion() - Check if all the queued test
65 * requests were completed
67 void check_test_completion(struct test_iosched *tios)
69 struct test_request *test_rq;
/*
 * A test type may install its own completion predicate; if it reports
 * "not done yet" the generic checks below are skipped.
 */
74 if (tios->test_info.check_test_completion_fn &&
75 !tios->test_info.check_test_completion_fn(tios))
/* Any dispatched request still in flight means the test is not done. */
78 list_for_each_entry(test_rq, &tios->dispatched_queue, queuelist)
79 if (!test_rq->req_completed)
/* Requests still waiting in any of the staging queues also block completion. */
82 if (!list_empty(&tios->test_queue)
83 || !list_empty(&tios->reinsert_queue)
84 || !list_empty(&tios->urgent_queue)) {
85 pr_info("%s: Test still not completed,", __func__);
86 pr_info("%s: test_count=%d, reinsert_count=%d", __func__,
87 tios->test_count, tios->reinsert_count);
88 pr_info("%s: dispatched_count=%d, urgent_count=%d", __func__,
89 tios->dispatched_count,
/*
 * test_duration held the start timestamp (set in test_iosched_start_test);
 * turn it into an elapsed time now that everything completed.
 */
94 tios->test_info.test_duration = ktime_sub(ktime_get(),
95 tios->test_info.test_duration);
97 test_iosched_mark_test_completion(tios);
102 EXPORT_SYMBOL(check_test_completion);
105 * A callback to be called per bio completion.
106 * Frees the bio memory.
/* NOTE(review): function body is elided in this listing — verify against full source. */
108 static void end_test_bio(struct bio *bio)
/*
 * test_iosched_free_test_req_data_buffer() - Release the per-bio data pages
 * of a test request. Frees every allocated page in bios_buffer[] and NULLs
 * the slot so a double free is harmless.
 */
113 void test_iosched_free_test_req_data_buffer(struct test_request *test_rq)
120 for (i = 0; i < BLK_MAX_SEGMENTS; i++)
121 if (test_rq->bios_buffer[i]) {
122 free_page((unsigned long)test_rq->bios_buffer[i]);
123 test_rq->bios_buffer[i] = NULL;
126 EXPORT_SYMBOL(test_iosched_free_test_req_data_buffer);
129 * A callback to be called per request completion.
130 * the request memory is not freed here, will be freed later after the test
133 static void end_test_req(struct request *rq, int err)
135 struct test_request *test_rq;
/* The owning test_iosched instance hangs off the queue's elevator data. */
136 struct test_iosched *tios = rq->q->elevator->elevator_data;
137 test_rq = (struct test_request *)rq->elv.priv[0];
140 pr_debug("%s: request %d completed, err=%d",
141 __func__, test_rq->req_id, err);
/* Record the outcome, then see whether this was the last outstanding request. */
143 test_rq->req_completed = true;
144 test_rq->req_result = err;
146 check_test_completion(tios);
150 * test_iosched_add_unique_test_req() - Create and queue a non
151 * read/write request (such as FLUSH/DISCARD/SANITIZE).
152 * @is_err_expcted: A flag to indicate if this request
153 * should succeed or not
154 * @req_unique: The type of request to add
155 * @start_sec: start address of the first bio
156 * @nr_sects: number of sectors in the request
157 * @end_req_io: specific completion callback. When not
158 * set, the default callback will be used
160 int test_iosched_add_unique_test_req(struct test_iosched *tios,
161 int is_err_expcted, enum req_unique_type req_unique,
162 int start_sec, int nr_sects, rq_end_io_fn *end_req_io)
167 struct test_request *test_rq;
/* Data-less bio: FLUSH/DISCARD carry no payload pages. */
173 bio = bio_alloc(GFP_KERNEL, 0);
175 pr_err("%s: Failed to allocate a bio", __func__);
179 bio->bi_end_io = end_test_bio;
/* Translate the abstract request type into bio rw flags. */
181 switch (req_unique) {
182 case REQ_UNIQUE_FLUSH:
183 bio->bi_rw = WRITE_FLUSH;
185 case REQ_UNIQUE_DISCARD:
186 bio->bi_rw = REQ_WRITE | REQ_DISCARD;
/* nr_sects << 9: sectors to bytes (512-byte sectors). */
187 bio->bi_iter.bi_size = nr_sects << 9;
188 bio->bi_iter.bi_sector = start_sec;
191 pr_err("%s: Invalid request type %d", __func__,
197 rw_flags = bio_data_dir(bio);
198 if (bio->bi_rw & REQ_SYNC)
199 rw_flags |= REQ_SYNC;
201 rq = blk_get_request(tios->req_q, rw_flags, GFP_KERNEL);
203 pr_err("%s: Failed to allocate a request", __func__);
208 init_request_from_bio(rq, bio);
/* Caller-supplied completion wins; otherwise fall back to end_test_req. */
210 rq->end_io = end_req_io;
212 rq->end_io = end_test_req;
214 test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
216 pr_err("%s: Failed to allocate a test request", __func__);
221 test_rq->req_completed = false;
222 test_rq->req_result = -EINVAL;
224 test_rq->is_err_expected = is_err_expcted;
/* Stash the test wrapper on the request so end_test_req can find it. */
225 rq->elv.priv[0] = (void *)test_rq;
226 test_rq->req_id = tios->unique_next_req_id++;
229 "%s: added request %d to the test requests list, type = %d",
230 __func__, test_rq->req_id, req_unique);
/* Queue manipulation is done under the request_queue lock. */
232 spin_lock_irqsave(tios->req_q->queue_lock, flags);
233 list_add_tail(&test_rq->queuelist, &tios->test_queue);
235 spin_unlock_irqrestore(tios->req_q->queue_lock, flags);
239 EXPORT_SYMBOL(test_iosched_add_unique_test_req);
242 * Get a pattern to be filled in the request data buffer.
243 * If the pattern used is (-1) the buffer will be filled with sequential
246 static void fill_buf_with_pattern(int *buf, int num_bytes, int pattern)
249 int num_of_dwords = num_bytes/sizeof(int);
/* TEST_NO_PATTERN means the caller does not want the buffer touched. */
251 if (pattern == TEST_NO_PATTERN)
254 /* num_bytes should be aligned to sizeof(int) */
255 BUG_ON((num_bytes % sizeof(int)) != 0);
/* Sequential pattern writes the dword index; otherwise the fixed value. */
257 if (pattern == TEST_PATTERN_SEQUENTIAL) {
258 for (i = 0; i < num_of_dwords; i++)
261 for (i = 0; i < num_of_dwords; i++)
267 * test_iosched_create_test_req() - Create a read/write request.
268 * @is_err_expcted: A flag to indicate if this request
269 * should succeed or not
270 * @direction: READ/WRITE
271 * @start_sec: start address of the first bio
272 * @num_bios: number of BIOs to be allocated for the
274 * @pattern: A pattern, to be written into the write
275 * requests data buffer. In case of READ
276 * request, the given pattern is kept as
277 * the expected pattern. The expected
278 * pattern will be compared in the test
279 * check result function. If no comparison
280 * is required, set pattern to
282 * @end_req_io: specific completion callback. When not
283 * set, the default callback will be used
285 * This function allocates the test request and the block
286 * request and calls blk_rq_map_kern which allocates the
287 * required BIO. The allocated test request and the block
288 * request memory is freed at the end of the test and the
289 * allocated BIO memory is freed by end_test_bio.
291 struct test_request *test_iosched_create_test_req(
292 struct test_iosched *tios, int is_err_expcted,
293 int direction, int start_sec, int num_bios, int pattern,
294 rq_end_io_fn *end_req_io)
297 struct test_request *test_rq;
298 struct bio *bio = NULL;
305 rq = blk_get_request(tios->req_q, direction, GFP_KERNEL);
307 pr_err("%s: Failed to allocate a request", __func__);
311 test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
313 pr_err("%s: Failed to allocate test request", __func__);
317 test_rq->buf_size = TEST_BIO_SIZE * num_bios;
318 test_rq->wr_rd_data_pattern = pattern;
/* One zeroed page per bio; blk_rq_map_kern attaches it to the request. */
320 for (i = 0; i < num_bios; i++) {
321 test_rq->bios_buffer[i] = (void *)get_zeroed_page(GFP_KERNEL);
322 if (!test_rq->bios_buffer[i]) {
323 pr_err("%s: failed to kmap page for bio #%d/%d\n",
324 __func__, i, num_bios);
327 ret = blk_rq_map_kern(tios->req_q, rq, test_rq->bios_buffer[i],
328 TEST_BIO_SIZE, GFP_KERNEL);
330 pr_err("%s: blk_rq_map_kern returned error %d",
/* Only write buffers are pre-filled; reads are verified after completion. */
334 if (direction == WRITE)
335 fill_buf_with_pattern(test_rq->bios_buffer[i],
336 TEST_BIO_SIZE, pattern);
/* Caller-supplied completion wins; otherwise fall back to end_test_req. */
340 rq->end_io = end_req_io;
342 rq->end_io = end_test_req;
343 rq->__sector = start_sec;
344 rq->cmd_type |= REQ_TYPE_FS;
345 rq->cmd_flags |= REQ_SORTED;
346 rq->cmd_flags &= ~REQ_IO_STAT;
/* Every bio in the chain must complete through end_test_bio. */
349 rq->bio->bi_iter.bi_sector = start_sec;
350 rq->bio->bi_end_io = end_test_bio;
352 while ((bio = bio->bi_next) != NULL)
353 bio->bi_end_io = end_test_bio;
356 tios->num_of_write_bios += num_bios;
357 test_rq->req_id = tios->wr_rd_next_req_id++;
359 test_rq->req_completed = false;
360 test_rq->req_result = -EINVAL;
/* Let the test type pick the target disk (e.g. main area vs. RPMB). */
362 if (tios->test_info.get_rq_disk_fn)
363 test_rq->rq->rq_disk = tios->test_info.get_rq_disk_fn(tios);
364 test_rq->is_err_expected = is_err_expcted;
365 rq->elv.priv[0] = (void *)test_rq;
/* Error path: release the data pages allocated above. */
369 test_iosched_free_test_req_data_buffer(test_rq);
375 EXPORT_SYMBOL(test_iosched_create_test_req);
379 * test_iosched_add_wr_rd_test_req() - Create and queue a
380 * read/write request.
381 * @is_err_expcted: A flag to indicate if this request
382 * should succeed or not
383 * @direction: READ/WRITE
384 * @start_sec: start address of the first bio
385 * @num_bios: number of BIOs to be allocated for the
387 * @pattern: A pattern, to be written into the write
388 * requests data buffer. In case of READ
389 * request, the given pattern is kept as
390 * the expected pattern. The expected
391 * pattern will be compared in the test
392 * check result function. If no comparison
393 * is required, set pattern to
395 * @end_req_io: specific completion callback. When not
396 * set, the default callback will be used
398 * This function allocates the test request and the block
399 * request and calls blk_rq_map_kern which allocates the
400 * required BIO. Upon success the new request is added to the
401 * test_queue. The allocated test request and the block request
402 * memory is freed at the end of the test and the allocated BIO
403 * memory is freed by end_test_bio.
405 int test_iosched_add_wr_rd_test_req(struct test_iosched *tios,
406 int is_err_expcted, int direction, int start_sec, int num_bios,
407 int pattern, rq_end_io_fn *end_req_io)
409 struct test_request *test_rq = NULL;
/* Build the request, then append it to test_queue under the queue lock. */
412 test_rq = test_iosched_create_test_req(tios, is_err_expcted, direction,
413 start_sec, num_bios, pattern, end_req_io);
415 spin_lock_irqsave(tios->req_q->queue_lock, flags);
416 list_add_tail(&test_rq->queuelist, &tios->test_queue);
418 spin_unlock_irqrestore(tios->req_q->queue_lock, flags);
423 EXPORT_SYMBOL(test_iosched_add_wr_rd_test_req);
425 /* Converts the testcase number into a string */
426 static char *get_test_case_str(struct test_iosched *tios)
/* Delegate to the test type's name lookup when one is registered. */
428 if (tios->test_info.get_test_case_str_fn)
429 return tios->test_info.get_test_case_str_fn(
430 tios->test_info.testcase);
432 return "Unknown testcase";
436 * Verify that the test request data buffer includes the expected
439 int compare_buffer_to_pattern(struct test_request *test_rq)
445 /* num_bytes should be aligned to sizeof(int) */
446 BUG_ON((test_rq->buf_size % sizeof(int)) != 0);
447 BUG_ON(test_rq->bios_buffer == NULL);
/* Nothing to verify when the request carried no expected pattern. */
449 if (test_rq->wr_rd_data_pattern == TEST_NO_PATTERN)
/*
 * Walk every page (buf_size / TEST_BIO_SIZE of them) dword by dword.
 * Sequential pattern expects buf[j] == j; any other pattern expects the
 * fixed pattern value in every dword.
 */
452 for (i = 0; i < test_rq->buf_size / TEST_BIO_SIZE; i++) {
453 buf = test_rq->bios_buffer[i];
454 for (j = 0; j < TEST_BIO_SIZE / sizeof(int); j++)
455 if ((test_rq->wr_rd_data_pattern ==
456 TEST_PATTERN_SEQUENTIAL && buf[j] != j) ||
457 (test_rq->wr_rd_data_pattern !=
458 TEST_PATTERN_SEQUENTIAL &&
459 buf[j] != test_rq->wr_rd_data_pattern)) {
460 pr_err("%s: wrong pattern 0x%x in index %d",
461 __func__, buf[j], j);
468 EXPORT_SYMBOL(compare_buffer_to_pattern);
471 * Determine if the test passed or failed.
472 * The function checks the test request completion value and calls
473 * check_testcase_result for result checking that are specific
476 static int check_test_result(struct test_iosched *tios)
478 struct test_request *trq;
/* Every dispatched request must have completed with the expected outcome. */
482 list_for_each_entry(trq, &tios->dispatched_queue, queuelist) {
484 pr_info("%s: req_id %d is contains empty req",
485 __func__, trq->req_id);
488 if (!trq->req_completed) {
489 pr_err("%s: rq %d not completed", __func__,
/* Unexpected failure: an error result on a request expected to succeed. */
495 if ((trq->req_result < 0) && !trq->is_err_expected) {
497 "%s: rq %d completed with err, not as expected",
498 __func__, trq->req_id);
/* Unexpected success: a clean result on a request expected to fail. */
502 if ((trq->req_result == 0) && trq->is_err_expected) {
503 pr_err("%s: rq %d succeeded, not as expected",
504 __func__, trq->req_id);
/* Reads additionally verify the data pattern written earlier. */
508 if (rq_data_dir(trq->rq) == READ) {
509 res = compare_buffer_to_pattern(trq);
511 pr_err("%s: read pattern not as expected",
/* Give the test type a chance to apply testcase-specific checks. */
519 if (tios->test_info.check_test_result_fn) {
520 res = tios->test_info.check_test_result_fn(
526 pr_info("%s: %s, run# %03d, PASSED",
527 __func__, get_test_case_str(tios), ++run);
528 tios->test_result = TEST_PASSED;
532 pr_err("%s: %s, run# %03d, FAILED",
533 __func__, get_test_case_str(tios), ++run);
534 tios->test_result = TEST_FAILED;
538 /* Create and queue the required requests according to the test case */
539 static int prepare_test(struct test_iosched *tios)
/* The prepare hook is optional; without it there is nothing to stage. */
543 if (tios->test_info.prepare_test_fn) {
544 ret = tios->test_info.prepare_test_fn(tios);
/*
 * run_test() - Invoke the test type's run hook (if any), then kick the
 * request queue so the staged test requests get dispatched.
 */
552 static int run_test(struct test_iosched *tios)
556 if (tios->test_info.run_test_fn) {
557 ret = tios->test_info.run_test_fn(tios);
561 blk_run_queue(tios->req_q);
567 * free_test_queue() - Free all allocated test requests in the given test_queue:
568 * free their requests and BIOs buffer
569 * @test_queue the test queue to be freed
571 static void free_test_queue(struct list_head *test_queue)
573 struct test_request *test_rq;
/* Drain the list head-first until it is empty. */
576 while (!list_empty(test_queue)) {
577 test_rq = list_entry(test_queue->next, struct test_request,
580 list_del_init(&test_rq->queuelist);
582 * If the request was not completed we need to free its BIOs
583 * and remove it from the packed list
585 if (!test_rq->req_completed) {
587 "%s: Freeing memory of an uncompleted request",
589 list_del_init(&test_rq->rq->queuelist);
/* Unwind the bio chain manually since completion never ran. */
590 while ((bio = test_rq->rq->bio) != NULL) {
591 test_rq->rq->bio = bio->bi_next;
595 blk_put_request(test_rq->rq);
596 test_iosched_free_test_req_data_buffer(test_rq);
602 * free_test_requests() - Free all allocated test requests in
603 * all test queues in given test_data.
604 * @td The test_data struct whose test requests will be
/* Frees each of the four staging/dispatch queues and zeroes its counter. */
607 static void free_test_requests(struct test_iosched *tios)
612 if (tios->urgent_count) {
613 free_test_queue(&tios->urgent_queue);
614 tios->urgent_count = 0;
616 if (tios->test_count) {
617 free_test_queue(&tios->test_queue);
618 tios->test_count = 0;
620 if (tios->dispatched_count) {
621 free_test_queue(&tios->dispatched_queue);
622 tios->dispatched_count = 0;
624 if (tios->reinsert_count) {
625 free_test_queue(&tios->reinsert_queue);
626 tios->reinsert_count = 0;
631 * post_test() - Do post test operations. Free the allocated
632 * test requests, their requests and BIOs buffer.
633 * @td The test_data struct for the test that has
636 static int post_test(struct test_iosched *tios)
/* Run the test type's optional teardown hook first. */
640 if (tios->test_info.post_test_fn)
641 ret = tios->test_info.post_test_fn(tios);
/* Reset state so the scheduler is ready for the next test run. */
643 tios->test_info.testcase = 0;
644 tios->test_state = TEST_IDLE;
646 free_test_requests(tios);
/*
 * get_timeout_msec() - Return the test's timeout in milliseconds:
 * the per-test override when set, otherwise TIMEOUT_TIMER_MS.
 */
651 static unsigned int get_timeout_msec(struct test_iosched *tios)
653 if (tios->test_info.timeout_msec)
654 return tios->test_info.timeout_msec;
655 return TIMEOUT_TIMER_MS;
659 * test_iosched_start_test() - Prepares and runs the test.
660 * The members test_duration and test_byte_count of the input
661 * parameter t_info are modified by this function.
662 * @t_info: the current test testcase and callbacks
665 * The function also checks the test result upon test completion
667 int test_iosched_start_test(struct test_iosched *tios,
668 struct test_info *t_info)
671 unsigned long timeout;
673 char *test_name = NULL;
679 tios->test_result = TEST_FAILED;
683 timeout = msecs_to_jiffies(get_timeout_msec(tios));
686 if (tios->ignore_round)
688 * We ignored the last run due to FS write requests.
689 * Sleep to allow those requests to be issued
/* State transitions below are serialized against the dispatcher. */
693 spin_lock(&tios->lock);
695 if (tios->test_state != TEST_IDLE) {
697 "%s: Another test is running, try again later",
699 spin_unlock(&tios->lock);
703 if (tios->start_sector == 0) {
704 pr_err("%s: Invalid start sector", __func__);
705 tios->test_result = TEST_FAILED;
706 spin_unlock(&tios->lock);
/* Take a private copy of the caller's test descriptor and reset counters. */
710 memcpy(&tios->test_info, t_info, sizeof(*t_info));
712 tios->test_result = TEST_NO_RESULT;
713 tios->num_of_write_bios = 0;
715 tios->unique_next_req_id = UNIQUE_START_REQ_ID;
716 tios->wr_rd_next_req_id = WR_RD_START_REQ_ID;
718 tios->ignore_round = false;
719 tios->fs_wr_reqs_during_test = false;
721 tios->test_state = TEST_RUNNING;
723 spin_unlock(&tios->lock);
725 * Give an already dispatched request from
726 * FS a chance to complete
730 if (tios->test_info.get_test_case_str_fn)
732 tios->test_info.get_test_case_str_fn(
733 tios->test_info.testcase);
735 test_name = "Unknown testcase";
736 pr_info("%s: Starting test %s", __func__, test_name);
738 ret = prepare_test(tios);
740 pr_err("%s: failed to prepare the test",
/* test_duration holds the start time; check_test_completion converts it. */
745 tios->test_info.test_duration = ktime_get();
746 ret = run_test(tios);
748 pr_err("%s: failed to run the test", __func__);
752 pr_info("%s: Waiting for the test completion", __func__);
/* Block until the completion path marks TEST_COMPLETED, or timeout/signal. */
754 ret = wait_event_interruptible_timeout(tios->wait_q,
755 (tios->test_state == TEST_COMPLETED), timeout);
757 tios->test_state = TEST_COMPLETED;
759 pr_info("%s: Test timeout\n", __func__);
761 pr_err("%s: Test error=%d\n", __func__, ret);
/* Copy results (duration, byte count) back to the caller's descriptor. */
765 memcpy(t_info, &tios->test_info, sizeof(*t_info));
767 ret = check_test_result(tios);
769 pr_err("%s: check_test_result failed", __func__);
773 ret = post_test(tios);
775 pr_err("%s: post_test failed", __func__);
780 * Wakeup the queue thread to fetch FS requests that might have been
781 * postponed due to the test
783 blk_run_queue(tios->req_q);
/* Rounds spoiled by FS writes are retried, up to TEST_MAX_TESTCASE_ROUNDS. */
785 if (tios->ignore_round)
787 "%s: Round canceled (Got wr reqs in the middle)",
790 if (++counter == TEST_MAX_TESTCASE_ROUNDS) {
791 pr_info("%s: Too many rounds, did not succeed...",
793 tios->test_result = TEST_FAILED;
796 } while ((tios->ignore_round) &&
797 (counter < TEST_MAX_TESTCASE_ROUNDS));
799 if (tios->test_result == TEST_PASSED)
806 tios->test_result = TEST_FAILED;
809 EXPORT_SYMBOL(test_iosched_start_test);
812 * test_iosched_register() - register a block device test
814 * @bdt: the block device test type to register
816 void test_iosched_register(struct blk_dev_test_type *bdt)
/* Add the test type to the global list under the list mutex. */
821 mutex_lock(&blk_dev_test_list_lock);
822 list_add_tail(&bdt->list, &blk_dev_test_list);
823 mutex_unlock(&blk_dev_test_list_lock);
826 EXPORT_SYMBOL(test_iosched_register);
829 * test_iosched_unregister() - unregister a block device test
831 * @bdt: the block device test type to unregister
833 void test_iosched_unregister(struct blk_dev_test_type *bdt)
/* Remove the test type from the global list under the list mutex. */
838 mutex_lock(&blk_dev_test_list_lock);
839 list_del_init(&bdt->list);
840 mutex_unlock(&blk_dev_test_list_lock);
842 EXPORT_SYMBOL(test_iosched_unregister);
845 * test_iosched_set_test_result() - Set the test
847 * @test_result: the test result
849 void test_iosched_set_test_result(struct test_iosched *tios,
855 tios->test_result = test_result;
857 EXPORT_SYMBOL(test_iosched_set_test_result);
861 * test_iosched_set_ignore_round() - Set the ignore_round flag
862 * @ignore_round: A flag to indicate if this test round
863 * should be ignored and re-run
865 void test_iosched_set_ignore_round(struct test_iosched *tios,
871 tios->ignore_round = ignore_round;
873 EXPORT_SYMBOL(test_iosched_set_ignore_round);
/*
 * test_debugfs_init() - Create the per-device debugfs hierarchy:
 * <debugfs>/test-iosched-<dev>/{tests,utils} plus u32 entries for
 * test_result, start_sector and sector_range. On any failure the whole
 * tree is removed recursively.
 */
875 static int test_debugfs_init(struct test_iosched *tios)
877 char name[2*BDEVNAME_SIZE];
/* Directory name combines the driver name with the parent kobject's name. */
880 snprintf(name, 2*BDEVNAME_SIZE - 1, "%s-%s", "test-iosched",
881 tios->req_q->kobj.parent->name);
882 pr_debug("%s: creating test-iosched instance %s\n", __func__, name);
884 tios->debug.debug_root = debugfs_create_dir(name, NULL);
885 if (!tios->debug.debug_root)
888 tios->debug.debug_tests_root = debugfs_create_dir("tests",
889 tios->debug.debug_root);
890 if (!tios->debug.debug_tests_root)
893 tios->debug.debug_utils_root = debugfs_create_dir("utils",
894 tios->debug.debug_root);
895 if (!tios->debug.debug_utils_root)
898 tios->debug.debug_test_result = debugfs_create_u32(
901 tios->debug.debug_utils_root,
903 if (!tios->debug.debug_test_result)
906 tios->debug.start_sector = debugfs_create_u32(
909 tios->debug.debug_utils_root,
910 &tios->start_sector);
911 if (!tios->debug.start_sector)
914 tios->debug.sector_range = debugfs_create_u32(
917 tios->debug.debug_utils_root,
918 &tios->sector_range);
919 if (!tios->debug.sector_range)
/* Error path: tear down whatever part of the tree was created. */
925 debugfs_remove_recursive(tios->debug.debug_root);
/* test_debugfs_cleanup() - Remove the whole debugfs tree for this instance. */
929 static void test_debugfs_cleanup(struct test_iosched *tios)
931 debugfs_remove_recursive(tios->debug.debug_root);
/* print_req() - Dump a request and its bio chain at pr_debug level. */
934 static void print_req(struct request *req)
937 struct test_request *test_rq;
942 test_rq = (struct test_request *)req->elv.priv[0];
945 pr_debug("%s: Dispatch request %d: __sector=0x%lx",
946 __func__, test_rq->req_id, (unsigned long)req->__sector);
947 pr_debug("%s: nr_phys_segments=%d, num_of_sectors=%d",
948 __func__, req->nr_phys_segments, blk_rq_sectors(req));
/* Log each bio in the chain: size and starting sector. */
950 pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
951 __func__, bio->bi_iter.bi_size,
952 (unsigned long)bio->bi_iter.bi_sector);
953 while ((bio = bio->bi_next) != NULL) {
954 pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
955 __func__, bio->bi_iter.bi_size,
956 (unsigned long)bio->bi_iter.bi_sector);
/*
 * test_merged_requests() - Elevator merge callback: @next was merged into
 * @rq, so drop @next from the scheduler's list.
 */
961 static void test_merged_requests(struct request_queue *q,
962 struct request *rq, struct request *next)
964 list_del_init(&next->queuelist);
967 * test_dispatch_from(): Dispatch request from @queue to the @dispatched_queue.
968 * Also update the dispatched_count counter.
970 static int test_dispatch_from(struct request_queue *q,
971 struct list_head *queue, unsigned int *count)
973 struct test_request *test_rq;
976 struct test_iosched *tios = q->elevator->elevator_data;
/* Pop the head of @queue and move it to dispatched_queue under tios->lock. */
982 spin_lock_irqsave(&tios->lock, flags);
983 if (!list_empty(queue)) {
984 test_rq = list_entry(queue->next, struct test_request,
988 pr_err("%s: null request,return", __func__);
989 spin_unlock_irqrestore(&tios->lock, flags);
992 list_move_tail(&test_rq->queuelist,
993 &tios->dispatched_queue);
994 tios->dispatched_count++;
996 spin_unlock_irqrestore(&tios->lock, flags);
/* Hand the request to the block layer and account its payload bytes. */
999 elv_dispatch_sort(q, rq);
1000 tios->test_info.test_byte_count += test_rq->buf_size;
1004 spin_unlock_irqrestore(&tios->lock, flags);
1011 * Dispatch a test request in case there is a running test Otherwise, dispatch
1012 * a request that was queued by the FS to keep the card functional.
1014 static int test_dispatch_requests(struct request_queue *q, int force)
1016 struct test_iosched *tios = q->elevator->elevator_data;
1017 struct request *rq = NULL;
1020 switch (tios->test_state) {
/* Idle: serve regular FS requests from tios->queue directly. */
1022 if (!list_empty(&tios->queue)) {
1023 rq = list_entry(tios->queue.next,
1024 struct request, queuelist);
1025 list_del_init(&rq->queuelist);
1026 elv_dispatch_sort(q, rq);
/* Running: drain test queues in priority order: urgent, reinsert, test. */
1032 if (test_dispatch_from(q, &tios->urgent_queue,
1033 &tios->urgent_count)) {
1034 pr_debug("%s: Dispatched from urgent_count=%d",
1035 __func__, tios->urgent_count);
1039 if (test_dispatch_from(q, &tios->reinsert_queue,
1040 &tios->reinsert_count)) {
1041 pr_debug("%s: Dispatched from reinsert_count=%d",
1042 __func__, tios->reinsert_count);
1046 if (test_dispatch_from(q, &tios->test_queue,
1047 &tios->test_count)) {
1048 pr_debug("%s: Dispatched from test_count=%d",
1049 __func__, tios->test_count);
1054 case TEST_COMPLETED:
/* Elevator add callback: queue the FS request and flag writes mid-test. */
1063 static void test_add_request(struct request_queue *q, struct request *rq)
1065 struct test_iosched *tios = q->elevator->elevator_data;
1067 list_add_tail(&rq->queuelist, &tios->queue);
1070 * The write requests can be followed by a FLUSH request that might
1071 * cause unexpected results of the test.
/* Note FS writes that arrive mid-test so the round can be invalidated. */
1073 if (rq_data_dir(rq) == WRITE &&
1074 tios->test_state == TEST_RUNNING) {
1075 pr_debug("%s: got WRITE req in the middle of the test",
1077 tios->fs_wr_reqs_during_test = true;
/* Elevator callback: return the request queued just before @rq, if any. */
1081 static struct request *
1082 test_former_request(struct request_queue *q, struct request *rq)
1084 struct test_iosched *tios = q->elevator->elevator_data;
/* Previous entry is the list head itself when @rq is first in line. */
1086 if (rq->queuelist.prev == &tios->queue)
1088 return list_entry(rq->queuelist.prev, struct request, queuelist);
/* Elevator callback: return the request queued just after @rq, if any. */
1091 static struct request *
1092 test_latter_request(struct request_queue *q, struct request *rq)
1094 struct test_iosched *tios = q->elevator->elevator_data;
/* Next entry is the list head itself when @rq is last in line. */
1096 if (rq->queuelist.next == &tios->queue)
1098 return list_entry(rq->queuelist.next, struct request, queuelist);
/*
 * test_init_queue() - Elevator init callback: allocate the test_iosched
 * instance, set up its queues/lock/waitqueue and debugfs tree, then find
 * and initialize the registered block device test type matching this
 * queue's device name. On any failure all acquired resources are released.
 */
1101 static int test_init_queue(struct request_queue *q, struct elevator_type *e)
1103 struct blk_dev_test_type *__bdt;
1104 struct elevator_queue *eq;
1105 struct test_iosched *tios;
1106 const char *blk_dev_name;
1109 unsigned long flags;
1111 eq = elevator_alloc(q, e);
1115 tios = kzalloc_node(sizeof(*tios), GFP_KERNEL, q->node);
1117 pr_err("%s: failed to allocate test iosched\n", __func__);
1121 eq->elevator_data = tios;
/* Initialize all staging/dispatch lists and synchronization primitives. */
1123 INIT_LIST_HEAD(&tios->queue);
1124 INIT_LIST_HEAD(&tios->test_queue);
1125 INIT_LIST_HEAD(&tios->dispatched_queue);
1126 INIT_LIST_HEAD(&tios->reinsert_queue);
1127 INIT_LIST_HEAD(&tios->urgent_queue);
1128 init_waitqueue_head(&tios->wait_q);
1131 spin_lock_init(&tios->lock);
1133 ret = test_debugfs_init(tios);
1135 pr_err("%s: Failed to create debugfs files, ret=%d",
1139 blk_dev_name = q->kobj.parent->name;
1141 /* Traverse the block device test list and init matches */
1142 mutex_lock(&blk_dev_test_list_lock);
1144 list_for_each_entry(__bdt, &blk_dev_test_list, list) {
1145 pr_debug("%s: checking if %s is a match to device %s\n",
1146 __func__, __bdt->type_prefix, blk_dev_name);
1147 if (!strnstr(blk_dev_name, __bdt->type_prefix,
1148 strlen(__bdt->type_prefix)))
1151 pr_debug("%s: found the match!\n", __func__);
1155 mutex_unlock(&blk_dev_test_list_lock);
1157 /* No match found */
1159 pr_err("%s: No matching block device test utility found\n",
/* Let the matched test type perform its own per-instance setup. */
1164 ret = __bdt->init_fn(tios);
1166 pr_err("%s: failed to init block device test, ret=%d\n",
1172 spin_lock_irqsave(q->queue_lock, flags);
1174 spin_unlock_irqrestore(q->queue_lock, flags);
/* Error path: unwind debugfs and drop the elevator queue reference. */
1179 test_debugfs_cleanup(tios);
1183 kobject_put(&eq->kobj);
/*
 * test_exit_queue() - Elevator exit callback: run every registered test
 * type's exit hook for this instance and remove the debugfs tree. The FS
 * request queue must already be empty.
 * NOTE(review): blk_dev_test_list is walked here without the list mutex,
 * unlike register/unregister — confirm this is safe in the full source.
 */
1187 static void test_exit_queue(struct elevator_queue *e)
1189 struct test_iosched *tios = e->elevator_data;
1190 struct blk_dev_test_type *__bdt;
1192 BUG_ON(!list_empty(&tios->queue));
1194 list_for_each_entry(__bdt, &blk_dev_test_list, list)
1195 __bdt->exit_fn(tios);
1197 test_debugfs_cleanup(tios);
1203 * test_iosched_add_urgent_req() - Add an urgent test_request.
1204 * First mark the request as urgent, then add it to the
1205 * urgent_queue test queue.
1206 * @test_rq: pointer to the urgent test_request to be
1210 void test_iosched_add_urgent_req(struct test_iosched *tios,
1211 struct test_request *test_rq)
1213 unsigned long flags;
/* Flag the request REQ_URGENT and queue it, all under tios->lock. */
1218 spin_lock_irqsave(&tios->lock, flags);
1219 test_rq->rq->cmd_flags |= REQ_URGENT;
1220 list_add_tail(&test_rq->queuelist, &tios->urgent_queue);
1221 tios->urgent_count++;
1222 spin_unlock_irqrestore(&tios->lock, flags);
1224 EXPORT_SYMBOL(test_iosched_add_urgent_req);
/* Elevator type descriptor wiring the test scheduler's callbacks. */
1226 static struct elevator_type elevator_test_iosched = {
1229 .elevator_merge_req_fn = test_merged_requests,
1230 .elevator_dispatch_fn = test_dispatch_requests,
1231 .elevator_add_req_fn = test_add_request,
1232 .elevator_former_req_fn = test_former_request,
1233 .elevator_latter_req_fn = test_latter_request,
1234 .elevator_init_fn = test_init_queue,
1235 .elevator_exit_fn = test_exit_queue,
1237 .elevator_name = "test-iosched",
1238 .elevator_owner = THIS_MODULE,
/* Module init: register the "test-iosched" elevator with the block layer. */
1241 static int __init test_init(void)
1243 elv_register(&elevator_test_iosched);
/* Module exit: unregister the "test-iosched" elevator. */
1248 static void __exit test_exit(void)
1250 elv_unregister(&elevator_test_iosched);
1253 module_init(test_init);
1254 module_exit(test_exit);
1256 MODULE_LICENSE("GPL v2");
1257 MODULE_DESCRIPTION("Test IO scheduler");