/*
 * Block tests for iothreads
 *
 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25 #include "qemu/osdep.h"
26 #include "block/block.h"
27 #include "block/block_int-global-state.h"
28 #include "block/blockjob_int.h"
29 #include "sysemu/block-backend.h"
30 #include "qapi/error.h"
31 #include "qapi/qmp/qdict.h"
32 #include "qemu/main-loop.h"
35 static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
36 int64_t offset, int64_t bytes,
38 BdrvRequestFlags flags)
43 static int coroutine_fn bdrv_test_co_pwritev(BlockDriverState *bs,
44 int64_t offset, int64_t bytes,
46 BdrvRequestFlags flags)
51 static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
52 int64_t offset, int64_t bytes)
57 static int coroutine_fn
58 bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
59 PreallocMode prealloc, BdrvRequestFlags flags,
65 static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
67 int64_t offset, int64_t count,
68 int64_t *pnum, int64_t *map,
69 BlockDriverState **file)
75 static BlockDriver bdrv_test = {
76 .format_name = "test",
79 .bdrv_co_preadv = bdrv_test_co_preadv,
80 .bdrv_co_pwritev = bdrv_test_co_pwritev,
81 .bdrv_co_pdiscard = bdrv_test_co_pdiscard,
82 .bdrv_co_truncate = bdrv_test_co_truncate,
83 .bdrv_co_block_status = bdrv_test_co_block_status,
86 static void test_sync_op_pread(BdrvChild *c)
92 ret = bdrv_pread(c, 0, sizeof(buf), buf, 0);
93 g_assert_cmpint(ret, ==, 0);
95 /* Early error: Negative offset */
96 ret = bdrv_pread(c, -2, sizeof(buf), buf, 0);
97 g_assert_cmpint(ret, ==, -EIO);
100 static void test_sync_op_pwrite(BdrvChild *c)
102 uint8_t buf[512] = { 0 };
106 ret = bdrv_pwrite(c, 0, sizeof(buf), buf, 0);
107 g_assert_cmpint(ret, ==, 0);
109 /* Early error: Negative offset */
110 ret = bdrv_pwrite(c, -2, sizeof(buf), buf, 0);
111 g_assert_cmpint(ret, ==, -EIO);
114 static void test_sync_op_blk_pread(BlockBackend *blk)
120 ret = blk_pread(blk, 0, sizeof(buf), buf, 0);
121 g_assert_cmpint(ret, ==, 0);
123 /* Early error: Negative offset */
124 ret = blk_pread(blk, -2, sizeof(buf), buf, 0);
125 g_assert_cmpint(ret, ==, -EIO);
128 static void test_sync_op_blk_pwrite(BlockBackend *blk)
130 uint8_t buf[512] = { 0 };
134 ret = blk_pwrite(blk, 0, sizeof(buf), buf, 0);
135 g_assert_cmpint(ret, ==, 0);
137 /* Early error: Negative offset */
138 ret = blk_pwrite(blk, -2, sizeof(buf), buf, 0);
139 g_assert_cmpint(ret, ==, -EIO);
142 static void test_sync_op_blk_preadv(BlockBackend *blk)
145 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
149 ret = blk_preadv(blk, 0, sizeof(buf), &qiov, 0);
150 g_assert_cmpint(ret, ==, 0);
152 /* Early error: Negative offset */
153 ret = blk_preadv(blk, -2, sizeof(buf), &qiov, 0);
154 g_assert_cmpint(ret, ==, -EIO);
157 static void test_sync_op_blk_pwritev(BlockBackend *blk)
159 uint8_t buf[512] = { 0 };
160 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
164 ret = blk_pwritev(blk, 0, sizeof(buf), &qiov, 0);
165 g_assert_cmpint(ret, ==, 0);
167 /* Early error: Negative offset */
168 ret = blk_pwritev(blk, -2, sizeof(buf), &qiov, 0);
169 g_assert_cmpint(ret, ==, -EIO);
172 static void test_sync_op_blk_preadv_part(BlockBackend *blk)
175 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
179 ret = blk_preadv_part(blk, 0, sizeof(buf), &qiov, 0, 0);
180 g_assert_cmpint(ret, ==, 0);
182 /* Early error: Negative offset */
183 ret = blk_preadv_part(blk, -2, sizeof(buf), &qiov, 0, 0);
184 g_assert_cmpint(ret, ==, -EIO);
187 static void test_sync_op_blk_pwritev_part(BlockBackend *blk)
189 uint8_t buf[512] = { 0 };
190 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
194 ret = blk_pwritev_part(blk, 0, sizeof(buf), &qiov, 0, 0);
195 g_assert_cmpint(ret, ==, 0);
197 /* Early error: Negative offset */
198 ret = blk_pwritev_part(blk, -2, sizeof(buf), &qiov, 0, 0);
199 g_assert_cmpint(ret, ==, -EIO);
202 static void test_sync_op_blk_pwrite_compressed(BlockBackend *blk)
204 uint8_t buf[512] = { 0 };
207 /* Late error: Not supported */
208 ret = blk_pwrite_compressed(blk, 0, sizeof(buf), buf);
209 g_assert_cmpint(ret, ==, -ENOTSUP);
211 /* Early error: Negative offset */
212 ret = blk_pwrite_compressed(blk, -2, sizeof(buf), buf);
213 g_assert_cmpint(ret, ==, -EIO);
216 static void test_sync_op_blk_pwrite_zeroes(BlockBackend *blk)
221 ret = blk_pwrite_zeroes(blk, 0, 512, 0);
222 g_assert_cmpint(ret, ==, 0);
224 /* Early error: Negative offset */
225 ret = blk_pwrite_zeroes(blk, -2, 512, 0);
226 g_assert_cmpint(ret, ==, -EIO);
229 static void test_sync_op_load_vmstate(BdrvChild *c)
234 /* Error: Driver does not support snapshots */
235 ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
236 g_assert_cmpint(ret, ==, -ENOTSUP);
239 static void test_sync_op_save_vmstate(BdrvChild *c)
241 uint8_t buf[512] = { 0 };
244 /* Error: Driver does not support snapshots */
245 ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
246 g_assert_cmpint(ret, ==, -ENOTSUP);
249 static void test_sync_op_pdiscard(BdrvChild *c)
253 /* Normal success path */
254 c->bs->open_flags |= BDRV_O_UNMAP;
255 ret = bdrv_pdiscard(c, 0, 512);
256 g_assert_cmpint(ret, ==, 0);
258 /* Early success: UNMAP not supported */
259 c->bs->open_flags &= ~BDRV_O_UNMAP;
260 ret = bdrv_pdiscard(c, 0, 512);
261 g_assert_cmpint(ret, ==, 0);
263 /* Early error: Negative offset */
264 ret = bdrv_pdiscard(c, -2, 512);
265 g_assert_cmpint(ret, ==, -EIO);
268 static void test_sync_op_blk_pdiscard(BlockBackend *blk)
272 /* Early success: UNMAP not supported */
273 ret = blk_pdiscard(blk, 0, 512);
274 g_assert_cmpint(ret, ==, 0);
276 /* Early error: Negative offset */
277 ret = blk_pdiscard(blk, -2, 512);
278 g_assert_cmpint(ret, ==, -EIO);
281 static void test_sync_op_truncate(BdrvChild *c)
285 /* Normal success path */
286 ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
287 g_assert_cmpint(ret, ==, 0);
289 /* Early error: Negative offset */
290 ret = bdrv_truncate(c, -2, false, PREALLOC_MODE_OFF, 0, NULL);
291 g_assert_cmpint(ret, ==, -EINVAL);
293 /* Error: Read-only image */
294 c->bs->open_flags &= ~BDRV_O_RDWR;
296 ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
297 g_assert_cmpint(ret, ==, -EACCES);
299 c->bs->open_flags |= BDRV_O_RDWR;
302 static void test_sync_op_blk_truncate(BlockBackend *blk)
306 /* Normal success path */
307 ret = blk_truncate(blk, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
308 g_assert_cmpint(ret, ==, 0);
310 /* Early error: Negative offset */
311 ret = blk_truncate(blk, -2, false, PREALLOC_MODE_OFF, 0, NULL);
312 g_assert_cmpint(ret, ==, -EINVAL);
315 /* Disable TSA to make bdrv_test.bdrv_co_block_status writable */
316 static void TSA_NO_TSA test_sync_op_block_status(BdrvChild *c)
321 /* Normal success path */
322 ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
323 g_assert_cmpint(ret, ==, 0);
325 /* Early success: No driver support */
326 bdrv_test.bdrv_co_block_status = NULL;
327 ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
328 g_assert_cmpint(ret, ==, 1);
330 /* Early success: bytes = 0 */
331 ret = bdrv_is_allocated(c->bs, 0, 0, &n);
332 g_assert_cmpint(ret, ==, 0);
334 /* Early success: Offset > image size*/
335 ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
336 g_assert_cmpint(ret, ==, 0);
339 static void test_sync_op_flush(BdrvChild *c)
343 /* Normal success path */
344 ret = bdrv_flush(c->bs);
345 g_assert_cmpint(ret, ==, 0);
347 /* Early success: Read-only image */
348 c->bs->open_flags &= ~BDRV_O_RDWR;
350 ret = bdrv_flush(c->bs);
351 g_assert_cmpint(ret, ==, 0);
353 c->bs->open_flags |= BDRV_O_RDWR;
356 static void test_sync_op_blk_flush(BlockBackend *blk)
358 BlockDriverState *bs = blk_bs(blk);
361 /* Normal success path */
362 ret = blk_flush(blk);
363 g_assert_cmpint(ret, ==, 0);
365 /* Early success: Read-only image */
366 bs->open_flags &= ~BDRV_O_RDWR;
368 ret = blk_flush(blk);
369 g_assert_cmpint(ret, ==, 0);
371 bs->open_flags |= BDRV_O_RDWR;
374 static void test_sync_op_check(BdrvChild *c)
376 BdrvCheckResult result;
379 /* Error: Driver does not implement check */
380 ret = bdrv_check(c->bs, &result, 0);
381 g_assert_cmpint(ret, ==, -ENOTSUP);
384 static void test_sync_op_activate(BdrvChild *c)
387 GRAPH_RDLOCK_GUARD_MAINLOOP();
389 /* Early success: Image is not inactive */
390 bdrv_activate(c->bs, NULL);
394 typedef struct SyncOpTest {
396 void (*fn)(BdrvChild *c);
397 void (*blkfn)(BlockBackend *blk);
400 const SyncOpTest sync_op_tests[] = {
402 .name = "/sync-op/pread",
403 .fn = test_sync_op_pread,
404 .blkfn = test_sync_op_blk_pread,
406 .name = "/sync-op/pwrite",
407 .fn = test_sync_op_pwrite,
408 .blkfn = test_sync_op_blk_pwrite,
410 .name = "/sync-op/preadv",
412 .blkfn = test_sync_op_blk_preadv,
414 .name = "/sync-op/pwritev",
416 .blkfn = test_sync_op_blk_pwritev,
418 .name = "/sync-op/preadv_part",
420 .blkfn = test_sync_op_blk_preadv_part,
422 .name = "/sync-op/pwritev_part",
424 .blkfn = test_sync_op_blk_pwritev_part,
426 .name = "/sync-op/pwrite_compressed",
428 .blkfn = test_sync_op_blk_pwrite_compressed,
430 .name = "/sync-op/pwrite_zeroes",
432 .blkfn = test_sync_op_blk_pwrite_zeroes,
434 .name = "/sync-op/load_vmstate",
435 .fn = test_sync_op_load_vmstate,
437 .name = "/sync-op/save_vmstate",
438 .fn = test_sync_op_save_vmstate,
440 .name = "/sync-op/pdiscard",
441 .fn = test_sync_op_pdiscard,
442 .blkfn = test_sync_op_blk_pdiscard,
444 .name = "/sync-op/truncate",
445 .fn = test_sync_op_truncate,
446 .blkfn = test_sync_op_blk_truncate,
448 .name = "/sync-op/block_status",
449 .fn = test_sync_op_block_status,
451 .name = "/sync-op/flush",
452 .fn = test_sync_op_flush,
453 .blkfn = test_sync_op_blk_flush,
455 .name = "/sync-op/check",
456 .fn = test_sync_op_check,
458 .name = "/sync-op/activate",
459 .fn = test_sync_op_activate,
463 /* Test synchronous operations that run in a different iothread, so we have to
464 * poll for the coroutine there to return. */
465 static void test_sync_op(const void *opaque)
467 const SyncOpTest *t = opaque;
468 IOThread *iothread = iothread_new();
469 AioContext *ctx = iothread_get_aio_context(iothread);
471 BlockDriverState *bs;
476 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
477 bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
478 bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
479 blk_insert_bs(blk, bs, &error_abort);
481 bdrv_graph_rdlock_main_loop();
482 c = QLIST_FIRST(&bs->parents);
483 bdrv_graph_rdunlock_main_loop();
485 blk_set_aio_context(blk, ctx, &error_abort);
486 aio_context_acquire(ctx);
493 blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
494 aio_context_release(ctx);
500 typedef struct TestBlockJob {
502 bool should_complete;
506 static int test_job_prepare(Job *job)
508 g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
512 static int coroutine_fn test_job_run(Job *job, Error **errp)
514 TestBlockJob *s = container_of(job, TestBlockJob, common.job);
516 job_transition_to_ready(&s->common.job);
517 while (!s->should_complete) {
519 g_assert(qemu_get_current_aio_context() == job->aio_context);
521 /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
522 * emulate some actual activity (probably some I/O) here so that the
523 * drain involved in AioContext switches has to wait for this activity
525 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);
527 job_pause_point(&s->common.job);
530 g_assert(qemu_get_current_aio_context() == job->aio_context);
534 static void test_job_complete(Job *job, Error **errp)
536 TestBlockJob *s = container_of(job, TestBlockJob, common.job);
537 s->should_complete = true;
540 BlockJobDriver test_job_driver = {
542 .instance_size = sizeof(TestBlockJob),
543 .free = block_job_free,
544 .user_resume = block_job_user_resume,
546 .complete = test_job_complete,
547 .prepare = test_job_prepare,
551 static void test_attach_blockjob(void)
553 IOThread *iothread = iothread_new();
554 AioContext *ctx = iothread_get_aio_context(iothread);
556 BlockDriverState *bs;
559 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
560 bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
561 blk_insert_bs(blk, bs, &error_abort);
563 tjob = block_job_create("job0", &test_job_driver, NULL, bs,
565 0, 0, NULL, NULL, &error_abort);
566 job_start(&tjob->common.job);
568 while (tjob->n == 0) {
569 aio_poll(qemu_get_aio_context(), false);
572 blk_set_aio_context(blk, ctx, &error_abort);
575 while (tjob->n == 0) {
576 aio_poll(qemu_get_aio_context(), false);
579 aio_context_acquire(ctx);
580 blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
581 aio_context_release(ctx);
584 while (tjob->n == 0) {
585 aio_poll(qemu_get_aio_context(), false);
588 blk_set_aio_context(blk, ctx, &error_abort);
591 while (tjob->n == 0) {
592 aio_poll(qemu_get_aio_context(), false);
595 WITH_JOB_LOCK_GUARD() {
596 job_complete_sync_locked(&tjob->common.job, &error_abort);
598 aio_context_acquire(ctx);
599 blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
600 aio_context_release(ctx);
607 * Test that changing the AioContext for one node in a tree (here through blk)
608 * changes all other nodes as well:
612 * | bs_verify [blkverify]
615 * bs_a [bdrv_test] bs_b [bdrv_test]
618 static void test_propagate_basic(void)
620 IOThread *iothread = iothread_new();
621 AioContext *ctx = iothread_get_aio_context(iothread);
622 AioContext *main_ctx;
624 BlockDriverState *bs_a, *bs_b, *bs_verify;
628 * Create bs_a and its BlockBackend. We cannot take the RESIZE
629 * permission because blkverify will not share it on the test
632 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
634 bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
635 blk_insert_bs(blk, bs_a, &error_abort);
638 bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);
640 /* Create blkverify filter that references both bs_a and bs_b */
641 options = qdict_new();
642 qdict_put_str(options, "driver", "blkverify");
643 qdict_put_str(options, "test", "bs_a");
644 qdict_put_str(options, "raw", "bs_b");
646 bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
648 /* Switch the AioContext */
649 blk_set_aio_context(blk, ctx, &error_abort);
650 g_assert(blk_get_aio_context(blk) == ctx);
651 g_assert(bdrv_get_aio_context(bs_a) == ctx);
652 g_assert(bdrv_get_aio_context(bs_verify) == ctx);
653 g_assert(bdrv_get_aio_context(bs_b) == ctx);
655 /* Switch the AioContext back */
656 main_ctx = qemu_get_aio_context();
657 aio_context_acquire(ctx);
658 blk_set_aio_context(blk, main_ctx, &error_abort);
659 aio_context_release(ctx);
660 g_assert(blk_get_aio_context(blk) == main_ctx);
661 g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
662 g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
663 g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
665 bdrv_unref(bs_verify);
672 * Test that diamonds in the graph don't lead to endless recursion:
676 * bs_verify [blkverify]
679 * bs_b [raw] bs_c[raw]
684 static void test_propagate_diamond(void)
686 IOThread *iothread = iothread_new();
687 AioContext *ctx = iothread_get_aio_context(iothread);
688 AioContext *main_ctx;
690 BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
694 bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
696 /* Create bs_b and bc_c */
697 options = qdict_new();
698 qdict_put_str(options, "driver", "raw");
699 qdict_put_str(options, "file", "bs_a");
700 qdict_put_str(options, "node-name", "bs_b");
701 bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
703 options = qdict_new();
704 qdict_put_str(options, "driver", "raw");
705 qdict_put_str(options, "file", "bs_a");
706 qdict_put_str(options, "node-name", "bs_c");
707 bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
709 /* Create blkverify filter that references both bs_b and bs_c */
710 options = qdict_new();
711 qdict_put_str(options, "driver", "blkverify");
712 qdict_put_str(options, "test", "bs_b");
713 qdict_put_str(options, "raw", "bs_c");
715 bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
717 * Do not take the RESIZE permission: This would require the same
718 * from bs_c and thus from bs_a; however, blkverify will not share
719 * it on bs_b, and thus it will not be available for bs_a.
721 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
723 blk_insert_bs(blk, bs_verify, &error_abort);
725 /* Switch the AioContext */
726 blk_set_aio_context(blk, ctx, &error_abort);
727 g_assert(blk_get_aio_context(blk) == ctx);
728 g_assert(bdrv_get_aio_context(bs_verify) == ctx);
729 g_assert(bdrv_get_aio_context(bs_a) == ctx);
730 g_assert(bdrv_get_aio_context(bs_b) == ctx);
731 g_assert(bdrv_get_aio_context(bs_c) == ctx);
733 /* Switch the AioContext back */
734 main_ctx = qemu_get_aio_context();
735 aio_context_acquire(ctx);
736 blk_set_aio_context(blk, main_ctx, &error_abort);
737 aio_context_release(ctx);
738 g_assert(blk_get_aio_context(blk) == main_ctx);
739 g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
740 g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
741 g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
742 g_assert(bdrv_get_aio_context(bs_c) == main_ctx);
745 bdrv_unref(bs_verify);
751 static void test_propagate_mirror(void)
753 IOThread *iothread = iothread_new();
754 AioContext *ctx = iothread_get_aio_context(iothread);
755 AioContext *main_ctx = qemu_get_aio_context();
756 BlockDriverState *src, *target, *filter;
759 Error *local_err = NULL;
761 /* Create src and target*/
762 src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
763 target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
766 /* Start a mirror job */
767 aio_context_acquire(main_ctx);
768 mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
769 MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
770 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
771 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
773 aio_context_release(main_ctx);
775 WITH_JOB_LOCK_GUARD() {
776 job = job_get_locked("job0");
778 filter = bdrv_find_node("filter_node");
780 /* Change the AioContext of src */
781 bdrv_try_change_aio_context(src, ctx, NULL, &error_abort);
782 g_assert(bdrv_get_aio_context(src) == ctx);
783 g_assert(bdrv_get_aio_context(target) == ctx);
784 g_assert(bdrv_get_aio_context(filter) == ctx);
785 g_assert(job->aio_context == ctx);
787 /* Change the AioContext of target */
788 aio_context_acquire(ctx);
789 bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
790 aio_context_release(ctx);
791 g_assert(bdrv_get_aio_context(src) == main_ctx);
792 g_assert(bdrv_get_aio_context(target) == main_ctx);
793 g_assert(bdrv_get_aio_context(filter) == main_ctx);
795 /* With a BlockBackend on src, changing target must fail */
796 blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
797 blk_insert_bs(blk, src, &error_abort);
799 bdrv_try_change_aio_context(target, ctx, NULL, &local_err);
800 error_free_or_abort(&local_err);
802 g_assert(blk_get_aio_context(blk) == main_ctx);
803 g_assert(bdrv_get_aio_context(src) == main_ctx);
804 g_assert(bdrv_get_aio_context(target) == main_ctx);
805 g_assert(bdrv_get_aio_context(filter) == main_ctx);
807 /* ...unless we explicitly allow it */
808 aio_context_acquire(ctx);
809 blk_set_allow_aio_context_change(blk, true);
810 bdrv_try_change_aio_context(target, ctx, NULL, &error_abort);
811 aio_context_release(ctx);
813 g_assert(blk_get_aio_context(blk) == ctx);
814 g_assert(bdrv_get_aio_context(src) == ctx);
815 g_assert(bdrv_get_aio_context(target) == ctx);
816 g_assert(bdrv_get_aio_context(filter) == ctx);
818 job_cancel_sync_all();
820 aio_context_acquire(ctx);
821 blk_set_aio_context(blk, main_ctx, &error_abort);
822 bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
823 aio_context_release(ctx);
830 static void test_attach_second_node(void)
832 IOThread *iothread = iothread_new();
833 AioContext *ctx = iothread_get_aio_context(iothread);
834 AioContext *main_ctx = qemu_get_aio_context();
836 BlockDriverState *bs, *filter;
839 aio_context_acquire(main_ctx);
840 blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
841 bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
842 blk_insert_bs(blk, bs, &error_abort);
844 options = qdict_new();
845 qdict_put_str(options, "driver", "raw");
846 qdict_put_str(options, "file", "base");
848 filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
849 aio_context_release(main_ctx);
851 g_assert(blk_get_aio_context(blk) == ctx);
852 g_assert(bdrv_get_aio_context(bs) == ctx);
853 g_assert(bdrv_get_aio_context(filter) == ctx);
855 aio_context_acquire(ctx);
856 blk_set_aio_context(blk, main_ctx, &error_abort);
857 aio_context_release(ctx);
858 g_assert(blk_get_aio_context(blk) == main_ctx);
859 g_assert(bdrv_get_aio_context(bs) == main_ctx);
860 g_assert(bdrv_get_aio_context(filter) == main_ctx);
867 static void test_attach_preserve_blk_ctx(void)
869 IOThread *iothread = iothread_new();
870 AioContext *ctx = iothread_get_aio_context(iothread);
871 AioContext *main_ctx = qemu_get_aio_context();
873 BlockDriverState *bs;
875 aio_context_acquire(main_ctx);
876 blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
877 bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
878 bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
880 /* Add node to BlockBackend that has an iothread context assigned */
881 blk_insert_bs(blk, bs, &error_abort);
882 g_assert(blk_get_aio_context(blk) == ctx);
883 g_assert(bdrv_get_aio_context(bs) == ctx);
884 aio_context_release(main_ctx);
886 /* Remove the node again */
887 aio_context_acquire(ctx);
889 aio_context_release(ctx);
890 g_assert(blk_get_aio_context(blk) == ctx);
891 g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());
893 /* Re-attach the node */
894 aio_context_acquire(main_ctx);
895 blk_insert_bs(blk, bs, &error_abort);
896 aio_context_release(main_ctx);
897 g_assert(blk_get_aio_context(blk) == ctx);
898 g_assert(bdrv_get_aio_context(bs) == ctx);
900 aio_context_acquire(ctx);
901 blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
902 aio_context_release(ctx);
907 int main(int argc, char **argv)
912 qemu_init_main_loop(&error_abort);
914 g_test_init(&argc, &argv, NULL);
916 for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
917 const SyncOpTest *t = &sync_op_tests[i];
918 g_test_add_data_func(t->name, t, test_sync_op);
921 g_test_add_func("/attach/blockjob", test_attach_blockjob);
922 g_test_add_func("/attach/second_node", test_attach_second_node);
923 g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
924 g_test_add_func("/propagate/basic", test_propagate_basic);
925 g_test_add_func("/propagate/diamond", test_propagate_diamond);
926 g_test_add_func("/propagate/mirror", test_propagate_mirror);