tests/qemu-iotests/124

#!/usr/bin/env python3
# group: rw backing
#
# Tests for incremental drive-backup
#
# Copyright (C) 2015 John Snow for Red Hat, Inc.
#
# Based on 056.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import os
import iotests
from iotests import try_remove
from qemu.qmp.qmp_client import ExecuteError


def io_write_patterns(img, patterns):
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)


def transaction_action(action, **kwargs):
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
    }
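# For illustration (hypothetical arguments): transaction_action('drive-backup',
# job_id='drive0', device='drive0') returns {'type': 'drive-backup',
# 'data': {'job-id': 'drive0', 'device': 'drive0'}}; underscores in keyword
# names are rewritten to the hyphens expected by QMP.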


def transaction_bitmap_clear(node, name, **kwargs):
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    return transaction_action('drive-backup', job_id=device, device=device,
                              target=target, **kwargs)


class Bitmap:
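    '''
    Track one dirty bitmap on a drive and the chain of (incremental,
    reference) backup image pairs created against it.
    '''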
    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0
        self.backups = list()

    def base_target(self):
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        if num is None:
            num = self.num
        self.num = num + 1
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)

    def last_target(self):
        if self.backups:
            return self.backups[-1]
        return self.base_target()

    def del_target(self):
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        for backup in self.backups:
            for image in backup:
                try_remove(image)


class TestIncrementalBackupBase(iotests.QMPTestCase):
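    '''
    Common machinery for the incremental backup tests: node and bitmap
    bookkeeping, image creation helpers, QMP backup helpers, and
    verification of the resulting backups against reference images.
    '''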
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()
        self.files = list()
        self.drives = list()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)


    def setUp(self):
        # Create a base image with a distinctive pattern
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'], opts='node-name=node0')
        self.write_default_pattern(drive0['file'])
        self.vm.launch()


    def write_default_pattern(self, target):
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))


    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt})
        return self.drives[-1]


    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        optargs = []
        for k, v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k, v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)


    def do_qmp_backup(self, error='Input/output error', **kwargs):
        self.vm.cmd('drive-backup', **kwargs)
        return self.wait_qmp_backup(kwargs['device'], error)


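    # Drain JOB_STATUS_CHANGE events until the job reaches 'null', so that
    # leftover status events cannot confuse later event_wait() calls.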
    def ignore_job_status_change_events(self):
        while True:
            e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
            if e['data']['status'] == 'null':
                break

    def wait_qmp_backup(self, device, error='Input/output error'):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
            self.assert_qmp(event, 'data/error', error)
            return False


    def wait_qmp_backup_cancelled(self, device):
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()


    def create_anchor_backup(self, drive=None):
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']


    def make_reference_backup(self, bitmap=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)


    def add_bitmap(self, name, drive, **kwargs):
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        self.vm.cmd('block-dirty-bitmap-add', node=drive['id'],
                    name=bitmap.name, **kwargs)
        return bitmap


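    # Each new incremental target is created with the previous backup in the
    # chain as its backing file, so it only needs to hold the clusters
    # dirtied since that backup.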
    def prepare_backup(self, bitmap=None, parent=None, **kwargs):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent,
                        **kwargs)
        return target


    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True,
                           target=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        if target is None:
            target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target()
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res


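    # Every incremental backup must match the full reference backup taken at
    # the same point in time, and the newest image in each backup chain must
    # match the current contents of the drive.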
    def check_backups(self):
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))


    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')


    def do_incremental_simple(self, **kwargs):
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


    def tearDown(self):
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)



class TestIncrementalBackup(TestIncrementalBackupBase):
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e., after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()


    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)


    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)


    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        larger cluster size target of 128KiB without a backing file work.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create bitmap and dirty it with some new writes.
        # Overwrite [32736k, 32799k], which will dirty bitmap clusters at
        # 32M-64K and 32M. 32M+64K will be left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
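        # The writes above dirty seven 64KiB clusters in total (1 + 4 + 2),
        # so the expected dirty count is 7 * 65536 = 458752 bytes.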
        # Check the dirty bitmap stats
        self.assertTrue(self.vm.check_bitmap_status(
            'node0', bitmap0.name, {
                'name': 'bitmap0',
                'count': 458752,
                'granularity': 65536,
                'persistent': False
            }))

        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()


    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''

        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

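        # Atomically clear both bitmaps and take a full backup, so that both
        # bitmaps track changes relative to the same backup point.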
        self.vm.cmd('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()


    def do_transaction_failure_test(self, race=False):
        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
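        # blkdebug: the flush_to_disk event (triggered by hmp_io_writes)
        # advances the node from state 1 to state 2; the first read_aio
        # issued in state 2 then fails once with EIO (errno 5).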
        self.vm.cmd('blockdev-add',
            node_name=drive1['id'],
            driver=drive1['fmt'],
            file={
                'driver': 'blkdebug',
                'image': {
                    'driver': 'file',
                    'filename': drive1['file']
                },
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 2,
                    'immediately': False,
                    'once': True
                }],
            }
        )

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        if not race:
            self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                              ('0xfe', '16M', '256k'),
                                              ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup for each drive,
        # expecting drive1's backup to fail. In the 'race' test,
        # we expect drive1 to attempt to cancel the empty drive0 job.
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
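        # With completion-mode 'grouped', the jobs complete or fail as a
        # group: a failure in any one job cancels the remaining jobs in the
        # transaction.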
        self.vm.cmd('transaction', actions=transaction,
                    properties={'completion-mode': 'grouped'})

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete both target images and eliminate our record of them:
        # drive0's job was cancelled and drive1's failed.
        dr0bm0.del_target()
        dr1bm0.del_target()
        if race:
            # Don't re-run the transaction, we only wanted to test the race.
            self.vm.shutdown()
            return

        # Prepare new backup targets:
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Re-run the exact same transaction.
        self.vm.cmd('transaction', actions=transaction,
                    properties={'completion-mode': 'grouped'})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to each
        drive. Use blkdebug to interfere with the backup on just one drive and
        attempt to create a coherent incremental backup across both drives.

        Verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.

        Verify that both incrementals are created successfully.
        '''
        self.do_transaction_failure_test()

    def test_transaction_failure_race(self):
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the entire
        transaction job group.
        '''
        self.do_transaction_failure_test(race=True)


    def test_sync_dirty_bitmap_missing(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_not_found(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Check that an improper granularity is rejected.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        self.assertRaises(ExecuteError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)

    def test_growing_before_backup(self):
        '''
        Test: Add a bitmap, truncate the image, write past the old
              end, do a backup.

        Incremental backup should not ignore dirty bits past the old
        image end.
        '''
        self.assert_no_active_block_jobs()

        self.create_anchor_backup()

        self.add_bitmap('bitmap0', self.drives[0])

        self.vm.cmd('block_resize', device=self.drives[0]['id'],
                    size=(65 * 1048576))

        # Dirty the image past the old end
        self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k')

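        # The backup target must be created at the new 65M size so that it
        # can hold the clusters dirtied beyond the old 64M image end.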
        target = self.prepare_backup(size='65M')
        self.create_incremental(target=target)

        self.vm.shutdown()
        self.check_backups()


class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

    def setUp(self):
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''

        drive0 = self.drives[0]
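        # Attach drive0 through a blkdebug node: the first read issued after
        # a flush fails once with EIO, so the first incremental backup below
        # fails while the initial full backup (taken before any flush) does not.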
        self.vm.cmd('blockdev-add',
            node_name=drive0['id'],
            driver=drive0['fmt'],
            file={
                'driver': 'blkdebug',
                'image': {
                    'driver': 'file',
                    'filename': drive0['file']
                },
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 2,
                    'immediately': False,
                    'once': True
                }],
            }
        )

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point during a normal execution, the VM would resume
        # and begin issuing IO requests.

        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_pause(self):
        """
        Test an incremental backup that errors into a pause and is resumed.
        """

        drive0 = self.drives[0]
        # NB: The blkdebug script here looks for a "flush, read" pattern.
        # The flush occurs in hmp_io_writes, and the read during the block job.
        self.vm.cmd('blockdev-add',
                    node_name=drive0['id'],
                    driver=drive0['fmt'],
                    file={
                        'driver': 'blkdebug',
                        'image': {
                            'driver': 'file',
                            'filename': drive0['file']
                        },
                        'set-state': [{
                            'event': 'flush_to_disk',
                            'state': 1,
                            'new_state': 2
                        }],
                        'inject-error': [{
                            'event': 'read_aio',
                            'errno': 5,
                            'state': 2,
                            'immediately': False,
                            'once': True
                        }],
                    })
        self.create_anchor_backup(drive0)
        bitmap = self.add_bitmap('bitmap0', drive0)

        # Emulate guest activity
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 458752,
                'granularity': 65536,
                'busy': False,
                'recording': True
            }))

        # Start backup
        parent, _ = bitmap.last_target()
        target = self.prepare_backup(bitmap, parent)
        self.vm.cmd('drive-backup',
                    job_id=bitmap.drive['id'],
                    device=bitmap.drive['id'],
                    sync='incremental',
                    bitmap=bitmap.name,
                    format=bitmap.drive['fmt'],
                    target=target,
                    mode='existing',
                    on_source_error='stop')

        # Wait for the error
        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
                                   match={"data": {"device": bitmap.drive['id']}})
        self.assert_qmp(event, 'data', {'device': bitmap.drive['id'],
                                        'action': 'stop',
                                        'operation': 'read'})

        # Bitmap Status Check
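        # While the job is stopped on the error, the bitmap stays busy (it is
        # still in use by the paused job) and continues recording new writes.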
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 458752,
                'granularity': 65536,
                'busy': True,
                'recording': True
            }))

        # Resume and check incremental backup for consistency
        self.vm.cmd('block-job-resume', device=bitmap.drive['id'])
        self.wait_qmp_backup(bitmap.drive['id'])

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 0,
                'granularity': 65536,
                'busy': False,
                'recording': True
            }))

        # Finalize / Cleanup
        self.make_reference_backup(bitmap)
        self.vm.shutdown()
        self.check_backups()


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'],
                 supported_protocols=['file'])