/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "block/aio-wait.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
static bool is_block_job(Job *job)
{
    return job_type(job) == JOB_TYPE_BACKUP ||
           job_type(job) == JOB_TYPE_COMMIT ||
           job_type(job) == JOB_TYPE_MIRROR ||
           job_type(job) == JOB_TYPE_STREAM;
}
BlockJob *block_job_next_locked(BlockJob *bjob)
{
    Job *job = bjob ? &bjob->job : NULL;
    GLOBAL_STATE_CODE();

    do {
        job = job_next_locked(job);
    } while (job && !is_block_job(job));

    return job ? container_of(job, BlockJob, job) : NULL;
}
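/*
 * Usage sketch (illustrative, not part of this file): callers such as
 * the QMP query handlers iterate over all block jobs with the job
 * mutex held; passing NULL yields the first block job in the list:
 *
 *     JOB_LOCK_GUARD();
 *     for (BlockJob *bjob = block_job_next_locked(NULL); bjob;
 *          bjob = block_job_next_locked(bjob)) {
 *         ... inspect bjob here ...
 *     }
 */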
BlockJob *block_job_get_locked(const char *id)
{
    Job *job = job_get_locked(id);
    GLOBAL_STATE_CODE();

    if (job && is_block_job(job)) {
        return container_of(job, BlockJob, job);
    } else {
        return NULL;
    }
}
BlockJob *block_job_get(const char *id)
{
    JOB_LOCK_GUARD();
    return block_job_get_locked(id);
}
void block_job_free(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    GLOBAL_STATE_CODE();

    block_job_remove_all_bdrv(bjob);
    ratelimit_destroy(&bjob->limit);
    error_free(bjob->blocker);
}
static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'", job_type_str(&job->job), job->job.id);
}
static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_pause(&job->job);
}
static bool child_job_drained_poll(BdrvChild *c)
{
    BlockJob *bjob = c->opaque;
    Job *job = &bjob->job;
    const BlockJobDriver *drv = block_job_driver(bjob);

    /* An inactive or completed job doesn't have any pending requests. Jobs
     * with !job->busy are either already paused or have a pause point after
     * being reentered, so no job driver code will run before they pause. */
    WITH_JOB_LOCK_GUARD() {
        if (!job->busy || job_is_completed_locked(job)) {
            return false;
        }
    }

    /* Otherwise, assume that it isn't fully stopped yet, but allow the job to
     * override this assumption. */
    if (drv->drained_poll) {
        return drv->drained_poll(bjob);
    } else {
        return true;
    }
}
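/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * job that tracks requests invisible to job->busy can implement
 * BlockJobDriver.drained_poll so that draining keeps waiting while
 * those requests are in flight:
 *
 *     static bool my_job_drained_poll(BlockJob *bjob)
 *     {
 *         MyJob *s = container_of(bjob, MyJob, common);
 *         return s->in_flight > 0;
 *     }
 */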
static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_resume(&job->job);
}
typedef struct BdrvStateChildJobContext {
    AioContext *new_ctx;
    BlockJob *job;
} BdrvStateChildJobContext;
static void child_job_set_aio_ctx_commit(void *opaque)
{
    BdrvStateChildJobContext *s = opaque;
    BlockJob *job = s->job;

    job_set_aio_context(&job->job, s->new_ctx);
}
static TransactionActionDrv change_child_job_context = {
    .commit = child_job_set_aio_ctx_commit,
    .clean = g_free,
};
static bool child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx,
                                     GHashTable *visited, Transaction *tran,
                                     Error **errp)
{
    BlockJob *job = c->opaque;
    BdrvStateChildJobContext *s;
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *sibling = l->data;
        if (!bdrv_child_change_aio_context(sibling, ctx, visited,
                                           tran, errp)) {
            return false;
        }
    }

    s = g_new(BdrvStateChildJobContext, 1);
    *s = (BdrvStateChildJobContext) {
        .new_ctx = ctx,
        .job = job,
    };

    tran_add(tran, &change_child_job_context, s);
    return true;
}
static AioContext *child_job_get_parent_aio_context(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    IO_CODE();
    JOB_LOCK_GUARD();

    return job->job.aio_context;
}
static const BdrvChildClass child_job = {
    .get_parent_desc = child_job_get_parent_desc,
    .drained_begin = child_job_drained_begin,
    .drained_poll = child_job_drained_poll,
    .drained_end = child_job_drained_end,
    .change_aio_ctx = child_job_change_aio_ctx,
    .stay_at_node = true,
    .get_parent_aio_context = child_job_get_parent_aio_context,
};
void block_job_remove_all_bdrv(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    /*
     * bdrv_root_unref_child() may reach child_job_[can_]set_aio_ctx(),
     * which will also traverse job->nodes, so consume the list one by
     * one to make sure that such a concurrent access does not attempt
     * to process an already freed BdrvChild.
     */
    aio_context_release(job->job.aio_context);
    bdrv_graph_wrlock(NULL);
    aio_context_acquire(job->job.aio_context);
    while (job->nodes) {
        GSList *l = job->nodes;
        BdrvChild *c = l->data;

        job->nodes = l->next;

        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);

        g_slist_free_1(l);
    }
    bdrv_graph_wrunlock();
}
bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)
{
    GSList *el;
    GLOBAL_STATE_CODE();

    for (el = job->nodes; el; el = el->next) {
        BdrvChild *c = el->data;
        if (c->bs == bs) {
            return true;
        }
    }

    return false;
}
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;
    AioContext *ctx = bdrv_get_aio_context(bs);
    bool need_context_ops;
    GLOBAL_STATE_CODE();

    bdrv_ref(bs);

    need_context_ops = ctx != job->job.aio_context;

    if (need_context_ops) {
        if (job->job.aio_context != qemu_get_aio_context()) {
            aio_context_release(job->job.aio_context);
        }
        aio_context_acquire(ctx);
    }
    c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
                               errp);
    if (need_context_ops) {
        aio_context_release(ctx);
        if (job->job.aio_context != qemu_get_aio_context()) {
            aio_context_acquire(job->job.aio_context);
        }
    }
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}
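/*
 * Usage sketch (hypothetical; the permission choices are illustrative):
 * besides the "main node" added by block_job_create(), a driver can
 * register any other node it touches, e.g. a mirror or backup target,
 * so that conflicting operations stay blocked for the job's lifetime:
 *
 *     ret = block_job_add_bdrv(job, "target", target_bs, BLK_PERM_WRITE,
 *                              BLK_PERM_CONSISTENT_READ, errp);
 *     if (ret < 0) {
 *         goto fail;
 *     }
 */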
/* Called with job_mutex lock held. */
static void block_job_on_idle_locked(Notifier *n, void *opaque)
{
    aio_wait_kick();
}
bool block_job_is_internal(BlockJob *job)
{
    return (job->job.id == NULL);
}
const BlockJobDriver *block_job_driver(BlockJob *job)
{
    return container_of(job->job.driver, BlockJobDriver, job_driver);
}
/* Assumes the job_mutex is held */
static bool job_timer_pending(Job *job)
{
    return timer_pending(&job->sleep_timer);
}
bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp)
{
    const BlockJobDriver *drv = block_job_driver(job);
    int64_t old_speed = job->speed;

    GLOBAL_STATE_CODE();

    if (job_apply_verb_locked(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
        return false;
    }
    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "speed",
                   "a non-negative value");
        return false;
    }

    ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME);

    job->speed = speed;

    if (drv->set_speed) {
        job_unlock();
        drv->set_speed(job, speed);
        job_lock();
    }

    if (speed && speed <= old_speed) {
        return true;
    }

    /* kick only if a timer is pending */
    job_enter_cond_locked(&job->job, job_timer_pending);

    return true;
}
static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    JOB_LOCK_GUARD();
    return block_job_set_speed_locked(job, speed, errp);
}
void block_job_change_locked(BlockJob *job, BlockJobChangeOptions *opts,
                             Error **errp)
{
    const BlockJobDriver *drv = block_job_driver(job);

    GLOBAL_STATE_CODE();

    if (job_apply_verb_locked(&job->job, JOB_VERB_CHANGE, errp)) {
        return;
    }

    if (drv->change) {
        job_unlock();
        drv->change(job, opts, errp);
        job_lock();
    } else {
        error_setg(errp, "Job type does not support change");
    }
}
void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n)
{
    IO_CODE();
    ratelimit_calculate_delay(&job->limit, n);
}
void block_job_ratelimit_sleep(BlockJob *job)
{
    uint64_t delay_ns;

    /*
     * Sleep at least once. If the job is reentered early, keep waiting until
     * we've waited for the full time that is necessary to keep the job at the
     * right speed.
     *
     * Make sure to recalculate the delay after each (possibly interrupted)
     * sleep because the speed can change while the job has yielded.
     */
    do {
        delay_ns = ratelimit_calculate_delay(&job->limit, 0);
        job_sleep_ns(&job->job, delay_ns);
    } while (delay_ns && !job_is_cancelled(&job->job));
}
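/*
 * Usage sketch (names illustrative): a job driver's .run loop typically
 * pairs the two helpers above so that I/O is throttled to the
 * user-configured speed:
 *
 *     for (offset = 0; offset < len; offset += n) {
 *         block_job_ratelimit_sleep(job);        // honour any pending delay
 *         n = my_do_one_chunk(job, offset);      // hypothetical I/O step
 *         block_job_ratelimit_processed_bytes(job, n);
 *     }
 */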
BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;
    uint64_t progress_current, progress_total;

    GLOBAL_STATE_CODE();

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    info = g_new0(BlockJobInfo, 1);
    info->type = g_strdup(job_type_str(&job->job));
    info->device = g_strdup(job->job.id);
    info->busy = job->job.busy;
    info->paused = job->job.pause_count > 0;
    info->offset = progress_current;
    info->len = progress_total;
    info->speed = job->speed;
    info->io_status = job->iostatus;
    info->ready = job_is_ready_locked(&job->job);
    info->status = job->job.status;
    info->auto_finalize = job->job.auto_finalize;
    info->auto_dismiss = job->job.auto_dismiss;
    if (job->job.ret) {
        info->error = job->job.err ?
                      g_strdup(error_get_pretty(job->job.err)) :
                      g_strdup(strerror(-job->job.ret));
    }
    return info;
}
/* Called with job lock held */
static void block_job_iostatus_set_err_locked(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
/* Called with job_mutex lock held. */
static void block_job_event_cancelled_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_cancelled(job_type(&job->job),
                                        job->job.id,
                                        progress_total,
                                        progress_current,
                                        job->speed);
}
/* Called with job_mutex lock held. */
static void block_job_event_completed_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    const char *msg = NULL;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    if (job->job.ret < 0) {
        msg = error_get_pretty(job->job.err);
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_completed(job_type(&job->job),
                                        job->job.id,
                                        progress_total,
                                        progress_current,
                                        job->speed,
                                        msg);
}
/* Called with job_mutex lock held. */
static void block_job_event_pending_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_pending(job_type(&job->job),
                                      job->job.id);
}
/* Called with job_mutex lock held. */
static void block_job_event_ready_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_ready(job_type(&job->job),
                                    job->job.id,
                                    progress_total,
                                    progress_current,
                                    job->speed);
}
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockJob *job;
    int ret;
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (job_id == NULL && !(flags & JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
    }

    job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs),
                     flags, cb, opaque, errp);
    if (job == NULL) {
        return NULL;
    }

    assert(is_block_job(&job->job));
    assert(job->job.driver->free == &block_job_free);
    assert(job->job.driver->user_resume == &block_job_user_resume);

    ratelimit_init(&job->limit);

    job->finalize_cancelled_notifier.notify = block_job_event_cancelled_locked;
    job->finalize_completed_notifier.notify = block_job_event_completed_locked;
    job->pending_notifier.notify = block_job_event_pending_locked;
    job->ready_notifier.notify = block_job_event_ready_locked;
    job->idle_notifier.notify = block_job_on_idle_locked;

    WITH_JOB_LOCK_GUARD() {
        notifier_list_add(&job->job.on_finalize_cancelled,
                          &job->finalize_cancelled_notifier);
        notifier_list_add(&job->job.on_finalize_completed,
                          &job->finalize_completed_notifier);
        notifier_list_add(&job->job.on_pending, &job->pending_notifier);
        notifier_list_add(&job->job.on_ready, &job->ready_notifier);
        notifier_list_add(&job->job.on_idle, &job->idle_notifier);
    }

    error_setg(&job->blocker, "block device is in use by block job: %s",
               job_type_str(&job->job));

    ret = block_job_add_bdrv(job, "main node", bs, perm, shared_perm, errp);
    if (ret < 0) {
        goto fail;
    }

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    if (!block_job_set_speed(job, speed, errp)) {
        goto fail;
    }

    return job;

fail:
    job_early_fail(&job->job);
    return NULL;
}
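/*
 * Usage sketch (hypothetical driver and types): a job driver's entry
 * point wraps block_job_create() and casts the result to its own job
 * struct, whose first member must be the embedded BlockJob:
 *
 *     MyJob *s = block_job_create(job_id, &my_job_driver, txn, bs,
 *                                 BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL,
 *                                 speed, creation_flags, NULL, NULL, errp);
 *     if (!s) {
 *         return;
 *     }
 */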
void block_job_iostatus_reset_locked(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->job.user_paused && job->job.pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
static void block_job_iostatus_reset(BlockJob *job)
{
    JOB_LOCK_GUARD();
    block_job_iostatus_reset_locked(job);
}
void block_job_user_resume(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    GLOBAL_STATE_CODE();
    block_job_iostatus_reset(bjob);
}
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;
    IO_CODE();

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->job.id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        WITH_JOB_LOCK_GUARD() {
            if (!job->job.user_paused) {
                job_pause_locked(&job->job);
                /*
                 * make the pause user visible, which will be
                 * resumed from QMP.
                 */
                job->job.user_paused = true;
            }
            block_job_iostatus_set_err_locked(job, error);
        }
    }
    return action;
}
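/*
 * Usage sketch (illustrative): drivers call this from their I/O path to
 * translate an errno into an action consistent with the configured
 * on-error policy:
 *
 *     if (ret < 0) {
 *         BlockErrorAction action =
 *             block_job_error_action(&s->common, s->on_error, is_read, -ret);
 *         if (action == BLOCK_ERROR_ACTION_REPORT) {
 *             return ret;    // fail the job with this error
 *         }
 *         // STOP has paused the job; IGNORE means carry on
 *     }
 */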
AioContext *block_job_get_aio_context(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    return job->job.aio_context;
}