diff --git a/blockjob.c b/blockjob.c
index b7daf2a9f6..58c5d64539 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -24,6 +24,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "block/aio-wait.h"
 #include "block/block.h"
 #include "block/blockjob_int.h"
 #include "block/block_int.h"
@@ -32,7 +33,6 @@
 #include "qapi/error.h"
 #include "qapi/qapi-events-block-core.h"
 #include "qapi/qmp/qerror.h"
-#include "qemu/coroutine.h"
 #include "qemu/main-loop.h"
 #include "qemu/timer.h"
 
@@ -198,6 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job)
      * one to make sure that such a concurrent access does not attempt
      * to process an already freed BdrvChild.
      */
+    bdrv_graph_wrlock(NULL);
     while (job->nodes) {
         GSList *l = job->nodes;
         BdrvChild *c = l->data;
@@ -209,6 +210,7 @@ void block_job_remove_all_bdrv(BlockJob *job)
 
         g_slist_free_1(l);
     }
+    bdrv_graph_wrunlock();
 }
 
 bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)
@@ -230,20 +232,27 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                        uint64_t perm, uint64_t shared_perm, Error **errp)
 {
     BdrvChild *c;
+    AioContext *ctx = bdrv_get_aio_context(bs);
     bool need_context_ops;
     GLOBAL_STATE_CODE();
 
     bdrv_ref(bs);
 
-    need_context_ops = bdrv_get_aio_context(bs) != job->job.aio_context;
+    need_context_ops = ctx != job->job.aio_context;
 
-    if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
-        aio_context_release(job->job.aio_context);
+    if (need_context_ops) {
+        if (job->job.aio_context != qemu_get_aio_context()) {
+            aio_context_release(job->job.aio_context);
+        }
+        aio_context_acquire(ctx);
     }
     c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
                                errp);
-    if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
-        aio_context_acquire(job->job.aio_context);
+    if (need_context_ops) {
+        aio_context_release(ctx);
+        if (job->job.aio_context != qemu_get_aio_context()) {
+            aio_context_acquire(job->job.aio_context);
+        }
     }
     if (c == NULL) {
         return -EPERM;
@@ -319,10 +328,28 @@ static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
     return block_job_set_speed_locked(job, speed, errp);
 }
 
-int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
+void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n)
 {
     IO_CODE();
-    return ratelimit_calculate_delay(&job->limit, n);
+    ratelimit_calculate_delay(&job->limit, n);
+}
+
+void block_job_ratelimit_sleep(BlockJob *job)
+{
+    uint64_t delay_ns;
+
+    /*
+     * Sleep at least once. If the job is reentered early, keep waiting until
+     * we've waited for the full time that is necessary to keep the job at the
+     * right speed.
+     *
+     * Make sure to recalculate the delay after each (possibly interrupted)
+     * sleep because the speed can change while the job has yielded.
+     */
+    do {
+        delay_ns = ratelimit_calculate_delay(&job->limit, 0);
+        job_sleep_ns(&job->job, delay_ns);
+    } while (delay_ns && !job_is_cancelled(&job->job));
 }
 
 BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
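
Note on the block_job_add_bdrv() hunk: the new code takes the AioContext of
the node being attached around the bdrv_root_attach_child() call, which needs
the child node's context held, and keeps the pre-existing rule that the job's
own context is only dropped when it is not the main context. A minimal sketch
of an illustrative caller, assuming the usual contract that job operations run
in the main thread with the job's AioContext held; "target_bs" and the
permissions are made-up values, not from this patch:

    aio_context_acquire(job->job.aio_context);
    /* Attach target_bs to the job even if it runs in another AioContext;
     * block_job_add_bdrv() swaps the locks internally as shown above. */
    if (block_job_add_bdrv(job, "target", target_bs,
                           BLK_PERM_WRITE, BLK_PERM_ALL, errp) < 0) {
        /* e.g. a permission conflict on the node: fail the job setup */
        goto fail;
    }
    aio_context_release(job->job.aio_context);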
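
Note on the rate-limit hunk: block_job_ratelimit_get_delay() is split into
block_job_ratelimit_processed_bytes(), which only accounts bytes against the
limit, and block_job_ratelimit_sleep(), which does the actual throttled sleep
and, per its new comment, keeps sleeping when the job is reentered early. A
minimal sketch of how a job's run loop might use the pair, loosely modelled on
QEMU's commit job; ExampleJob, s->len and process_one_cluster() are
illustrative assumptions, not part of this patch:

    static int coroutine_fn example_job_run(Job *job, Error **errp)
    {
        ExampleJob *s = container_of(job, ExampleJob, common.job);
        int64_t offset, n = 0;

        for (offset = 0; offset < s->len; offset += n) {
            /* Sleeps as long as needed to honour the configured speed,
             * recalculating the delay if the coroutine is woken early. */
            block_job_ratelimit_sleep(&s->common);

            n = process_one_cluster(s, offset); /* hypothetical helper */
            if (n < 0) {
                return n;
            }

            /* Charge the processed bytes against the rate limit. */
            block_job_ratelimit_processed_bytes(&s->common, n);
        }

        return 0;
    }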