* Must be called without clone's queue lock held,
* see end_clone_request() for more details.
*/
-static void dm_end_request(struct request *clone, int error)
+void dm_end_request(struct request *clone, int error)
{
int rw = rq_data_dir(clone);
struct dm_rq_target_io *tio = clone->end_io_data;
* Target's rq_end_io() function isn't called.
* This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
*/
-static void dm_kill_unmapped_request(struct request *rq, int error)
+void dm_kill_unmapped_request(struct request *rq, int error)
{
rq->cmd_flags |= REQ_FAILED;
dm_complete_request(rq, error);
dm_complete_request(rq, r);
}
+/*
+ * Dispatch the clone associated with an original request @rq.
+ *
+ * Looks up the per-request dm_rq_target_io via tio_from_request() and
+ * hands tio->clone, together with the original request, to
+ * dm_dispatch_clone_request().
+ *
+ * NOTE(review): assumes tio->clone has already been set up by the
+ * map/clone path before this is called — confirm against callers.
+ */
+void dm_dispatch_request(struct request *rq)
+{
+	struct dm_rq_target_io *tio = tio_from_request(rq);
+
+	dm_dispatch_clone_request(tio->clone, rq);
+}
+
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
void *data)
{
tio = tio_from_request(rq);
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
- queue_kthread_work(&md->kworker, &tio->work);
+ spin_unlock(q->queue_lock);
+ if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
+ dm_requeue_original_request(md, rq);
BUG_ON(!irqs_disabled());
+ spin_lock(q->queue_lock);
}
goto out;
* the query about congestion status of request_queue
*/
if (dm_request_based(md))
- r = md->queue->backing_dev_info.wb.state &
+ r = md->queue->backing_dev_info->wb.state &
bdi_bits;
else
r = dm_table_any_congested(map, bdi_bits);
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
- md->queue->backing_dev_info.congested_data = md;
- md->queue->backing_dev_info.congested_fn = dm_any_congested;
+ md->queue->backing_dev_info->congested_data = md;
+ md->queue->backing_dev_info->congested_fn = dm_any_congested;
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
}