drivers/block/virtio_blk.c
// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

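/* Per-virtqueue state, cacheline-aligned so the per-vq locks don't false-share. */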
struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;

        /* What the host tells us, plus 2 for the out header and status trailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};

struct virtblk_req {
        struct virtio_blk_outhdr out_hdr;
        u8 status;
        struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return BLK_STS_OK;
        case VIRTIO_BLK_S_UNSUPP:
                return BLK_STS_NOTSUPP;
        default:
                return BLK_STS_IOERR;
        }
}

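/*
 * Assemble the descriptor chain for one request: the read-only out_hdr
 * first, then the data scatterlist (driver-to-device for writes,
 * device-to-driver for reads), and finally the writable status byte the
 * device fills in on completion.
 */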
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

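/*
 * Discard and write-zeroes requests carry no bio data payload of their own;
 * instead, build an array of (sector, num_sectors, flags) ranges, one entry
 * per discard segment, and attach it to the request as a special payload so
 * blk_rq_map_sg() maps it like ordinary data.
 */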
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);
        unsigned short n = 0;
        struct virtio_blk_discard_write_zeroes *range;
        struct bio *bio;
        u32 flags = 0;

        if (unmap)
                flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
                return -ENOMEM;

        __rq_for_each_bio(bio, req) {
                u64 sector = bio->bi_iter.bi_sector;
                u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

                range[n].flags = cpu_to_le32(flags);
                range[n].num_sectors = cpu_to_le32(num_sectors);
                range[n].sector = cpu_to_le64(sector);
                n++;
        }

        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;

        return 0;
}

static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
                kfree(page_address(req->special_vec.bv_page) +
                      req->special_vec.bv_offset);
        }

        blk_mq_end_request(req, virtblk_result(vbr));
}

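/*
 * Virtqueue callback: reap all completed requests under the per-vq lock,
 * looping until callbacks can be re-enabled without racing against the
 * device adding further used buffers.
 */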
static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        blk_mq_complete_request(req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

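/* ->commit_rqs: kick the device for requests queued with bd->last unset. */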
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
        bool kick;

        spin_lock_irq(&vq->lock);
        kick = virtqueue_kick_prepare(vq->vq);
        spin_unlock_irq(&vq->lock);

        if (kick)
                virtqueue_notify(vq->vq);
}

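/*
 * Translate one blk-mq request into a virtio-blk command: pick the command
 * type from the request op, fill in the out header, map the data pages and
 * queue the descriptor chain, notifying the device only for the last
 * request of a batch.
 */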
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        unsigned int num;
        int qid = hctx->queue_num;
        int err;
        bool notify = false;
        bool unmap = false;
        u32 type;

        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

        switch (req_op(req)) {
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                type = 0;
                break;
        case REQ_OP_FLUSH:
                type = VIRTIO_BLK_T_FLUSH;
                break;
        case REQ_OP_DISCARD:
                type = VIRTIO_BLK_T_DISCARD;
                break;
        case REQ_OP_WRITE_ZEROES:
                type = VIRTIO_BLK_T_WRITE_ZEROES;
                unmap = !(req->cmd_flags & REQ_NOUNMAP);
                break;
        case REQ_OP_DRV_IN:
                type = VIRTIO_BLK_T_GET_ID;
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
        }

        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
        vbr->out_hdr.sector = type ?
                0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
        vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

        blk_mq_start_request(req);

        if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
                err = virtblk_setup_discard_write_zeroes(req, unmap);
                if (err)
                        return BLK_STS_RESOURCE;
        }

        num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
        if (num) {
                if (rq_data_dir(req) == WRITE)
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
                else
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
        }

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
                        return BLK_STS_DEV_RESOURCE;
                return BLK_STS_IOERR;
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_STS_OK;
}

/* Return the ID (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct request *req;
        int err;

        req = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
        err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
        blk_put_request(req);
        return err;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
        return 0;
}

static const struct block_device_operations virtblk_fops = {
        .owner  = THIS_MODULE,
        .getgeo = virtblk_getgeo,
};

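/* Each disk spans 1 << PART_BITS minors: the whole device plus partitions. */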
static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}

static ssize_t serial_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        unsigned long long nblocks;
        u64 capacity;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
                   vblk->disk->disk_name,
                   resize ? "new size: " : "",
                   nblocks,
                   queue_logical_block_size(q),
                   cap_str_10,
                   cap_str_2);

        set_capacity(vblk->disk, capacity);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);
        char *envp[] = { "RESIZE=1", NULL };

        virtblk_update_capacity(vblk, true);
        revalidate_disk(vblk->disk);
        kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

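/*
 * Allocate and discover the virtqueues: use the num_queues value from
 * config space when VIRTIO_BLK_F_MQ is negotiated (capped at nr_cpu_ids),
 * otherwise fall back to a single queue.
 */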
static int init_vq(struct virtio_blk *vblk)
{
        int err;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;

        num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration. */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 *
 * Names are bijective base-26: index 0 is "vda", 25 is "vdz", 26 is "vdaa",
 * much like spreadsheet column labels.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}

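/*
 * Writeback caching is in effect if the device exposes a configurable WCE
 * byte (VIRTIO_BLK_F_CONFIG_WCE); without that, VIRTIO_BLK_F_FLUSH alone
 * implies a writeback cache.
 */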
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);

        /*
         * If WCE is not configurable and flush is not available,
         * assume no writeback cache is in use.
         */
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        blk_queue_write_cache(vblk->disk->queue, writeback, false);
        revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        i = sysfs_match_string(virtblk_cache_types, buf);
        if (i < 0)
                return i;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
        &dev_attr_serial.attr,
        &dev_attr_cache_type.attr,
        NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;

        if (a == &dev_attr_cache_type.attr &&
            !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                return S_IRUGO;

        return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
        .attrs = virtblk_attrs,
        .is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
        &virtblk_attr_group,
        NULL,
};

static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx, unsigned int numa_node)
{
        struct virtio_blk *vblk = set->driver_data;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

        sg_init_table(vbr->sg, vblk->sg_elems);
        return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;

        return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
                                        vblk->vdev, 0);
}

static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
        .commit_rqs     = virtio_commit_rqs,
        .complete       = virtblk_request_done,
        .init_request   = virtblk_init_request,
        .map_queues     = virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

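/*
 * Device probe: allocate an index and the per-device state, set up the
 * virtqueues and blk-mq tag set, derive queue limits from the negotiated
 * features, then publish the disk.
 */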
static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u32 v, blk_size, max_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* We need an extra sg element at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                virtblk_queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        virtblk_queue_depth /= 2;
        }

        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = virtblk_queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * sg_elems;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_put_disk;

        q = blk_mq_init_queue(&vblk->tag_set);
        if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
        vblk->disk->queue = q;

        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->flags |= GENHD_FL_EXT_DEVT;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems - 2);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        max_size = virtio_max_dma_size(vdev);

        /*
         * Host can optionally specify maximum segment size and number of
         * segments.
         */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                max_size = min(max_size, v);

        blk_queue_max_segment_size(q, max_size);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
                q->limits.discard_granularity = blk_size;

                virtio_cread(vdev, struct virtio_blk_config,
                             discard_sector_alignment, &v);
                q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

                virtio_cread(vdev, struct virtio_blk_config,
                             max_discard_sectors, &v);
                blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

                virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
                             &v);
                blk_queue_max_discard_segments(q,
                                               min_not_zero(v,
                                                            MAX_DISCARD_SEGMENTS));

                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
                virtio_cread(vdev, struct virtio_blk_config,
                             max_write_zeroes_sectors, &v);
                blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
        }

        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);

        device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        return 0;

out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
        put_disk(vblk->disk);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
        int refc;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);

        blk_mq_free_tag_set(&vblk->tag_set);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
        kfree(vblk);

        /* Only free device id if we don't have any users */
        if (refc == 1)
                ida_simple_remove(&vd_index_ida, index);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_quiesce_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_unquiesce_queue(vblk->disk->queue);
        return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

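/*
 * Feature bits we negotiate; the legacy table is used when the device is
 * driven through the legacy (pre-1.0) virtio interface.
 */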
static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static struct virtio_driver virtio_blk = {
        .feature_table                  = features,
        .feature_table_size             = ARRAY_SIZE(features),
        .feature_table_legacy           = features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze                         = virtblk_freeze,
        .restore                        = virtblk_restore,
#endif
};

static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");