block: disable entropy contributions from nonrot devices
author     Mike Snitzer <snitzer@redhat.com>
           Sat, 4 Oct 2014 16:55:32 +0000 (10:55 -0600)
committer  Jens Axboe <axboe@fb.com>
           Sat, 4 Oct 2014 16:55:32 +0000 (10:55 -0600)

Clear QUEUE_FLAG_ADD_RANDOM in all block drivers that set
QUEUE_FLAG_NONROT.

Historically, all block devices have automatically made entropy
contributions.  But as previously stated in commit e2e1a148 ("block: add
sysfs knob for turning off disk entropy contributions"):
    - On SSD disks, the completion times aren't as random as they
      are for rotational drives. So it's questionable whether they
      should contribute to the random pool in the first place.
    - Calling add_disk_randomness() has a lot of overhead.
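
For context, the block layer only samples completion timing when the queue
still advertises ADD_RANDOM.  A rough sketch of that gate (hedged: the exact
upstream call site and variable names are not shown in this patch) looks like:

	/* Only feed the entropy pool if the queue still has ADD_RANDOM set. */
	if (blk_queue_add_random(rq->q))
		add_disk_randomness(rq->rq_disk);

Clearing the flag up front therefore also avoids the add_disk_randomness()
overhead noted above.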

There are more reliable sources for randomness than non-rotational block
devices.  From a security perspective it is better to err on the side of
caution than to allow entropy contributions from unreliable "random"
sources.
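
The per-driver change is mechanical; a minimal sketch of the pattern each hunk
below applies during queue setup (assuming a driver-owned struct request_queue
*q as in the hunks; the helper name is hypothetical):

	#include <linux/blkdev.h>

	static void example_mark_nonrot(struct request_queue *q)
	{
		/*
		 * Completion timing of non-rotational media is too regular to
		 * be a useful entropy source, so mark the queue NONROT and opt
		 * it out of entropy contributions in the same place.
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
	}

Administrators can still re-enable contributions for a given device at runtime
via the add_random sysfs attribute introduced by e2e1a148
(/sys/block/<dev>/queue/add_random).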

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
14 files changed:
drivers/block/mtip32xx/mtip32xx.c
drivers/block/nbd.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/block/rsxx/dev.c
drivers/block/skd_main.c
drivers/block/zram/zram_drv.c
drivers/ide/ide-disk.c
drivers/md/bcache/super.c
drivers/mmc/card/queue.c
drivers/mtd/mtd_blkdevs.c
drivers/s390/block/scm_blk.c
drivers/s390/block/xpram.c
drivers/scsi/sd.c

diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index db1e956..936f8c1 100644
@@ -3952,6 +3952,7 @@ skip_create_disk:
 
        /* Set device limits. */
        set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
+       clear_bit(QUEUE_FLAG_ADD_RANDOM, &dd->queue->queue_flags);
        blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
        blk_queue_physical_block_size(dd->queue, 4096);
        blk_queue_max_hw_sectors(dd->queue, 0xffff);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index fb31b8e..4bc2a5c 100644
@@ -847,6 +847,7 @@ static int __init nbd_init(void)
                 * Tell the block layer that we are not a rotational device
                 */
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
+               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
                disk->queue->limits.discard_granularity = 512;
                disk->queue->limits.max_discard_sectors = UINT_MAX;
                disk->queue->limits.discard_zeroes_data = 0;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a3b042c..b0d94b6 100644
@@ -507,6 +507,7 @@ static int null_add_dev(void)
 
        nullb->q->queuedata = nullb;
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 
        disk = nullb->disk = alloc_disk_node(1, home_node);
        if (!disk)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 02351e2..e2bb8af 100644
@@ -1916,6 +1916,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
        ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
        queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, ns->queue);
        blk_queue_make_request(ns->queue, nvme_make_request);
        ns->dev = dev;
        ns->queue->queuedata = ns;
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 2839d37..40ee770 100644
@@ -307,6 +307,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
        blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue);
        if (rsxx_discard_supported(card)) {
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
                blk_queue_max_discard_sectors(card->queue,
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 8fcdcfb..1e46eb2 100644
@@ -4426,6 +4426,7 @@ static int skd_cons_disk(struct skd_device *skdev)
        q->limits.discard_zeroes_data = 1;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
 
        spin_lock_irqsave(&skdev->lock, flags);
        pr_debug("%s:%s:%d stopping %s queue\n",
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index dfa4024..6dd2cef 100644
@@ -925,6 +925,7 @@ static int create_device(struct zram *zram, int device_id)
        set_capacity(zram->disk, 0);
        /* zram devices sort of resembles non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZED sized I/O requests.
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index ee88038..56b9708 100644
@@ -685,8 +685,10 @@ static void ide_disk_setup(ide_drive_t *drive)
        printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
               queue_max_sectors(q) / 2);
 
-       if (ata_id_is_ssd(id))
+       if (ata_id_is_ssd(id)) {
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+       }
 
        /* calculate drive capacity, and select LBA if possible */
        ide_disk_get_capacity(drive);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index d4713d0..4dd2bb7 100644
@@ -842,6 +842,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
        q->limits.logical_block_size    = block_size;
        q->limits.physical_block_size   = block_size;
        set_bit(QUEUE_FLAG_NONROT,      &d->disk->queue->queue_flags);
+       clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
        set_bit(QUEUE_FLAG_DISCARD,     &d->disk->queue->queue_flags);
 
        blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3e049c1..c19bfc1 100644
@@ -210,6 +210,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);
 
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 43e3099..d08229e 100644
@@ -417,6 +417,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
        blk_queue_logical_block_size(new->rq, tr->blksize);
 
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);
 
        if (tr->discard) {
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 76bed17..56046ab 100644
@@ -386,6 +386,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
        blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
        blk_queue_max_segments(rq, nr_max_blk);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
        scm_blk_dev_cluster_setup(bdev);
 
        bdev->gendisk = alloc_disk(SCM_NR_PARTS);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 6969d39..9e0de9c 100644
@@ -346,6 +346,7 @@ static int __init xpram_setup_blkdev(void)
                        goto out;
                }
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
+               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
                blk_queue_make_request(xpram_queues[i], xpram_make_request);
                blk_queue_logical_block_size(xpram_queues[i], 4096);
        }
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2c2041c..fe67f5c 100644
@@ -2660,8 +2660,10 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 
        rot = get_unaligned_be16(&buffer[4]);
 
-       if (rot == 1)
+       if (rot == 1) {
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
+               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue);
+       }
 
  out:
        kfree(buffer);