zram: read page from backing device
[uclinux-h8/linux.git] / drivers/block/zram/zram_drv.c
index 195b337..4ddc18a 100644
@@ -454,6 +454,95 @@ void zram_page_end_io(struct bio *bio)
        bio_put(bio);
 }
 
+/*
+ * Returns 1 if the submission is successful, otherwise a negative errno.
+ */
+static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
+                       unsigned long entry, struct bio *parent)
+{
+       struct bio *bio;
+
+       bio = bio_alloc(GFP_ATOMIC, 1);
+       if (!bio)
+               return -ENOMEM;
+
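+       /* One slot holds one page: convert the slot index to 512-byte sectors */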
+       bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+       bio->bi_bdev = zram->bdev;
+       if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
+               bio_put(bio);
+               return -EIO;
+       }
+
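+       /*
+        * Without a parent bio this is a standalone read that completes
+        * through zram_page_end_io(); with a parent, chain to it so the
+        * parent completes only after this read does.
+        */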
+       if (!parent) {
+               bio->bi_opf = REQ_OP_READ;
+               bio->bi_end_io = zram_page_end_io;
+       } else {
+               bio->bi_opf = parent->bi_opf;
+               bio_chain(bio, parent);
+       }
+
+       submit_bio(bio);
+       return 1;
+}
+
+struct zram_work {
+       struct work_struct work;
+       struct zram *zram;
+       unsigned long entry;
+       struct bio *bio;
+       struct bio_vec bvec;
+};
+
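+/*
+ * The synchronous path is needed only for partial IO, which cannot
+ * happen when PAGE_SIZE matches zram's 4096-byte logical block size.
+ */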
+#if PAGE_SIZE != 4096
+static void zram_sync_read(struct work_struct *work)
+{
+       struct zram_work *zw = container_of(work, struct zram_work, work);
+       struct zram *zram = zw->zram;
+       unsigned long entry = zw->entry;
+       struct bio *bio = zw->bio;
+
+       read_from_bdev_async(zram, &zw->bvec, entry, bio);
+}
+
+/*
+ * The block layer wants one ->make_request_fn to be active at a time,
+ * so if we chain this IO to the parent IO in the same context it
+ * deadlocks. To avoid that, issue the IO from a worker thread context.
+ */
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+                               unsigned long entry, struct bio *bio)
+{
+       struct zram_work work;
+
+       work.bvec = *bvec;
+       work.zram = zram;
+       work.entry = entry;
+       work.bio = bio;
+
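+       /*
+        * The work item lives on the stack; flush_work() below waits for
+        * zram_sync_read() to finish, so the frame stays valid throughout.
+        */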
+       INIT_WORK_ONSTACK(&work.work, zram_sync_read);
+       queue_work(system_unbound_wq, &work.work);
+       flush_work(&work.work);
+       destroy_work_on_stack(&work.work);
+
+       return 1;
+}
+#else
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+                               unsigned long entry, struct bio *bio)
+{
+       WARN_ON(1);
+       return -EIO;
+}
+#endif
+
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+                       unsigned long entry, struct bio *parent, bool sync)
+{
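+       /* sync is set for partial IO: the caller must wait for the data */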
+       if (sync)
+               return read_from_bdev_sync(zram, bvec, entry, parent);
+       else
+               return read_from_bdev_async(zram, bvec, entry, parent);
+}
+
 static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
                                        u32 index, struct bio *parent,
                                        unsigned long *pentry)
@@ -514,6 +603,12 @@ static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
 {
        return -EIO;
 }
+
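+/* Stub for when writeback is not configured: no backing device to read from */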
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+                       unsigned long entry, struct bio *parent, bool sync)
+{
+       return -EIO;
+}
 static void zram_wb_clear(struct zram *zram, u32 index) {}
 #endif
 
@@ -773,13 +868,31 @@ static void zram_free_page(struct zram *zram, size_t index)
        zram_set_obj_size(zram, index, 0);
 }
 
-static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index)
+static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
+                               struct bio *bio, bool partial_io)
 {
        int ret;
        unsigned long handle;
        unsigned int size;
        void *src, *dst;
 
+       if (zram_wb_enabled(zram)) {
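+               /*
+                * If the slot was written back to the backing device, read
+                * the page from there; partial IO takes the sync path since
+                * the caller needs the data before it can continue.
+                */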
+               zram_slot_lock(zram, index);
+               if (zram_test_flag(zram, index, ZRAM_WB)) {
+                       struct bio_vec bvec;
+
+                       zram_slot_unlock(zram, index);
+
+                       bvec.bv_page = page;
+                       bvec.bv_len = PAGE_SIZE;
+                       bvec.bv_offset = 0;
+                       return read_from_bdev(zram, &bvec,
+                                       zram_get_element(zram, index),
+                                       bio, partial_io);
+               }
+               zram_slot_unlock(zram, index);
+       }
+
        if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
                return 0;
 
@@ -812,7 +925,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index)
 }
 
 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-                               u32 index, int offset)
+                               u32 index, int offset, struct bio *bio)
 {
        int ret;
        struct page *page;
@@ -825,7 +938,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                        return -ENOMEM;
        }
 
-       ret = __zram_bvec_read(zram, page, index);
+       ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
        if (unlikely(ret))
                goto out;
 
@@ -988,7 +1101,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
                if (!page)
                        return -ENOMEM;
 
-               ret = __zram_bvec_read(zram, page, index);
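+               /* Partial write: read the whole page in first (read-modify-write) */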
+               ret = __zram_bvec_read(zram, page, index, bio, true);
                if (ret)
                        goto out;
 
@@ -1065,7 +1178,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 
        if (!is_write) {
                atomic64_inc(&zram->stats.num_reads);
-               ret = zram_bvec_read(zram, bvec, index, offset);
+               ret = zram_bvec_read(zram, bvec, index, offset, bio);
                flush_dcache_page(bvec->bv_page);
        } else {
                atomic64_inc(&zram->stats.num_writes);