drivers/nvdimm/pmem.c (merge 'for-4.4/io-poll' of git://git.kernel.dk/linux-block)
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"
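/*
 * Per-namespace driver state.  'virt_addr' is the kernel mapping of the
 * persistent memory range starting at 'phys_addr'; 'data_offset' skips
 * any on-media 'pfn' metadata that precedes the data area.
 */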
struct pmem_device {
        struct request_queue    *pmem_queue;
        struct gendisk          *pmem_disk;
        struct nd_namespace_common *ndns;

        /* One contiguous memory region per device */
        phys_addr_t             phys_addr;
        /* when non-zero this device is hosting a 'pfn' instance */
        phys_addr_t             data_offset;
        void __pmem             *virt_addr;
        size_t                  size;
};

static int pmem_major;
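/*
 * Copy one bio_vec segment between a page and persistent memory.  Reads
 * flush the destination page's dcache after the copy; writes flush it
 * first so the kernel alias used for the copy observes the latest
 * user-space stores on architectures with aliasing data caches.  Writes
 * are not durable until the caller issues wmb_pmem().
 */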
static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
                        unsigned int len, unsigned int off, int rw,
                        sector_t sector)
{
        void *mem = kmap_atomic(page);
        phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
        void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

        if (rw == READ) {
                memcpy_from_pmem(mem + off, pmem_addr, len);
                flush_dcache_page(page);
        } else {
                flush_dcache_page(page);
                memcpy_to_pmem(pmem_addr, mem + off, len);
        }

        kunmap_atomic(mem);
}
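/*
 * Bio-based I/O entry point.  pmem completes every bio synchronously in
 * the submitting context (no request-based queuing or completion
 * interrupt); a single wmb_pmem() after the segment loop makes all the
 * writes in the bio durable before completion is signaled.
 */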
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
        bool do_acct;
        unsigned long start;
        struct bio_vec bvec;
        struct bvec_iter iter;
        struct block_device *bdev = bio->bi_bdev;
        struct pmem_device *pmem = bdev->bd_disk->private_data;

        do_acct = nd_iostat_start(bio, &start);
        bio_for_each_segment(bvec, bio, iter)
                pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset,
                                bio_data_dir(bio), iter.bi_sector);
        if (do_acct)
                nd_iostat_end(bio, start);

        if (bio_data_dir(bio))
                wmb_pmem();

        bio_endio(bio);
        return BLK_QC_T_NONE;
}
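/*
 * ->rw_page() fast path: transfer a single page without building a bio.
 * PAGE_CACHE_SIZE equals PAGE_SIZE in this kernel, so the whole page is
 * copied from sector 'sector' onward.
 */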
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, int rw)
{
        struct pmem_device *pmem = bdev->bd_disk->private_data;

        pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
        if (rw & WRITE)
                wmb_pmem();
        page_endio(page, rw & WRITE, 0);

        return 0;
}
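/*
 * DAX entry point: translate a sector into a kernel virtual address and
 * a pfn so callers can access persistent memory directly, bypassing the
 * page cache.  The return value is the number of bytes available for
 * direct access at that offset; in 'pfn' mode it is limited to what the
 * memmap covers.
 *
 * Illustrative call sequence (hypothetical caller, for exposition only):
 *
 *	void __pmem *kaddr;
 *	unsigned long pfn;
 *	long avail = pmem_direct_access(bdev, sector, &kaddr, &pfn);
 *	if (avail >= len)
 *		... access [kaddr, kaddr + len) directly ...
 */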
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
                      void __pmem **kaddr, unsigned long *pfn)
{
        struct pmem_device *pmem = bdev->bd_disk->private_data;
        resource_size_t offset = sector * 512 + pmem->data_offset;
        resource_size_t size;

        if (pmem->data_offset) {
                /*
                 * Limit the direct_access() size to what is covered by
                 * the memmap
                 */
                size = (pmem->size - offset) & ~ND_PFN_MASK;
        } else
                size = pmem->size - offset;

        /* FIXME convert DAX to comprehend that this mapping has a lifetime */
        *kaddr = pmem->virt_addr + offset;
        *pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;

        return size;
}

static const struct block_device_operations pmem_fops = {
        .owner =                THIS_MODULE,
        .rw_page =              pmem_rw_page,
        .direct_access =        pmem_direct_access,
        .revalidate_disk =      nvdimm_revalidate_disk,
};
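/*
 * Allocate driver state, reserve the physical range, and map it.  All
 * allocations and the mapping are devm-managed, so there is no matching
 * free routine; teardown happens automatically when the device is
 * unbound.
 */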
static struct pmem_device *pmem_alloc(struct device *dev,
                struct resource *res, int id)
{
        struct pmem_device *pmem;

        pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
        if (!pmem)
                return ERR_PTR(-ENOMEM);

        pmem->phys_addr = res->start;
        pmem->size = resource_size(res);
        if (!arch_has_wmb_pmem())
                dev_warn(dev, "unable to guarantee persistence of writes\n");

        if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
                        dev_name(dev))) {
                dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
                                &pmem->phys_addr, pmem->size);
                return ERR_PTR(-EBUSY);
        }

        if (pmem_should_map_pages(dev))
                pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res);
        else
                pmem->virt_addr = (void __pmem *) devm_memremap(dev,
                                pmem->phys_addr, pmem->size,
                                ARCH_MEMREMAP_PMEM);

        if (IS_ERR(pmem->virt_addr))
                return (void __force *) pmem->virt_addr;

        return pmem;
}

static void pmem_detach_disk(struct pmem_device *pmem)
{
        if (!pmem->pmem_disk)
                return;

        del_gendisk(pmem->pmem_disk);
        put_disk(pmem->pmem_disk);
        blk_cleanup_queue(pmem->pmem_queue);
}
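/*
 * Register the request queue and gendisk that expose the namespace as a
 * block device.  alloc_disk_node(0, ...) together with GENHD_FL_EXT_DEVT
 * means all minors are allocated from the extended dev_t space, and the
 * capacity excludes any 'pfn' metadata at the front of the range.
 */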
static int pmem_attach_disk(struct device *dev,
                struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
        int nid = dev_to_node(dev);
        struct gendisk *disk;

        pmem->pmem_queue = blk_alloc_queue_node(GFP_KERNEL, nid);
        if (!pmem->pmem_queue)
                return -ENOMEM;

        blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
        blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
        blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
        blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

        disk = alloc_disk_node(0, nid);
        if (!disk) {
                blk_cleanup_queue(pmem->pmem_queue);
                return -ENOMEM;
        }

        disk->major             = pmem_major;
        disk->first_minor       = 0;
        disk->fops              = &pmem_fops;
        disk->private_data      = pmem;
        disk->queue             = pmem->pmem_queue;
        disk->flags             = GENHD_FL_EXT_DEVT;
        nvdimm_namespace_disk_name(ndns, disk->disk_name);
        disk->driverfs_dev = dev;
        set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
        pmem->pmem_disk = disk;

        add_disk(disk);
        revalidate_disk(disk);

        return 0;
}
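/*
 * Byte-granularity access for claiming drivers: this backs the
 * namespace's ->rw_bytes() and is used by BTT and PFN code to read and
 * write their on-media metadata.
 */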
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
                resource_size_t offset, void *buf, size_t size, int rw)
{
        struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

        if (unlikely(offset + size > pmem->size)) {
                dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
                return -EFAULT;
        }

        if (rw == READ)
                memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
        else {
                memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
                wmb_pmem();
        }

        return 0;
}
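/*
 * Create or validate the 'pfn' superblock that reserves room for a
 * struct page memmap inside the namespace itself.  On a fresh namespace
 * this computes the data offset (assuming 64 bytes per struct page, per
 * the comment below), writes the superblock at offset 4K, and checksums
 * it.  The resulting on-media layout is roughly:
 *
 *	0        4K       8K                    dataoff
 *	| unused | pfn_sb | memmap (PMEM mode)  | data ... |
 */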
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
        struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
        struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_region *nd_region;
        unsigned long npfns;
        phys_addr_t offset;
        u64 checksum;
        int rc;

        if (!pfn_sb)
                return -ENOMEM;

        nd_pfn->pfn_sb = pfn_sb;
        rc = nd_pfn_validate(nd_pfn);
        if (rc == 0 || rc == -EBUSY)
                return rc;

        /* section alignment for simple hotplug */
        if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN
                        || pmem->phys_addr & ND_PFN_MASK)
                return -ENODEV;

        nd_region = to_nd_region(nd_pfn->dev.parent);
        if (nd_region->ro) {
                dev_info(&nd_pfn->dev,
                                "%s is read-only, unable to init metadata\n",
                                dev_name(&nd_region->dev));
                goto err;
        }

        memset(pfn_sb, 0, sizeof(*pfn_sb));
        npfns = (pmem->size - SZ_8K) / SZ_4K;
        /*
         * Note, we use 64 here for the standard size of struct page,
         * debugging options may cause it to be larger in which case the
         * implementation will limit the pfns advertised through
         * ->direct_access() to those that are included in the memmap.
         */
        if (nd_pfn->mode == PFN_MODE_PMEM)
                offset = ALIGN(SZ_8K + 64 * npfns, PMD_SIZE);
        else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = SZ_8K;
        else
                goto err;

        npfns = (pmem->size - offset) / SZ_4K;
        pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
        pfn_sb->dataoff = cpu_to_le64(offset);
        pfn_sb->npfns = cpu_to_le64(npfns);
        memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
        memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
        pfn_sb->version_major = cpu_to_le16(1);
        checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
        pfn_sb->checksum = cpu_to_le64(checksum);

        rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
        if (rc)
                goto err;

        return 0;
 err:
        nd_pfn->pfn_sb = NULL;
        kfree(pfn_sb);
        return -ENXIO;
}

static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
        struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
        struct pmem_device *pmem;

        /* free pmem disk */
        pmem = dev_get_drvdata(&nd_pfn->dev);
        pmem_detach_disk(pmem);

        /* release nd_pfn resources */
        kfree(nd_pfn->pfn_sb);
        nd_pfn->pfn_sb = NULL;

        return 0;
}
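/*
 * Bring up a namespace in 'pfn' mode: initialize or validate the
 * superblock via nd_pfn_init(), swap the plain memremap() mapping for
 * devm_memremap_pages() so the range gets struct pages, then attach the
 * disk with data_offset skipping the metadata.  Only PFN_MODE_RAM is
 * handled in this snapshot; PFN_MODE_PMEM fails with -ENXIO.
 */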
static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
        struct device *dev = &nd_pfn->dev;
        struct vmem_altmap *altmap;
        struct nd_region *nd_region;
        struct nd_pfn_sb *pfn_sb;
        struct pmem_device *pmem;
        phys_addr_t offset;
        int rc;

        if (!nd_pfn->uuid || !nd_pfn->ndns)
                return -ENODEV;

        nd_region = to_nd_region(dev->parent);
        rc = nd_pfn_init(nd_pfn);
        if (rc)
                return rc;

        if (PAGE_SIZE != SZ_4K) {
                dev_err(dev, "only supported on systems with 4K PAGE_SIZE\n");
                return -ENXIO;
        }
        if (nsio->res.start & ND_PFN_MASK) {
                dev_err(dev, "%s not memory hotplug section aligned\n",
                                dev_name(&ndns->dev));
                return -ENXIO;
        }

        pfn_sb = nd_pfn->pfn_sb;
        offset = le64_to_cpu(pfn_sb->dataoff);
        nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
        if (nd_pfn->mode == PFN_MODE_RAM) {
                if (offset != SZ_8K)
                        return -EINVAL;
                nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
                altmap = NULL;
        } else {
                rc = -ENXIO;
                goto err;
        }

        /* establish pfn range for lookup, and switch to direct map */
        pmem = dev_get_drvdata(dev);
        devm_memunmap(dev, (void __force *) pmem->virt_addr);
        pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res);
        if (IS_ERR(pmem->virt_addr)) {
                rc = PTR_ERR(pmem->virt_addr);
                goto err;
        }

        /* attach pmem disk in "pfn-mode" */
        pmem->data_offset = offset;
        rc = pmem_attach_disk(dev, ndns, pmem);
        if (rc)
                goto err;

        return rc;
 err:
        nvdimm_namespace_detach_pfn(ndns);
        return rc;
}
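/*
 * Bus probe.  A namespace can come up raw, as a BTT host, or as a PFN
 * host.  If nd_btt_probe()/nd_pfn_probe() finds claim metadata, this
 * probe intentionally fails with -ENXIO and the device re-registers as
 * btt-pmem or pfn-pmem ("we'll come back" below).
 */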
static int nd_pmem_probe(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_namespace_common *ndns;
        struct nd_namespace_io *nsio;
        struct pmem_device *pmem;

        ndns = nvdimm_namespace_common_probe(dev);
        if (IS_ERR(ndns))
                return PTR_ERR(ndns);

        nsio = to_nd_namespace_io(&ndns->dev);
        pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
        if (IS_ERR(pmem))
                return PTR_ERR(pmem);

        pmem->ndns = ndns;
        dev_set_drvdata(dev, pmem);
        ndns->rw_bytes = pmem_rw_bytes;

        if (is_nd_btt(dev))
                return nvdimm_namespace_attach_btt(ndns);

        if (is_nd_pfn(dev))
                return nvdimm_namespace_attach_pfn(ndns);

        if (nd_btt_probe(ndns, pmem) == 0) {
                /* we'll come back as btt-pmem */
                return -ENXIO;
        }

        if (nd_pfn_probe(ndns, pmem) == 0) {
                /* we'll come back as pfn-pmem */
                return -ENXIO;
        }

        return pmem_attach_disk(dev, ndns, pmem);
}

static int nd_pmem_remove(struct device *dev)
{
        struct pmem_device *pmem = dev_get_drvdata(dev);

        if (is_nd_btt(dev))
                nvdimm_namespace_detach_btt(pmem->ndns);
        else if (is_nd_pfn(dev))
                nvdimm_namespace_detach_pfn(pmem->ndns);
        else
                pmem_detach_disk(pmem);

        return 0;
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
        .probe = nd_pmem_probe,
        .remove = nd_pmem_remove,
        .drv = {
                .name = "nd_pmem",
        },
        .type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};
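/*
 * Module init/exit: grab a dynamic block major for "pmem" and register
 * on the nvdimm bus for both IO and PMEM namespace types; exit reverses
 * both steps in order.
 */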
static int __init pmem_init(void)
{
        int error;

        pmem_major = register_blkdev(0, "pmem");
        if (pmem_major < 0)
                return pmem_major;

        error = nd_driver_register(&nd_pmem_driver);
        if (error) {
                unregister_blkdev(pmem_major, "pmem");
                return error;
        }

        return 0;
}
module_init(pmem_init);

static void pmem_exit(void)
{
        driver_unregister(&nd_pmem_driver.drv);
        unregister_blkdev(pmem_major, "pmem");
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");