/*
 * drivers/acpi/nfit/core.c
 *
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
                "Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
                "Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");
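
/*
 * Example (illustrative): the parameters above can be set at module load
 * time or on the kernel command line, e.g.:
 *
 *   modprobe nfit force_enable_dimms=1 no_init_ars=1
 *   nfit.default_dsm_family=1   (kernel command line form)
 */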

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
        struct list_head spas;
        struct list_head memdevs;
        struct list_head dcrs;
        struct list_head bdws;
        struct list_head idts;
        struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
        return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
                struct nvdimm_bus_descriptor *nd_desc)
{
        return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
        struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

        /*
         * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
         * acpi_device.
         */
        if (!nd_desc->provider_name
                        || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
                return NULL;

        return to_acpi_device(acpi_desc->dev);
}

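/*
 * Translate the firmware status word of a bus-scope command (ARS and
 * clear-error) into an errno. The low 16 bits carry the command status,
 * the upper 16 bits the extended status.
 */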
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
        struct nd_cmd_clear_error *clear_err;
        struct nd_cmd_ars_status *ars_status;
        u16 flags;

        switch (cmd) {
        case ND_CMD_ARS_CAP:
                if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
                        return -ENOTTY;

                /* Command failed */
                if (status & 0xffff)
                        return -EIO;

                /* No supported scan types for this range */
                flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
                if ((status >> 16 & flags) == 0)
                        return -ENOTTY;
                return 0;
        case ND_CMD_ARS_START:
                /* ARS is in progress */
                if ((status & 0xffff) == NFIT_ARS_START_BUSY)
                        return -EBUSY;

                /* Command failed */
                if (status & 0xffff)
                        return -EIO;
                return 0;
        case ND_CMD_ARS_STATUS:
                ars_status = buf;
                /* Command failed */
                if (status & 0xffff)
                        return -EIO;
                /* Check extended status (Upper two bytes) */
                if (status == NFIT_ARS_STATUS_DONE)
                        return 0;

                /* ARS is in progress */
                if (status == NFIT_ARS_STATUS_BUSY)
                        return -EBUSY;

                /* No ARS performed for the current boot */
                if (status == NFIT_ARS_STATUS_NONE)
                        return -EAGAIN;

                /*
                 * ARS interrupted, either we overflowed or some other
                 * agent wants the scan to stop.  If we didn't overflow
                 * then just continue with the returned results.
                 */
                if (status == NFIT_ARS_STATUS_INTR) {
                        if (ars_status->out_length >= 40 && (ars_status->flags
                                                & NFIT_ARS_F_OVERFLOW))
                                return -ENOSPC;
                        return 0;
                }

                /* Unknown status */
                if (status >> 16)
                        return -EIO;
                return 0;
        case ND_CMD_CLEAR_ERROR:
                clear_err = buf;
                if (status & 0xffff)
                        return -EIO;
                if (!clear_err->cleared)
                        return -EIO;
                if (clear_err->length > clear_err->cleared)
                        return clear_err->cleared;
                return 0;
        default:
                break;
        }

        /* all other non-zero status results in an error */
        if (status)
                return -EIO;
        return 0;
}

#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
                u32 status)
{
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

        switch (cmd) {
        case ND_CMD_GET_CONFIG_SIZE:
                /*
                 * In the _LSI, _LSR, _LSW case the locked status is
                 * communicated via the read/write commands
                 */
                if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
                        break;

                if (status >> 16 & ND_CONFIG_LOCKED)
                        return -EACCES;
                break;
        case ND_CMD_GET_CONFIG_DATA:
                if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
                                && status == ACPI_LABELS_LOCKED)
                        return -EACCES;
                break;
        case ND_CMD_SET_CONFIG_DATA:
                if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
                                && status == ACPI_LABELS_LOCKED)
                        return -EACCES;
                break;
        default:
                break;
        }

        /* all other non-zero status results in an error */
        if (status)
                return -EIO;
        return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
                u32 status)
{
        if (!nvdimm)
                return xlat_bus_status(buf, cmd, status);
        return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
        int i;
        void *dst;
        size_t size = 0;
        union acpi_object *buf = NULL;

        if (pkg->type != ACPI_TYPE_PACKAGE) {
                WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
                                pkg->type);
                goto err;
        }

        for (i = 0; i < pkg->package.count; i++) {
                union acpi_object *obj = &pkg->package.elements[i];

                if (obj->type == ACPI_TYPE_INTEGER)
                        size += 4;
                else if (obj->type == ACPI_TYPE_BUFFER)
                        size += obj->buffer.length;
                else {
                        WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
                                        obj->type);
                        goto err;
                }
        }

        buf = ACPI_ALLOCATE(sizeof(*buf) + size);
        if (!buf)
                goto err;

        dst = buf + 1;
        buf->type = ACPI_TYPE_BUFFER;
        buf->buffer.length = size;
        buf->buffer.pointer = dst;
        for (i = 0; i < pkg->package.count; i++) {
                union acpi_object *obj = &pkg->package.elements[i];

                if (obj->type == ACPI_TYPE_INTEGER) {
                        memcpy(dst, &obj->integer.value, 4);
                        dst += 4;
                } else if (obj->type == ACPI_TYPE_BUFFER) {
                        memcpy(dst, obj->buffer.pointer, obj->buffer.length);
                        dst += obj->buffer.length;
                }
        }
err:
        ACPI_FREE(pkg);
        return buf;
}

static union acpi_object *int_to_buf(union acpi_object *integer)
{
        union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
        void *dst = NULL;

        if (!buf)
                goto err;

        if (integer->type != ACPI_TYPE_INTEGER) {
                WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
                                integer->type);
                goto err;
        }

        dst = buf + 1;
        buf->type = ACPI_TYPE_BUFFER;
        buf->buffer.length = 4;
        buf->buffer.pointer = dst;
        memcpy(dst, &integer->integer.value, 4);
err:
        ACPI_FREE(integer);
        return buf;
}

static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
                u32 len, void *data)
{
        acpi_status rc;
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_object_list input = {
                .count = 3,
                .pointer = (union acpi_object []) {
                        [0] = {
                                .integer.type = ACPI_TYPE_INTEGER,
                                .integer.value = offset,
                        },
                        [1] = {
                                .integer.type = ACPI_TYPE_INTEGER,
                                .integer.value = len,
                        },
                        [2] = {
                                .buffer.type = ACPI_TYPE_BUFFER,
                                .buffer.pointer = data,
                                .buffer.length = len,
                        },
                },
        };

        rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
        if (ACPI_FAILURE(rc))
                return NULL;
        return int_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
                u32 len)
{
        acpi_status rc;
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_object_list input = {
                .count = 2,
                .pointer = (union acpi_object []) {
                        [0] = {
                                .integer.type = ACPI_TYPE_INTEGER,
                                .integer.value = offset,
                        },
                        [1] = {
                                .integer.type = ACPI_TYPE_INTEGER,
                                .integer.value = len,
                        },
                },
        };

        rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
        if (ACPI_FAILURE(rc))
                return NULL;
        return pkg_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_info(acpi_handle handle)
{
        acpi_status rc;
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

        rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
        if (ACPI_FAILURE(rc))
                return NULL;
        return pkg_to_buf(buf.pointer);
}

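/*
 * Select the _DSM revision id for a given family / function pair. The
 * Intel-family functions tabulated below were introduced at revision 2;
 * everything else defaults to revision 1.
 */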
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
        static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
                [NVDIMM_FAMILY_INTEL] = {
                        [NVDIMM_INTEL_GET_MODES] = 2,
                        [NVDIMM_INTEL_GET_FWINFO] = 2,
                        [NVDIMM_INTEL_START_FWUPDATE] = 2,
                        [NVDIMM_INTEL_SEND_FWUPDATE] = 2,
                        [NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
                        [NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
                        [NVDIMM_INTEL_SET_THRESHOLD] = 2,
                        [NVDIMM_INTEL_INJECT_ERROR] = 2,
                        [NVDIMM_INTEL_GET_SECURITY_STATE] = 2,
                        [NVDIMM_INTEL_SET_PASSPHRASE] = 2,
                        [NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2,
                        [NVDIMM_INTEL_UNLOCK_UNIT] = 2,
                        [NVDIMM_INTEL_FREEZE_LOCK] = 2,
                        [NVDIMM_INTEL_SECURE_ERASE] = 2,
                        [NVDIMM_INTEL_OVERWRITE] = 2,
                        [NVDIMM_INTEL_QUERY_OVERWRITE] = 2,
                        [NVDIMM_INTEL_SET_MASTER_PASSPHRASE] = 2,
                        [NVDIMM_INTEL_MASTER_SECURE_ERASE] = 2,
                },
        };
        u8 id;

        if (family > NVDIMM_FAMILY_MAX)
                return 0;
        if (func > 31)
                return 0;
        id = revid_table[family][func];
        if (id == 0)
                return 1; /* default */
        return id;
}

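/*
 * Keep security command payloads (passphrases, etc.) out of the kernel
 * log unless CONFIG_NFIT_SECURITY_DEBUG is enabled.
 */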
static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
{
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

        if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL
                        && func >= NVDIMM_INTEL_GET_SECURITY_STATE
                        && func <= NVDIMM_INTEL_MASTER_SECURE_ERASE)
                return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG);
        return true;
}

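/**
 * acpi_nfit_ctl - dispatch a command to the bus or a given dimm
 * @nd_desc: bus descriptor for the enclosing nvdimm bus
 * @nvdimm: target dimm, or NULL for a bus-scope command
 * @cmd: libnvdimm command number (ND_CMD_*)
 * @buf: combined input / output command payload
 * @buf_len: size of @buf
 * @cmd_rc: out parameter for the translated firmware status
 *
 * Prefers the named label methods (_LSI / _LSR / _LSW) over a _DSM when
 * the dimm advertises them.
 */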
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        union acpi_object in_obj, in_buf, *out_obj;
        const struct nd_cmd_desc *desc = NULL;
        struct device *dev = acpi_desc->dev;
        struct nd_cmd_pkg *call_pkg = NULL;
        const char *cmd_name, *dimm_name;
        unsigned long cmd_mask, dsm_mask;
        u32 offset, fw_status = 0;
        acpi_handle handle;
        unsigned int func;
        const guid_t *guid;
        int rc, i;

        if (cmd_rc)
                *cmd_rc = -EINVAL;
        func = cmd;
        if (cmd == ND_CMD_CALL) {
                call_pkg = buf;
                func = call_pkg->nd_command;

                for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
                        if (call_pkg->nd_reserved2[i])
                                return -EINVAL;
        }

        if (nvdimm) {
                struct acpi_device *adev = nfit_mem->adev;

                if (!adev)
                        return -ENOTTY;
                if (call_pkg && nfit_mem->family != call_pkg->nd_family)
                        return -ENOTTY;

                dimm_name = nvdimm_name(nvdimm);
                cmd_name = nvdimm_cmd_name(cmd);
                cmd_mask = nvdimm_cmd_mask(nvdimm);
                dsm_mask = nfit_mem->dsm_mask;
                desc = nd_cmd_dimm_desc(cmd);
                guid = to_nfit_uuid(nfit_mem->family);
                handle = adev->handle;
        } else {
                struct acpi_device *adev = to_acpi_dev(acpi_desc);

                cmd_name = nvdimm_bus_cmd_name(cmd);
                cmd_mask = nd_desc->cmd_mask;
                dsm_mask = cmd_mask;
                if (cmd == ND_CMD_CALL)
                        dsm_mask = nd_desc->bus_dsm_mask;
                desc = nd_cmd_bus_desc(cmd);
                guid = to_nfit_uuid(NFIT_DEV_BUS);
                handle = adev->handle;
                dimm_name = "bus";
        }

        if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
                return -ENOTTY;

        if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
                return -ENOTTY;

        in_obj.type = ACPI_TYPE_PACKAGE;
        in_obj.package.count = 1;
        in_obj.package.elements = &in_buf;
        in_buf.type = ACPI_TYPE_BUFFER;
        in_buf.buffer.pointer = buf;
        in_buf.buffer.length = 0;

        /* libnvdimm has already validated the input envelope */
        for (i = 0; i < desc->in_num; i++)
                in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
                                i, buf);

        if (call_pkg) {
                /* skip over package wrapper */
                in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
                in_buf.buffer.length = call_pkg->nd_size_in;
        }

        dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
                dimm_name, cmd, func, in_buf.buffer.length);
        if (payload_dumpable(nvdimm, func))
                print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
                                in_buf.buffer.pointer,
                                min_t(u32, 256, in_buf.buffer.length), true);

        /* call the BIOS, prefer the named methods over _DSM if available */
        if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
                        && test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
                out_obj = acpi_label_info(handle);
        else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
                        && test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
                struct nd_cmd_get_config_data_hdr *p = buf;

                out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
        } else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
                        && test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
                struct nd_cmd_set_config_hdr *p = buf;

                out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
                                p->in_buf);
        } else {
                u8 revid;

                if (nvdimm)
                        revid = nfit_dsm_revid(nfit_mem->family, func);
                else
                        revid = 1;
                out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
        }

        if (!out_obj) {
                dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
                return -EINVAL;
        }

        if (call_pkg) {
                call_pkg->nd_fw_size = out_obj->buffer.length;
                memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
                        out_obj->buffer.pointer,
                        min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

                ACPI_FREE(out_obj);
                /*
                 * Need to support FW function w/o known size in advance.
                 * Caller can determine required size based upon nd_fw_size.
                 * If we return an error (like elsewhere) then caller wouldn't
                 * be able to rely upon data returned to make calculation.
                 */
                if (cmd_rc)
                        *cmd_rc = 0;
                return 0;
        }

        if (out_obj->package.type != ACPI_TYPE_BUFFER) {
                dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
                                dimm_name, cmd_name, out_obj->type);
                rc = -EINVAL;
                goto out;
        }

        dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
                        cmd_name, out_obj->buffer.length);
        print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
                        out_obj->buffer.pointer,
                        min_t(u32, 128, out_obj->buffer.length), true);

        for (i = 0, offset = 0; i < desc->out_num; i++) {
                u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
                                (u32 *) out_obj->buffer.pointer,
                                out_obj->buffer.length - offset);

                if (offset + out_size > out_obj->buffer.length) {
                        dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
                                        dimm_name, cmd_name, i);
                        break;
                }

                if (in_buf.buffer.length + offset + out_size > buf_len) {
                        dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
                                        dimm_name, cmd_name, i);
                        rc = -ENXIO;
                        goto out;
                }
                memcpy(buf + in_buf.buffer.length + offset,
                                out_obj->buffer.pointer + offset, out_size);
                offset += out_size;
        }

        /*
         * Set fw_status for all the commands with a known format to be
         * later interpreted by xlat_status().
         */
        if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
                                        && cmd <= ND_CMD_CLEAR_ERROR)
                                || (nvdimm && cmd >= ND_CMD_SMART
                                        && cmd <= ND_CMD_VENDOR)))
                fw_status = *(u32 *) out_obj->buffer.pointer;

        if (offset + in_buf.buffer.length < buf_len) {
                if (i >= 1) {
                        /*
                         * status valid, return the number of bytes left
                         * unfilled in the output buffer
                         */
                        rc = buf_len - offset - in_buf.buffer.length;
                        if (cmd_rc)
                                *cmd_rc = xlat_status(nvdimm, buf, cmd,
                                                fw_status);
                } else {
                        dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
                                        __func__, dimm_name, cmd_name, buf_len,
                                        offset);
                        rc = -ENXIO;
                }
        } else {
                rc = 0;
                if (cmd_rc)
                        *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
        }

 out:
        ACPI_FREE(out_obj);

        return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
        static const char *to_name[] = {
                [NFIT_SPA_VOLATILE] = "volatile",
                [NFIT_SPA_PM] = "pmem",
                [NFIT_SPA_DCR] = "dimm-control-region",
                [NFIT_SPA_BDW] = "block-data-window",
                [NFIT_SPA_VDISK] = "volatile-disk",
                [NFIT_SPA_VCD] = "volatile-cd",
                [NFIT_SPA_PDISK] = "persistent-disk",
                [NFIT_SPA_PCD] = "persistent-cd",
        };

        if (type > NFIT_SPA_PCD)
                return "unknown";

        return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
        int i;

        for (i = 0; i < NFIT_UUID_MAX; i++)
                if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
                        return i;
        return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_system_address *spa)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_spa *nfit_spa;

        if (spa->header.length != sizeof(*spa))
                return false;

        list_for_each_entry(nfit_spa, &prev->spas, list) {
                if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
                        list_move_tail(&nfit_spa->list, &acpi_desc->spas);
                        return true;
                }
        }

        nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
                        GFP_KERNEL);
        if (!nfit_spa)
                return false;
        INIT_LIST_HEAD(&nfit_spa->list);
        memcpy(nfit_spa->spa, spa, sizeof(*spa));
        list_add_tail(&nfit_spa->list, &acpi_desc->spas);
        dev_dbg(dev, "spa index: %d type: %s\n",
                        spa->range_index,
                        spa_type_name(nfit_spa_type(spa)));
        return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_memory_map *memdev)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_memdev *nfit_memdev;

        if (memdev->header.length != sizeof(*memdev))
                return false;

        list_for_each_entry(nfit_memdev, &prev->memdevs, list)
                if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
                        list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
                        return true;
                }

        nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
                        GFP_KERNEL);
        if (!nfit_memdev)
                return false;
        INIT_LIST_HEAD(&nfit_memdev->list);
        memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
        list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
        dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
                        memdev->device_handle, memdev->range_index,
                        memdev->region_index, memdev->flags);
        return true;
}

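/*
 * Look up the SMBIOS physical id (and memdev flags) for an NFIT device
 * handle across all registered NFIT buses. Returns the physical id on
 * success, -ENODEV if the handle is unknown.
 */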
int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
        struct acpi_nfit_memory_map *memdev;
        struct acpi_nfit_desc *acpi_desc;
        struct nfit_mem *nfit_mem;

        mutex_lock(&acpi_desc_lock);
        list_for_each_entry(acpi_desc, &acpi_descs, list) {
                mutex_lock(&acpi_desc->init_mutex);
                list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
                        memdev = __to_nfit_memdev(nfit_mem);
                        if (memdev->device_handle == device_handle) {
                                mutex_unlock(&acpi_desc->init_mutex);
                                mutex_unlock(&acpi_desc_lock);
                                *flags = memdev->flags;
                                return memdev->physical_id;
                        }
                }
                mutex_unlock(&acpi_desc->init_mutex);
        }
        mutex_unlock(&acpi_desc_lock);

        return -ENODEV;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
        if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
                                window_size))
                return 0;
        if (dcr->windows)
                return sizeof(*dcr);
        return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_control_region *dcr)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_dcr *nfit_dcr;

        if (!sizeof_dcr(dcr))
                return false;

        list_for_each_entry(nfit_dcr, &prev->dcrs, list)
                if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
                        list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
                        return true;
                }

        nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
                        GFP_KERNEL);
        if (!nfit_dcr)
                return false;
        INIT_LIST_HEAD(&nfit_dcr->list);
        memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
        list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
        dev_dbg(dev, "dcr index: %d windows: %d\n",
                        dcr->region_index, dcr->windows);
        return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_data_region *bdw)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_bdw *nfit_bdw;

        if (bdw->header.length != sizeof(*bdw))
                return false;
        list_for_each_entry(nfit_bdw, &prev->bdws, list)
                if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
                        list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
                        return true;
                }

        nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
                        GFP_KERNEL);
        if (!nfit_bdw)
                return false;
        INIT_LIST_HEAD(&nfit_bdw->list);
        memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
        list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
        dev_dbg(dev, "bdw dcr: %d windows: %d\n",
                        bdw->region_index, bdw->windows);
        return true;
}

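/*
 * The interleave table embeds a variable length line_offset[] array whose
 * first entry is already part of the base structure, hence 'line_count - 1'.
 */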
static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
        if (idt->header.length < sizeof(*idt))
                return 0;
        return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_interleave *idt)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_idt *nfit_idt;

        if (!sizeof_idt(idt))
                return false;

        list_for_each_entry(nfit_idt, &prev->idts, list) {
                if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
                        continue;

                if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
                        list_move_tail(&nfit_idt->list, &acpi_desc->idts);
                        return true;
                }
        }

        nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
                        GFP_KERNEL);
        if (!nfit_idt)
                return false;
        INIT_LIST_HEAD(&nfit_idt->list);
        memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
        list_add_tail(&nfit_idt->list, &acpi_desc->idts);
        dev_dbg(dev, "idt index: %d num_lines: %d\n",
                        idt->interleave_index, idt->line_count);
        return true;
}

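/*
 * As with the interleave table, hint_address[] is a variable length array
 * with one entry already accounted for in sizeof(*flush).
 */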
static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
        if (flush->header.length < sizeof(*flush))
                return 0;
        return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_flush_address *flush)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_flush *nfit_flush;

        if (!sizeof_flush(flush))
                return false;

        list_for_each_entry(nfit_flush, &prev->flushes, list) {
                if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
                        continue;

                if (memcmp(nfit_flush->flush, flush,
                                        sizeof_flush(flush)) == 0) {
                        list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
                        return true;
                }
        }

        nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
                        + sizeof_flush(flush), GFP_KERNEL);
        if (!nfit_flush)
                return false;
        INIT_LIST_HEAD(&nfit_flush->list);
        memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
        list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
        dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
                        flush->device_handle, flush->hint_count);
        return true;
}

static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
                struct acpi_nfit_capabilities *pcap)
{
        struct device *dev = acpi_desc->dev;
        u32 mask;

        mask = (1 << (pcap->highest_capability + 1)) - 1;
        acpi_desc->platform_cap = pcap->capabilities & mask;
        dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
        return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev, void *table, const void *end)
{
        struct device *dev = acpi_desc->dev;
        struct acpi_nfit_header *hdr;
        void *err = ERR_PTR(-ENOMEM);

        if (table >= end)
                return NULL;

        hdr = table;
        if (!hdr->length) {
                dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
                        hdr->type);
                return NULL;
        }

        switch (hdr->type) {
        case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
                if (!add_spa(acpi_desc, prev, table))
                        return err;
                break;
        case ACPI_NFIT_TYPE_MEMORY_MAP:
                if (!add_memdev(acpi_desc, prev, table))
                        return err;
                break;
        case ACPI_NFIT_TYPE_CONTROL_REGION:
                if (!add_dcr(acpi_desc, prev, table))
                        return err;
                break;
        case ACPI_NFIT_TYPE_DATA_REGION:
                if (!add_bdw(acpi_desc, prev, table))
                        return err;
                break;
        case ACPI_NFIT_TYPE_INTERLEAVE:
                if (!add_idt(acpi_desc, prev, table))
                        return err;
                break;
        case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
                if (!add_flush(acpi_desc, prev, table))
                        return err;
                break;
        case ACPI_NFIT_TYPE_SMBIOS:
                dev_dbg(dev, "smbios\n");
                break;
        case ACPI_NFIT_TYPE_CAPABILITIES:
                if (!add_platform_cap(acpi_desc, table))
                        return err;
                break;
        default:
                dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
                break;
        }

        return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
                struct nfit_mem *nfit_mem)
{
        u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
        u16 dcr = nfit_mem->dcr->region_index;
        struct nfit_spa *nfit_spa;

        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                u16 range_index = nfit_spa->spa->range_index;
                int type = nfit_spa_type(nfit_spa->spa);
                struct nfit_memdev *nfit_memdev;

                if (type != NFIT_SPA_BDW)
                        continue;

                list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
                        if (nfit_memdev->memdev->range_index != range_index)
                                continue;
                        if (nfit_memdev->memdev->device_handle != device_handle)
                                continue;
                        if (nfit_memdev->memdev->region_index != dcr)
                                continue;

                        nfit_mem->spa_bdw = nfit_spa->spa;
                        return;
                }
        }

        dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
                        nfit_mem->spa_dcr->range_index);
        nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
                struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
        u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
        struct nfit_memdev *nfit_memdev;
        struct nfit_bdw *nfit_bdw;
        struct nfit_idt *nfit_idt;
        u16 idt_idx, range_index;

        list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
                if (nfit_bdw->bdw->region_index != dcr)
                        continue;
                nfit_mem->bdw = nfit_bdw->bdw;
                break;
        }

        if (!nfit_mem->bdw)
                return;

        nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

        if (!nfit_mem->spa_bdw)
                return;

        range_index = nfit_mem->spa_bdw->range_index;
        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
                if (nfit_memdev->memdev->range_index != range_index ||
                                nfit_memdev->memdev->region_index != dcr)
                        continue;
                nfit_mem->memdev_bdw = nfit_memdev->memdev;
                idt_idx = nfit_memdev->memdev->interleave_index;
                list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
                        if (nfit_idt->idt->interleave_index != idt_idx)
                                continue;
                        nfit_mem->idt_bdw = nfit_idt->idt;
                        break;
                }
                break;
        }
}

static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
                struct acpi_nfit_system_address *spa)
{
        struct nfit_mem *nfit_mem, *found;
        struct nfit_memdev *nfit_memdev;
        int type = spa ? nfit_spa_type(spa) : 0;

        switch (type) {
        case NFIT_SPA_DCR:
        case NFIT_SPA_PM:
                break;
        default:
                if (spa)
                        return 0;
        }

        /*
         * This loop runs in two modes: when a dimm is mapped the loop
         * adds memdev associations to an existing dimm, or creates a
         * dimm. In the unmapped dimm case this loop sweeps for memdev
         * instances with an invalid / zero range_index and adds those
         * dimms without spa associations.
         */
        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
                struct nfit_flush *nfit_flush;
                struct nfit_dcr *nfit_dcr;
                u32 device_handle;
                u16 dcr;

                if (spa && nfit_memdev->memdev->range_index != spa->range_index)
                        continue;
                if (!spa && nfit_memdev->memdev->range_index)
                        continue;
                found = NULL;
                dcr = nfit_memdev->memdev->region_index;
                device_handle = nfit_memdev->memdev->device_handle;
                list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
                        if (__to_nfit_memdev(nfit_mem)->device_handle
                                        == device_handle) {
                                found = nfit_mem;
                                break;
                        }

                if (found)
                        nfit_mem = found;
                else {
                        nfit_mem = devm_kzalloc(acpi_desc->dev,
                                        sizeof(*nfit_mem), GFP_KERNEL);
                        if (!nfit_mem)
                                return -ENOMEM;
                        INIT_LIST_HEAD(&nfit_mem->list);
                        nfit_mem->acpi_desc = acpi_desc;
                        list_add(&nfit_mem->list, &acpi_desc->dimms);
                }

                list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
                        if (nfit_dcr->dcr->region_index != dcr)
                                continue;
                        /*
                         * Record the control region for the dimm.  For
                         * the ACPI 6.1 case, where there are separate
                         * control regions for the pmem vs blk
                         * interfaces, be sure to record the extended
                         * blk details.
                         */
                        if (!nfit_mem->dcr)
                                nfit_mem->dcr = nfit_dcr->dcr;
                        else if (nfit_mem->dcr->windows == 0
                                        && nfit_dcr->dcr->windows)
                                nfit_mem->dcr = nfit_dcr->dcr;
                        break;
                }

                list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
                        struct acpi_nfit_flush_address *flush;
                        u16 i;

                        if (nfit_flush->flush->device_handle != device_handle)
                                continue;
                        nfit_mem->nfit_flush = nfit_flush;
                        flush = nfit_flush->flush;
                        nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
                                        flush->hint_count,
                                        sizeof(struct resource),
                                        GFP_KERNEL);
                        if (!nfit_mem->flush_wpq)
                                return -ENOMEM;
                        for (i = 0; i < flush->hint_count; i++) {
                                struct resource *res = &nfit_mem->flush_wpq[i];

                                res->start = flush->hint_address[i];
                                res->end = res->start + 8 - 1;
                        }
                        break;
                }

                if (dcr && !nfit_mem->dcr) {
                        dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
                                        spa->range_index, dcr);
                        return -ENODEV;
                }

                if (type == NFIT_SPA_DCR) {
                        struct nfit_idt *nfit_idt;
                        u16 idt_idx;

                        /* multiple dimms may share a SPA when interleaved */
                        nfit_mem->spa_dcr = spa;
                        nfit_mem->memdev_dcr = nfit_memdev->memdev;
                        idt_idx = nfit_memdev->memdev->interleave_index;
                        list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
                                if (nfit_idt->idt->interleave_index != idt_idx)
                                        continue;
                                nfit_mem->idt_dcr = nfit_idt->idt;
                                break;
                        }
                        nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
                } else if (type == NFIT_SPA_PM) {
                        /*
                         * A single dimm may belong to multiple SPA-PM
                         * ranges, record at least one in addition to
                         * any SPA-DCR range.
                         */
                        nfit_mem->memdev_pmem = nfit_memdev->memdev;
                } else
                        nfit_mem->memdev_dcr = nfit_memdev->memdev;
        }

        return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
        struct nfit_mem *a = container_of(_a, typeof(*a), list);
        struct nfit_mem *b = container_of(_b, typeof(*b), list);
        u32 handleA, handleB;

        handleA = __to_nfit_memdev(a)->device_handle;
        handleB = __to_nfit_memdev(b)->device_handle;
        if (handleA < handleB)
                return -1;
        else if (handleA > handleB)
                return 1;
        return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
        struct nfit_spa *nfit_spa;
        int rc;

        /*
         * For each SPA-DCR or SPA-PMEM address range find its
         * corresponding MEMDEV(s).  From each MEMDEV find the
         * corresponding DCR.  Then, if we're operating on a SPA-DCR,
         * try to find a SPA-BDW and a corresponding BDW that references
         * the DCR.  Throw it all into an nfit_mem object.  Note that
         * BDWs are optional.
         */
        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
                if (rc)
                        return rc;
        }

        /*
         * If a DIMM has failed to be mapped into SPA there will be no
         * SPA entries above. Find and register all the unmapped DIMMs
         * for reporting and recovery purposes.
         */
        rc = __nfit_mem_init(acpi_desc, NULL);
        if (rc)
                return rc;

        list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

        return 0;
}

static ssize_t bus_dsm_mask_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);

        return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
                __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);

static ssize_t revision_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

        return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

        return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
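/*
 * Example (sysfs path illustrative, assuming the first nvdimm bus):
 *   echo 1 > /sys/bus/nd/devices/ndbus0/nfit/hw_error_scrub
 */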
static ssize_t hw_error_scrub_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t size)
{
        struct nvdimm_bus_descriptor *nd_desc;
        ssize_t rc;
        long val;

        rc = kstrtol(buf, 0, &val);
        if (rc)
                return rc;

        device_lock(dev);
        nd_desc = dev_get_drvdata(dev);
        if (nd_desc) {
                struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

                switch (val) {
                case HW_ERROR_SCRUB_ON:
                        acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
                        break;
                case HW_ERROR_SCRUB_OFF:
                        acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
                        break;
                default:
                        rc = -EINVAL;
                        break;
                }
        }
        device_unlock(dev);
        if (rc)
                return rc;
        return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress.
 */
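/*
 * Example (paths illustrative): read the count / poll for completion,
 * or write '1' to request a new long-duration scrub:
 *   cat /sys/bus/nd/devices/ndbus0/nfit/scrub
 *   echo 1 > /sys/bus/nd/devices/ndbus0/nfit/scrub
 */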
static ssize_t scrub_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus_descriptor *nd_desc;
        ssize_t rc = -ENXIO;

        device_lock(dev);
        nd_desc = dev_get_drvdata(dev);
        if (nd_desc) {
                struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

                mutex_lock(&acpi_desc->init_mutex);
                rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
                                acpi_desc->scrub_busy
                                && !acpi_desc->cancel ? "+\n" : "\n");
                mutex_unlock(&acpi_desc->init_mutex);
        }
        device_unlock(dev);
        return rc;
}

static ssize_t scrub_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t size)
{
        struct nvdimm_bus_descriptor *nd_desc;
        ssize_t rc;
        long val;

        rc = kstrtol(buf, 0, &val);
        if (rc)
                return rc;
        if (val != 1)
                return -EINVAL;

        device_lock(dev);
        nd_desc = dev_get_drvdata(dev);
        if (nd_desc) {
                struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

                rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
        }
        device_unlock(dev);
        if (rc)
                return rc;
        return size;
}
static DEVICE_ATTR_RW(scrub);

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
                | 1 << ND_CMD_ARS_STATUS;

        return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

        if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
                return 0;
        return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
        &dev_attr_revision.attr,
        &dev_attr_scrub.attr,
        &dev_attr_hw_error_scrub.attr,
        &dev_attr_bus_dsm_mask.attr,
        NULL,
};

static const struct attribute_group acpi_nfit_attribute_group = {
        .name = "nfit",
        .attrs = acpi_nfit_attributes,
        .is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
        &nvdimm_bus_attribute_group,
        &acpi_nfit_attribute_group,
        NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

        return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

        return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

        return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

        return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n",
                        be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

1471 static int num_nvdimm_formats(struct nvdimm *nvdimm)
1472 {
1473         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1474         int formats = 0;
1475
1476         if (nfit_mem->memdev_pmem)
1477                 formats++;
1478         if (nfit_mem->memdev_bdw)
1479                 formats++;
1480         return formats;
1481 }
1482
1483 static ssize_t format_show(struct device *dev,
1484                 struct device_attribute *attr, char *buf)
1485 {
1486         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1487
1488         return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
1489 }
1490 static DEVICE_ATTR_RO(format);
1491
1492 static ssize_t format1_show(struct device *dev,
1493                 struct device_attribute *attr, char *buf)
1494 {
1495         u32 handle;
1496         ssize_t rc = -ENXIO;
1497         struct nfit_mem *nfit_mem;
1498         struct nfit_memdev *nfit_memdev;
1499         struct acpi_nfit_desc *acpi_desc;
1500         struct nvdimm *nvdimm = to_nvdimm(dev);
1501         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1502
1503         nfit_mem = nvdimm_provider_data(nvdimm);
1504         acpi_desc = nfit_mem->acpi_desc;
1505         handle = to_nfit_memdev(dev)->device_handle;
1506
1507         /* assumes DIMMs have at most 2 published interface codes */
1508         mutex_lock(&acpi_desc->init_mutex);
1509         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1510                 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
1511                 struct nfit_dcr *nfit_dcr;
1512
1513                 if (memdev->device_handle != handle)
1514                         continue;
1515
1516                 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1517                         if (nfit_dcr->dcr->region_index != memdev->region_index)
1518                                 continue;
1519                         if (nfit_dcr->dcr->code == dcr->code)
1520                                 continue;
1521                         rc = sprintf(buf, "0x%04x\n",
1522                                         le16_to_cpu(nfit_dcr->dcr->code));
1523                         break;
1524                 }
1525                 if (rc != -ENXIO)
1526                         break;
1527         }
1528         mutex_unlock(&acpi_desc->init_mutex);
1529         return rc;
1530 }
1531 static DEVICE_ATTR_RO(format1);
1532
1533 static ssize_t formats_show(struct device *dev,
1534                 struct device_attribute *attr, char *buf)
1535 {
1536         struct nvdimm *nvdimm = to_nvdimm(dev);
1537
1538         return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
1539 }
1540 static DEVICE_ATTR_RO(formats);
1541
1542 static ssize_t serial_show(struct device *dev,
1543                 struct device_attribute *attr, char *buf)
1544 {
1545         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1546
1547         return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
1548 }
1549 static DEVICE_ATTR_RO(serial);
1550
1551 static ssize_t family_show(struct device *dev,
1552                 struct device_attribute *attr, char *buf)
1553 {
1554         struct nvdimm *nvdimm = to_nvdimm(dev);
1555         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1556
1557         if (nfit_mem->family < 0)
1558                 return -ENXIO;
1559         return sprintf(buf, "%d\n", nfit_mem->family);
1560 }
1561 static DEVICE_ATTR_RO(family);
1562
1563 static ssize_t dsm_mask_show(struct device *dev,
1564                 struct device_attribute *attr, char *buf)
1565 {
1566         struct nvdimm *nvdimm = to_nvdimm(dev);
1567         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1568
1569         if (nfit_mem->family < 0)
1570                 return -ENXIO;
1571         return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
1572 }
1573 static DEVICE_ATTR_RO(dsm_mask);
1574
1575 static ssize_t flags_show(struct device *dev,
1576                 struct device_attribute *attr, char *buf)
1577 {
1578         struct nvdimm *nvdimm = to_nvdimm(dev);
1579         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1580         u16 flags = __to_nfit_memdev(nfit_mem)->flags;
1581
1582         if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
1583                 flags |= ACPI_NFIT_MEM_FLUSH_FAILED;
1584
1585         return sprintf(buf, "%s%s%s%s%s%s%s\n",
1586                 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
1587                 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
1588                 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
1589                 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
1590                 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
1591                 flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
1592                 flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
1593 }
1594 static DEVICE_ATTR_RO(flags);
1595
1596 static ssize_t id_show(struct device *dev,
1597                 struct device_attribute *attr, char *buf)
1598 {
1599         struct nvdimm *nvdimm = to_nvdimm(dev);
1600         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1601
1602         return sprintf(buf, "%s\n", nfit_mem->id);
1603 }
1604 static DEVICE_ATTR_RO(id);
1605
1606 static ssize_t dirty_shutdown_show(struct device *dev,
1607                 struct device_attribute *attr, char *buf)
1608 {
1609         struct nvdimm *nvdimm = to_nvdimm(dev);
1610         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1611
1612         return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
1613 }
1614 static DEVICE_ATTR_RO(dirty_shutdown);
1615
1616 static struct attribute *acpi_nfit_dimm_attributes[] = {
1617         &dev_attr_handle.attr,
1618         &dev_attr_phys_id.attr,
1619         &dev_attr_vendor.attr,
1620         &dev_attr_device.attr,
1621         &dev_attr_rev_id.attr,
1622         &dev_attr_subsystem_vendor.attr,
1623         &dev_attr_subsystem_device.attr,
1624         &dev_attr_subsystem_rev_id.attr,
1625         &dev_attr_format.attr,
1626         &dev_attr_formats.attr,
1627         &dev_attr_format1.attr,
1628         &dev_attr_serial.attr,
1629         &dev_attr_flags.attr,
1630         &dev_attr_id.attr,
1631         &dev_attr_family.attr,
1632         &dev_attr_dsm_mask.attr,
1633         &dev_attr_dirty_shutdown.attr,
1634         NULL,
1635 };
1636
1637 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
1638                 struct attribute *a, int n)
1639 {
1640         struct device *dev = container_of(kobj, struct device, kobj);
1641         struct nvdimm *nvdimm = to_nvdimm(dev);
1642         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1643
1644         if (!to_nfit_dcr(dev)) {
1645                 /* Without a dcr only the memdev attributes can be surfaced */
1646                 if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
1647                                 || a == &dev_attr_flags.attr
1648                                 || a == &dev_attr_family.attr
1649                                 || a == &dev_attr_dsm_mask.attr)
1650                         return a->mode;
1651                 return 0;
1652         }
1653
1654         if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
1655                 return 0;
1656
1657         if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
1658                         && a == &dev_attr_dirty_shutdown.attr)
1659                 return 0;
1660
1661         return a->mode;
1662 }
1663
1664 static const struct attribute_group acpi_nfit_dimm_attribute_group = {
1665         .name = "nfit",
1666         .attrs = acpi_nfit_dimm_attributes,
1667         .is_visible = acpi_nfit_dimm_attr_visible,
1668 };
1669
1670 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
1671         &nvdimm_attribute_group,
1672         &nd_device_attribute_group,
1673         &acpi_nfit_dimm_attribute_group,
1674         NULL,
1675 };
1676
1677 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
1678                 u32 device_handle)
1679 {
1680         struct nfit_mem *nfit_mem;
1681
1682         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1683                 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
1684                         return nfit_mem->nvdimm;
1685
1686         return NULL;
1687 }
1688
1689 void __acpi_nvdimm_notify(struct device *dev, u32 event)
1690 {
1691         struct nfit_mem *nfit_mem;
1692         struct acpi_nfit_desc *acpi_desc;
1693
1694         dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
1695                         event);
1696
1697         if (event != NFIT_NOTIFY_DIMM_HEALTH) {
1698                 dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
1699                                 event);
1700                 return;
1701         }
1702
1703         acpi_desc = dev_get_drvdata(dev->parent);
1704         if (!acpi_desc)
1705                 return;
1706
1707         /*
1708          * If we successfully retrieved acpi_desc, then we know nfit_mem data
1709          * is still valid.
1710          */
1711         nfit_mem = dev_get_drvdata(dev);
1712         if (nfit_mem && nfit_mem->flags_attr)
1713                 sysfs_notify_dirent(nfit_mem->flags_attr);
1714 }
1715 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);
1716
1717 static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
1718 {
1719         struct acpi_device *adev = data;
1720         struct device *dev = &adev->dev;
1721
1722         device_lock(dev->parent);
1723         __acpi_nvdimm_notify(dev, event);
1724         device_unlock(dev->parent);
1725 }
1726
1727 static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
1728 {
1729         acpi_handle handle;
1730         acpi_status status;
1731
1732         status = acpi_get_handle(adev->handle, method, &handle);
1733
1734         return ACPI_SUCCESS(status);
1737 }
1738
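/*
 * Sample the Intel SMART state at init time via a direct _DSM
 * invocation.  Declared __weak so the nfit test infrastructure can
 * substitute its own implementation.
 */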
1739 __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
1740 {
1741         struct nd_intel_smart smart = { 0 };
1742         union acpi_object in_buf = {
1743                 .type = ACPI_TYPE_BUFFER,
1744                 .buffer.pointer = (char *) &smart,
1745                 .buffer.length = sizeof(smart),
1746         };
1747         union acpi_object in_obj = {
1748                 .type = ACPI_TYPE_PACKAGE,
1749                 .package.count = 1,
1750                 .package.elements = &in_buf,
1751         };
1752         const u8 func = ND_INTEL_SMART;
1753         const guid_t *guid = to_nfit_uuid(nfit_mem->family);
1754         u8 revid = nfit_dsm_revid(nfit_mem->family, func);
1755         struct acpi_device *adev = nfit_mem->adev;
1756         acpi_handle handle = adev->handle;
1757         union acpi_object *out_obj;
1758
1759         if ((nfit_mem->dsm_mask & (1 << func)) == 0)
1760                 return;
1761
1762         out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
1763         if (!out_obj)
1764                 return;
1765
1766         if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
1767                 if (smart.shutdown_state)
1768                         set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
1769         }
1770
1771         if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
1772                 set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
1773                 nfit_mem->dirty_shutdown = smart.shutdown_count;
1774         }
1775         ACPI_FREE(out_obj);
1776 }
1777
1778 static void populate_shutdown_status(struct nfit_mem *nfit_mem)
1779 {
1780         /*
1781          * For DIMMs that provide a dynamic facility to retrieve a
1782          * dirty-shutdown status and/or a dirty-shutdown count, cache
1783          * these values in nfit_mem.
1784          */
1785         if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
1786                 nfit_intel_shutdown_status(nfit_mem);
1787 }
1788
1789 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1790                 struct nfit_mem *nfit_mem, u32 device_handle)
1791 {
1792         struct acpi_device *adev, *adev_dimm;
1793         struct device *dev = acpi_desc->dev;
1794         unsigned long dsm_mask, label_mask;
1795         const guid_t *guid;
1796         int i;
1797         int family = -1;
1798         struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
1799
1800         /* nfit test assumes 1:1 relationship between commands and dsms */
1801         nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
1802         nfit_mem->family = NVDIMM_FAMILY_INTEL;
1803
1804         if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
1805                 sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
1806                                 be16_to_cpu(dcr->vendor_id),
1807                                 dcr->manufacturing_location,
1808                                 be16_to_cpu(dcr->manufacturing_date),
1809                                 be32_to_cpu(dcr->serial_number));
1810         else
1811                 sprintf(nfit_mem->id, "%04x-%08x",
1812                                 be16_to_cpu(dcr->vendor_id),
1813                                 be32_to_cpu(dcr->serial_number));
1814
1815         adev = to_acpi_dev(acpi_desc);
1816         if (!adev) {
1817                 /* unit test case */
1818                 populate_shutdown_status(nfit_mem);
1819                 return 0;
1820         }
1821
1822         adev_dimm = acpi_find_child_device(adev, device_handle, false);
1823         nfit_mem->adev = adev_dimm;
1824         if (!adev_dimm) {
1825                 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
1826                                 device_handle);
1827                 return force_enable_dimms ? 0 : -ENODEV;
1828         }
1829
1830         if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
1831                 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
1832                 dev_err(dev, "%s: notification registration failed\n",
1833                                 dev_name(&adev_dimm->dev));
1834                 return -ENXIO;
1835         }
1836         /*
1837          * Record nfit_mem for the notification path to track back to
1838          * the nfit sysfs attributes for this dimm device object.
1839          */
1840         dev_set_drvdata(&adev_dimm->dev, nfit_mem);
1841
1842         /*
1843          * Until standardization materializes we need to consider 4
1844          * different command sets.  Note that checking for function 0 (bit 0)
1845          * tells us if any commands are reachable through this GUID.
1846          */
1847         for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
1848                 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
1849                         if (family < 0 || i == default_dsm_family)
1850                                 family = i;
1851
1852         /* limit the supported commands to those that are publicly documented */
1853         nfit_mem->family = family;
1854         if (override_dsm_mask && !disable_vendor_specific)
1855                 dsm_mask = override_dsm_mask;
1856         else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
1857                 dsm_mask = NVDIMM_INTEL_CMDMASK;
1858                 if (disable_vendor_specific)
1859                         dsm_mask &= ~(1 << ND_CMD_VENDOR);
1860         } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
1861                 dsm_mask = 0x1c3c76;
1862         } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
1863                 dsm_mask = 0x1fe;
1864                 if (disable_vendor_specific)
1865                         dsm_mask &= ~(1 << 8);
1866         } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
1867                 dsm_mask = 0xffffffff;
1868         } else {
1869                 dev_dbg(dev, "unknown dimm command family\n");
1870                 nfit_mem->family = -1;
1871                 /* DSMs are optional, continue loading the driver... */
1872                 return 0;
1873         }
1874
1875         guid = to_nfit_uuid(nfit_mem->family);
1876         for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
1877                 if (acpi_check_dsm(adev_dimm->handle, guid,
1878                                         nfit_dsm_revid(nfit_mem->family, i),
1879                                         1ULL << i))
1880                         set_bit(i, &nfit_mem->dsm_mask);
1881
1882         /*
1883          * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
1884          * due to their better semantics for handling locked capacity.
1885          */
1886         label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
1887                 | 1 << ND_CMD_SET_CONFIG_DATA;
1888         if (family == NVDIMM_FAMILY_INTEL
1889                         && (dsm_mask & label_mask) == label_mask)
1890                 return 0;
1891
1892         if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
1893                         && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
1894                 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
1895                 set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
1896         }
1897
1898         if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
1899                         && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
1900                 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
1901                 set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
1902         }
1903
1904         populate_shutdown_status(nfit_mem);
1905
1906         return 0;
1907 }
1908
1909 static void shutdown_dimm_notify(void *data)
1910 {
1911         struct acpi_nfit_desc *acpi_desc = data;
1912         struct nfit_mem *nfit_mem;
1913
1914         mutex_lock(&acpi_desc->init_mutex);
1915         /*
1916          * Clear out the nfit_mem->flags_attr and shut down dimm event
1917          * notifications.
1918          */
1919         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1920                 struct acpi_device *adev_dimm = nfit_mem->adev;
1921
1922                 if (nfit_mem->flags_attr) {
1923                         sysfs_put(nfit_mem->flags_attr);
1924                         nfit_mem->flags_attr = NULL;
1925                 }
1926                 if (adev_dimm) {
1927                         acpi_remove_notify_handler(adev_dimm->handle,
1928                                         ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
1929                         dev_set_drvdata(&adev_dimm->dev, NULL);
1930                 }
1931         }
1932         mutex_unlock(&acpi_desc->init_mutex);
1933 }
1934
1935 static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
1936 {
1937         switch (family) {
1938         case NVDIMM_FAMILY_INTEL:
1939                 return intel_security_ops;
1940         default:
1941                 return NULL;
1942         }
1943 }
1944
1945 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
1946 {
1947         struct nfit_mem *nfit_mem;
1948         int dimm_count = 0, rc;
1949         struct nvdimm *nvdimm;
1950
1951         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1952                 struct acpi_nfit_flush_address *flush;
1953                 unsigned long flags = 0, cmd_mask;
1954                 struct nfit_memdev *nfit_memdev;
1955                 u32 device_handle;
1956                 u16 mem_flags;
1957
1958                 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
1959                 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
1960                 if (nvdimm) {
1961                         dimm_count++;
1962                         continue;
1963                 }
1964
1965                 if (nfit_mem->bdw && nfit_mem->memdev_pmem)
1966                         set_bit(NDD_ALIASING, &flags);
1967
1968                 /* collate flags across all memdevs for this dimm */
1969                 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1970                         struct acpi_nfit_memory_map *dimm_memdev;
1971
1972                         dimm_memdev = __to_nfit_memdev(nfit_mem);
1973                         if (dimm_memdev->device_handle
1974                                         != nfit_memdev->memdev->device_handle)
1975                                 continue;
1976                         dimm_memdev->flags |= nfit_memdev->memdev->flags;
1977                 }
1978
1979                 mem_flags = __to_nfit_memdev(nfit_mem)->flags;
1980                 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
1981                         set_bit(NDD_UNARMED, &flags);
1982
1983                 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
1984                 if (rc)
1985                         continue;
1986
1987                 /*
1988                  * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
1989                  * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
1990                  * userspace interface.
1991                  */
1992                 cmd_mask = 1UL << ND_CMD_CALL;
1993                 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
1994                         /*
1995                          * These commands have a 1:1 correspondence
1996                          * between DSM payload and libnvdimm ioctl
1997                          * payload format.
1998                          */
1999                         cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
2000                 }
2001
2002                 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
2003                         set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
2004                         set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
2005                 }
2006                 if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
2007                         set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
2008
2009                 flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
2010                         : NULL;
2011                 nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
2012                                 acpi_nfit_dimm_attribute_groups,
2013                                 flags, cmd_mask, flush ? flush->hint_count : 0,
2014                                 nfit_mem->flush_wpq, &nfit_mem->id[0],
2015                                 acpi_nfit_get_security_ops(nfit_mem->family));
2016                 if (!nvdimm)
2017                         return -ENOMEM;
2018
2019                 nfit_mem->nvdimm = nvdimm;
2020                 dimm_count++;
2021
2022                 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
2023                         continue;
2024
2025                 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
2026                                 nvdimm_name(nvdimm),
2027                   mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
2028                   mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
2029                   mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
2030                   mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
2031                   mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");
2032
2033         }
2034
2035         rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
2036         if (rc)
2037                 return rc;
2038
2039         /*
2040          * Now that dimms are successfully registered, and async registration
2041          * is flushed, attempt to enable event notification.
2042          */
2043         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
2044                 struct kernfs_node *nfit_kernfs;
2045
2046                 nvdimm = nfit_mem->nvdimm;
2047                 if (!nvdimm)
2048                         continue;
2049
2050                 rc = nvdimm_security_setup_events(nvdimm);
2051                 if (rc < 0)
2052                         dev_warn(acpi_desc->dev,
2053                                 "security event setup failed: %d\n", rc);
2054
2055                 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
2056                 if (nfit_kernfs)
2057                         nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
2058                                         "flags");
2059                 sysfs_put(nfit_kernfs);
2060                 if (!nfit_mem->flags_attr)
2061                         dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
2062                                         nvdimm_name(nvdimm));
2063         }
2064
2065         return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
2066                         acpi_desc);
2067 }
2068
2069 /*
2070  * These constants are private because there are no kernel consumers of
2071  * these commands.
2072  */
2073 enum nfit_aux_cmds {
2074         NFIT_CMD_TRANSLATE_SPA = 5,
2075         NFIT_CMD_ARS_INJECT_SET = 7,
2076         NFIT_CMD_ARS_INJECT_CLEAR = 8,
2077         NFIT_CMD_ARS_INJECT_GET = 9,
2078 };
2079
2080 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
2081 {
2082         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2083         const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
2084         struct acpi_device *adev;
2085         unsigned long dsm_mask;
2086         int i;
2087
2088         nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
2089         nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
2090         adev = to_acpi_dev(acpi_desc);
2091         if (!adev)
2092                 return;
2093
2094         for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
2095                 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
2096                         set_bit(i, &nd_desc->cmd_mask);
2097         set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
2098
2099         dsm_mask =
2100                 (1 << ND_CMD_ARS_CAP) |
2101                 (1 << ND_CMD_ARS_START) |
2102                 (1 << ND_CMD_ARS_STATUS) |
2103                 (1 << ND_CMD_CLEAR_ERROR) |
2104                 (1 << NFIT_CMD_TRANSLATE_SPA) |
2105                 (1 << NFIT_CMD_ARS_INJECT_SET) |
2106                 (1 << NFIT_CMD_ARS_INJECT_CLEAR) |
2107                 (1 << NFIT_CMD_ARS_INJECT_GET);
2108         for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
2109                 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
2110                         set_bit(i, &nd_desc->bus_dsm_mask);
2111 }
2112
2113 static ssize_t range_index_show(struct device *dev,
2114                 struct device_attribute *attr, char *buf)
2115 {
2116         struct nd_region *nd_region = to_nd_region(dev);
2117         struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
2118
2119         return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
2120 }
2121 static DEVICE_ATTR_RO(range_index);
2122
2123 static struct attribute *acpi_nfit_region_attributes[] = {
2124         &dev_attr_range_index.attr,
2125         NULL,
2126 };
2127
2128 static const struct attribute_group acpi_nfit_region_attribute_group = {
2129         .name = "nfit",
2130         .attrs = acpi_nfit_region_attributes,
2131 };
2132
2133 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
2134         &nd_region_attribute_group,
2135         &nd_mapping_attribute_group,
2136         &nd_device_attribute_group,
2137         &nd_numa_attribute_group,
2138         &acpi_nfit_region_attribute_group,
2139         NULL,
2140 };
2141
2142 /* enough info to uniquely specify an interleave set */
2143 struct nfit_set_info {
2144         struct nfit_set_info_map {
2145                 u64 region_offset;
2146                 u32 serial_number;
2147                 u32 pad;
2148         } mapping[0];
2149 };
2150
2151 struct nfit_set_info2 {
2152         struct nfit_set_info_map2 {
2153                 u64 region_offset;
2154                 u32 serial_number;
2155                 u16 vendor_id;
2156                 u16 manufacturing_date;
2157                 u8  manufacturing_location;
2158                 u8  reserved[31];
2159         } mapping[0];
2160 };
2161
2162 static size_t sizeof_nfit_set_info(int num_mappings)
2163 {
2164         return sizeof(struct nfit_set_info)
2165                 + num_mappings * sizeof(struct nfit_set_info_map);
2166 }
2167
2168 static size_t sizeof_nfit_set_info2(int num_mappings)
2169 {
2170         return sizeof(struct nfit_set_info2)
2171                 + num_mappings * sizeof(struct nfit_set_info_map2);
2172 }
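
/*
 * Sizing sketch: the struct_size() helper from <linux/overflow.h>
 * expresses the same calculation with overflow checking, e.g.:
 *
 *	struct nfit_set_info *info;
 *	size_t size = struct_size(info, mapping, num_mappings);
 *
 * The open-coded form above is what this file uses.
 */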
2173
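/*
 * Historical comparator: memcmp() orders region_offset byte-wise, so
 * on a little-endian host 0x100 (bytes 00 01 00 ...) sorts before 0x2
 * (bytes 02 00 00 ...) despite being numerically larger.  It survives
 * only so that v1.1 cookies created with this sort order can still be
 * validated (see the altcookie calculation below).
 */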
2174 static int cmp_map_compat(const void *m0, const void *m1)
2175 {
2176         const struct nfit_set_info_map *map0 = m0;
2177         const struct nfit_set_info_map *map1 = m1;
2178
2179         return memcmp(&map0->region_offset, &map1->region_offset,
2180                         sizeof(u64));
2181 }
2182
2183 static int cmp_map(const void *m0, const void *m1)
2184 {
2185         const struct nfit_set_info_map *map0 = m0;
2186         const struct nfit_set_info_map *map1 = m1;
2187
2188         if (map0->region_offset < map1->region_offset)
2189                 return -1;
2190         else if (map0->region_offset > map1->region_offset)
2191                 return 1;
2192         return 0;
2193 }
2194
2195 static int cmp_map2(const void *m0, const void *m1)
2196 {
2197         const struct nfit_set_info_map2 *map0 = m0;
2198         const struct nfit_set_info_map2 *map1 = m1;
2199
2200         if (map0->region_offset < map1->region_offset)
2201                 return -1;
2202         else if (map0->region_offset > map1->region_offset)
2203                 return 1;
2204         return 0;
2205 }
2206
2207 /* Retrieve the nth entry referencing this spa */
2208 static struct acpi_nfit_memory_map *memdev_from_spa(
2209                 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
2210 {
2211         struct nfit_memdev *nfit_memdev;
2212
2213         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
2214                 if (nfit_memdev->memdev->range_index == range_index)
2215                         if (n-- == 0)
2216                                 return nfit_memdev->memdev;
2217         return NULL;
2218 }
2219
2220 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
2221                 struct nd_region_desc *ndr_desc,
2222                 struct acpi_nfit_system_address *spa)
2223 {
2224         struct device *dev = acpi_desc->dev;
2225         struct nd_interleave_set *nd_set;
2226         u16 nr = ndr_desc->num_mappings;
2227         struct nfit_set_info2 *info2;
2228         struct nfit_set_info *info;
2229         int i;
2230
2231         nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
2232         if (!nd_set)
2233                 return -ENOMEM;
2234         ndr_desc->nd_set = nd_set;
2235         guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
2236
2237         info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
2238         if (!info)
2239                 return -ENOMEM;
2240
2241         info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
2242         if (!info2)
2243                 return -ENOMEM;
2244
2245         for (i = 0; i < nr; i++) {
2246                 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
2247                 struct nfit_set_info_map *map = &info->mapping[i];
2248                 struct nfit_set_info_map2 *map2 = &info2->mapping[i];
2249                 struct nvdimm *nvdimm = mapping->nvdimm;
2250                 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2251                 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
2252                                 spa->range_index, i);
2253                 struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2254
2255                 if (!memdev || !nfit_mem->dcr) {
2256                         dev_err(dev, "%s: failed to find DCR\n", __func__);
2257                         return -ENODEV;
2258                 }
2259
2260                 map->region_offset = memdev->region_offset;
2261                 map->serial_number = dcr->serial_number;
2262
2263                 map2->region_offset = memdev->region_offset;
2264                 map2->serial_number = dcr->serial_number;
2265                 map2->vendor_id = dcr->vendor_id;
2266                 map2->manufacturing_date = dcr->manufacturing_date;
2267                 map2->manufacturing_location = dcr->manufacturing_location;
2268         }
2269
2270         /* v1.1 namespaces */
2271         sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
2272                         cmp_map, NULL);
2273         nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
2274
2275         /* v1.2 namespaces */
2276         sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
2277                         cmp_map2, NULL);
2278         nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);
2279
2280         /* support v1.1 namespaces created with the wrong sort order */
2281         sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
2282                         cmp_map_compat, NULL);
2283         nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
2284
2285         /* record the result of the sort for the mapping position */
2286         for (i = 0; i < nr; i++) {
2287                 struct nfit_set_info_map2 *map2 = &info2->mapping[i];
2288                 int j;
2289
2290                 for (j = 0; j < nr; j++) {
2291                         struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
2292                         struct nvdimm *nvdimm = mapping->nvdimm;
2293                         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2294                         struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2295
2296                         if (map2->serial_number == dcr->serial_number &&
2297                             map2->vendor_id == dcr->vendor_id &&
2298                             map2->manufacturing_date == dcr->manufacturing_date &&
2299                             map2->manufacturing_location
2300                                     == dcr->manufacturing_location) {
2301                                 mapping->position = i;
2302                                 break;
2303                         }
2304                 }
2305         }
2306
2307         ndr_desc->nd_set = nd_set;
2308         devm_kfree(dev, info);
2309         devm_kfree(dev, info2);
2310
2311         return 0;
2312 }
2313
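/*
 * Worked example (illustrative numbers): with line_size = 256,
 * num_lines = 2, table_size = 1024 and line_offset[] = { 0, 2 }, an
 * offset of 600 decomposes as line_no = 2, sub_line_offset = 88, then
 * table_skip_count = 1 and line_index = 0, giving
 * base_offset + 0 * 256 + 1 * 1024 + 88.
 */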
2314 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
2315 {
2316         struct acpi_nfit_interleave *idt = mmio->idt;
2317         u32 sub_line_offset, line_index, line_offset;
2318         u64 line_no, table_skip_count, table_offset;
2319
2320         line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
2321         table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
2322         line_offset = idt->line_offset[line_index]
2323                 * mmio->line_size;
2324         table_offset = table_skip_count * mmio->table_size;
2325
2326         return mmio->base_offset + line_offset + table_offset + sub_line_offset;
2327 }
2328
2329 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
2330 {
2331         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2332         u64 offset = nfit_blk->stat_offset + mmio->size * bw;
2333         const u32 STATUS_MASK = 0x80000037;
2334
2335         if (mmio->num_lines)
2336                 offset = to_interleave_offset(offset, mmio);
2337
2338         return readl(mmio->addr.base + offset) & STATUS_MASK;
2339 }
2340
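/*
 * Build and issue a block control word.  Encoding example
 * (illustrative, assumes 64-byte cache lines): dpa = 0x2000 and
 * len = 256 for a write yield
 * cmd = (1ULL << 56) | (4ULL << 48) | 0x80.
 */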
2341 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
2342                 resource_size_t dpa, unsigned int len, unsigned int write)
2343 {
2344         u64 cmd, offset;
2345         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2346
2347         enum {
2348                 BCW_OFFSET_MASK = (1ULL << 48) - 1,
2349                 BCW_LEN_SHIFT = 48,
2350                 BCW_LEN_MASK = (1ULL << 8) - 1,
2351                 BCW_CMD_SHIFT = 56,
2352         };
2353
2354         cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
2355         len = len >> L1_CACHE_SHIFT;
2356         cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
2357         cmd |= ((u64) write) << BCW_CMD_SHIFT;
2358
2359         offset = nfit_blk->cmd_offset + mmio->size * bw;
2360         if (mmio->num_lines)
2361                 offset = to_interleave_offset(offset, mmio);
2362
2363         writeq(cmd, mmio->addr.base + offset);
2364         nvdimm_flush(nfit_blk->nd_region);
2365
2366         if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
2367                 readq(mmio->addr.base + offset);
2368 }
2369
2370 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
2371                 resource_size_t dpa, void *iobuf, size_t len, int rw,
2372                 unsigned int lane)
2373 {
2374         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
2375         unsigned int copied = 0;
2376         u64 base_offset;
2377         int rc;
2378
2379         base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
2380                 + lane * mmio->size;
2381         write_blk_ctl(nfit_blk, lane, dpa, len, rw);
2382         while (len) {
2383                 unsigned int c;
2384                 u64 offset;
2385
2386                 if (mmio->num_lines) {
2387                         u32 line_offset;
2388
2389                         offset = to_interleave_offset(base_offset + copied,
2390                                         mmio);
2391                         div_u64_rem(offset, mmio->line_size, &line_offset);
2392                         c = min_t(size_t, len, mmio->line_size - line_offset);
2393                 } else {
2394                         offset = base_offset + nfit_blk->bdw_offset;
2395                         c = len;
2396                 }
2397
2398                 if (rw)
2399                         memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
2400                 else {
2401                         if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
2402                                 arch_invalidate_pmem((void __force *)
2403                                         mmio->addr.aperture + offset, c);
2404
2405                         memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
2406                 }
2407
2408                 copied += c;
2409                 len -= c;
2410         }
2411
2412         if (rw)
2413                 nvdimm_flush(nfit_blk->nd_region);
2414
2415         rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
2416         return rc;
2417 }
2418
2419 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
2420                 resource_size_t dpa, void *iobuf, u64 len, int rw)
2421 {
2422         struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
2423         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
2424         struct nd_region *nd_region = nfit_blk->nd_region;
2425         unsigned int lane, copied = 0;
2426         int rc = 0;
2427
2428         lane = nd_region_acquire_lane(nd_region);
2429         while (len) {
2430                 u64 c = min(len, mmio->size);
2431
2432                 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
2433                                 iobuf + copied, c, rw, lane);
2434                 if (rc)
2435                         break;
2436
2437                 copied += c;
2438                 len -= c;
2439         }
2440         nd_region_release_lane(nd_region, lane);
2441
2442         return rc;
2443 }
2444
2445 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
2446                 struct acpi_nfit_interleave *idt, u16 interleave_ways)
2447 {
2448         if (idt) {
2449                 mmio->num_lines = idt->line_count;
2450                 mmio->line_size = idt->line_size;
2451                 if (interleave_ways == 0)
2452                         return -ENXIO;
2453                 mmio->table_size = mmio->num_lines * interleave_ways
2454                         * mmio->line_size;
2455         }
2456
2457         return 0;
2458 }
2459
2460 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
2461                 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
2462 {
2463         struct nd_cmd_dimm_flags flags;
2464         int rc;
2465
2466         memset(&flags, 0, sizeof(flags));
2467         rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
2468                         sizeof(flags), NULL);
2469
2470         if (rc >= 0 && flags.status == 0)
2471                 nfit_blk->dimm_flags = flags.flags;
2472         else if (rc == -ENOTTY) {
2473                 /* fall back to a conservative default */
2474                 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
2475                 rc = 0;
2476         } else
2477                 rc = -ENXIO;
2478
2479         return rc;
2480 }
2481
2482 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
2483                 struct device *dev)
2484 {
2485         struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
2486         struct nd_blk_region *ndbr = to_nd_blk_region(dev);
2487         struct nfit_blk_mmio *mmio;
2488         struct nfit_blk *nfit_blk;
2489         struct nfit_mem *nfit_mem;
2490         struct nvdimm *nvdimm;
2491         int rc;
2492
2493         nvdimm = nd_blk_region_to_dimm(ndbr);
2494         nfit_mem = nvdimm_provider_data(nvdimm);
2495         if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
2496                 dev_dbg(dev, "missing%s%s%s\n",
2497                                 nfit_mem ? "" : " nfit_mem",
2498                                 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
2499                                 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
2500                 return -ENXIO;
2501         }
2502
2503         nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
2504         if (!nfit_blk)
2505                 return -ENOMEM;
2506         nd_blk_region_set_provider_data(ndbr, nfit_blk);
2507         nfit_blk->nd_region = to_nd_region(dev);
2508
2509         /* map block aperture memory */
2510         nfit_blk->bdw_offset = nfit_mem->bdw->offset;
2511         mmio = &nfit_blk->mmio[BDW];
2512         mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
2513                         nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
2514         if (!mmio->addr.base) {
2515                 dev_dbg(dev, "%s failed to map bdw\n",
2516                                 nvdimm_name(nvdimm));
2517                 return -ENOMEM;
2518         }
2519         mmio->size = nfit_mem->bdw->size;
2520         mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
2521         mmio->idt = nfit_mem->idt_bdw;
2522         mmio->spa = nfit_mem->spa_bdw;
2523         rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
2524                         nfit_mem->memdev_bdw->interleave_ways);
2525         if (rc) {
2526                 dev_dbg(dev, "%s failed to init bdw interleave\n",
2527                                 nvdimm_name(nvdimm));
2528                 return rc;
2529         }
2530
2531         /* map block control memory */
2532         nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
2533         nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
2534         mmio = &nfit_blk->mmio[DCR];
2535         mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
2536                         nfit_mem->spa_dcr->length);
2537         if (!mmio->addr.base) {
2538                 dev_dbg(dev, "%s failed to map dcr\n",
2539                                 nvdimm_name(nvdimm));
2540                 return -ENOMEM;
2541         }
2542         mmio->size = nfit_mem->dcr->window_size;
2543         mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
2544         mmio->idt = nfit_mem->idt_dcr;
2545         mmio->spa = nfit_mem->spa_dcr;
2546         rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
2547                         nfit_mem->memdev_dcr->interleave_ways);
2548         if (rc) {
2549                 dev_dbg(dev, "%s failed to init dcr interleave\n",
2550                                 nvdimm_name(nvdimm));
2551                 return rc;
2552         }
2553
2554         rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
2555         if (rc < 0) {
2556                 dev_dbg(dev, "%s failed get DIMM flags\n",
2557                                 nvdimm_name(nvdimm));
2558                 return rc;
2559         }
2560
2561         if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
2562                 dev_warn(dev, "unable to guarantee persistence of writes\n");
2563
2564         if (mmio->line_size == 0)
2565                 return 0;
2566
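        /*
         * The 8-byte control and status registers must not straddle an
         * interleave line: e.g. with line_size = 64, a cmd_offset of 60
         * leaves only 4 bytes before the boundary, so 60 + 8 > 64 and
         * the region is rejected below.
         */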
2567         if ((u32) nfit_blk->cmd_offset % mmio->line_size
2568                         + 8 > mmio->line_size) {
2569                 dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
2570                 return -ENXIO;
2571         } else if ((u32) nfit_blk->stat_offset % mmio->line_size
2572                         + 8 > mmio->line_size) {
2573                 dev_dbg(dev, "stat_offset crosses interleave boundary\n");
2574                 return -ENXIO;
2575         }
2576
2577         return 0;
2578 }
2579
2580 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
2581                 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
2582 {
2583         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2584         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2585         int cmd_rc, rc;
2586
2587         cmd->address = spa->address;
2588         cmd->length = spa->length;
2589         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
2590                         sizeof(*cmd), &cmd_rc);
2591         if (rc < 0)
2592                 return rc;
2593         return cmd_rc;
2594 }
2595
2596 static int ars_start(struct acpi_nfit_desc *acpi_desc,
2597                 struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
2598 {
2599         int rc;
2600         int cmd_rc;
2601         struct nd_cmd_ars_start ars_start;
2602         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2603         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2604
2605         memset(&ars_start, 0, sizeof(ars_start));
2606         ars_start.address = spa->address;
2607         ars_start.length = spa->length;
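        /*
         * For a short request, ND_ARS_RETURN_PREV_DATA asks the
         * platform to answer from previously collected scrub data
         * rather than starting a new full media scan.
         */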
2608         if (req_type == ARS_REQ_SHORT)
2609                 ars_start.flags = ND_ARS_RETURN_PREV_DATA;
2610         if (nfit_spa_type(spa) == NFIT_SPA_PM)
2611                 ars_start.type = ND_ARS_PERSISTENT;
2612         else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
2613                 ars_start.type = ND_ARS_VOLATILE;
2614         else
2615                 return -ENOTTY;
2616
2617         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2618                         sizeof(ars_start), &cmd_rc);
2619
2620         if (rc < 0)
2621                 return rc;
2622         return cmd_rc;
2623 }
2624
2625 static int ars_continue(struct acpi_nfit_desc *acpi_desc)
2626 {
2627         int rc, cmd_rc;
2628         struct nd_cmd_ars_start ars_start;
2629         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2630         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2631
2632         memset(&ars_start, 0, sizeof(ars_start));
2633         ars_start.address = ars_status->restart_address;
2634         ars_start.length = ars_status->restart_length;
2635         ars_start.type = ars_status->type;
2636         ars_start.flags = acpi_desc->ars_start_flags;
2637         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2638                         sizeof(ars_start), &cmd_rc);
2639         if (rc < 0)
2640                 return rc;
2641         return cmd_rc;
2642 }
2643
2644 static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2645 {
2646         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2647         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2648         int rc, cmd_rc;
2649
2650         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2651                         acpi_desc->max_ars, &cmd_rc);
2652         if (rc < 0)
2653                 return rc;
2654         return cmd_rc;
2655 }
2656
2657 static void ars_complete(struct acpi_nfit_desc *acpi_desc,
2658                 struct nfit_spa *nfit_spa)
2659 {
2660         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2661         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2662         struct nd_region *nd_region = nfit_spa->nd_region;
2663         struct device *dev;
2664
2665         lockdep_assert_held(&acpi_desc->init_mutex);
2666         /*
2667          * Only advance the ARS state for ARS runs initiated by the
2668          * kernel; ignore ARS results from BIOS-initiated runs for scrub
2669          * completion tracking.
2670          */
2671         if (acpi_desc->scrub_spa != nfit_spa)
2672                 return;
2673
2674         if ((ars_status->address >= spa->address && ars_status->address
2675                                 < spa->address + spa->length)
2676                         || (ars_status->address < spa->address)) {
2677                 /*
2678                  * Assume that if a scrub starts at an offset from the
2679                  * start of nfit_spa, we are in the continuation
2680                  * case.
2681                  *
2682                  * Otherwise, if the scrub covers the spa range, mark
2683                  * any pending request complete.
2684                  */
2685                 if (ars_status->address + ars_status->length
2686                                 >= spa->address + spa->length)
2687                                 /* complete */;
2688                 else
2689                         return;
2690         } else
2691                 return;
2692
2693         acpi_desc->scrub_spa = NULL;
2694         if (nd_region) {
2695                 dev = nd_region_dev(nd_region);
2696                 nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
2697         } else
2698                 dev = acpi_desc->dev;
2699         dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
2700 }
2701
2702 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
2703 {
2704         struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
2705         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2706         int rc;
2707         u32 i;
2708
2709         /*
2710          * The first record starts at a 44-byte offset from the start
2711          * of the payload.
2712          */
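        /*
         * e.g. with 24-byte struct nd_ars_record entries, a 92-byte
         * payload holds (92 - 44) / 24 = 2 full records.
         */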
2713         if (ars_status->out_length < 44)
2714                 return 0;
2715         for (i = 0; i < ars_status->num_records; i++) {
2716                 /* only process full records */
2717                 if (ars_status->out_length
2718                                 < 44 + sizeof(struct nd_ars_record) * (i + 1))
2719                         break;
2720                 rc = nvdimm_bus_add_badrange(nvdimm_bus,
2721                                 ars_status->records[i].err_address,
2722                                 ars_status->records[i].length);
2723                 if (rc)
2724                         return rc;
2725         }
2726         if (i < ars_status->num_records)
2727                 dev_warn(acpi_desc->dev, "detected truncated ars results\n");
2728
2729         return 0;
2730 }
2731
2732 static void acpi_nfit_remove_resource(void *data)
2733 {
2734         struct resource *res = data;
2735
2736         remove_resource(res);
2737 }
2738
2739 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2740                 struct nd_region_desc *ndr_desc)
2741 {
2742         struct resource *res, *nd_res = ndr_desc->res;
2743         int is_pmem, ret;
2744
2745         /* No operation if the region is already registered as PMEM */
2746         is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2747                                 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2748         if (is_pmem == REGION_INTERSECTS)
2749                 return 0;
2750
2751         res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2752         if (!res)
2753                 return -ENOMEM;
2754
2755         res->name = "Persistent Memory";
2756         res->start = nd_res->start;
2757         res->end = nd_res->end;
2758         res->flags = IORESOURCE_MEM;
2759         res->desc = IORES_DESC_PERSISTENT_MEMORY;
2760
2761         ret = insert_resource(&iomem_resource, res);
2762         if (ret)
2763                 return ret;
2764
2765         ret = devm_add_action_or_reset(acpi_desc->dev,
2766                                         acpi_nfit_remove_resource,
2767                                         res);
2768         if (ret)
2769                 return ret;
2770
2771         return 0;
2772 }
2773
2774 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
2775                 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
2776                 struct acpi_nfit_memory_map *memdev,
2777                 struct nfit_spa *nfit_spa)
2778 {
2779         struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
2780                         memdev->device_handle);
2781         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2782         struct nd_blk_region_desc *ndbr_desc;
2783         struct nfit_mem *nfit_mem;
2784         int rc;
2785
2786         if (!nvdimm) {
2787                 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
2788                                 spa->range_index, memdev->device_handle);
2789                 return -ENODEV;
2790         }
2791
2792         mapping->nvdimm = nvdimm;
2793         switch (nfit_spa_type(spa)) {
2794         case NFIT_SPA_PM:
2795         case NFIT_SPA_VOLATILE:
2796                 mapping->start = memdev->address;
2797                 mapping->size = memdev->region_size;
2798                 break;
2799         case NFIT_SPA_DCR:
2800                 nfit_mem = nvdimm_provider_data(nvdimm);
2801                 if (!nfit_mem || !nfit_mem->bdw) {
2802                         dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
2803                                         spa->range_index, nvdimm_name(nvdimm));
2804                         break;
2805                 }
2806
2807                 mapping->size = nfit_mem->bdw->capacity;
2808                 mapping->start = nfit_mem->bdw->start_address;
2809                 ndr_desc->num_lanes = nfit_mem->bdw->windows;
2810                 ndr_desc->mapping = mapping;
2811                 ndr_desc->num_mappings = 1;
2812                 ndbr_desc = to_blk_region_desc(ndr_desc);
2813                 ndbr_desc->enable = acpi_nfit_blk_region_enable;
2814                 ndbr_desc->do_io = acpi_desc->blk_do_io;
2815                 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2816                 if (rc)
2817                         return rc;
2818                 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
2819                                 ndr_desc);
2820                 if (!nfit_spa->nd_region)
2821                         return -ENOMEM;
2822                 break;
2823         }
2824
2825         return 0;
2826 }
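
/*
 * Note: for the PM/VOLATILE cases above, the mapping is a plain
 * offset/size carve-out of system physical address space, while the
 * DCR case publishes a BLK region whose I/O is funneled through the
 * DIMM's block data window (bdw) aperture via
 * acpi_nfit_blk_region_enable() and the ->do_io() callback.
 */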
2827
2828 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2829 {
2830         return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2831                 nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2832                 nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2833                 nfit_spa_type(spa) == NFIT_SPA_PCD);
2834 }
2835
2836 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
2837 {
2838         return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2839                 nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2840                 nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
2841 }
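
/*
 * Note: these two predicates overlap -- NFIT_SPA_VDISK and
 * NFIT_SPA_VCD are both "virtual" and "volatile".  Because
 * acpi_nfit_register_region() tests nfit_spa_is_volatile() before
 * nfit_spa_is_virtual(), volatile virtual disks/CDs become volatile
 * regions, while the persistent variants (PDISK/PCD) fall through to
 * the pmem region path.
 */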
2842
2843 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2844                 struct nfit_spa *nfit_spa)
2845 {
2846         static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
2847         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2848         struct nd_blk_region_desc ndbr_desc;
2849         struct nd_region_desc *ndr_desc;
2850         struct nfit_memdev *nfit_memdev;
2851         struct nvdimm_bus *nvdimm_bus;
2852         struct resource res;
2853         int count = 0, rc;
2854
2855         if (nfit_spa->nd_region)
2856                 return 0;
2857
2858         if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
2859                 dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
2860                 return 0;
2861         }
2862
2863         memset(&res, 0, sizeof(res));
2864         memset(&mappings, 0, sizeof(mappings));
2865         memset(&ndbr_desc, 0, sizeof(ndbr_desc));
2866         res.start = spa->address;
2867         res.end = res.start + spa->length - 1;
2868         ndr_desc = &ndbr_desc.ndr_desc;
2869         ndr_desc->res = &res;
2870         ndr_desc->provider_data = nfit_spa;
2871         ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
2872         if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
2873                 ndr_desc->numa_node = acpi_map_pxm_to_online_node(
2874                                                 spa->proximity_domain);
2875         else
2876                 ndr_desc->numa_node = NUMA_NO_NODE;
2877
2878         /*
2879          * Persistence domain bits are hierarchical: if
2880          * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set, then
2881          * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
2882          */
2883         if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
2884                 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
2885         else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
2886                 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
2887
2888         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2889                 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
2890                 struct nd_mapping_desc *mapping;
2891
2892                 if (memdev->range_index != spa->range_index)
2893                         continue;
2894                 if (count >= ND_MAX_MAPPINGS) {
2895                         dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
2896                                         spa->range_index, ND_MAX_MAPPINGS);
2897                         return -ENXIO;
2898                 }
2899                 mapping = &mappings[count++];
2900                 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
2901                                 memdev, nfit_spa);
2902                 if (rc)
2903                         goto out;
2904         }
2905
2906         ndr_desc->mapping = mappings;
2907         ndr_desc->num_mappings = count;
2908         rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2909         if (rc)
2910                 goto out;
2911
2912         nvdimm_bus = acpi_desc->nvdimm_bus;
2913         if (nfit_spa_type(spa) == NFIT_SPA_PM) {
2914                 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
2915                 if (rc) {
2916                         dev_warn(acpi_desc->dev,
2917                                 "failed to insert pmem resource into iomem: %d\n",
2918                                 rc);
2919                         goto out;
2920                 }
2921
2922                 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2923                                 ndr_desc);
2924                 if (!nfit_spa->nd_region)
2925                         rc = -ENOMEM;
2926         } else if (nfit_spa_is_volatile(spa)) {
2927                 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
2928                                 ndr_desc);
2929                 if (!nfit_spa->nd_region)
2930                         rc = -ENOMEM;
2931         } else if (nfit_spa_is_virtual(spa)) {
2932                 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2933                                 ndr_desc);
2934                 if (!nfit_spa->nd_region)
2935                         rc = -ENOMEM;
2936         }
2937
2938  out:
2939         if (rc)
2940                 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
2941                                 nfit_spa->spa->range_index);
2942         return rc;
2943 }
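
/*
 * Usage note (illustrative): each successfully registered SPA range
 * surfaces as a regionN device on the nvdimm bus; from userspace a
 * command like "ndctl list --regions" should enumerate them, though
 * the exact output depends on the ndctl version.
 */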
2944
2945 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc)
2946 {
2947         struct device *dev = acpi_desc->dev;
2948         struct nd_cmd_ars_status *ars_status;
2949
2950         if (acpi_desc->ars_status) {
2951                 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
2952                 return 0;
2953         }
2954
2955         ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL);
2956         if (!ars_status)
2957                 return -ENOMEM;
2958         acpi_desc->ars_status = ars_status;
2959         return 0;
2960 }
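
/*
 * Note: max_ars is the largest ars_cap.max_ars_out observed across all
 * scrub-capable ranges (see acpi_nfit_init_ars() below), so one buffer
 * of that size can be reused for every ARS status query; it is zeroed
 * rather than reallocated on repeat calls.
 */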
2961
2962 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
2963 {
2964         int rc;
2965
2966         if (ars_status_alloc(acpi_desc))
2967                 return -ENOMEM;
2968
2969         rc = ars_get_status(acpi_desc);
2970
2971         if (rc < 0 && rc != -ENOSPC)
2972                 return rc;
2973
2974         if (ars_status_process_records(acpi_desc))
2975                 dev_err(acpi_desc->dev, "Failed to process ARS records\n");
2976
2977         return rc;
2978 }
2979
2980 static int ars_register(struct acpi_nfit_desc *acpi_desc,
2981                 struct nfit_spa *nfit_spa)
2982 {
2983         int rc;
2984
2985         if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
2986                 return acpi_nfit_register_region(acpi_desc, nfit_spa);
2987
2988         set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
2989         set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
2990
2991         switch (acpi_nfit_query_poison(acpi_desc)) {
2992         case 0:
2993         case -EAGAIN:
2994                 rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
2995                 /* shouldn't happen, try again later */
2996                 if (rc == -EBUSY)
2997                         break;
2998                 if (rc) {
2999                         set_bit(ARS_FAILED, &nfit_spa->ars_state);
3000                         break;
3001                 }
3002                 clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
3003                 rc = acpi_nfit_query_poison(acpi_desc);
3004                 if (rc)
3005                         break;
3006                 acpi_desc->scrub_spa = nfit_spa;
3007                 ars_complete(acpi_desc, nfit_spa);
3008                 /*
3009                  * If ars_complete() says we didn't complete the
3010                  * short scrub, we'll try again with a long
3011                  * request.
3012                  */
3013                 acpi_desc->scrub_spa = NULL;
3014                 break;
3015         case -EBUSY:
3016         case -ENOMEM:
3017         case -ENOSPC:
3018                 /*
3019                  * The BIOS was using ARS; wait for it to complete (or
3020                  * for resources to become available) and then perform
3021                  * our own scrubs.
3022                  */
3023                 break;
3024         default:
3025                 set_bit(ARS_FAILED, &nfit_spa->ars_state);
3026                 break;
3027         }
3028
3029         return acpi_nfit_register_region(acpi_desc, nfit_spa);
3030 }
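
/*
 * Sketch (illustrative) of the boot-time ARS flow implemented above:
 *
 *   query cached results --> start short ARS --> query again --> done
 *            |                      |
 *            |                      +-- -EBUSY: leave ARS_REQ_* set and
 *            |                          let the scrub worker retry
 *            +-- -EBUSY/-ENOMEM/-ENOSPC: BIOS owns ARS or resources
 *                are exhausted; defer to the scrub worker as well
 *
 * In every outcome the region itself is still registered -- ARS state
 * only gates badblocks population, never region availability.
 */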
3031
3032 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc)
3033 {
3034         struct nfit_spa *nfit_spa;
3035
3036         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3037                 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3038                         continue;
3039                 ars_complete(acpi_desc, nfit_spa);
3040         }
3041 }
3042
3043 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
3044                 int query_rc)
3045 {
3046         unsigned int tmo = acpi_desc->scrub_tmo;
3047         struct device *dev = acpi_desc->dev;
3048         struct nfit_spa *nfit_spa;
3049
3050         lockdep_assert_held(&acpi_desc->init_mutex);
3051
3052         if (acpi_desc->cancel)
3053                 return 0;
3054
3055         if (query_rc == -EBUSY) {
3056                 dev_dbg(dev, "ARS: ARS busy\n");
3057                 return min(30U * 60U, tmo * 2);
3058         }
3059         if (query_rc == -ENOSPC) {
3060                 dev_dbg(dev, "ARS: ARS continue\n");
3061                 ars_continue(acpi_desc);
3062                 return 1;
3063         }
3064         if (query_rc && query_rc != -EAGAIN) {
3065                 unsigned long long addr, end;
3066
3067                 addr = acpi_desc->ars_status->address;
3068                 end = addr + acpi_desc->ars_status->length;
3069                 dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end,
3070                                 query_rc);
3071         }
3072
3073         ars_complete_all(acpi_desc);
3074         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3075                 enum nfit_ars_state req_type;
3076                 int rc;
3077
3078                 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3079                         continue;
3080
3081                 /* process short ARS requests first */
3082                 if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
3083                         req_type = ARS_REQ_SHORT;
3084                 else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
3085                         req_type = ARS_REQ_LONG;
3086                 else
3087                         continue;
3088                 rc = ars_start(acpi_desc, nfit_spa, req_type);
3089
3090                 dev = nd_region_dev(nfit_spa->nd_region);
3091                 dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
3092                                 nfit_spa->spa->range_index,
3093                                 req_type == ARS_REQ_SHORT ? "short" : "long",
3094                                 rc);
3095                 /*
3096                  * We raced another agent (e.g. the BIOS) starting ARS;
3097                  * try again shortly.
3098                  */
3099                 if (rc == -EBUSY)
3100                         return 1;
3101                 if (rc == 0) {
3102                         dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
3103                                         "scrub start while range %d active\n",
3104                                         acpi_desc->scrub_spa->spa->range_index);
3105                         clear_bit(req_type, &nfit_spa->ars_state);
3106                         acpi_desc->scrub_spa = nfit_spa;
3107                         /*
3108                          * Consider this spa last for future scrub
3109                          * requests
3110                          */
3111                         list_move_tail(&nfit_spa->list, &acpi_desc->spas);
3112                         return 1;
3113                 }
3114
3115                 dev_err(dev, "ARS: range %d ARS failed (%d)\n",
3116                                 nfit_spa->spa->range_index, rc);
3117                 set_bit(ARS_FAILED, &nfit_spa->ars_state);
3118         }
3119         return 0;
3120 }
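
/*
 * Note: __acpi_nfit_scrub() returns a re-arm interval in seconds for
 * the scrub worker -- 0 means all ARS work is finished (or cancelled),
 * 1 means poll again almost immediately, and the -EBUSY path backs off
 * exponentially, capped at 30 minutes.
 */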
3121
3122 static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
3123 {
3124         lockdep_assert_held(&acpi_desc->init_mutex);
3125
3126         acpi_desc->scrub_busy = 1;
3127         /* note: scrub_tmo should only be updated from within the workqueue */
3128         if (tmo)
3129                 acpi_desc->scrub_tmo = tmo;
3130         queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
3131 }
3132
3133 static void sched_ars(struct acpi_nfit_desc *acpi_desc)
3134 {
3135         __sched_ars(acpi_desc, 0);
3136 }
3137
3138 static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
3139 {
3140         lockdep_assert_held(&acpi_desc->init_mutex);
3141
3142         acpi_desc->scrub_busy = 0;
3143         acpi_desc->scrub_count++;
3144         if (acpi_desc->scrub_count_state)
3145                 sysfs_notify_dirent(acpi_desc->scrub_count_state);
3146 }
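
/*
 * Usage sketch (illustrative, hypothetical device path): the
 * scrub_count_state dirent backs the bus's nfit/scrub sysfs attribute,
 * so userspace can block until a scrub pass completes:
 *
 *   int fd = open("/sys/bus/nd/devices/ndbus0/nfit/scrub", O_RDONLY);
 *   struct pollfd pfd = { .fd = fd, .events = POLLERR | POLLPRI };
 *
 *   read(fd, buf, sizeof(buf));   arm the sysfs event
 *   poll(&pfd, 1, -1);            wakes when sysfs_notify_dirent()
 *                                 fires in notify_ars_done()
 */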
3147
3148 static void acpi_nfit_scrub(struct work_struct *work)
3149 {
3150         struct acpi_nfit_desc *acpi_desc;
3151         unsigned int tmo;
3152         int query_rc;
3153
3154         acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
3155         mutex_lock(&acpi_desc->init_mutex);
3156         query_rc = acpi_nfit_query_poison(acpi_desc);
3157         tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
3158         if (tmo)
3159                 __sched_ars(acpi_desc, tmo);
3160         else
3161                 notify_ars_done(acpi_desc);
3162         memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
3163         mutex_unlock(&acpi_desc->init_mutex);
3164 }
3165
3166 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
3167                 struct nfit_spa *nfit_spa)
3168 {
3169         int type = nfit_spa_type(nfit_spa->spa);
3170         struct nd_cmd_ars_cap ars_cap;
3171         int rc;
3172
3173         set_bit(ARS_FAILED, &nfit_spa->ars_state);
3174         memset(&ars_cap, 0, sizeof(ars_cap));
3175         rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
3176         if (rc < 0)
3177                 return;
3178         /* check that the supported scrub types match the spa type */
3179         if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16)
3180                                 & ND_ARS_VOLATILE) == 0)
3181                 return;
3182         if (type == NFIT_SPA_PM && ((ars_cap.status >> 16)
3183                                 & ND_ARS_PERSISTENT) == 0)
3184                 return;
3185
3186         nfit_spa->max_ars = ars_cap.max_ars_out;
3187         nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
3188         acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
3189         clear_bit(ARS_FAILED, &nfit_spa->ars_state);
3190 }
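
/*
 * Note: per the ACPI DSM definition, the low 16 bits of ars_cap.status
 * carry the command status and the high 16 bits carry the
 * supported-scrub-type flags, hence the ">> 16" above.  For example, a
 * status of 0x00030000 advertises both ND_ARS_VOLATILE (1 << 0) and
 * ND_ARS_PERSISTENT (1 << 1) with a successful (zero) command status.
 */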
3191
3192 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
3193 {
3194         struct nfit_spa *nfit_spa;
3195         int rc;
3196
3197         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3198                 switch (nfit_spa_type(nfit_spa->spa)) {
3199                 case NFIT_SPA_VOLATILE:
3200                 case NFIT_SPA_PM:
3201                         acpi_nfit_init_ars(acpi_desc, nfit_spa);
3202                         break;
3203                 }
3204         }
3205
3206         list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
3207                 switch (nfit_spa_type(nfit_spa->spa)) {
3208                 case NFIT_SPA_VOLATILE:
3209                 case NFIT_SPA_PM:
3210                         /* register regions and kick off initial ARS run */
3211                         rc = ars_register(acpi_desc, nfit_spa);
3212                         if (rc)
3213                                 return rc;
3214                         break;
3215                 case NFIT_SPA_BDW:
3216                         /* nothing to register */
3217                         break;
3218                 case NFIT_SPA_DCR:
3219                 case NFIT_SPA_VDISK:
3220                 case NFIT_SPA_VCD:
3221                 case NFIT_SPA_PDISK:
3222                 case NFIT_SPA_PCD:
3223                         /* register known regions that don't support ARS */
3224                         rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
3225                         if (rc)
3226                                 return rc;
3227                         break;
3228                 default:
3229                         /* don't register unknown regions */
3230                         break;
3231                 }
3232
3233         sched_ars(acpi_desc);
3234         return 0;
3235 }
3236
3237 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
3238                 struct nfit_table_prev *prev)
3239 {
3240         struct device *dev = acpi_desc->dev;
3241
3242         if (!list_empty(&prev->spas) ||
3243                         !list_empty(&prev->memdevs) ||
3244                         !list_empty(&prev->dcrs) ||
3245                         !list_empty(&prev->bdws) ||
3246                         !list_empty(&prev->idts) ||
3247                         !list_empty(&prev->flushes)) {
3248                 dev_err(dev, "new nfit deletes entries (unsupported)\n");
3249                 return -ENXIO;
3250         }
3251         return 0;
3252 }
3253
3254 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
3255 {
3256         struct device *dev = acpi_desc->dev;
3257         struct kernfs_node *nfit;
3258         struct device *bus_dev;
3259
3260         if (!ars_supported(acpi_desc->nvdimm_bus))
3261                 return 0;
3262
3263         bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
3264         nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
3265         if (!nfit) {
3266                 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
3267                 return -ENODEV;
3268         }
3269         acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
3270         sysfs_put(nfit);
3271         if (!acpi_desc->scrub_count_state) {
3272                 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
3273                 return -ENODEV;
3274         }
3275
3276         return 0;
3277 }
3278
3279 static void acpi_nfit_unregister(void *data)
3280 {
3281         struct acpi_nfit_desc *acpi_desc = data;
3282
3283         nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
3284 }
3285
3286 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
3287 {
3288         struct device *dev = acpi_desc->dev;
3289         struct nfit_table_prev prev;
3290         const void *end;
3291         int rc;
3292
3293         if (!acpi_desc->nvdimm_bus) {
3294                 acpi_nfit_init_dsms(acpi_desc);
3295
3296                 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
3297                                 &acpi_desc->nd_desc);
3298                 if (!acpi_desc->nvdimm_bus)
3299                         return -ENOMEM;
3300
3301                 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
3302                                 acpi_desc);
3303                 if (rc)
3304                         return rc;
3305
3306                 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
3307                 if (rc)
3308                         return rc;
3309
3310                 /* register this acpi_desc for mce notifications */
3311                 mutex_lock(&acpi_desc_lock);
3312                 list_add_tail(&acpi_desc->list, &acpi_descs);
3313                 mutex_unlock(&acpi_desc_lock);
3314         }
3315
3316         mutex_lock(&acpi_desc->init_mutex);
3317
3318         INIT_LIST_HEAD(&prev.spas);
3319         INIT_LIST_HEAD(&prev.memdevs);
3320         INIT_LIST_HEAD(&prev.dcrs);
3321         INIT_LIST_HEAD(&prev.bdws);
3322         INIT_LIST_HEAD(&prev.idts);
3323         INIT_LIST_HEAD(&prev.flushes);
3324
3325         list_cut_position(&prev.spas, &acpi_desc->spas,
3326                                 acpi_desc->spas.prev);
3327         list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
3328                                 acpi_desc->memdevs.prev);
3329         list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
3330                                 acpi_desc->dcrs.prev);
3331         list_cut_position(&prev.bdws, &acpi_desc->bdws,
3332                                 acpi_desc->bdws.prev);
3333         list_cut_position(&prev.idts, &acpi_desc->idts,
3334                                 acpi_desc->idts.prev);
3335         list_cut_position(&prev.flushes, &acpi_desc->flushes,
3336                                 acpi_desc->flushes.prev);
3337
3338         end = data + sz;
3339         while (!IS_ERR_OR_NULL(data))
3340                 data = add_table(acpi_desc, &prev, data, end);
3341
3342         if (IS_ERR(data)) {
3343                 dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
3344                 rc = PTR_ERR(data);
3345                 goto out_unlock;
3346         }
3347
3348         rc = acpi_nfit_check_deletions(acpi_desc, &prev);
3349         if (rc)
3350                 goto out_unlock;
3351
3352         rc = nfit_mem_init(acpi_desc);
3353         if (rc)
3354                 goto out_unlock;
3355
3356         rc = acpi_nfit_register_dimms(acpi_desc);
3357         if (rc)
3358                 goto out_unlock;
3359
3360         rc = acpi_nfit_register_regions(acpi_desc);
3361
3362  out_unlock:
3363         mutex_unlock(&acpi_desc->init_mutex);
3364         return rc;
3365 }
3366 EXPORT_SYMBOL_GPL(acpi_nfit_init);
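
/*
 * Note: acpi_nfit_init() is deliberately re-entrant so _FIT updates can
 * be merged -- the list_cut_position() calls park the previously parsed
 * tables on @prev, add_table() moves still-present entries back while
 * walking the new byte stream, and acpi_nfit_check_deletions() rejects
 * any update that would drop entries, since deletion is unsupported.
 */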
3367
3368 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
3369 {
3370         struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
3371         struct device *dev = acpi_desc->dev;
3372
3373         /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
3374         device_lock(dev);
3375         device_unlock(dev);
3376
3377         /* Bounce the init_mutex to complete initial registration */
3378         mutex_lock(&acpi_desc->init_mutex);
3379         mutex_unlock(&acpi_desc->init_mutex);
3380
3381         return 0;
3382 }
3383
3384 static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3385                 struct nvdimm *nvdimm, unsigned int cmd)
3386 {
3387         struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
3388
3389         if (nvdimm)
3390                 return 0;
3391         if (cmd != ND_CMD_ARS_START)
3392                 return 0;
3393
3394         /*
3395          * The kernel and userspace may race to initiate a scrub, but
3396          * the scrub thread is prepared to lose that initial race.  It
3397          * just needs guarantees that any ARS it initiates are not
3398          * interrupted by any intervening start requests from userspace.
3399          */
3400         if (work_busy(&acpi_desc->dwork.work))
3401                 return -EBUSY;
3402
3403         return 0;
3404 }
3405
3406 /* prevent security commands from being issued via ioctl */
3407 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3408                 struct nvdimm *nvdimm, unsigned int cmd, void *buf)
3409 {
3410         struct nd_cmd_pkg *call_pkg = buf;
3411         unsigned int func;
3412
3413         if (nvdimm && cmd == ND_CMD_CALL &&
3414                         call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
3415                 func = call_pkg->nd_command;
3416                 if ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
3417                         return -EOPNOTSUPP;
3418         }
3419
3420         return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
3421 }
3422
3423 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
3424                 enum nfit_ars_state req_type)
3425 {
3426         struct device *dev = acpi_desc->dev;
3427         int scheduled = 0, busy = 0;
3428         struct nfit_spa *nfit_spa;
3429
3430         mutex_lock(&acpi_desc->init_mutex);
3431         if (acpi_desc->cancel) {
3432                 mutex_unlock(&acpi_desc->init_mutex);
3433                 return 0;
3434         }
3435
3436         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3437                 int type = nfit_spa_type(nfit_spa->spa);
3438
3439                 if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
3440                         continue;
3441                 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3442                         continue;
3443
3444                 if (test_and_set_bit(req_type, &nfit_spa->ars_state))
3445                         busy++;
3446                 else
3447                         scheduled++;
3448         }
3449         if (scheduled) {
3450                 sched_ars(acpi_desc);
3451                 dev_dbg(dev, "ars_scan triggered\n");
3452         }
3453         mutex_unlock(&acpi_desc->init_mutex);
3454
3455         if (scheduled)
3456                 return 0;
3457         if (busy)
3458                 return -EBUSY;
3459         return -ENOTTY;
3460 }
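
/*
 * Note on the return convention above: 0 means at least one range had
 * a new scrub scheduled (or teardown is in progress), -EBUSY means
 * every eligible range already had the requested scrub pending, and
 * -ENOTTY means no ARS-capable range exists.
 */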
3461
3462 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
3463 {
3464         struct nvdimm_bus_descriptor *nd_desc;
3465
3466         dev_set_drvdata(dev, acpi_desc);
3467         acpi_desc->dev = dev;
3468         acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
3469         nd_desc = &acpi_desc->nd_desc;
3470         nd_desc->provider_name = "ACPI.NFIT";
3471         nd_desc->module = THIS_MODULE;
3472         nd_desc->ndctl = acpi_nfit_ctl;
3473         nd_desc->flush_probe = acpi_nfit_flush_probe;
3474         nd_desc->clear_to_send = acpi_nfit_clear_to_send;
3475         nd_desc->attr_groups = acpi_nfit_attribute_groups;
3476
3477         INIT_LIST_HEAD(&acpi_desc->spas);
3478         INIT_LIST_HEAD(&acpi_desc->dcrs);
3479         INIT_LIST_HEAD(&acpi_desc->bdws);
3480         INIT_LIST_HEAD(&acpi_desc->idts);
3481         INIT_LIST_HEAD(&acpi_desc->flushes);
3482         INIT_LIST_HEAD(&acpi_desc->memdevs);
3483         INIT_LIST_HEAD(&acpi_desc->dimms);
3484         INIT_LIST_HEAD(&acpi_desc->list);
3485         mutex_init(&acpi_desc->init_mutex);
3486         acpi_desc->scrub_tmo = 1;
3487         INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
3488 }
3489 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
3490
3491 static void acpi_nfit_put_table(void *table)
3492 {
3493         acpi_put_table(table);
3494 }
3495
3496 void acpi_nfit_shutdown(void *data)
3497 {
3498         struct acpi_nfit_desc *acpi_desc = data;
3499         struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
3500
3501         /*
3502          * Tear down under acpi_desc_lock so that nfit_handle_mce() does
3503          * not race teardown.
3504          */
3505         mutex_lock(&acpi_desc_lock);
3506         list_del(&acpi_desc->list);
3507         mutex_unlock(&acpi_desc_lock);
3508
3509         mutex_lock(&acpi_desc->init_mutex);
3510         acpi_desc->cancel = 1;
3511         cancel_delayed_work_sync(&acpi_desc->dwork);
3512         mutex_unlock(&acpi_desc->init_mutex);
3513
3514         /*
3515          * Bounce the nvdimm bus lock to make sure any in-flight
3516          * acpi_nfit_ars_rescan() submissions have had a chance to
3517          * either submit or see ->cancel set.
3518          */
3519         device_lock(bus_dev);
3520         device_unlock(bus_dev);
3521
3522         flush_workqueue(nfit_wq);
3523 }
3524 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
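
/*
 * Note: the teardown ordering above matters -- removal from acpi_descs
 * stops nfit_handle_mce() from queueing new work against a dying
 * descriptor, ->cancel halts the scrub state machine under init_mutex,
 * and the final flush_workqueue() drains any already-queued pass before
 * devm unwinds the nvdimm bus.
 */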
3525
3526 static int acpi_nfit_add(struct acpi_device *adev)
3527 {
3528         struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
3529         struct acpi_nfit_desc *acpi_desc;
3530         struct device *dev = &adev->dev;
3531         struct acpi_table_header *tbl;
3532         acpi_status status = AE_OK;
3533         acpi_size sz;
3534         int rc = 0;
3535
3536         status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
3537         if (ACPI_FAILURE(status)) {
3538                 /* The NVDIMM root device allows the OS to trigger
3539                  * enumeration of NVDIMMs through the NFIT at boot time and
3540                  * re-enumeration at the root level via the _FIT method at
3541                  * runtime.  It is OK to return 0 here: an NVDIMM may be
3542                  * hotplugged later, and evaluating _FIT then returns data
3543                  * in the format of a series of NFIT structures.
3544                  */
3545                 dev_dbg(dev, "failed to find NFIT at startup\n");
3546                 return 0;
3547         }
3548
3549         rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
3550         if (rc)
3551                 return rc;
3552         sz = tbl->length;
3553
3554         acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
3555         if (!acpi_desc)
3556                 return -ENOMEM;
3557         acpi_nfit_desc_init(acpi_desc, &adev->dev);
3558
3559         /* Save the acpi header for exporting the revision via sysfs */
3560         acpi_desc->acpi_header = *tbl;
3561
3562         /* Evaluate _FIT and override with that if present */
3563         status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
3564         if (ACPI_SUCCESS(status) && buf.length > 0) {
3565                 union acpi_object *obj = buf.pointer;
3566
3567                 if (obj->type == ACPI_TYPE_BUFFER)
3568                         rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
3569                                         obj->buffer.length);
3570                 else
3571                         dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
3572                                 (int) obj->type);
3573                 kfree(buf.pointer);
3574         } else
3575                 /* skip over the lead-in header table */
3576                 rc = acpi_nfit_init(acpi_desc, (void *) tbl
3577                                 + sizeof(struct acpi_table_nfit),
3578                                 sz - sizeof(struct acpi_table_nfit));
3579
3580         if (rc)
3581                 return rc;
3582         return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
3583 }
3584
3585 static int acpi_nfit_remove(struct acpi_device *adev)
3586 {
3587         /* see acpi_nfit_unregister */
3588         return 0;
3589 }
3590
3591 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
3592 {
3593         struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
3594         struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
3595         union acpi_object *obj;
3596         acpi_status status;
3597         int ret;
3598
3599         if (!dev->driver) {
3600                 /* dev->driver may be null if we're being removed */
3601                 dev_dbg(dev, "no driver found for dev\n");
3602                 return;
3603         }
3604
3605         if (!acpi_desc) {
3606                 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
3607                 if (!acpi_desc)
3608                         return;
3609                 acpi_nfit_desc_init(acpi_desc, dev);
3610         } else {
3611                 /*
3612                  * Finish previous registration before considering new
3613                  * regions.
3614                  */
3615                 flush_workqueue(nfit_wq);
3616         }
3617
3618         /* Evaluate _FIT */
3619         status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
3620         if (ACPI_FAILURE(status)) {
3621                 dev_err(dev, "failed to evaluate _FIT\n");
3622                 return;
3623         }
3624
3625         obj = buf.pointer;
3626         if (obj->type == ACPI_TYPE_BUFFER) {
3627                 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
3628                                 obj->buffer.length);
3629                 if (ret)
3630                         dev_err(dev, "failed to merge updated NFIT\n");
3631         } else
3632                 dev_err(dev, "Invalid _FIT\n");
3633         kfree(buf.pointer);
3634 }
3635
3636 static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
3637 {
3638         struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
3639
3640         if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
3641                 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
3642         else
3643                 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
3644 }
3645
3646 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
3647 {
3648         dev_dbg(dev, "event: 0x%x\n", event);
3649
3650         switch (event) {
3651         case NFIT_NOTIFY_UPDATE:
3652                 return acpi_nfit_update_notify(dev, handle);
3653         case NFIT_NOTIFY_UC_MEMORY_ERROR:
3654                 return acpi_nfit_uc_error_notify(dev, handle);
3655         default:
3656                 return;
3657         }
3658 }
3659 EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
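
/*
 * Note: the two handled events are the notification values defined in
 * drivers/acpi/nfit/nfit.h -- NFIT_NOTIFY_UPDATE (0x80) for a
 * firmware-updated NFIT and NFIT_NOTIFY_UC_MEMORY_ERROR (0x81) for an
 * uncorrectable memory error; all other events are ignored by design.
 */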
3660
3661 static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
3662 {
3663         device_lock(&adev->dev);
3664         __acpi_nfit_notify(&adev->dev, adev->handle, event);
3665         device_unlock(&adev->dev);
3666 }
3667
3668 static const struct acpi_device_id acpi_nfit_ids[] = {
3669         { "ACPI0012", 0 },
3670         { "", 0 },
3671 };
3672 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
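
/* Note: "ACPI0012" is the ACPI ID of the NVDIMM root device. */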
3673
3674 static struct acpi_driver acpi_nfit_driver = {
3675         .name = KBUILD_MODNAME,
3676         .ids = acpi_nfit_ids,
3677         .ops = {
3678                 .add = acpi_nfit_add,
3679                 .remove = acpi_nfit_remove,
3680                 .notify = acpi_nfit_notify,
3681         },
3682 };
3683
3684 static __init int nfit_init(void)
3685 {
3686         int ret;
3687
3688         BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
3689         BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
3690         BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
3691         BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
3692         BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
3693         BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
3694         BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
3695         BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);
3696
3697         guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
3698         guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
3699         guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
3700         guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
3701         guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
3702         guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
3703         guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
3704         guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
3705         guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
3706         guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
3707         guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
3708         guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
3709         guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
3710
3711         nfit_wq = create_singlethread_workqueue("nfit");
3712         if (!nfit_wq)
3713                 return -ENOMEM;
3714
3715         nfit_mce_register();
3716         ret = acpi_bus_register_driver(&acpi_nfit_driver);
3717         if (ret) {
3718                 nfit_mce_unregister();
3719                 destroy_workqueue(nfit_wq);
3720         }
3721
3722         return ret;
3724 }
3725
3726 static __exit void nfit_exit(void)
3727 {
3728         nfit_mce_unregister();
3729         acpi_bus_unregister_driver(&acpi_nfit_driver);
3730         destroy_workqueue(nfit_wq);
3731         WARN_ON(!list_empty(&acpi_descs));
3732 }
3733
3734 module_init(nfit_init);
3735 module_exit(nfit_exit);
3736 MODULE_LICENSE("GPL v2");
3737 MODULE_AUTHOR("Intel Corporation");