// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");

static bool force_labels;
module_param(force_labels, bool, 0444);
MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static const guid_t *to_nfit_bus_uuid(int family)
{
	if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT,
			"only secondary bus families can be translated\n"))
		return NULL;
	/*
	 * The index of bus UUIDs starts immediately following the last
	 * NVDIMM/leaf family.
	 */
	return to_nfit_uuid(family + NVDIMM_FAMILY_MAX);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

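/*
 * Translate the 32-bit firmware status word of a bus-scope command
 * (ARS cap/start/status, clear-error) into a Linux errno.  The low 16
 * bits carry the command status, the upper 16 bits carry extended
 * status.
 */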
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

#define ACPI_LABELS_LOCKED 3

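/*
 * Translate per-DIMM command status.  For the label commands a locked
 * label area maps to -EACCES; how firmware reports the locked state
 * depends on whether the platform implements the _LS{I,R,W} methods.
 */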
static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands
		 */
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
			break;

		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_GET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_SET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}
err:
	ACPI_FREE(pkg);
	return buf;
}

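/* wrap a single integer result, e.g. an _LSW status, in a buffer object */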
static union acpi_object *int_to_buf(union acpi_object *integer)
{
	union acpi_object *buf = NULL;
	void *dst;

	/*
	 * Validate the element type before allocating so that the error
	 * path returns NULL rather than an uninitialized object.
	 */
	if (integer->type != ACPI_TYPE_INTEGER) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				integer->type);
		goto err;
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = 4;
	buf->buffer.pointer = dst;
	memcpy(dst, &integer->integer.value, 4);
err:
	ACPI_FREE(integer);
	return buf;
}

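/*
 * Evaluate _LSW (Label Storage Write): the arguments are a byte
 * offset, a transfer length, and the data to write; the single integer
 * result is converted to buffer form by int_to_buf().
 */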
static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 3,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
			[2] = {
				.buffer.type = ACPI_TYPE_BUFFER,
				.buffer.pointer = data,
				.buffer.length = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(buf.pointer);
}

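/*
 * Evaluate _LSR (Label Storage Read): takes a byte offset and transfer
 * length and returns a { status, data } package that pkg_to_buf()
 * flattens.
 */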
static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 2,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

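/* Evaluate _LSI (Label Storage Information) to query label area size limits */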
static union acpi_object *acpi_label_info(acpi_handle handle)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

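/*
 * Pick the _DSM revision id for a given family/function pair.  Most
 * functions were specified against revision 1; the Intel-family
 * functions from GET_MODES through FW_ACTIVATE_ARM use revision 2,
 * e.g. nfit_dsm_revid(NVDIMM_FAMILY_INTEL, NVDIMM_INTEL_GET_MODES) == 2.
 */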
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES ...
				NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2,
		},
	};
	u8 id;

	if (family > NVDIMM_FAMILY_MAX)
		return 0;
	if (func > NVDIMM_CMD_MAX)
		return 0;
	id = revid_table[family][func];
	if (id == 0)
		return 1; /* default */
	return id;
}

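/*
 * Intel security commands may carry sensitive payloads (passphrases),
 * so only hex-dump them when CONFIG_NFIT_SECURITY_DEBUG is enabled.
 */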
static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL
			&& func >= NVDIMM_INTEL_GET_SECURITY_STATE
			&& func <= NVDIMM_INTEL_MASTER_SECURE_ERASE)
		return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG);
	return true;
}

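/*
 * Resolve an ioctl command to a _DSM function number.  ND_CMD_CALL
 * passes the family and function through explicitly; otherwise command
 * numbers map directly to function numbers for bus commands and for
 * NVDIMM_FAMILY_INTEL devices.
 */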
static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
		struct nd_cmd_pkg *call_pkg, int *family)
{
	if (call_pkg) {
		int i;

		if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;
		*family = call_pkg->nd_family;
		return call_pkg->nd_command;
	}

	/* In the !call_pkg case, bus commands == bus functions */
	if (!nfit_mem)
		return cmd;

	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
		return cmd;

	/*
	 * Force function number validation to fail since 0 is never
	 * published as a valid function in dsm_mask.
	 */
	return 0;
}

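/*
 * acpi_nfit_ctl - dispatch a libnvdimm command to platform firmware
 * @nd_desc: bus descriptor for the ACPI.NFIT provider
 * @nvdimm: target DIMM, or NULL for a bus-scope command
 * @cmd: an ND_CMD_* command, or ND_CMD_CALL for a passthrough package
 * @buf: input payload, overwritten in place with the output payload
 * @buf_len: total size of @buf
 * @cmd_rc: translated firmware status, see xlat_status()
 *
 * Marshals @buf into an ACPI buffer object, invokes the label methods
 * or _DSM as appropriate, and unpacks the result back into @buf.
 */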
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	const guid_t *guid;
	int func, rc, i;
	int family = 0;

	if (cmd_rc)
		*cmd_rc = -EINVAL;

	if (cmd == ND_CMD_CALL)
		call_pkg = buf;
	func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
	if (func < 0)
		return func;

	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
			family = call_pkg->nd_family;
			if (!test_bit(family, &nd_desc->bus_family_mask))
				return -EINVAL;
			dsm_mask = acpi_desc->family_dsm_mask[family];
			guid = to_nfit_bus_uuid(family);
		} else {
			dsm_mask = acpi_desc->bus_dsm_mask;
			guid = to_nfit_uuid(NFIT_DEV_BUS);
		}
		desc = nd_cmd_bus_desc(cmd);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	/*
	 * Check for a valid command.  For ND_CMD_CALL, we also have to
	 * make sure that the DSM function is supported.
	 */
	if (cmd == ND_CMD_CALL &&
	    (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask)))
		return -ENOTTY;
	else if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n",
		dimm_name, cmd, family, func, in_buf.buffer.length);
	if (payload_dumpable(nvdimm, func))
		print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);

	/* call the BIOS, prefer the named methods over _DSM if available */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else {
		u8 revid;

		if (nvdimm)
			revid = nfit_dsm_revid(nfit_mem->family, func);
		else
			revid = 1;
		out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	}

	if (!out_obj) {
		dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
		return -EINVAL;
	}

	if (out_obj->type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
				dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * We need to support FW functions whose output size is
		 * not known in advance.  The caller can determine the
		 * required size from nd_fw_size; if we returned an
		 * error here (as elsewhere) the caller could not rely
		 * on the returned data to make that calculation.
		 */
		if (cmd_rc)
			*cmd_rc = 0;
		return 0;
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
					&& cmd <= ND_CMD_CLEAR_ERROR)
				|| (nvdimm && cmd >= ND_CMD_SMART
					&& cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
			return i;
	return -1;
}

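/*
 * The add_*() helpers below share a pattern: validate the sub-table
 * length, reuse a matching entry from the previous scan so that a
 * table refresh does not duplicate state, and otherwise allocate and
 * link a new entry.
 */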
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "spa index: %d type: %s\n",
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

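/*
 * Look up the SMBIOS physical id (and memdev flags) for an NFIT device
 * handle, searching every registered NFIT bus.
 */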
int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nfit_mem *nfit_mem;
	u16 physical_id;

	mutex_lock(&acpi_desc_lock);
	list_for_each_entry(acpi_desc, &acpi_descs, list) {
		mutex_lock(&acpi_desc->init_mutex);
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
			memdev = __to_nfit_memdev(nfit_mem);
			if (memdev->device_handle == device_handle) {
				*flags = memdev->flags;
				physical_id = memdev->physical_id;
				mutex_unlock(&acpi_desc->init_mutex);
				mutex_unlock(&acpi_desc_lock);
				return physical_id;
			}
		}
		mutex_unlock(&acpi_desc->init_mutex);
	}
	mutex_unlock(&acpi_desc_lock);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "dcr index: %d windows: %d\n",
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "bdw dcr: %d windows: %d\n",
			bdw->region_index, bdw->windows);
	return true;
}

static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "idt index: %d num_lines: %d\n",
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
			flush->device_handle, flush->hint_count);
	return true;
}

static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_capabilities *pcap)
{
	struct device *dev = acpi_desc->dev;
	u32 mask;

	mask = (1 << (pcap->highest_capability + 1)) - 1;
	acpi_desc->platform_cap = pcap->capabilities & mask;
	dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
	return true;
}

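/*
 * Parse one NFIT sub-table and dispatch to the matching add_*()
 * helper.  Returns the address of the next table, NULL at end of
 * table, or an ERR_PTR() on allocation failure.
 */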
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "smbios\n");
		break;
	case ACPI_NFIT_TYPE_CAPABILITIES:
		if (!add_platform_cap(acpi_desc, table))
			return err;
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

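/*
 * Find the SPA-BDW (block data window address range) that maps this
 * DIMM's control region, if any.
 */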
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

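/*
 * Build or extend nfit_mem objects for the given SPA range, or, when
 * @spa is NULL, for DIMMs that have no SPA association at all.
 */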
static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes.  When a dimm is mapped, the loop
	 * adds memdev associations to an existing dimm, or creates one.
	 * In the unmapped dimm case, the loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
					flush->hint_count,
					sizeof(struct resource),
					GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above. Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
	 */
	rc = __nfit_mem_init(acpi_desc, NULL);
	if (rc)
		return rc;

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t bus_dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
		__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	nfit_device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	nfit_device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time.  Userspace can wait on this using
 * select/poll etc.  A '+' at the end indicates an ARS is in progress.
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	ssize_t rc = -ENXIO;
	bool busy;

	nfit_device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (!nd_desc) {
		nfit_device_unlock(dev);
		return rc;
	}
	acpi_desc = to_acpi_desc(nd_desc);

	mutex_lock(&acpi_desc->init_mutex);
	busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
		&& !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
	rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
	/* Allow an admin to poll the busy state at a higher rate */
	if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
				&acpi_desc->scrub_flags)) {
		acpi_desc->scrub_tmo = 1;
		mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
	}

	mutex_unlock(&acpi_desc->init_mutex);
	nfit_device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	nfit_device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
	}
	nfit_device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);

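/* ARS is usable only if the cap, start, and status commands are all present */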
static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr)
		return ars_supported(nvdimm_bus) ? a->mode : 0;

	if (a == &dev_attr_firmware_activate_noidle.attr)
		return intel_fwa_supported(nvdimm_bus) ? a->mode : 0;

	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	&dev_attr_bus_dsm_mask.attr,
	&dev_attr_firmware_activate_noidle.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

1513 static int num_nvdimm_formats(struct nvdimm *nvdimm)
1514 {
1515         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1516         int formats = 0;
1517
1518         if (nfit_mem->memdev_pmem)
1519                 formats++;
1520         if (nfit_mem->memdev_bdw)
1521                 formats++;
1522         return formats;
1523 }
1524
1525 static ssize_t format_show(struct device *dev,
1526                 struct device_attribute *attr, char *buf)
1527 {
1528         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1529
1530         return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
1531 }
1532 static DEVICE_ATTR_RO(format);
1533
1534 static ssize_t format1_show(struct device *dev,
1535                 struct device_attribute *attr, char *buf)
1536 {
1537         u32 handle;
1538         ssize_t rc = -ENXIO;
1539         struct nfit_mem *nfit_mem;
1540         struct nfit_memdev *nfit_memdev;
1541         struct acpi_nfit_desc *acpi_desc;
1542         struct nvdimm *nvdimm = to_nvdimm(dev);
1543         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1544
1545         nfit_mem = nvdimm_provider_data(nvdimm);
1546         acpi_desc = nfit_mem->acpi_desc;
1547         handle = to_nfit_memdev(dev)->device_handle;
1548
1549         /* assumes DIMMs have at most 2 published interface codes */
1550         mutex_lock(&acpi_desc->init_mutex);
1551         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1552                 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
1553                 struct nfit_dcr *nfit_dcr;
1554
1555                 if (memdev->device_handle != handle)
1556                         continue;
1557
1558                 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1559                         if (nfit_dcr->dcr->region_index != memdev->region_index)
1560                                 continue;
1561                         if (nfit_dcr->dcr->code == dcr->code)
1562                                 continue;
1563                         rc = sprintf(buf, "0x%04x\n",
1564                                         le16_to_cpu(nfit_dcr->dcr->code));
1565                         break;
1566                 }
1567                 if (rc != -ENXIO)
1568                         break;
1569         }
1570         mutex_unlock(&acpi_desc->init_mutex);
1571         return rc;
1572 }
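/*
 * Illustration (hypothetical interface codes): if a DIMM publishes two
 * control regions with codes 0x0201 and 0x0301, "format" reports this
 * DIMM's own DCR code and "format1" walks the memdev list to report the
 * other one; the -ENXIO default is returned when no second interface
 * code exists.
 */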
1573 static DEVICE_ATTR_RO(format1);
1574
1575 static ssize_t formats_show(struct device *dev,
1576                 struct device_attribute *attr, char *buf)
1577 {
1578         struct nvdimm *nvdimm = to_nvdimm(dev);
1579
1580         return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
1581 }
1582 static DEVICE_ATTR_RO(formats);
1583
1584 static ssize_t serial_show(struct device *dev,
1585                 struct device_attribute *attr, char *buf)
1586 {
1587         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1588
1589         return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
1590 }
1591 static DEVICE_ATTR_RO(serial);
1592
1593 static ssize_t family_show(struct device *dev,
1594                 struct device_attribute *attr, char *buf)
1595 {
1596         struct nvdimm *nvdimm = to_nvdimm(dev);
1597         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1598
1599         if (nfit_mem->family < 0)
1600                 return -ENXIO;
1601         return sprintf(buf, "%d\n", nfit_mem->family);
1602 }
1603 static DEVICE_ATTR_RO(family);
1604
1605 static ssize_t dsm_mask_show(struct device *dev,
1606                 struct device_attribute *attr, char *buf)
1607 {
1608         struct nvdimm *nvdimm = to_nvdimm(dev);
1609         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1610
1611         if (nfit_mem->family < 0)
1612                 return -ENXIO;
1613         return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
1614 }
1615 static DEVICE_ATTR_RO(dsm_mask);
1616
1617 static ssize_t flags_show(struct device *dev,
1618                 struct device_attribute *attr, char *buf)
1619 {
1620         struct nvdimm *nvdimm = to_nvdimm(dev);
1621         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1622         u16 flags = __to_nfit_memdev(nfit_mem)->flags;
1623
1624         if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
1625                 flags |= ACPI_NFIT_MEM_FLUSH_FAILED;
1626
1627         return sprintf(buf, "%s%s%s%s%s%s%s\n",
1628                 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
1629                 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
1630                 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
1631                 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
1632                 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
1633                 flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
1634                 flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
1635 }
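/*
 * Example output: a DIMM that failed its last save and is disarmed reads
 * as "save_fail not_armed \n". The keywords are space-terminated, so
 * consumers should tokenize rather than match the full string.
 */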
1636 static DEVICE_ATTR_RO(flags);
1637
1638 static ssize_t id_show(struct device *dev,
1639                 struct device_attribute *attr, char *buf)
1640 {
1641         struct nvdimm *nvdimm = to_nvdimm(dev);
1642         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1643
1644         return sprintf(buf, "%s\n", nfit_mem->id);
1645 }
1646 static DEVICE_ATTR_RO(id);
1647
1648 static ssize_t dirty_shutdown_show(struct device *dev,
1649                 struct device_attribute *attr, char *buf)
1650 {
1651         struct nvdimm *nvdimm = to_nvdimm(dev);
1652         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1653
1654         return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
1655 }
1656 static DEVICE_ATTR_RO(dirty_shutdown);
1657
1658 static struct attribute *acpi_nfit_dimm_attributes[] = {
1659         &dev_attr_handle.attr,
1660         &dev_attr_phys_id.attr,
1661         &dev_attr_vendor.attr,
1662         &dev_attr_device.attr,
1663         &dev_attr_rev_id.attr,
1664         &dev_attr_subsystem_vendor.attr,
1665         &dev_attr_subsystem_device.attr,
1666         &dev_attr_subsystem_rev_id.attr,
1667         &dev_attr_format.attr,
1668         &dev_attr_formats.attr,
1669         &dev_attr_format1.attr,
1670         &dev_attr_serial.attr,
1671         &dev_attr_flags.attr,
1672         &dev_attr_id.attr,
1673         &dev_attr_family.attr,
1674         &dev_attr_dsm_mask.attr,
1675         &dev_attr_dirty_shutdown.attr,
1676         NULL,
1677 };
1678
1679 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
1680                 struct attribute *a, int n)
1681 {
1682         struct device *dev = kobj_to_dev(kobj);
1683         struct nvdimm *nvdimm = to_nvdimm(dev);
1684         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1685
1686         if (!to_nfit_dcr(dev)) {
1687                 /* Without a dcr only the memdev attributes can be surfaced */
1688                 if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
1689                                 || a == &dev_attr_flags.attr
1690                                 || a == &dev_attr_family.attr
1691                                 || a == &dev_attr_dsm_mask.attr)
1692                         return a->mode;
1693                 return 0;
1694         }
1695
1696         if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
1697                 return 0;
1698
1699         if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
1700                         && a == &dev_attr_dirty_shutdown.attr)
1701                 return 0;
1702
1703         return a->mode;
1704 }
1705
1706 static const struct attribute_group acpi_nfit_dimm_attribute_group = {
1707         .name = "nfit",
1708         .attrs = acpi_nfit_dimm_attributes,
1709         .is_visible = acpi_nfit_dimm_attr_visible,
1710 };
1711
1712 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
1713         &acpi_nfit_dimm_attribute_group,
1714         NULL,
1715 };
1716
1717 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
1718                 u32 device_handle)
1719 {
1720         struct nfit_mem *nfit_mem;
1721
1722         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1723                 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
1724                         return nfit_mem->nvdimm;
1725
1726         return NULL;
1727 }
1728
1729 void __acpi_nvdimm_notify(struct device *dev, u32 event)
1730 {
1731         struct nfit_mem *nfit_mem;
1732         struct acpi_nfit_desc *acpi_desc;
1733
1734         dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
1735                         event);
1736
1737         if (event != NFIT_NOTIFY_DIMM_HEALTH) {
1738                 dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
1739                                 event);
1740                 return;
1741         }
1742
1743         acpi_desc = dev_get_drvdata(dev->parent);
1744         if (!acpi_desc)
1745                 return;
1746
1747         /*
1748          * If we successfully retrieved acpi_desc, then we know nfit_mem data
1749          * is still valid.
1750          */
1751         nfit_mem = dev_get_drvdata(dev);
1752         if (nfit_mem && nfit_mem->flags_attr)
1753                 sysfs_notify_dirent(nfit_mem->flags_attr);
1754 }
1755 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);
1756
1757 static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
1758 {
1759         struct acpi_device *adev = data;
1760         struct device *dev = &adev->dev;
1761
1762         nfit_device_lock(dev->parent);
1763         __acpi_nvdimm_notify(dev, event);
1764         nfit_device_unlock(dev->parent);
1765 }
1766
1767 static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
1768 {
1769         acpi_handle handle;
1770         acpi_status status;
1771
1772         status = acpi_get_handle(adev->handle, method, &handle);
1773
1774         return ACPI_SUCCESS(status);
1777 }
1778
1779 __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
1780 {
1781         struct device *dev = &nfit_mem->adev->dev;
1782         struct nd_intel_smart smart = { 0 };
1783         union acpi_object in_buf = {
1784                 .buffer.type = ACPI_TYPE_BUFFER,
1785                 .buffer.length = 0,
1786         };
1787         union acpi_object in_obj = {
1788                 .package.type = ACPI_TYPE_PACKAGE,
1789                 .package.count = 1,
1790                 .package.elements = &in_buf,
1791         };
1792         const u8 func = ND_INTEL_SMART;
1793         const guid_t *guid = to_nfit_uuid(nfit_mem->family);
1794         u8 revid = nfit_dsm_revid(nfit_mem->family, func);
1795         struct acpi_device *adev = nfit_mem->adev;
1796         acpi_handle handle = adev->handle;
1797         union acpi_object *out_obj;
1798
1799         if ((nfit_mem->dsm_mask & (1 << func)) == 0)
1800                 return;
1801
1802         out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
1803         if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
1804                         || out_obj->buffer.length < sizeof(smart)) {
1805                 dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
1806                                 dev_name(dev));
1807                 ACPI_FREE(out_obj);
1808                 return;
1809         }
1810         memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
1811         ACPI_FREE(out_obj);
1812
1813         if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
1814                 if (smart.shutdown_state)
1815                         set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
1816         }
1817
1818         if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
1819                 set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
1820                 nfit_mem->dirty_shutdown = smart.shutdown_count;
1821         }
1822 }
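/*
 * The _DSM input above is the conventional empty-argument encoding: a
 * zero-length ACPI_TYPE_BUFFER wrapped in a one-element package. The
 * function is __weak so the nvdimm test infrastructure can override it;
 * the production path caches the dirty-shutdown state and count once at
 * DIMM discovery time rather than on every sysfs read.
 */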
1823
1824 static void populate_shutdown_status(struct nfit_mem *nfit_mem)
1825 {
1826         /*
1827          * For DIMMs that provide a dynamic facility to retrieve a
1828          * dirty-shutdown status and/or a dirty-shutdown count, cache
1829          * these values in nfit_mem.
1830          */
1831         if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
1832                 nfit_intel_shutdown_status(nfit_mem);
1833 }
1834
1835 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1836                 struct nfit_mem *nfit_mem, u32 device_handle)
1837 {
1838         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1839         struct acpi_device *adev, *adev_dimm;
1840         struct device *dev = acpi_desc->dev;
1841         unsigned long dsm_mask, label_mask;
1842         const guid_t *guid;
1843         int i;
1844         int family = -1;
1845         struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
1846
1847         /* nfit test assumes 1:1 relationship between commands and dsms */
1848         nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
1849         nfit_mem->family = NVDIMM_FAMILY_INTEL;
1850         set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
1851
1852         if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
1853                 sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
1854                                 be16_to_cpu(dcr->vendor_id),
1855                                 dcr->manufacturing_location,
1856                                 be16_to_cpu(dcr->manufacturing_date),
1857                                 be32_to_cpu(dcr->serial_number));
1858         else
1859                 sprintf(nfit_mem->id, "%04x-%08x",
1860                                 be16_to_cpu(dcr->vendor_id),
1861                                 be32_to_cpu(dcr->serial_number));
1862
1863         adev = to_acpi_dev(acpi_desc);
1864         if (!adev) {
1865                 /* unit test case */
1866                 populate_shutdown_status(nfit_mem);
1867                 return 0;
1868         }
1869
1870         adev_dimm = acpi_find_child_device(adev, device_handle, false);
1871         nfit_mem->adev = adev_dimm;
1872         if (!adev_dimm) {
1873                 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
1874                                 device_handle);
1875                 return force_enable_dimms ? 0 : -ENODEV;
1876         }
1877
1878         if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
1879                 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
1880                 dev_err(dev, "%s: notification registration failed\n",
1881                                 dev_name(&adev_dimm->dev));
1882                 return -ENXIO;
1883         }
1884         /*
1885          * Record nfit_mem for the notification path to track back to
1886          * the nfit sysfs attributes for this dimm device object.
1887          */
1888         dev_set_drvdata(&adev_dimm->dev, nfit_mem);
1889
1890         /*
1891          * There are 4 "legacy" NVDIMM command sets
1892          * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
1893          * an EFI working group was established to constrain this
1894          * proliferation. The nfit driver probes for the supported command
1895          * set by GUID. Note, if you're a platform developer looking to add
1896          * a new command set to this probe, consider using an existing set,
1897          * or otherwise seek approval to publish the command set at
1898          * http://www.uefi.org/RFIC_LIST.
1899          *
1900          * Note that checking for function 0 (bit 0) tells us whether any
1901          * commands are reachable through this GUID.
1902          */
1903         clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
1904         for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
1905                 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) {
1906                         set_bit(i, &nd_desc->dimm_family_mask);
1907                         if (family < 0 || i == default_dsm_family)
1908                                 family = i;
1909                 }
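        /*
         * Probe-order example: if a DIMM answers function 0 for more
         * than one family, the loop above selects the lowest matching
         * family index unless the default_dsm_family module parameter
         * names one of the matching families, in which case that family
         * wins.
         */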
1910
1911         /* limit the supported commands to those that are publicly documented */
1912         nfit_mem->family = family;
1913         if (override_dsm_mask && !disable_vendor_specific)
1914                 dsm_mask = override_dsm_mask;
1915         else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
1916                 dsm_mask = NVDIMM_INTEL_CMDMASK;
1917                 if (disable_vendor_specific)
1918                         dsm_mask &= ~(1 << ND_CMD_VENDOR);
1919         } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
1920                 dsm_mask = 0x1c3c76;
1921         } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
1922                 dsm_mask = 0x1fe;
1923                 if (disable_vendor_specific)
1924                         dsm_mask &= ~(1 << 8);
1925         } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
1926                 dsm_mask = 0xffffffff;
1927         } else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) {
1928                 dsm_mask = 0x1f;
1929         } else {
1930                 dev_dbg(dev, "unknown dimm command family\n");
1931                 nfit_mem->family = -1;
1932                 /* DSMs are optional, continue loading the driver... */
1933                 return 0;
1934         }
1935
1936         /*
1937          * Function 0 is the command interrogation function; don't
1938          * export it for userspace use, so that it can double as an
1939          * error value in acpi_nfit_ctl().
1940          */
1941         dsm_mask &= ~1UL;
1942
1943         guid = to_nfit_uuid(nfit_mem->family);
1944         for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
1945                 if (acpi_check_dsm(adev_dimm->handle, guid,
1946                                         nfit_dsm_revid(nfit_mem->family, i),
1947                                         1ULL << i))
1948                         set_bit(i, &nfit_mem->dsm_mask);
1949
1950         /*
1951          * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
1952          * due to their better semantics for handling locked capacity.
1953          */
1954         label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
1955                 | 1 << ND_CMD_SET_CONFIG_DATA;
1956         if (family == NVDIMM_FAMILY_INTEL
1957                         && (dsm_mask & label_mask) == label_mask)
1958                 /* skip _LS{I,R,W} enabling */;
1959         else {
1960                 if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
1961                                 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
1962                         dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
1963                         set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
1964                 }
1965
1966                 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
1967                                 && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
1968                         dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
1969                         set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
1970                 }
1971
1972                 /*
1973                  * Quirk read-only label configurations to preserve
1974                  * access to label-less namespaces by default.
1975                  */
1976                 if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
1977                                 && !force_labels) {
1978                         dev_dbg(dev, "%s: No _LSW, disable labels\n",
1979                                         dev_name(&adev_dimm->dev));
1980                         clear_bit(NFIT_MEM_LSR, &nfit_mem->flags);
1981                 } else
1982                         dev_dbg(dev, "%s: Force enable labels\n",
1983                                         dev_name(&adev_dimm->dev));
1984         }
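        /*
         * Mask arithmetic for the label short-circuit above: with
         * ND_CMD_GET_CONFIG_SIZE == 4, ND_CMD_GET_CONFIG_DATA == 5 and
         * ND_CMD_SET_CONFIG_DATA == 6, label_mask == 0x70. An
         * Intel-family DIMM whose dsm_mask covers all three label DSMs
         * skips _LSI/_LSR/_LSW discovery entirely and uses the DSM path.
         */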
1985
1986         populate_shutdown_status(nfit_mem);
1987
1988         return 0;
1989 }
1990
1991 static void shutdown_dimm_notify(void *data)
1992 {
1993         struct acpi_nfit_desc *acpi_desc = data;
1994         struct nfit_mem *nfit_mem;
1995
1996         mutex_lock(&acpi_desc->init_mutex);
1997         /*
1998          * Clear out the nfit_mem->flags_attr and shut down dimm event
1999          * notifications.
2000          */
2001         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
2002                 struct acpi_device *adev_dimm = nfit_mem->adev;
2003
2004                 if (nfit_mem->flags_attr) {
2005                         sysfs_put(nfit_mem->flags_attr);
2006                         nfit_mem->flags_attr = NULL;
2007                 }
2008                 if (adev_dimm) {
2009                         acpi_remove_notify_handler(adev_dimm->handle,
2010                                         ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
2011                         dev_set_drvdata(&adev_dimm->dev, NULL);
2012                 }
2013         }
2014         mutex_unlock(&acpi_desc->init_mutex);
2015 }
2016
2017 static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
2018 {
2019         switch (family) {
2020         case NVDIMM_FAMILY_INTEL:
2021                 return intel_security_ops;
2022         default:
2023                 return NULL;
2024         }
2025 }
2026
2027 static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops(
2028                 struct nfit_mem *nfit_mem)
2029 {
2030         unsigned long mask;
2031         struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
2032         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2033
2034         if (!nd_desc->fw_ops)
2035                 return NULL;
2036
2037         if (nfit_mem->family != NVDIMM_FAMILY_INTEL)
2038                 return NULL;
2039
2040         mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
2041         if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
2042                 return NULL;
2043
2044         return intel_fw_ops;
2045 }
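/*
 * In other words, firmware activation is only wired up when the bus has
 * fw_ops, the DIMM is NVDIMM_FAMILY_INTEL, and every command in
 * NVDIMM_INTEL_FW_ACTIVATE_CMDMASK was verified by acpi_nfit_add_dimm().
 * A partial mask (for example, a platform exposing the info function but
 * not the arm function) yields NULL and the nvdimm is registered without
 * firmware activation support.
 */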
2046
2047 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
2048 {
2049         struct nfit_mem *nfit_mem;
2050         int dimm_count = 0, rc;
2051         struct nvdimm *nvdimm;
2052
2053         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
2054                 struct acpi_nfit_flush_address *flush;
2055                 unsigned long flags = 0, cmd_mask;
2056                 struct nfit_memdev *nfit_memdev;
2057                 u32 device_handle;
2058                 u16 mem_flags;
2059
2060                 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
2061                 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
2062                 if (nvdimm) {
2063                         dimm_count++;
2064                         continue;
2065                 }
2066
2067                 if (nfit_mem->bdw && nfit_mem->memdev_pmem) {
2068                         set_bit(NDD_ALIASING, &flags);
2069                         set_bit(NDD_LABELING, &flags);
2070                 }
2071
2072                 /* collate flags across all memdevs for this dimm */
2073                 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2074                         struct acpi_nfit_memory_map *dimm_memdev;
2075
2076                         dimm_memdev = __to_nfit_memdev(nfit_mem);
2077                         if (dimm_memdev->device_handle
2078                                         != nfit_memdev->memdev->device_handle)
2079                                 continue;
2080                         dimm_memdev->flags |= nfit_memdev->memdev->flags;
2081                 }
2082
2083                 mem_flags = __to_nfit_memdev(nfit_mem)->flags;
2084                 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
2085                         set_bit(NDD_UNARMED, &flags);
2086
2087                 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
2088                 if (rc)
2089                         continue;
2090
2091                 /*
2092                  * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
2093                  * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
2094                  * userspace interface.
2095                  */
2096                 cmd_mask = 1UL << ND_CMD_CALL;
2097                 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
2098                         /*
2099                          * These commands have a 1:1 correspondence
2100                          * between DSM payload and libnvdimm ioctl
2101                          * payload format.
2102                          */
2103                         cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
2104                 }
2105
2106                 /* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
2107                 if (nfit_mem->family == NVDIMM_FAMILY_HYPERV)
2108                         set_bit(NDD_NOBLK, &flags);
2109
2110                 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
2111                         set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
2112                         set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
2113                 }
2114                 if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
2115                         set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
2116
2117                 flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
2118                         : NULL;
2119                 nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
2120                                 acpi_nfit_dimm_attribute_groups,
2121                                 flags, cmd_mask, flush ? flush->hint_count : 0,
2122                                 nfit_mem->flush_wpq, &nfit_mem->id[0],
2123                                 acpi_nfit_get_security_ops(nfit_mem->family),
2124                                 acpi_nfit_get_fw_ops(nfit_mem));
2125                 if (!nvdimm)
2126                         return -ENOMEM;
2127
2128                 nfit_mem->nvdimm = nvdimm;
2129                 dimm_count++;
2130
2131                 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
2132                         continue;
2133
2134                 dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
2135                                 nvdimm_name(nvdimm),
2136                   mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
2137                   mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
2138                   mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
2139                   mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
2140                   mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");
2141
2142         }
2143
2144         rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
2145         if (rc)
2146                 return rc;
2147
2148         /*
2149          * Now that DIMMs are successfully registered and async registration
2150          * is flushed, attempt to enable event notification.
2151          */
2152         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
2153                 struct kernfs_node *nfit_kernfs;
2154
2155                 nvdimm = nfit_mem->nvdimm;
2156                 if (!nvdimm)
2157                         continue;
2158
2159                 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
2160                 if (nfit_kernfs)
2161                         nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
2162                                         "flags");
2163                 sysfs_put(nfit_kernfs);
2164                 if (!nfit_mem->flags_attr)
2165                         dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
2166                                         nvdimm_name(nvdimm));
2167         }
2168
2169         return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
2170                         acpi_desc);
2171 }
2172
2173 /*
2174  * These constants are private because there are no kernel consumers of
2175  * these commands.
2176  */
2177 enum nfit_aux_cmds {
2178         NFIT_CMD_TRANSLATE_SPA = 5,
2179         NFIT_CMD_ARS_INJECT_SET = 7,
2180         NFIT_CMD_ARS_INJECT_CLEAR = 8,
2181         NFIT_CMD_ARS_INJECT_GET = 9,
2182 };
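/*
 * These function numbers live in the NVDIMM_BUS_FAMILY_NFIT _DSM space
 * alongside ND_CMD_ARS_CAP..ND_CMD_CLEAR_ERROR; userspace can still
 * reach them through the ND_CMD_CALL passthrough when the corresponding
 * bit lands in bus_dsm_mask below.
 */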
2183
2184 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
2185 {
2186         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2187         const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
2188         unsigned long dsm_mask, *mask;
2189         struct acpi_device *adev;
2190         int i;
2191
2192         set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
2193         set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask);
2194
2195         /* enable nfit_test to inject bus command emulation */
2196         if (acpi_desc->bus_cmd_force_en) {
2197                 nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
2198                 mask = &nd_desc->bus_family_mask;
2199                 if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) {
2200                         set_bit(NVDIMM_BUS_FAMILY_INTEL, mask);
2201                         nd_desc->fw_ops = intel_bus_fw_ops;
2202                 }
2203         }
2204
2205         adev = to_acpi_dev(acpi_desc);
2206         if (!adev)
2207                 return;
2208
2209         for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
2210                 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
2211                         set_bit(i, &nd_desc->cmd_mask);
2212
2213         dsm_mask =
2214                 (1 << ND_CMD_ARS_CAP) |
2215                 (1 << ND_CMD_ARS_START) |
2216                 (1 << ND_CMD_ARS_STATUS) |
2217                 (1 << ND_CMD_CLEAR_ERROR) |
2218                 (1 << NFIT_CMD_TRANSLATE_SPA) |
2219                 (1 << NFIT_CMD_ARS_INJECT_SET) |
2220                 (1 << NFIT_CMD_ARS_INJECT_CLEAR) |
2221                 (1 << NFIT_CMD_ARS_INJECT_GET);
2222         for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
2223                 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
2224                         set_bit(i, &acpi_desc->bus_dsm_mask);
2225
2226         /* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */
2227         dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
2228         guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL);
2229         mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
2230         for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
2231                 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
2232                         set_bit(i, mask);
2233
2234         if (*mask == dsm_mask) {
2235                 set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask);
2236                 nd_desc->fw_ops = intel_bus_fw_ops;
2237         }
2238 }
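/*
 * Bit-mask sanity check for the dsm_mask built above: functions 1-5 plus
 * 7-9 give (0x3e | 0x380) == 0x3be, i.e. the four standard ARS/clear
 * commands, translate-SPA, and the three error-injection helpers. Only
 * the bits the platform acknowledges via acpi_check_dsm() land in
 * bus_dsm_mask.
 */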
2239
2240 static ssize_t range_index_show(struct device *dev,
2241                 struct device_attribute *attr, char *buf)
2242 {
2243         struct nd_region *nd_region = to_nd_region(dev);
2244         struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
2245
2246         return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
2247 }
2248 static DEVICE_ATTR_RO(range_index);
2249
2250 static struct attribute *acpi_nfit_region_attributes[] = {
2251         &dev_attr_range_index.attr,
2252         NULL,
2253 };
2254
2255 static const struct attribute_group acpi_nfit_region_attribute_group = {
2256         .name = "nfit",
2257         .attrs = acpi_nfit_region_attributes,
2258 };
2259
2260 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
2261         &acpi_nfit_region_attribute_group,
2262         NULL,
2263 };
2264
2265 /* enough info to uniquely specify an interleave set */
2266 struct nfit_set_info {
2267         struct nfit_set_info_map {
2268                 u64 region_offset;
2269                 u32 serial_number;
2270                 u32 pad;
2271         } mapping[];
2272 };
2273
2274 struct nfit_set_info2 {
2275         struct nfit_set_info_map2 {
2276                 u64 region_offset;
2277                 u32 serial_number;
2278                 u16 vendor_id;
2279                 u16 manufacturing_date;
2280                 u8  manufacturing_location;
2281                 u8  reserved[31];
2282         } mapping[];
2283 };
2284
2285 static size_t sizeof_nfit_set_info(int num_mappings)
2286 {
2287         return sizeof(struct nfit_set_info)
2288                 + num_mappings * sizeof(struct nfit_set_info_map);
2289 }
2290
2291 static size_t sizeof_nfit_set_info2(int num_mappings)
2292 {
2293         return sizeof(struct nfit_set_info2)
2294                 + num_mappings * sizeof(struct nfit_set_info_map2);
2295 }
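/*
 * Size arithmetic (these layouts carry no implicit padding): a v1.1 map
 * entry is 8 + 4 + 4 = 16 bytes, a v1.2 entry is 8 + 4 + 2 + 2 + 1 + 31
 * = 48 bytes, and the containing structs contribute no header bytes, so
 * a 2-way interleave checksums 32 and 96 bytes of mapping data
 * respectively.
 */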
2296
2297 static int cmp_map_compat(const void *m0, const void *m1)
2298 {
2299         const struct nfit_set_info_map *map0 = m0;
2300         const struct nfit_set_info_map *map1 = m1;
2301
2302         return memcmp(&map0->region_offset, &map1->region_offset,
2303                         sizeof(u64));
2304 }
2305
2306 static int cmp_map(const void *m0, const void *m1)
2307 {
2308         const struct nfit_set_info_map *map0 = m0;
2309         const struct nfit_set_info_map *map1 = m1;
2310
2311         if (map0->region_offset < map1->region_offset)
2312                 return -1;
2313         else if (map0->region_offset > map1->region_offset)
2314                 return 1;
2315         return 0;
2316 }
2317
2318 static int cmp_map2(const void *m0, const void *m1)
2319 {
2320         const struct nfit_set_info_map2 *map0 = m0;
2321         const struct nfit_set_info_map2 *map1 = m1;
2322
2323         if (map0->region_offset < map1->region_offset)
2324                 return -1;
2325         else if (map0->region_offset > map1->region_offset)
2326                 return 1;
2327         return 0;
2328 }
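/*
 * cmp_map_compat() deliberately preserves the historical memcmp()-based
 * sort, which on little-endian hosts orders region_offset byte-wise
 * rather than numerically. Interleave-set cookies minted under that
 * ordering by older kernels would otherwise fail to validate, hence the
 * "altcookie" pass in acpi_nfit_init_interleave_set() below.
 */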
2329
2330 /* Retrieve the nth entry referencing this spa */
2331 static struct acpi_nfit_memory_map *memdev_from_spa(
2332                 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
2333 {
2334         struct nfit_memdev *nfit_memdev;
2335
2336         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
2337                 if (nfit_memdev->memdev->range_index == range_index)
2338                         if (n-- == 0)
2339                                 return nfit_memdev->memdev;
2340         return NULL;
2341 }
2342
2343 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
2344                 struct nd_region_desc *ndr_desc,
2345                 struct acpi_nfit_system_address *spa)
2346 {
2347         struct device *dev = acpi_desc->dev;
2348         struct nd_interleave_set *nd_set;
2349         u16 nr = ndr_desc->num_mappings;
2350         struct nfit_set_info2 *info2;
2351         struct nfit_set_info *info;
2352         int i;
2353
2354         nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
2355         if (!nd_set)
2356                 return -ENOMEM;
2357         import_guid(&nd_set->type_guid, spa->range_guid);
2358
2359         info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
2360         if (!info)
2361                 return -ENOMEM;
2362
2363         info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
2364         if (!info2)
2365                 return -ENOMEM;
2366
2367         for (i = 0; i < nr; i++) {
2368                 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
2369                 struct nfit_set_info_map *map = &info->mapping[i];
2370                 struct nfit_set_info_map2 *map2 = &info2->mapping[i];
2371                 struct nvdimm *nvdimm = mapping->nvdimm;
2372                 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2373                 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
2374                                 spa->range_index, i);
2375                 struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2376
2377                 if (!memdev || !nfit_mem->dcr) {
2378                         dev_err(dev, "%s: failed to find DCR\n", __func__);
2379                         return -ENODEV;
2380                 }
2381
2382                 map->region_offset = memdev->region_offset;
2383                 map->serial_number = dcr->serial_number;
2384
2385                 map2->region_offset = memdev->region_offset;
2386                 map2->serial_number = dcr->serial_number;
2387                 map2->vendor_id = dcr->vendor_id;
2388                 map2->manufacturing_date = dcr->manufacturing_date;
2389                 map2->manufacturing_location = dcr->manufacturing_location;
2390         }
2391
2392         /* v1.1 namespaces */
2393         sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
2394                         cmp_map, NULL);
2395         nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
2396
2397         /* v1.2 namespaces */
2398         sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
2399                         cmp_map2, NULL);
2400         nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);
2401
2402         /* support v1.1 namespaces created with the wrong sort order */
2403         sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
2404                         cmp_map_compat, NULL);
2405         nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
2406
2407         /* record the result of the sort for the mapping position */
2408         for (i = 0; i < nr; i++) {
2409                 struct nfit_set_info_map2 *map2 = &info2->mapping[i];
2410                 int j;
2411
2412                 for (j = 0; j < nr; j++) {
2413                         struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
2414                         struct nvdimm *nvdimm = mapping->nvdimm;
2415                         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2416                         struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2417
2418                         if (map2->serial_number == dcr->serial_number &&
2419                             map2->vendor_id == dcr->vendor_id &&
2420                             map2->manufacturing_date == dcr->manufacturing_date &&
2421                             map2->manufacturing_location
2422                                     == dcr->manufacturing_location) {
2423                                 mapping->position = i;
2424                                 break;
2425                         }
2426                 }
2427         }
2428
2429         ndr_desc->nd_set = nd_set;
2430         devm_kfree(dev, info);
2431         devm_kfree(dev, info2);
2432
2433         return 0;
2434 }
2435
2436 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
2437 {
2438         struct acpi_nfit_interleave *idt = mmio->idt;
2439         u32 sub_line_offset, line_index, line_offset;
2440         u64 line_no, table_skip_count, table_offset;
2441
2442         line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
2443         table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
2444         line_offset = idt->line_offset[line_index]
2445                 * mmio->line_size;
2446         table_offset = table_skip_count * mmio->table_size;
2447
2448         return mmio->base_offset + line_offset + table_offset + sub_line_offset;
2449 }
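/*
 * Worked example (illustrative numbers): with line_size = 256,
 * num_lines = 2, a 2-way interleave (table_size = 1024) and
 * idt->line_offset[] = { 0, 3 }, offset 900 decodes as line_no = 3,
 * sub_line_offset = 132, line_index = 1 and table_skip_count = 1,
 * yielding base_offset + 3 * 256 + 1 * 1024 + 132 = base_offset + 1924.
 */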
2450
2451 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
2452 {
2453         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2454         u64 offset = nfit_blk->stat_offset + mmio->size * bw;
2455         const u32 STATUS_MASK = 0x80000037;
2456
2457         if (mmio->num_lines)
2458                 offset = to_interleave_offset(offset, mmio);
2459
2460         return readl(mmio->addr.base + offset) & STATUS_MASK;
2461 }
2462
2463 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
2464                 resource_size_t dpa, unsigned int len, unsigned int write)
2465 {
2466         u64 cmd, offset;
2467         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2468
2469         enum {
2470                 BCW_OFFSET_MASK = (1ULL << 48)-1,
2471                 BCW_LEN_SHIFT = 48,
2472                 BCW_LEN_MASK = (1ULL << 8) - 1,
2473                 BCW_CMD_SHIFT = 56,
2474         };
2475
2476         cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
2477         len = len >> L1_CACHE_SHIFT;
2478         cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
2479         cmd |= ((u64) write) << BCW_CMD_SHIFT;
2480
2481         offset = nfit_blk->cmd_offset + mmio->size * bw;
2482         if (mmio->num_lines)
2483                 offset = to_interleave_offset(offset, mmio);
2484
2485         writeq(cmd, mmio->addr.base + offset);
2486         nvdimm_flush(nfit_blk->nd_region, NULL);
2487
2488         if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
2489                 readq(mmio->addr.base + offset);
2490 }
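/*
 * Block control word sketch: bits 0-47 carry the DPA in cache-line
 * units, bits 48-55 the length in cache lines, and bit 56 the write
 * flag. For example, dpa = 0x2000, len = 256, write = 1 with 64-byte
 * cache lines encodes as 0x80 | (4ULL << 48) | (1ULL << 56) ==
 * 0x0104000000000080. The trailing readq() for NFIT_BLK_DCR_LATCH
 * forces the posted command write to complete before the aperture is
 * touched.
 */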
2491
2492 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
2493                 resource_size_t dpa, void *iobuf, size_t len, int rw,
2494                 unsigned int lane)
2495 {
2496         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
2497         unsigned int copied = 0;
2498         u64 base_offset;
2499         int rc;
2500
2501         base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
2502                 + lane * mmio->size;
2503         write_blk_ctl(nfit_blk, lane, dpa, len, rw);
2504         while (len) {
2505                 unsigned int c;
2506                 u64 offset;
2507
2508                 if (mmio->num_lines) {
2509                         u32 line_offset;
2510
2511                         offset = to_interleave_offset(base_offset + copied,
2512                                         mmio);
2513                         div_u64_rem(offset, mmio->line_size, &line_offset);
2514                         c = min_t(size_t, len, mmio->line_size - line_offset);
2515                 } else {
2516                         offset = base_offset + nfit_blk->bdw_offset;
2517                         c = len;
2518                 }
2519
2520                 if (rw)
2521                         memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
2522                 else {
2523                         if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
2524                                 arch_invalidate_pmem((void __force *)
2525                                         mmio->addr.aperture + offset, c);
2526
2527                         memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
2528                 }
2529
2530                 copied += c;
2531                 len -= c;
2532         }
2533
2534         if (rw)
2535                 nvdimm_flush(nfit_blk->nd_region, NULL);
2536
2537         rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
2538         return rc;
2539 }
2540
2541 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
2542                 resource_size_t dpa, void *iobuf, u64 len, int rw)
2543 {
2544         struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
2545         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
2546         struct nd_region *nd_region = nfit_blk->nd_region;
2547         unsigned int lane, copied = 0;
2548         int rc = 0;
2549
2550         lane = nd_region_acquire_lane(nd_region);
2551         while (len) {
2552                 u64 c = min(len, mmio->size);
2553
2554                 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
2555                                 iobuf + copied, c, rw, lane);
2556                 if (rc)
2557                         break;
2558
2559                 copied += c;
2560                 len -= c;
2561         }
2562         nd_region_release_lane(nd_region, lane);
2563
2564         return rc;
2565 }
2566
2567 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
2568                 struct acpi_nfit_interleave *idt, u16 interleave_ways)
2569 {
2570         if (idt) {
2571                 mmio->num_lines = idt->line_count;
2572                 mmio->line_size = idt->line_size;
2573                 if (interleave_ways == 0)
2574                         return -ENXIO;
2575                 mmio->table_size = mmio->num_lines * interleave_ways
2576                         * mmio->line_size;
2577         }
2578
2579         return 0;
2580 }
2581
2582 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
2583                 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
2584 {
2585         struct nd_cmd_dimm_flags flags;
2586         int rc;
2587
2588         memset(&flags, 0, sizeof(flags));
2589         rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
2590                         sizeof(flags), NULL);
2591
2592         if (rc >= 0 && flags.status == 0)
2593                 nfit_blk->dimm_flags = flags.flags;
2594         else if (rc == -ENOTTY) {
2595                 /* fall back to a conservative default */
2596                 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
2597                 rc = 0;
2598         } else
2599                 rc = -ENXIO;
2600
2601         return rc;
2602 }
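/*
 * Fallback rationale: -ENOTTY means the DIMM offers no ND_CMD_DIMM_FLAGS
 * handler at all, so the most conservative behavior is assumed (latch
 * the control register and flush before reads). Any other failure is
 * treated as fatal and the BLK region is not enabled.
 */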
2603
2604 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
2605                 struct device *dev)
2606 {
2607         struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
2608         struct nd_blk_region *ndbr = to_nd_blk_region(dev);
2609         struct nfit_blk_mmio *mmio;
2610         struct nfit_blk *nfit_blk;
2611         struct nfit_mem *nfit_mem;
2612         struct nvdimm *nvdimm;
2613         int rc;
2614
2615         nvdimm = nd_blk_region_to_dimm(ndbr);
2616         nfit_mem = nvdimm_provider_data(nvdimm);
2617         if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
2618                 dev_dbg(dev, "missing%s%s%s\n",
2619                                 nfit_mem ? "" : " nfit_mem",
2620                                 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
2621                                 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
2622                 return -ENXIO;
2623         }
2624
2625         nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
2626         if (!nfit_blk)
2627                 return -ENOMEM;
2628         nd_blk_region_set_provider_data(ndbr, nfit_blk);
2629         nfit_blk->nd_region = to_nd_region(dev);
2630
2631         /* map block aperture memory */
2632         nfit_blk->bdw_offset = nfit_mem->bdw->offset;
2633         mmio = &nfit_blk->mmio[BDW];
2634         mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
2635                         nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
2636         if (!mmio->addr.base) {
2637                 dev_dbg(dev, "%s failed to map bdw\n",
2638                                 nvdimm_name(nvdimm));
2639                 return -ENOMEM;
2640         }
2641         mmio->size = nfit_mem->bdw->size;
2642         mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
2643         mmio->idt = nfit_mem->idt_bdw;
2644         mmio->spa = nfit_mem->spa_bdw;
2645         rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
2646                         nfit_mem->memdev_bdw->interleave_ways);
2647         if (rc) {
2648                 dev_dbg(dev, "%s failed to init bdw interleave\n",
2649                                 nvdimm_name(nvdimm));
2650                 return rc;
2651         }
2652
2653         /* map block control memory */
2654         nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
2655         nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
2656         mmio = &nfit_blk->mmio[DCR];
2657         mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
2658                         nfit_mem->spa_dcr->length);
2659         if (!mmio->addr.base) {
2660                 dev_dbg(dev, "%s failed to map dcr\n",
2661                                 nvdimm_name(nvdimm));
2662                 return -ENOMEM;
2663         }
2664         mmio->size = nfit_mem->dcr->window_size;
2665         mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
2666         mmio->idt = nfit_mem->idt_dcr;
2667         mmio->spa = nfit_mem->spa_dcr;
2668         rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
2669                         nfit_mem->memdev_dcr->interleave_ways);
2670         if (rc) {
2671                 dev_dbg(dev, "%s failed to init dcr interleave\n",
2672                                 nvdimm_name(nvdimm));
2673                 return rc;
2674         }
2675
2676         rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
2677         if (rc < 0) {
2678                 dev_dbg(dev, "%s failed get DIMM flags\n",
2679                                 nvdimm_name(nvdimm));
2680                 return rc;
2681         }
2682
2683         if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
2684                 dev_warn(dev, "unable to guarantee persistence of writes\n");
2685
2686         if (mmio->line_size == 0)
2687                 return 0;
2688
2689         if ((u32) nfit_blk->cmd_offset % mmio->line_size
2690                         + 8 > mmio->line_size) {
2691                 dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
2692                 return -ENXIO;
2693         } else if ((u32) nfit_blk->stat_offset % mmio->line_size
2694                         + 8 > mmio->line_size) {
2695                 dev_dbg(dev, "stat_offset crosses interleave boundary\n");
2696                 return -ENXIO;
2697         }
2698
2699         return 0;
2700 }
2701
2702 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
2703                 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
2704 {
2705         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2706         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2707         int cmd_rc, rc;
2708
2709         cmd->address = spa->address;
2710         cmd->length = spa->length;
2711         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
2712                         sizeof(*cmd), &cmd_rc);
2713         if (rc < 0)
2714                 return rc;
2715         return cmd_rc;
2716 }
2717
2718 static int ars_start(struct acpi_nfit_desc *acpi_desc,
2719                 struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
2720 {
2721         int rc;
2722         int cmd_rc;
2723         struct nd_cmd_ars_start ars_start;
2724         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2725         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2726
2727         memset(&ars_start, 0, sizeof(ars_start));
2728         ars_start.address = spa->address;
2729         ars_start.length = spa->length;
2730         if (req_type == ARS_REQ_SHORT)
2731                 ars_start.flags = ND_ARS_RETURN_PREV_DATA;
2732         if (nfit_spa_type(spa) == NFIT_SPA_PM)
2733                 ars_start.type = ND_ARS_PERSISTENT;
2734         else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
2735                 ars_start.type = ND_ARS_VOLATILE;
2736         else
2737                 return -ENOTTY;
2738
2739         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2740                         sizeof(ars_start), &cmd_rc);
2741
2742         if (rc < 0)
2743                 return rc;
2744         if (cmd_rc < 0)
2745                 return cmd_rc;
2746         set_bit(ARS_VALID, &acpi_desc->scrub_flags);
2747         return 0;
2748 }
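/*
 * Request-type mapping: ARS_REQ_SHORT sets ND_ARS_RETURN_PREV_DATA,
 * asking the platform for previously cached results rather than a full
 * media scan. That is why short ARS backs the boot-time poison
 * inventory, while ARS_REQ_LONG (e.g. from scrub_store() above)
 * performs a complete pass.
 */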
2749
2750 static int ars_continue(struct acpi_nfit_desc *acpi_desc)
2751 {
2752         int rc, cmd_rc;
2753         struct nd_cmd_ars_start ars_start;
2754         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2755         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2756
2757         ars_start = (struct nd_cmd_ars_start) {
2758                 .address = ars_status->restart_address,
2759                 .length = ars_status->restart_length,
2760                 .type = ars_status->type,
2761         };
2762         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2763                         sizeof(ars_start), &cmd_rc);
2764         if (rc < 0)
2765                 return rc;
2766         return cmd_rc;
2767 }
2768
2769 static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2770 {
2771         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2772         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2773         int rc, cmd_rc;
2774
2775         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2776                         acpi_desc->max_ars, &cmd_rc);
2777         if (rc < 0)
2778                 return rc;
2779         return cmd_rc;
2780 }
2781
2782 static void ars_complete(struct acpi_nfit_desc *acpi_desc,
2783                 struct nfit_spa *nfit_spa)
2784 {
2785         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2786         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2787         struct nd_region *nd_region = nfit_spa->nd_region;
2788         struct device *dev;
2789
2790         lockdep_assert_held(&acpi_desc->init_mutex);
2791         /*
2792          * Only advance the ARS state for ARS runs initiated by the
2793          * kernel; ignore ARS results from BIOS-initiated runs for scrub
2794          * completion tracking.
2795          */
2796         if (acpi_desc->scrub_spa != nfit_spa)
2797                 return;
2798
2799         if ((ars_status->address >= spa->address && ars_status->address
2800                                 < spa->address + spa->length)
2801                         || (ars_status->address < spa->address)) {
2802                 /*
2803                  * Assume that a scrub that starts at an offset from the
2804                  * start of nfit_spa is the continuation of a previous
2805                  * scrub.
2806                  *
2807                  * Otherwise, if the scrub covers the spa range, mark
2808                  * any pending request complete.
2809                  */
2810                 if (ars_status->address + ars_status->length
2811                                 >= spa->address + spa->length)
2812                         /* complete */;
2813                 else
2814                         return;
2815         } else
2816                 return;
2817
2818         acpi_desc->scrub_spa = NULL;
2819         if (nd_region) {
2820                 dev = nd_region_dev(nd_region);
2821                 nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
2822         } else
2823                 dev = acpi_desc->dev;
2824         dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
2825 }
2826
2827 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
2828 {
2829         struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
2830         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2831         int rc;
2832         u32 i;
2833
2834         /*
2835          * The first record starts at a 44-byte offset from the start
2836          * of the payload.
2837          */
2838         if (ars_status->out_length < 44)
2839                 return 0;
2840
2841         /*
2842          * Ignore potentially stale results that are only refreshed
2843          * after a start-ARS event.
2844          */
2845         if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
2846                 dev_dbg(acpi_desc->dev, "skip %d stale records\n",
2847                                 ars_status->num_records);
2848                 return 0;
2849         }
2850
2851         for (i = 0; i < ars_status->num_records; i++) {
2852                 /* only process full records */
2853                 if (ars_status->out_length
2854                                 < 44 + sizeof(struct nd_ars_record) * (i + 1))
2855                         break;
2856                 rc = nvdimm_bus_add_badrange(nvdimm_bus,
2857                                 ars_status->records[i].err_address,
2858                                 ars_status->records[i].length);
2859                 if (rc)
2860                         return rc;
2861         }
2862         if (i < ars_status->num_records)
2863                 dev_warn(acpi_desc->dev, "detected truncated ars results\n");
2864
2865         return 0;
2866 }
2867
2868 static void acpi_nfit_remove_resource(void *data)
2869 {
2870         struct resource *res = data;
2871
2872         remove_resource(res);
2873 }
2874
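/*
 * Claim a pmem range in the global iomem_resource tree so that it is
 * visible in, e.g., /proc/iomem (address values purely illustrative):
 *
 *	240000000-63fffffff : Persistent Memory
 *
 * The devm action arranges for the resource to be removed again when
 * the acpi_desc device is torn down.
 */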
2875 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2876                 struct nd_region_desc *ndr_desc)
2877 {
2878         struct resource *res, *nd_res = ndr_desc->res;
2879         int is_pmem, ret;
2880
2881         /* No operation if the region is already registered as PMEM */
2882         is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2883                                 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2884         if (is_pmem == REGION_INTERSECTS)
2885                 return 0;
2886
2887         res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2888         if (!res)
2889                 return -ENOMEM;
2890
2891         res->name = "Persistent Memory";
2892         res->start = nd_res->start;
2893         res->end = nd_res->end;
2894         res->flags = IORESOURCE_MEM;
2895         res->desc = IORES_DESC_PERSISTENT_MEMORY;
2896
2897         ret = insert_resource(&iomem_resource, res);
2898         if (ret)
2899                 return ret;
2900
2901         ret = devm_add_action_or_reset(acpi_desc->dev,
2902                                         acpi_nfit_remove_resource,
2903                                         res);
2904         if (ret)
2905                 return ret;
2906
2907         return 0;
2908 }
2909
2910 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
2911                 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
2912                 struct acpi_nfit_memory_map *memdev,
2913                 struct nfit_spa *nfit_spa)
2914 {
2915         struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
2916                         memdev->device_handle);
2917         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2918         struct nd_blk_region_desc *ndbr_desc;
2919         struct nfit_mem *nfit_mem;
2920         int rc;
2921
2922         if (!nvdimm) {
2923                 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
2924                                 spa->range_index, memdev->device_handle);
2925                 return -ENODEV;
2926         }
2927
2928         mapping->nvdimm = nvdimm;
2929         switch (nfit_spa_type(spa)) {
2930         case NFIT_SPA_PM:
2931         case NFIT_SPA_VOLATILE:
2932                 mapping->start = memdev->address;
2933                 mapping->size = memdev->region_size;
2934                 break;
2935         case NFIT_SPA_DCR:
2936                 nfit_mem = nvdimm_provider_data(nvdimm);
2937                 if (!nfit_mem || !nfit_mem->bdw) {
2938                         dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
2939                                         spa->range_index, nvdimm_name(nvdimm));
2940                         break;
2941                 }
2942
2943                 mapping->size = nfit_mem->bdw->capacity;
2944                 mapping->start = nfit_mem->bdw->start_address;
2945                 ndr_desc->num_lanes = nfit_mem->bdw->windows;
2946                 ndr_desc->mapping = mapping;
2947                 ndr_desc->num_mappings = 1;
2948                 ndbr_desc = to_blk_region_desc(ndr_desc);
2949                 ndbr_desc->enable = acpi_nfit_blk_region_enable;
2950                 ndbr_desc->do_io = acpi_desc->blk_do_io;
2951                 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2952                 if (rc)
2953                         return rc;
2954                 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
2955                                 ndr_desc);
2956                 if (!nfit_spa->nd_region)
2957                         return -ENOMEM;
2958                 break;
2959         }
2960
2961         return 0;
2962 }
2963
2964 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2965 {
2966         return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2967                 nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2968                 nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2969                 nfit_spa_type(spa) == NFIT_SPA_PCD);
2970 }
2971
2972 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
2973 {
2974         return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2975                 nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2976                 nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
2977 }
2978
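/*
 * Instantiate an nd_region for a spa range: pmem ranges are reserved
 * in iomem and created via nvdimm_pmem_region_create(), volatile
 * ranges via nvdimm_volatile_region_create(), and virtual disk / CD
 * ranges are exposed as pmem regions without an iomem reservation.
 */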
2979 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2980                 struct nfit_spa *nfit_spa)
2981 {
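        /*
         * Static rather than on-stack to keep the frame size down;
         * callers are serialized by acpi_desc->init_mutex.
         */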
2982         static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
2983         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2984         struct nd_blk_region_desc ndbr_desc;
2985         struct nd_region_desc *ndr_desc;
2986         struct nfit_memdev *nfit_memdev;
2987         struct nvdimm_bus *nvdimm_bus;
2988         struct resource res;
2989         int count = 0, rc;
2990
2991         if (nfit_spa->nd_region)
2992                 return 0;
2993
2994         if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
2995                 dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
2996                 return 0;
2997         }
2998
2999         memset(&res, 0, sizeof(res));
3000         memset(&mappings, 0, sizeof(mappings));
3001         memset(&ndbr_desc, 0, sizeof(ndbr_desc));
3002         res.start = spa->address;
3003         res.end = res.start + spa->length - 1;
3004         ndr_desc = &ndbr_desc.ndr_desc;
3005         ndr_desc->res = &res;
3006         ndr_desc->provider_data = nfit_spa;
3007         ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
3008         if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
3009                 ndr_desc->numa_node = pxm_to_online_node(spa->proximity_domain);
3010                 ndr_desc->target_node = pxm_to_node(spa->proximity_domain);
3011         } else {
3012                 ndr_desc->numa_node = NUMA_NO_NODE;
3013                 ndr_desc->target_node = NUMA_NO_NODE;
3014         }
3015
3016         /*
3017          * Persistence domain bits are hierarchical: if
3018          * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
3019          * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
3020          */
3021         if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
3022                 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
3023         else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
3024                 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
3025
3026         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
3027                 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
3028                 struct nd_mapping_desc *mapping;
3029
3030                 if (memdev->range_index != spa->range_index)
3031                         continue;
3032                 if (count >= ND_MAX_MAPPINGS) {
3033                         dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
3034                                         spa->range_index, ND_MAX_MAPPINGS);
3035                         return -ENXIO;
3036                 }
3037                 mapping = &mappings[count++];
3038                 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
3039                                 memdev, nfit_spa);
3040                 if (rc)
3041                         goto out;
3042         }
3043
3044         ndr_desc->mapping = mappings;
3045         ndr_desc->num_mappings = count;
3046         rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
3047         if (rc)
3048                 goto out;
3049
3050         nvdimm_bus = acpi_desc->nvdimm_bus;
3051         if (nfit_spa_type(spa) == NFIT_SPA_PM) {
3052                 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
3053                 if (rc) {
3054                         dev_warn(acpi_desc->dev,
3055                                 "failed to insert pmem resource to iomem: %d\n",
3056                                 rc);
3057                         goto out;
3058                 }
3059
3060                 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
3061                                 ndr_desc);
3062                 if (!nfit_spa->nd_region)
3063                         rc = -ENOMEM;
3064         } else if (nfit_spa_is_volatile(spa)) {
3065                 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
3066                                 ndr_desc);
3067                 if (!nfit_spa->nd_region)
3068                         rc = -ENOMEM;
3069         } else if (nfit_spa_is_virtual(spa)) {
3070                 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
3071                                 ndr_desc);
3072                 if (!nfit_spa->nd_region)
3073                         rc = -ENOMEM;
3074         }
3075
3076  out:
3077         if (rc)
3078                 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
3079                                 nfit_spa->spa->range_index);
3080         return rc;
3081 }
3082
3083 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc)
3084 {
3085         struct device *dev = acpi_desc->dev;
3086         struct nd_cmd_ars_status *ars_status;
3087
3088         if (acpi_desc->ars_status) {
3089                 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
3090                 return 0;
3091         }
3092
3093         ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL);
3094         if (!ars_status)
3095                 return -ENOMEM;
3096         acpi_desc->ars_status = ars_status;
3097         return 0;
3098 }
3099
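/*
 * Fetch the latest ARS status and feed any error records into the
 * nvdimm badrange tracking. The raw ars_get_status() result is
 * returned so callers can distinguish "more records pending"
 * (-ENOSPC) from "scrub still in progress" (-EBUSY).
 */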
3100 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
3101 {
3102         int rc;
3103
3104         if (ars_status_alloc(acpi_desc))
3105                 return -ENOMEM;
3106
3107         rc = ars_get_status(acpi_desc);
3108
3109         if (rc < 0 && rc != -ENOSPC)
3110                 return rc;
3111
3112         if (ars_status_process_records(acpi_desc))
3113                 dev_err(acpi_desc->dev, "Failed to process ARS records\n");
3114
3115         return rc;
3116 }
3117
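/*
 * Registration policy: always attempt a short ARS first so that
 * platform-known poison is recorded before the region is brought up,
 * and queue a long scrub for the background worker unless the
 * no_init_ars module parameter opted out.
 */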
3118 static int ars_register(struct acpi_nfit_desc *acpi_desc,
3119                 struct nfit_spa *nfit_spa)
3120 {
3121         int rc;
3122
3123         if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3124                 return acpi_nfit_register_region(acpi_desc, nfit_spa);
3125
3126         set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
3127         if (!no_init_ars)
3128                 set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
3129
3130         switch (acpi_nfit_query_poison(acpi_desc)) {
3131         case 0:
3132         case -ENOSPC:
3133         case -EAGAIN:
3134                 rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
3135                 /* shouldn't happen, try again later */
3136                 if (rc == -EBUSY)
3137                         break;
3138                 if (rc) {
3139                         set_bit(ARS_FAILED, &nfit_spa->ars_state);
3140                         break;
3141                 }
3142                 clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
3143                 rc = acpi_nfit_query_poison(acpi_desc);
3144                 if (rc)
3145                         break;
3146                 acpi_desc->scrub_spa = nfit_spa;
3147                 ars_complete(acpi_desc, nfit_spa);
3148                 /*
3149                  * If ars_complete() says we didn't complete the
3150                  * short scrub, we'll try again with a long
3151                  * request.
3152                  */
3153                 acpi_desc->scrub_spa = NULL;
3154                 break;
3155         case -EBUSY:
3156         case -ENOMEM:
3157                 /*
3158                  * BIOS was using ARS; wait for it to complete (or for
3159                  * resources to become available) and then perform our
3160                  * own scrubs.
3161                  */
3162                 break;
3163         default:
3164                 set_bit(ARS_FAILED, &nfit_spa->ars_state);
3165                 break;
3166         }
3167
3168         return acpi_nfit_register_region(acpi_desc, nfit_spa);
3169 }
3170
3171 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc)
3172 {
3173         struct nfit_spa *nfit_spa;
3174
3175         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3176                 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3177                         continue;
3178                 ars_complete(acpi_desc, nfit_spa);
3179         }
3180 }
3181
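/*
 * Advance the scrub state machine. Returns the number of seconds to
 * wait before polling again (converted to a delayed-work timeout by
 * the caller), or 0 when no further ARS work is outstanding. Must be
 * called with init_mutex held.
 */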
3182 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
3183                 int query_rc)
3184 {
3185         unsigned int tmo = acpi_desc->scrub_tmo;
3186         struct device *dev = acpi_desc->dev;
3187         struct nfit_spa *nfit_spa;
3188
3189         lockdep_assert_held(&acpi_desc->init_mutex);
3190
3191         if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
3192                 return 0;
3193
3194         if (query_rc == -EBUSY) {
3195                 dev_dbg(dev, "ARS: ARS busy\n");
3196                 return min(30U * 60U, tmo * 2);
3197         }
3198         if (query_rc == -ENOSPC) {
3199                 dev_dbg(dev, "ARS: ARS continue\n");
3200                 ars_continue(acpi_desc);
3201                 return 1;
3202         }
3203         if (query_rc && query_rc != -EAGAIN) {
3204                 unsigned long long addr, end;
3205
3206                 addr = acpi_desc->ars_status->address;
3207                 end = addr + acpi_desc->ars_status->length;
3208                 dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end,
3209                                 query_rc);
3210         }
3211
3212         ars_complete_all(acpi_desc);
3213         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3214                 enum nfit_ars_state req_type;
3215                 int rc;
3216
3217                 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3218                         continue;
3219
3220                 /* prefer short ARS requests first */
3221                 if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
3222                         req_type = ARS_REQ_SHORT;
3223                 else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
3224                         req_type = ARS_REQ_LONG;
3225                 else
3226                         continue;
3227                 rc = ars_start(acpi_desc, nfit_spa, req_type);
3228
3229                 dev = nd_region_dev(nfit_spa->nd_region);
3230                 dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
3231                                 nfit_spa->spa->range_index,
3232                                 req_type == ARS_REQ_SHORT ? "short" : "long",
3233                                 rc);
3234                 /*
3235                  * We raced someone else starting ARS; try again in
3236                  * a bit.
3237                  */
3238                 if (rc == -EBUSY)
3239                         return 1;
3240                 if (rc == 0) {
3241                         dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
3242                                         "scrub start while range %d active\n",
3243                                         acpi_desc->scrub_spa->spa->range_index);
3244                         clear_bit(req_type, &nfit_spa->ars_state);
3245                         acpi_desc->scrub_spa = nfit_spa;
3246                         /*
3247                          * Consider this spa last for future scrub
3248                          * requests.
3249                          */
3250                         list_move_tail(&nfit_spa->list, &acpi_desc->spas);
3251                         return 1;
3252                 }
3253
3254                 dev_err(dev, "ARS: range %d ARS failed (%d)\n",
3255                                 nfit_spa->spa->range_index, rc);
3256                 set_bit(ARS_FAILED, &nfit_spa->ars_state);
3257         }
3258         return 0;
3259 }
3260
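/* Schedule the next scrub poll; @tmo is in seconds (converted via HZ) */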
3261 static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
3262 {
3263         lockdep_assert_held(&acpi_desc->init_mutex);
3264
3265         set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
3266         /* Note: scrub_tmo should only be set from within the workqueue */
3267         if (tmo)
3268                 acpi_desc->scrub_tmo = tmo;
3269         queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
3270 }
3271
3272 static void sched_ars(struct acpi_nfit_desc *acpi_desc)
3273 {
3274         __sched_ars(acpi_desc, 0);
3275 }
3276
3277 static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
3278 {
3279         lockdep_assert_held(&acpi_desc->init_mutex);
3280
3281         clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
3282         acpi_desc->scrub_count++;
3283         if (acpi_desc->scrub_count_state)
3284                 sysfs_notify_dirent(acpi_desc->scrub_count_state);
3285 }
3286
3287 static void acpi_nfit_scrub(struct work_struct *work)
3288 {
3289         struct acpi_nfit_desc *acpi_desc;
3290         unsigned int tmo;
3291         int query_rc;
3292
3293         acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
3294         mutex_lock(&acpi_desc->init_mutex);
3295         query_rc = acpi_nfit_query_poison(acpi_desc);
3296         tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
3297         if (tmo)
3298                 __sched_ars(acpi_desc, tmo);
3299         else
3300                 notify_ars_done(acpi_desc);
3301         memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
3302         clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
3303         mutex_unlock(&acpi_desc->init_mutex);
3304 }
3305
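/*
 * Probe the ARS capabilities of a range. ARS_FAILED is set up front
 * and only cleared once the platform reports a scrub type that
 * matches the spa type; ranges left in ARS_FAILED are later
 * registered without ARS.
 */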
3306 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
3307                 struct nfit_spa *nfit_spa)
3308 {
3309         int type = nfit_spa_type(nfit_spa->spa);
3310         struct nd_cmd_ars_cap ars_cap;
3311         int rc;
3312
3313         set_bit(ARS_FAILED, &nfit_spa->ars_state);
3314         memset(&ars_cap, 0, sizeof(ars_cap));
3315         rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
3316         if (rc < 0)
3317                 return;
3318         /* check that the supported scrub types match the spa type */
3319         if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16)
3320                                 & ND_ARS_VOLATILE) == 0)
3321                 return;
3322         if (type == NFIT_SPA_PM && ((ars_cap.status >> 16)
3323                                 & ND_ARS_PERSISTENT) == 0)
3324                 return;
3325
3326         nfit_spa->max_ars = ars_cap.max_ars_out;
3327         nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
3328         acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
3329         clear_bit(ARS_FAILED, &nfit_spa->ars_state);
3330 }
3331
3332 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
3333 {
3334         struct nfit_spa *nfit_spa;
3335         int rc, do_sched_ars = 0;
3336
3337         set_bit(ARS_VALID, &acpi_desc->scrub_flags);
3338         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3339                 switch (nfit_spa_type(nfit_spa->spa)) {
3340                 case NFIT_SPA_VOLATILE:
3341                 case NFIT_SPA_PM:
3342                         acpi_nfit_init_ars(acpi_desc, nfit_spa);
3343                         break;
3344                 }
3345         }
3346
3347         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3348                 switch (nfit_spa_type(nfit_spa->spa)) {
3349                 case NFIT_SPA_VOLATILE:
3350                 case NFIT_SPA_PM:
3351                         /* register regions and kick off initial ARS run */
3352                         rc = ars_register(acpi_desc, nfit_spa);
3353                         if (rc)
3354                                 return rc;
3355
3356                         /*
3357                          * Kick off background ARS if at least one
3358                          * region successfully registered for ARS.
3359                          */
3360                         if (!test_bit(ARS_FAILED, &nfit_spa->ars_state))
3361                                 do_sched_ars++;
3362                         break;
3363                 case NFIT_SPA_BDW:
3364                         /* nothing to register */
3365                         break;
3366                 case NFIT_SPA_DCR:
3367                 case NFIT_SPA_VDISK:
3368                 case NFIT_SPA_VCD:
3369                 case NFIT_SPA_PDISK:
3370                 case NFIT_SPA_PCD:
3371                         /* register known regions that don't support ARS */
3372                         rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
3373                         if (rc)
3374                                 return rc;
3375                         break;
3376                 default:
3377                         /* don't register unknown regions */
3378                         break;
3379                 }
3380         }
3381
3382         if (do_sched_ars)
3383                 sched_ars(acpi_desc);
3384         return 0;
3385 }
3386
3387 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
3388                 struct nfit_table_prev *prev)
3389 {
3390         struct device *dev = acpi_desc->dev;
3391
3392         if (!list_empty(&prev->spas) ||
3393                         !list_empty(&prev->memdevs) ||
3394                         !list_empty(&prev->dcrs) ||
3395                         !list_empty(&prev->bdws) ||
3396                         !list_empty(&prev->idts) ||
3397                         !list_empty(&prev->flushes)) {
3398                 dev_err(dev, "new nfit deletes entries (unsupported)\n");
3399                 return -ENXIO;
3400         }
3401         return 0;
3402 }
3403
3404 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
3405 {
3406         struct device *dev = acpi_desc->dev;
3407         struct kernfs_node *nfit;
3408         struct device *bus_dev;
3409
3410         if (!ars_supported(acpi_desc->nvdimm_bus))
3411                 return 0;
3412
3413         bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
3414         nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
3415         if (!nfit) {
3416                 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
3417                 return -ENODEV;
3418         }
3419         acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
3420         sysfs_put(nfit);
3421         if (!acpi_desc->scrub_count_state) {
3422                 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
3423                 return -ENODEV;
3424         }
3425
3426         return 0;
3427 }
3428
3429 static void acpi_nfit_unregister(void *data)
3430 {
3431         struct acpi_nfit_desc *acpi_desc = data;
3432
3433         nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
3434 }
3435
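/*
 * Parse an NFIT (or _FIT) buffer and register the resulting bus,
 * dimms, and regions. The first call creates the nvdimm bus; on
 * re-enumeration the live table lists are cut over to 'prev' and
 * reconciled against the new buffer, where only additions are
 * supported (see acpi_nfit_check_deletions()).
 */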
3436 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
3437 {
3438         struct device *dev = acpi_desc->dev;
3439         struct nfit_table_prev prev;
3440         const void *end;
3441         int rc;
3442
3443         if (!acpi_desc->nvdimm_bus) {
3444                 acpi_nfit_init_dsms(acpi_desc);
3445
3446                 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
3447                                 &acpi_desc->nd_desc);
3448                 if (!acpi_desc->nvdimm_bus)
3449                         return -ENOMEM;
3450
3451                 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
3452                                 acpi_desc);
3453                 if (rc)
3454                         return rc;
3455
3456                 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
3457                 if (rc)
3458                         return rc;
3459
3460                 /* register this acpi_desc for mce notifications */
3461                 mutex_lock(&acpi_desc_lock);
3462                 list_add_tail(&acpi_desc->list, &acpi_descs);
3463                 mutex_unlock(&acpi_desc_lock);
3464         }
3465
3466         mutex_lock(&acpi_desc->init_mutex);
3467
3468         INIT_LIST_HEAD(&prev.spas);
3469         INIT_LIST_HEAD(&prev.memdevs);
3470         INIT_LIST_HEAD(&prev.dcrs);
3471         INIT_LIST_HEAD(&prev.bdws);
3472         INIT_LIST_HEAD(&prev.idts);
3473         INIT_LIST_HEAD(&prev.flushes);
3474
3475         list_cut_position(&prev.spas, &acpi_desc->spas,
3476                                 acpi_desc->spas.prev);
3477         list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
3478                                 acpi_desc->memdevs.prev);
3479         list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
3480                                 acpi_desc->dcrs.prev);
3481         list_cut_position(&prev.bdws, &acpi_desc->bdws,
3482                                 acpi_desc->bdws.prev);
3483         list_cut_position(&prev.idts, &acpi_desc->idts,
3484                                 acpi_desc->idts.prev);
3485         list_cut_position(&prev.flushes, &acpi_desc->flushes,
3486                                 acpi_desc->flushes.prev);
3487
3488         end = data + sz;
3489         while (!IS_ERR_OR_NULL(data))
3490                 data = add_table(acpi_desc, &prev, data, end);
3491
3492         if (IS_ERR(data)) {
3493                 dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
3494                 rc = PTR_ERR(data);
3495                 goto out_unlock;
3496         }
3497
3498         rc = acpi_nfit_check_deletions(acpi_desc, &prev);
3499         if (rc)
3500                 goto out_unlock;
3501
3502         rc = nfit_mem_init(acpi_desc);
3503         if (rc)
3504                 goto out_unlock;
3505
3506         rc = acpi_nfit_register_dimms(acpi_desc);
3507         if (rc)
3508                 goto out_unlock;
3509
3510         rc = acpi_nfit_register_regions(acpi_desc);
3511
3512  out_unlock:
3513         mutex_unlock(&acpi_desc->init_mutex);
3514         return rc;
3515 }
3516 EXPORT_SYMBOL_GPL(acpi_nfit_init);
3517
3518 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
3519 {
3520         struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
3521         struct device *dev = acpi_desc->dev;
3522
3523         /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
3524         nfit_device_lock(dev);
3525         nfit_device_unlock(dev);
3526
3527         /* Bounce the init_mutex to complete initial registration */
3528         mutex_lock(&acpi_desc->init_mutex);
3529         mutex_unlock(&acpi_desc->init_mutex);
3530
3531         return 0;
3532 }
3533
3534 static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3535                 struct nvdimm *nvdimm, unsigned int cmd)
3536 {
3537         struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
3538
3539         if (nvdimm)
3540                 return 0;
3541         if (cmd != ND_CMD_ARS_START)
3542                 return 0;
3543
3544         /*
3545          * The kernel and userspace may race to initiate a scrub, but
3546          * the scrub thread is prepared to lose that initial race.  It
3547          * just needs a guarantee that any ARS it initiates is not
3548          * interrupted by an intervening start request from userspace.
3549          */
3550         if (work_busy(&acpi_desc->dwork.work))
3551                 return -EBUSY;
3552
3553         return 0;
3554 }
3555
3556 /*
3557  * Prevent security and firmware activate commands from being issued via
3558  * ioctl.
3559  */
3560 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3561                 struct nvdimm *nvdimm, unsigned int cmd, void *buf)
3562 {
3563         struct nd_cmd_pkg *call_pkg = buf;
3564         unsigned int func;
3565
3566         if (nvdimm && cmd == ND_CMD_CALL &&
3567                         call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
3568                 func = call_pkg->nd_command;
3569                 if (func > NVDIMM_CMD_MAX ||
3570                     (1 << func) & NVDIMM_INTEL_DENY_CMDMASK)
3571                         return -EOPNOTSUPP;
3572         }
3573
3574         /* block all non-nfit bus commands */
3575         if (!nvdimm && cmd == ND_CMD_CALL &&
3576                         call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT)
3577                 return -EOPNOTSUPP;
3578
3579         return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
3580 }
3581
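/*
 * Request an on-demand rescan of all scrub-capable ranges. Returns 0
 * if at least one range was scheduled, -EBUSY if the requested scrub
 * type was already pending everywhere, and -ENOTTY if no capable
 * range exists. This is also the path behind the bus-level 'scrub'
 * sysfs attribute, e.g. (path illustrative, assuming bus ndbus0):
 *
 *	# echo 1 > /sys/bus/nd/devices/ndbus0/nfit/scrub
 */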
3582 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
3583                 enum nfit_ars_state req_type)
3584 {
3585         struct device *dev = acpi_desc->dev;
3586         int scheduled = 0, busy = 0;
3587         struct nfit_spa *nfit_spa;
3588
3589         mutex_lock(&acpi_desc->init_mutex);
3590         if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
3591                 mutex_unlock(&acpi_desc->init_mutex);
3592                 return 0;
3593         }
3594
3595         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3596                 int type = nfit_spa_type(nfit_spa->spa);
3597
3598                 if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
3599                         continue;
3600                 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3601                         continue;
3602
3603                 if (test_and_set_bit(req_type, &nfit_spa->ars_state))
3604                         busy++;
3605                 else
3606                         scheduled++;
3607         }
3608         if (scheduled) {
3609                 sched_ars(acpi_desc);
3610                 dev_dbg(dev, "ars_scan triggered\n");
3611         }
3612         mutex_unlock(&acpi_desc->init_mutex);
3613
3614         if (scheduled)
3615                 return 0;
3616         if (busy)
3617                 return -EBUSY;
3618         return -ENOTTY;
3619 }
3620
3621 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
3622 {
3623         struct nvdimm_bus_descriptor *nd_desc;
3624
3625         dev_set_drvdata(dev, acpi_desc);
3626         acpi_desc->dev = dev;
3627         acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
3628         nd_desc = &acpi_desc->nd_desc;
3629         nd_desc->provider_name = "ACPI.NFIT";
3630         nd_desc->module = THIS_MODULE;
3631         nd_desc->ndctl = acpi_nfit_ctl;
3632         nd_desc->flush_probe = acpi_nfit_flush_probe;
3633         nd_desc->clear_to_send = acpi_nfit_clear_to_send;
3634         nd_desc->attr_groups = acpi_nfit_attribute_groups;
3635
3636         INIT_LIST_HEAD(&acpi_desc->spas);
3637         INIT_LIST_HEAD(&acpi_desc->dcrs);
3638         INIT_LIST_HEAD(&acpi_desc->bdws);
3639         INIT_LIST_HEAD(&acpi_desc->idts);
3640         INIT_LIST_HEAD(&acpi_desc->flushes);
3641         INIT_LIST_HEAD(&acpi_desc->memdevs);
3642         INIT_LIST_HEAD(&acpi_desc->dimms);
3643         INIT_LIST_HEAD(&acpi_desc->list);
3644         mutex_init(&acpi_desc->init_mutex);
3645         acpi_desc->scrub_tmo = 1;
3646         INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
3647 }
3648 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
3649
3650 static void acpi_nfit_put_table(void *table)
3651 {
3652         acpi_put_table(table);
3653 }
3654
3655 void acpi_nfit_shutdown(void *data)
3656 {
3657         struct acpi_nfit_desc *acpi_desc = data;
3658         struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
3659
3660         /*
3661          * Delete from the acpi_descs list under acpi_desc_lock so that
3662          * nfit_handle_mce() does not race teardown.
3663          */
3664         mutex_lock(&acpi_desc_lock);
3665         list_del(&acpi_desc->list);
3666         mutex_unlock(&acpi_desc_lock);
3667
3668         mutex_lock(&acpi_desc->init_mutex);
3669         set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
3670         cancel_delayed_work_sync(&acpi_desc->dwork);
3671         mutex_unlock(&acpi_desc->init_mutex);
3672
3673         /*
3674          * Bounce the nvdimm bus lock to make sure any in-flight
3675          * acpi_nfit_ars_rescan() submissions have had a chance to
3676          * either submit or see ARS_CANCEL set.
3677          */
3678         nfit_device_lock(bus_dev);
3679         nfit_device_unlock(bus_dev);
3680
3681         flush_workqueue(nfit_wq);
3682 }
3683 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
3684
3685 static int acpi_nfit_add(struct acpi_device *adev)
3686 {
3687         struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
3688         struct acpi_nfit_desc *acpi_desc;
3689         struct device *dev = &adev->dev;
3690         struct acpi_table_header *tbl;
3691         acpi_status status = AE_OK;
3692         acpi_size sz;
3693         int rc = 0;
3694
3695         status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
3696         if (ACPI_FAILURE(status)) {
3697                 /* The NVDIMM root device allows the OS to trigger
3698                  * enumeration of NVDIMMs through the NFIT at boot time,
3699                  * and re-enumeration at the root level via the _FIT
3700                  * method at runtime. Returning 0 here is ok; an NVDIMM
3701                  * may be hotplugged later, at which point the _FIT method
3702                  * is evaluated and returns a series of NFIT structures.
3703                  */
3704                 dev_dbg(dev, "failed to find NFIT at startup\n");
3705                 return 0;
3706         }
3707
3708         rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
3709         if (rc)
3710                 return rc;
3711         sz = tbl->length;
3712
3713         acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
3714         if (!acpi_desc)
3715                 return -ENOMEM;
3716         acpi_nfit_desc_init(acpi_desc, &adev->dev);
3717
3718         /* Save the acpi header for exporting the revision via sysfs */
3719         acpi_desc->acpi_header = *tbl;
3720
3721         /* Evaluate _FIT and override with that if present */
3722         status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
3723         if (ACPI_SUCCESS(status) && buf.length > 0) {
3724                 union acpi_object *obj = buf.pointer;
3725
3726                 if (obj->type == ACPI_TYPE_BUFFER)
3727                         rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
3728                                         obj->buffer.length);
3729                 else
3730                         dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
3731                                 (int) obj->type);
3732                 kfree(buf.pointer);
3733         } else
3734                 /* skip over the lead-in header table */
3735                 rc = acpi_nfit_init(acpi_desc, (void *) tbl
3736                                 + sizeof(struct acpi_table_nfit),
3737                                 sz - sizeof(struct acpi_table_nfit));
3738
3739         if (rc)
3740                 return rc;
3741         return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
3742 }
3743
3744 static int acpi_nfit_remove(struct acpi_device *adev)
3745 {
3746         /* see acpi_nfit_unregister */
3747         return 0;
3748 }
3749
3750 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
3751 {
3752         struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
3753         struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
3754         union acpi_object *obj;
3755         acpi_status status;
3756         int ret;
3757
3758         if (!dev->driver) {
3759                 /* dev->driver may be null if we're being removed */
3760                 dev_dbg(dev, "no driver found for dev\n");
3761                 return;
3762         }
3763
3764         if (!acpi_desc) {
3765                 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
3766                 if (!acpi_desc)
3767                         return;
3768                 acpi_nfit_desc_init(acpi_desc, dev);
3769         } else {
3770                 /*
3771                  * Finish previous registration before considering new
3772                  * regions.
3773                  */
3774                 flush_workqueue(nfit_wq);
3775         }
3776
3777         /* Evaluate _FIT */
3778         status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
3779         if (ACPI_FAILURE(status)) {
3780                 dev_err(dev, "failed to evaluate _FIT\n");
3781                 return;
3782         }
3783
3784         obj = buf.pointer;
3785         if (obj->type == ACPI_TYPE_BUFFER) {
3786                 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
3787                                 obj->buffer.length);
3788                 if (ret)
3789                         dev_err(dev, "failed to merge updated NFIT\n");
3790         } else
3791                 dev_err(dev, "Invalid _FIT\n");
3792         kfree(buf.pointer);
3793 }
3794
3795 static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
3796 {
3797         struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
3798
3799         if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
3800                 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
3801         else
3802                 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
3803 }
3804
3805 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
3806 {
3807         dev_dbg(dev, "event: 0x%x\n", event);
3808
3809         switch (event) {
3810         case NFIT_NOTIFY_UPDATE:
3811                 return acpi_nfit_update_notify(dev, handle);
3812         case NFIT_NOTIFY_UC_MEMORY_ERROR:
3813                 return acpi_nfit_uc_error_notify(dev, handle);
3814         default:
3815                 return;
3816         }
3817 }
3818 EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
3819
3820 static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
3821 {
3822         nfit_device_lock(&adev->dev);
3823         __acpi_nfit_notify(&adev->dev, adev->handle, event);
3824         nfit_device_unlock(&adev->dev);
3825 }
3826
3827 static const struct acpi_device_id acpi_nfit_ids[] = {
3828         { "ACPI0012", 0 },
3829         { "", 0 },
3830 };
3831 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
3832
3833 static struct acpi_driver acpi_nfit_driver = {
3834         .name = KBUILD_MODNAME,
3835         .ids = acpi_nfit_ids,
3836         .ops = {
3837                 .add = acpi_nfit_add,
3838                 .remove = acpi_nfit_remove,
3839                 .notify = acpi_nfit_notify,
3840         },
3841 };
3842
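/*
 * The NFIT sub-table sizes checked below are fixed by the ACPI
 * specification, so the BUILD_BUG_ON()s catch ACPICA header
 * regressions at compile time.
 */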
3843 static __init int nfit_init(void)
3844 {
3845         int ret;
3846
3847         BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
3848         BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
3849         BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
3850         BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
3851         BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
3852         BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
3853         BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
3854         BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);
3855
3856         guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
3857         guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
3858         guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
3859         guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
3860         guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
3861         guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
3862         guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
3863         guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
3864         guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
3865         guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
3866         guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
3867         guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
3868         guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
3869         guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
3870         guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]);
3871
3872         nfit_wq = create_singlethread_workqueue("nfit");
3873         if (!nfit_wq)
3874                 return -ENOMEM;
3875
3876         nfit_mce_register();
3877         ret = acpi_bus_register_driver(&acpi_nfit_driver);
3878         if (ret) {
3879                 nfit_mce_unregister();
3880                 destroy_workqueue(nfit_wq);
3881         }
3882
3883         return ret;
3884
3885 }
3886
3887 static __exit void nfit_exit(void)
3888 {
3889         nfit_mce_unregister();
3890         acpi_bus_unregister_driver(&acpi_nfit_driver);
3891         destroy_workqueue(nfit_wq);
3892         WARN_ON(!list_empty(&acpi_descs));
3893 }
3894
3895 module_init(nfit_init);
3896 module_exit(nfit_exit);
3897 MODULE_LICENSE("GPL v2");
3898 MODULE_AUTHOR("Intel Corporation");