// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>

struct nvmem_device {
        struct module           *owner;
        struct device           dev;
        int                     stride;
        int                     word_size;
        int                     id;
        struct kref             refcnt;
        size_t                  size;
        bool                    read_only;
        bool                    root_only;
        int                     flags;
        enum nvmem_type         type;
        struct bin_attribute    eeprom;
        struct device           *base_dev;
        struct list_head        cells;
        const struct nvmem_keepout *keepout;
        unsigned int            nkeepout;
        nvmem_reg_read_t        reg_read;
        nvmem_reg_write_t       reg_write;
        struct gpio_desc        *wp_gpio;
        struct nvmem_layout     *layout;
        void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT             BIT(0)
struct nvmem_cell_entry {
        const char              *name;
        int                     offset;
        size_t                  raw_len;
        int                     bytes;
        int                     bit_offset;
        int                     nbits;
        nvmem_cell_post_process_t read_post_process;
        void                    *priv;
        struct device_node      *np;
        struct nvmem_device     *nvmem;
        struct list_head        node;
};

struct nvmem_cell {
        struct nvmem_cell_entry *entry;
        const char              *id;
        int                     index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static DEFINE_SPINLOCK(nvmem_layout_lock);
static LIST_HEAD(nvmem_layouts);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
                            void *val, size_t bytes)
{
        if (nvmem->reg_read)
                return nvmem->reg_read(nvmem->priv, offset, val, bytes);

        return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
                             void *val, size_t bytes)
{
        int ret;

        if (nvmem->reg_write) {
                gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
                ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
                gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
                return ret;
        }

        return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
                                      unsigned int offset, void *val,
                                      size_t bytes, int write)
{
        unsigned int end = offset + bytes;
        unsigned int kend, ksize;
        const struct nvmem_keepout *keepout = nvmem->keepout;
        const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
        int rc;

        /*
         * Skip all keepouts before the range being accessed.
         * Keepouts are sorted.
         */
        while ((keepout < keepoutend) && (keepout->end <= offset))
                keepout++;

        while ((offset < end) && (keepout < keepoutend)) {
                /* Access the valid portion before the keepout. */
                if (offset < keepout->start) {
                        kend = min(end, keepout->start);
                        ksize = kend - offset;
                        if (write)
                                rc = __nvmem_reg_write(nvmem, offset, val, ksize);
                        else
                                rc = __nvmem_reg_read(nvmem, offset, val, ksize);

                        if (rc)
                                return rc;

                        offset += ksize;
                        val += ksize;
                }

                /*
                 * Now we're aligned to the start of this keepout zone. Go
                 * through it.
                 */
                kend = min(end, keepout->end);
                ksize = kend - offset;
                if (!write)
                        memset(val, keepout->value, ksize);

                val += ksize;
                offset += ksize;
                keepout++;
        }

        /*
         * If we ran out of keepouts but there's still stuff to do, send it
         * down directly
         */
        if (offset < end) {
                ksize = end - offset;
                if (write)
                        return __nvmem_reg_write(nvmem, offset, val, ksize);
                else
                        return __nvmem_reg_read(nvmem, offset, val, ksize);
        }

        return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
                          void *val, size_t bytes)
{
        if (!nvmem->nkeepout)
                return __nvmem_reg_read(nvmem, offset, val, bytes);

        return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
                           void *val, size_t bytes)
{
        if (!nvmem->nkeepout)
                return __nvmem_reg_write(nvmem, offset, val, bytes);

        return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
        [NVMEM_TYPE_UNKNOWN] = "Unknown",
        [NVMEM_TYPE_EEPROM] = "EEPROM",
        [NVMEM_TYPE_OTP] = "OTP",
        [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
        [NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct nvmem_device *nvmem = to_nvmem_device(dev);

        return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
        &dev_attr_type.attr,
        NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
                                   struct bin_attribute *attr, char *buf,
                                   loff_t pos, size_t count)
{
        struct device *dev;
        struct nvmem_device *nvmem;
        int rc;

        if (attr->private)
                dev = attr->private;
        else
                dev = kobj_to_dev(kobj);
        nvmem = to_nvmem_device(dev);

        /* Stop the user from reading */
        if (pos >= nvmem->size)
                return 0;

        if (!IS_ALIGNED(pos, nvmem->stride))
                return -EINVAL;

        if (count < nvmem->word_size)
                return -EINVAL;

        if (pos + count > nvmem->size)
                count = nvmem->size - pos;

        count = round_down(count, nvmem->word_size);

        if (!nvmem->reg_read)
                return -EPERM;

        rc = nvmem_reg_read(nvmem, pos, buf, count);

        if (rc)
                return rc;

        return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
                                    struct bin_attribute *attr, char *buf,
                                    loff_t pos, size_t count)
{
        struct device *dev;
        struct nvmem_device *nvmem;
        int rc;

        if (attr->private)
                dev = attr->private;
        else
                dev = kobj_to_dev(kobj);
        nvmem = to_nvmem_device(dev);

        /* Stop the user from writing */
        if (pos >= nvmem->size)
                return -EFBIG;

        if (!IS_ALIGNED(pos, nvmem->stride))
                return -EINVAL;

        if (count < nvmem->word_size)
                return -EINVAL;

        if (pos + count > nvmem->size)
                count = nvmem->size - pos;

        count = round_down(count, nvmem->word_size);

        if (!nvmem->reg_write)
                return -EPERM;

        rc = nvmem_reg_write(nvmem, pos, buf, count);

        if (rc)
                return rc;

        return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
        umode_t mode = 0400;

        if (!nvmem->root_only)
                mode |= 0044;

        if (!nvmem->read_only)
                mode |= 0200;

        if (!nvmem->reg_write)
                mode &= ~0200;

        if (!nvmem->reg_read)
                mode &= ~0444;

        return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
                                         struct bin_attribute *attr, int i)
{
        struct device *dev = kobj_to_dev(kobj);
        struct nvmem_device *nvmem = to_nvmem_device(dev);

        attr->size = nvmem->size;

        return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
        .attr   = {
                .name   = "nvmem",
                .mode   = 0644,
        },
        .read   = bin_attr_nvmem_read,
        .write  = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
        &bin_attr_rw_nvmem,
        NULL,
};

static const struct attribute_group nvmem_bin_group = {
        .bin_attrs      = nvmem_bin_attributes,
        .attrs          = nvmem_attrs,
        .is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
        &nvmem_bin_group,
        NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
        .attr   = {
                .name   = "eeprom",
        },
        .read   = bin_attr_nvmem_read,
        .write  = bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
                                    const struct nvmem_config *config)
{
        int rval;

        if (!config->compat)
                return 0;

        if (!config->base_dev)
                return -EINVAL;

        if (config->type == NVMEM_TYPE_FRAM)
                bin_attr_nvmem_eeprom_compat.attr.name = "fram";

        nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
        nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
        nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
        nvmem->eeprom.private = &nvmem->dev;
        nvmem->base_dev = config->base_dev;

        rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
        if (rval) {
                dev_err(&nvmem->dev,
                        "Failed to create eeprom binary file %d\n", rval);
                return rval;
        }

        nvmem->flags |= FLAG_COMPAT;

        return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
                              const struct nvmem_config *config)
{
        if (config->compat)
                device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
                                    const struct nvmem_config *config)
{
        return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
                                      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
        struct nvmem_device *nvmem = to_nvmem_device(dev);

        ida_free(&nvmem_ida, nvmem->id);
        gpiod_put(nvmem->wp_gpio);
        kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
        .release        = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
        .name           = "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
        mutex_lock(&nvmem_mutex);
        list_del(&cell->node);
        mutex_unlock(&nvmem_mutex);
        of_node_put(cell->np);
        kfree_const(cell->name);
        kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
        struct nvmem_cell_entry *cell, *p;

        list_for_each_entry_safe(cell, p, &nvmem->cells, node)
                nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
        mutex_lock(&nvmem_mutex);
        list_add_tail(&cell->node, &cell->nvmem->cells);
        mutex_unlock(&nvmem_mutex);
        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
                                                     const struct nvmem_cell_info *info,
                                                     struct nvmem_cell_entry *cell)
{
        cell->nvmem = nvmem;
        cell->offset = info->offset;
        cell->raw_len = info->raw_len ?: info->bytes;
        cell->bytes = info->bytes;
        cell->name = info->name;
        cell->read_post_process = info->read_post_process;
        cell->priv = info->priv;

        cell->bit_offset = info->bit_offset;
        cell->nbits = info->nbits;
        cell->np = info->np;

        if (cell->nbits)
                cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
                                           BITS_PER_BYTE);

        if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
                dev_err(&nvmem->dev,
                        "cell %s unaligned to nvmem stride %d\n",
                        cell->name ?: "<unknown>", nvmem->stride);
                return -EINVAL;
        }

        return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
                                               const struct nvmem_cell_info *info,
                                               struct nvmem_cell_entry *cell)
{
        int err;

        err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
        if (err)
                return err;

        cell->name = kstrdup_const(info->name, GFP_KERNEL);
        if (!cell->name)
                return -ENOMEM;

        return 0;
}

/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
                       const struct nvmem_cell_info *info)
{
        struct nvmem_cell_entry *cell;
        int rval;

        cell = kzalloc(sizeof(*cell), GFP_KERNEL);
        if (!cell)
                return -ENOMEM;

        rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
        if (rval) {
                kfree(cell);
                return rval;
        }

        nvmem_cell_entry_add(cell);

        return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
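
/*
 * Example (illustrative sketch, not part of the original file): a provider
 * could describe a single MAC-address cell and register it with
 * nvmem_add_one_cell().  The name, offset and size below are made up, and
 * the offset must respect the stride check above.
 *
 *      static int example_add_mac_cell(struct nvmem_device *nvmem)
 *      {
 *              struct nvmem_cell_info info = {
 *                      .name   = "mac-address",
 *                      .offset = 0x40, // hypothetical placement
 *                      .bytes  = 6,    // six MAC address bytes
 *              };
 *
 *              return nvmem_add_one_cell(nvmem, &info);
 *      }
 */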

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
                    const struct nvmem_cell_info *info,
                    int ncells)
{
        int i, rval;

        for (i = 0; i < ncells; i++) {
                rval = nvmem_add_one_cell(nvmem, &info[i]);
                if (rval)
                        return rval;
        }

        return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
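
/*
 * Example (sketch of a consumer that wants to react to new nvmem devices;
 * the example_nvmem_event() name is hypothetical): register a notifier
 * block and look for the NVMEM_ADD event emitted by nvmem_register().
 *
 *      static int example_nvmem_event(struct notifier_block *nb,
 *                                     unsigned long event, void *data)
 *      {
 *              if (event == NVMEM_ADD)
 *                      pr_info("nvmem device added\n");
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block example_nb = {
 *              .notifier_call = example_nvmem_event,
 *      };
 *
 *      // in init code: nvmem_register_notifier(&example_nb);
 */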

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
        const struct nvmem_cell_info *info;
        struct nvmem_cell_table *table;
        struct nvmem_cell_entry *cell;
        int rval = 0, i;

        mutex_lock(&nvmem_cell_mutex);
        list_for_each_entry(table, &nvmem_cell_tables, node) {
                if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
                        for (i = 0; i < table->ncells; i++) {
                                info = &table->cells[i];

                                cell = kzalloc(sizeof(*cell), GFP_KERNEL);
                                if (!cell) {
                                        rval = -ENOMEM;
                                        goto out;
                                }

                                rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
                                if (rval) {
                                        kfree(cell);
                                        goto out;
                                }

                                nvmem_cell_entry_add(cell);
                        }
                }
        }

out:
        mutex_unlock(&nvmem_cell_mutex);
        return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
        struct nvmem_cell_entry *iter, *cell = NULL;

        mutex_lock(&nvmem_mutex);
        list_for_each_entry(iter, &nvmem->cells, node) {
                if (strcmp(cell_id, iter->name) == 0) {
                        cell = iter;
                        break;
                }
        }
        mutex_unlock(&nvmem_mutex);

        return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
        unsigned int cur = 0;
        const struct nvmem_keepout *keepout = nvmem->keepout;
        const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

        while (keepout < keepoutend) {
                /* Ensure keepouts are sorted and don't overlap. */
                if (keepout->start < cur) {
                        dev_err(&nvmem->dev,
                                "Keepout regions aren't sorted or overlap.\n");

                        return -ERANGE;
                }

                if (keepout->end < keepout->start) {
                        dev_err(&nvmem->dev,
                                "Invalid keepout region.\n");

                        return -EINVAL;
                }

                /*
                 * Validate keepouts (and holes between) don't violate
                 * word_size constraints.
                 */
                if ((keepout->end - keepout->start < nvmem->word_size) ||
                    ((keepout->start != cur) &&
                     (keepout->start - cur < nvmem->word_size))) {
                        dev_err(&nvmem->dev,
                                "Keepout regions violate word_size constraints.\n");

                        return -ERANGE;
                }

                /* Validate keepouts don't violate stride (alignment). */
                if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
                    !IS_ALIGNED(keepout->end, nvmem->stride)) {
                        dev_err(&nvmem->dev,
                                "Keepout regions violate stride.\n");

                        return -EINVAL;
                }

                cur = keepout->end;
                keepout++;
        }

        return 0;
}
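
/*
 * Example (sketch): keepout regions must be sorted, non-overlapping,
 * stride-aligned, and leave word_size-sized holes, or the checks above
 * will reject them.  Reads inside a keepout return the filler byte in
 * .value.  A provider could pass something like this (addresses made up):
 *
 *      static const struct nvmem_keepout example_keepouts[] = {
 *              { .start = 0x10, .end = 0x18, .value = 0x00 },
 *              { .start = 0x20, .end = 0x24, .value = 0xff },
 *      };
 *
 *      // config.keepout = example_keepouts;
 *      // config.nkeepout = ARRAY_SIZE(example_keepouts);
 */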

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
        struct nvmem_layout *layout = nvmem->layout;
        struct device *dev = &nvmem->dev;
        struct device_node *child;
        const __be32 *addr;
        int len, ret;

        for_each_child_of_node(dev->of_node, child) {
                struct nvmem_cell_info info = {0};

                addr = of_get_property(child, "reg", &len);
                if (!addr)
                        continue;
                if (len < 2 * sizeof(u32)) {
                        dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
                        of_node_put(child);
                        return -EINVAL;
                }

                info.offset = be32_to_cpup(addr++);
                info.bytes = be32_to_cpup(addr);
                info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

                addr = of_get_property(child, "bits", &len);
                if (addr && len == (2 * sizeof(u32))) {
                        info.bit_offset = be32_to_cpup(addr++);
                        info.nbits = be32_to_cpup(addr);
                }

                info.np = of_node_get(child);

                if (layout && layout->fixup_cell_info)
                        layout->fixup_cell_info(nvmem, layout, &info);

                ret = nvmem_add_one_cell(nvmem, &info);
                kfree(info.name);
                if (ret) {
                        of_node_put(child);
                        return ret;
                }
        }

        return 0;
}

int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
{
        layout->owner = owner;

        spin_lock(&nvmem_layout_lock);
        list_add(&layout->node, &nvmem_layouts);
        spin_unlock(&nvmem_layout_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(__nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
        spin_lock(&nvmem_layout_lock);
        list_del(&layout->node);
        spin_unlock(&nvmem_layout_lock);
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
{
        struct device_node *layout_np, *np = nvmem->dev.of_node;
        struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);

        layout_np = of_get_child_by_name(np, "nvmem-layout");
        if (!layout_np)
                return NULL;

        /*
         * In case the nvmem device was built-in while the layout was built as a
         * module, we shall manually request the layout driver loading otherwise
         * we'll never have any match.
         */
        of_request_module(layout_np);

        spin_lock(&nvmem_layout_lock);

        list_for_each_entry(l, &nvmem_layouts, node) {
                if (of_match_node(l->of_match_table, layout_np)) {
                        if (try_module_get(l->owner))
                                layout = l;

                        break;
                }
        }

        spin_unlock(&nvmem_layout_lock);
        of_node_put(layout_np);

        return layout;
}

static void nvmem_layout_put(struct nvmem_layout *layout)
{
        if (layout)
                module_put(layout->owner);
}

static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem)
{
        struct nvmem_layout *layout = nvmem->layout;
        int ret;

        if (layout && layout->add_cells) {
                ret = layout->add_cells(&nvmem->dev, nvmem, layout);
                if (ret)
                        return ret;
        }

        return 0;
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_layout_get_container() - Get OF node to layout container.
 *
 * @nvmem: nvmem device.
 *
 * Return: a node pointer with refcount incremented or NULL if no
 * container exists. Use of_node_put() on it when done.
 */
struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
{
        return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
}
EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
#endif

const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
                                        struct nvmem_layout *layout)
{
        struct device_node __maybe_unused *layout_np;
        const struct of_device_id *match;

        layout_np = of_nvmem_layout_get_container(nvmem);
        match = of_match_node(layout->of_match_table, layout_np);

        return match ? match->data : NULL;
}
EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);
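
/*
 * Example (sketch of a layout driver; illustrative only): a layout matches
 * the "nvmem-layout" child node through its of_match_table and creates
 * cells from its add_cells() hook.  The compatible string and the cell
 * below are hypothetical; registration goes through
 * __nvmem_layout_register(), typically via a wrapper macro that passes
 * THIS_MODULE as the owner.
 *
 *      static int example_add_cells(struct device *dev,
 *                                   struct nvmem_device *nvmem,
 *                                   struct nvmem_layout *layout)
 *      {
 *              struct nvmem_cell_info info = {
 *                      .name   = "example-cell",
 *                      .offset = 0,
 *                      .bytes  = 4,
 *              };
 *
 *              return nvmem_add_one_cell(nvmem, &info);
 *      }
 *
 *      static const struct of_device_id example_of_match[] = {
 *              { .compatible = "example,layout" },
 *              {},
 *      };
 *
 *      static struct nvmem_layout example_layout = {
 *              .of_match_table = example_of_match,
 *              .add_cells      = example_add_cells,
 *      };
 */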

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
        struct nvmem_device *nvmem;
        int rval;

        if (!config->dev)
                return ERR_PTR(-EINVAL);

        if (!config->reg_read && !config->reg_write)
                return ERR_PTR(-EINVAL);

        nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
        if (!nvmem)
                return ERR_PTR(-ENOMEM);

        rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
        if (rval < 0) {
                kfree(nvmem);
                return ERR_PTR(rval);
        }

        nvmem->id = rval;

        nvmem->dev.type = &nvmem_provider_type;
        nvmem->dev.bus = &nvmem_bus_type;
        nvmem->dev.parent = config->dev;

        device_initialize(&nvmem->dev);

        if (!config->ignore_wp)
                nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
                                                    GPIOD_OUT_HIGH);
        if (IS_ERR(nvmem->wp_gpio)) {
                rval = PTR_ERR(nvmem->wp_gpio);
                nvmem->wp_gpio = NULL;
                goto err_put_device;
        }

        kref_init(&nvmem->refcnt);
        INIT_LIST_HEAD(&nvmem->cells);

        nvmem->owner = config->owner;
        if (!nvmem->owner && config->dev->driver)
                nvmem->owner = config->dev->driver->owner;
        nvmem->stride = config->stride ?: 1;
        nvmem->word_size = config->word_size ?: 1;
        nvmem->size = config->size;
        nvmem->root_only = config->root_only;
        nvmem->priv = config->priv;
        nvmem->type = config->type;
        nvmem->reg_read = config->reg_read;
        nvmem->reg_write = config->reg_write;
        nvmem->keepout = config->keepout;
        nvmem->nkeepout = config->nkeepout;
        if (config->of_node)
                nvmem->dev.of_node = config->of_node;
        else if (!config->no_of_node)
                nvmem->dev.of_node = config->dev->of_node;

        switch (config->id) {
        case NVMEM_DEVID_NONE:
                rval = dev_set_name(&nvmem->dev, "%s", config->name);
                break;
        case NVMEM_DEVID_AUTO:
                rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
                break;
        default:
                rval = dev_set_name(&nvmem->dev, "%s%d",
                             config->name ? : "nvmem",
                             config->name ? config->id : nvmem->id);
                break;
        }

        if (rval)
                goto err_put_device;

        nvmem->read_only = device_property_present(config->dev, "read-only") ||
                           config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
        nvmem->dev.groups = nvmem_dev_groups;
#endif

        if (nvmem->nkeepout) {
                rval = nvmem_validate_keepouts(nvmem);
                if (rval)
                        goto err_put_device;
        }

        if (config->compat) {
                rval = nvmem_sysfs_setup_compat(nvmem, config);
                if (rval)
                        goto err_put_device;
        }

        /*
         * If the driver supplied a layout by config->layout, the module
         * pointer will be NULL and nvmem_layout_put() will be a noop.
         */
        nvmem->layout = config->layout ?: nvmem_layout_get(nvmem);
        if (IS_ERR(nvmem->layout)) {
                rval = PTR_ERR(nvmem->layout);
                nvmem->layout = NULL;

                if (rval == -EPROBE_DEFER)
                        goto err_teardown_compat;
        }

        if (config->cells) {
                rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
                if (rval)
                        goto err_remove_cells;
        }

        rval = nvmem_add_cells_from_table(nvmem);
        if (rval)
                goto err_remove_cells;

        rval = nvmem_add_cells_from_of(nvmem);
        if (rval)
                goto err_remove_cells;

        dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

        rval = device_add(&nvmem->dev);
        if (rval)
                goto err_remove_cells;

        rval = nvmem_add_cells_from_layout(nvmem);
        if (rval)
                goto err_remove_cells;

        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

        return nvmem;

err_remove_cells:
        nvmem_device_remove_all_cells(nvmem);
        nvmem_layout_put(nvmem->layout);
err_teardown_compat:
        if (config->compat)
                nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
        put_device(&nvmem->dev);

        return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
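
/*
 * Example (sketch of a minimal read-only provider, with a hypothetical
 * reg_read callback backed by a driver-private buffer): fill in an
 * nvmem_config and hand it to nvmem_register().
 *
 *      static int example_reg_read(void *priv, unsigned int offset,
 *                                  void *val, size_t bytes)
 *      {
 *              u8 *mem = priv;
 *
 *              memcpy(val, mem + offset, bytes);
 *              return 0;
 *      }
 *
 *      static struct nvmem_device *example_register(struct device *dev,
 *                                                   u8 *mem, size_t size)
 *      {
 *              struct nvmem_config config = {
 *                      .dev            = dev,
 *                      .name           = "example-nvmem",
 *                      .id             = NVMEM_DEVID_AUTO,
 *                      .type           = NVMEM_TYPE_EEPROM,
 *                      .read_only      = true,
 *                      .size           = size,
 *                      .word_size      = 1,
 *                      .stride         = 1,
 *                      .priv           = mem,
 *                      .reg_read       = example_reg_read,
 *              };
 *
 *              return nvmem_register(&config);
 *      }
 */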

static void nvmem_device_release(struct kref *kref)
{
        struct nvmem_device *nvmem;

        nvmem = container_of(kref, struct nvmem_device, refcnt);

        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

        if (nvmem->flags & FLAG_COMPAT)
                device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

        nvmem_device_remove_all_cells(nvmem);
        nvmem_layout_put(nvmem->layout);
        device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
        if (nvmem)
                kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
        nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
                                         const struct nvmem_config *config)
{
        struct nvmem_device *nvmem;
        int ret;

        nvmem = nvmem_register(config);
        if (IS_ERR(nvmem))
                return nvmem;

        ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
        if (ret)
                return ERR_PTR(ret);

        return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
                        int (*match)(struct device *dev, const void *data))
{
        struct nvmem_device *nvmem = NULL;
        struct device *dev;

        mutex_lock(&nvmem_mutex);
        dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
        if (dev)
                nvmem = to_nvmem_device(dev);
        mutex_unlock(&nvmem_mutex);
        if (!nvmem)
                return ERR_PTR(-EPROBE_DEFER);

        if (!try_module_get(nvmem->owner)) {
                dev_err(&nvmem->dev,
                        "could not increase module refcount for cell %s\n",
                        nvmem_dev_name(nvmem));

                put_device(&nvmem->dev);
                return ERR_PTR(-EINVAL);
        }

        kref_get(&nvmem->refcnt);

        return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
        put_device(&nvmem->dev);
        module_put(nvmem->owner);
        kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
        struct device_node *nvmem_np;
        struct nvmem_device *nvmem;
        int index = 0;

        if (id)
                index = of_property_match_string(np, "nvmem-names", id);

        nvmem_np = of_parse_phandle(np, "nvmem", index);
        if (!nvmem_np)
                return ERR_PTR(-ENOENT);

        nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
        of_node_put(nvmem_np);
        return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
        if (dev->of_node) { /* try dt first */
                struct nvmem_device *nvmem;

                nvmem = of_nvmem_device_get(dev->of_node, dev_name);

                if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
                        return nvmem;
        }

        return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
                        int (*match)(struct device *dev, const void *data))
{
        return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
        struct nvmem_device **nvmem = res;

        if (WARN_ON(!nvmem || !*nvmem))
                return 0;

        return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
        nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device obtained with devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
        int ret;

        ret = devres_release(dev, devm_nvmem_device_release,
                             devm_nvmem_device_match, nvmem);

        WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
        __nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
        struct nvmem_device **ptr, *nvmem;

        ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        nvmem = nvmem_device_get(dev, id);
        if (!IS_ERR(nvmem)) {
                *ptr = nvmem;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
                                            const char *id, int index)
{
        struct nvmem_cell *cell;
        const char *name = NULL;

        cell = kzalloc(sizeof(*cell), GFP_KERNEL);
        if (!cell)
                return ERR_PTR(-ENOMEM);

        if (id) {
                name = kstrdup_const(id, GFP_KERNEL);
                if (!name) {
                        kfree(cell);
                        return ERR_PTR(-ENOMEM);
                }
        }

        cell->id = name;
        cell->entry = entry;
        cell->index = index;

        return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
        struct nvmem_cell_entry *cell_entry;
        struct nvmem_cell *cell = ERR_PTR(-ENOENT);
        struct nvmem_cell_lookup *lookup;
        struct nvmem_device *nvmem;
        const char *dev_id;

        if (!dev)
                return ERR_PTR(-EINVAL);

        dev_id = dev_name(dev);

        mutex_lock(&nvmem_lookup_mutex);

        list_for_each_entry(lookup, &nvmem_lookup_list, node) {
                if ((strcmp(lookup->dev_id, dev_id) == 0) &&
                    (strcmp(lookup->con_id, con_id) == 0)) {
                        /* This is the right entry. */
                        nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
                                                   device_match_name);
                        if (IS_ERR(nvmem)) {
                                /* Provider may not be registered yet. */
                                cell = ERR_CAST(nvmem);
                                break;
                        }

                        cell_entry = nvmem_find_cell_entry_by_name(nvmem,
                                                                   lookup->cell_name);
                        if (!cell_entry) {
                                __nvmem_device_put(nvmem);
                                cell = ERR_PTR(-ENOENT);
                        } else {
                                cell = nvmem_create_cell(cell_entry, con_id, 0);
                                if (IS_ERR(cell))
                                        __nvmem_device_put(nvmem);
                        }
                        break;
                }
        }

        mutex_unlock(&nvmem_lookup_mutex);
        return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
        struct nvmem_cell_entry *iter, *cell = NULL;

        mutex_lock(&nvmem_mutex);
        list_for_each_entry(iter, &nvmem->cells, node) {
                if (np == iter->np) {
                        cell = iter;
                        break;
                }
        }
        mutex_unlock(&nvmem_mutex);

        return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
        struct device_node *cell_np, *nvmem_np;
        struct nvmem_device *nvmem;
        struct nvmem_cell_entry *cell_entry;
        struct nvmem_cell *cell;
        struct of_phandle_args cell_spec;
        int index = 0;
        int cell_index = 0;
        int ret;

        /* if cell name exists, find index to the name */
        if (id)
                index = of_property_match_string(np, "nvmem-cell-names", id);

        ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
                                                  "#nvmem-cell-cells",
                                                  index, &cell_spec);
        if (ret)
                return ERR_PTR(-ENOENT);

        if (cell_spec.args_count > 1)
                return ERR_PTR(-EINVAL);

        cell_np = cell_spec.np;
        if (cell_spec.args_count)
                cell_index = cell_spec.args[0];

        nvmem_np = of_get_parent(cell_np);
        if (!nvmem_np) {
                of_node_put(cell_np);
                return ERR_PTR(-EINVAL);
        }

        /* nvmem layouts produce cells within the nvmem-layout container */
        if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
                nvmem_np = of_get_next_parent(nvmem_np);
                if (!nvmem_np) {
                        of_node_put(cell_np);
                        return ERR_PTR(-EINVAL);
                }
        }

        nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
        of_node_put(nvmem_np);
        if (IS_ERR(nvmem)) {
                of_node_put(cell_np);
                return ERR_CAST(nvmem);
        }

        cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
        of_node_put(cell_np);
        if (!cell_entry) {
                __nvmem_device_put(nvmem);
                return ERR_PTR(-ENOENT);
        }

        cell = nvmem_create_cell(cell_entry, id, cell_index);
        if (IS_ERR(cell))
                __nvmem_device_put(nvmem);

        return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
        struct nvmem_cell *cell;

        if (dev->of_node) { /* try dt first */
                cell = of_nvmem_cell_get(dev->of_node, id);
                if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
                        return cell;
        }

        /* NULL cell id only allowed for device tree; invalid otherwise */
        if (!id)
                return ERR_PTR(-EINVAL);

        return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
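
/*
 * Example (consumer-side sketch): get a cell by name, read it, and release
 * everything.  The "calibration" cell name is hypothetical.
 *
 *      static int example_read_cal(struct device *dev, u32 *out)
 *      {
 *              struct nvmem_cell *cell;
 *              size_t len;
 *              void *buf;
 *
 *              cell = nvmem_cell_get(dev, "calibration");
 *              if (IS_ERR(cell))
 *                      return PTR_ERR(cell);
 *
 *              buf = nvmem_cell_read(cell, &len);
 *              nvmem_cell_put(cell);
 *              if (IS_ERR(buf))
 *                      return PTR_ERR(buf);
 *
 *              if (len == sizeof(*out))
 *                      memcpy(out, buf, sizeof(*out));
 *
 *              kfree(buf);
 *              return len == sizeof(*out) ? 0 : -EINVAL;
 *      }
 */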

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
        nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
        struct nvmem_cell **ptr, *cell;

        ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        cell = nvmem_cell_get(dev, id);
        if (!IS_ERR(cell)) {
                *ptr = cell;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
        struct nvmem_cell **c = res;

        if (WARN_ON(!c || !*c))
                return 0;

        return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
        int ret;

        ret = devres_release(dev, devm_nvmem_cell_release,
                                devm_nvmem_cell_match, cell);

        WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
        struct nvmem_device *nvmem = cell->entry->nvmem;

        if (cell->id)
                kfree_const(cell->id);

        kfree(cell);
        __nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

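/*
 * Worked example (added illustration, not from the original sources): for a
 * cell with bit_offset = 2 and nbits = 10, cell->bytes is
 * DIV_ROUND_UP(10 + 2, 8) = 2.  A raw read of 0b10110111 0b00000110 is
 * shifted right by two bits across the byte boundary, giving 0b10101101
 * 0b00000001; the final GENMASK() step below has masked off everything
 * above bit 9.
 */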
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
        u8 *p, *b;
        int i, extra, bit_offset = cell->bit_offset;

        p = b = buf;
        if (bit_offset) {
                /* First shift */
                *b++ >>= bit_offset;

                /* setup rest of the bytes if any */
                for (i = 1; i < cell->bytes; i++) {
                        /* Get bits from next byte and shift them towards msb */
                        *p |= *b << (BITS_PER_BYTE - bit_offset);

                        p = b;
                        *b++ >>= bit_offset;
                }
        } else {
                /* point to the msb */
                p += cell->bytes - 1;
        }

        /* result fits in less bytes */
        extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
        while (--extra >= 0)
                *p-- = 0;

        /* clear msb bits if any leftover in the last byte */
        if (cell->nbits % BITS_PER_BYTE)
                *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
                             struct nvmem_cell_entry *cell,
                             void *buf, size_t *len, const char *id, int index)
{
        int rc;

        rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);

        if (rc)
                return rc;

        /* shift bits in-place */
        if (cell->bit_offset || cell->nbits)
                nvmem_shift_read_buffer_in_place(cell, buf);

        if (cell->read_post_process) {
                rc = cell->read_post_process(cell->priv, id, index,
                                             cell->offset, buf, cell->raw_len);
                if (rc)
                        return rc;
        }

        if (len)
                *len = cell->bytes;

        return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *       can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
        struct nvmem_cell_entry *entry = cell->entry;
        struct nvmem_device *nvmem = entry->nvmem;
        u8 *buf;
        int rc;

        if (!nvmem)
                return ERR_PTR(-EINVAL);

        buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
        if (rc) {
                kfree(buf);
                return ERR_PTR(rc);
        }

        return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if the write does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}
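
/*
 * Worked example of the shifting above (values are illustrative): with
 * bit_offset = 2, nbits = 12 and hence cell->bytes = 2, a caller's value
 * 0xabb arrives packed lsb-first as { 0xbb, 0x0a } and is rebuilt as:
 *
 *	byte 0: (0xbb << 2) | (nvmem byte 0 & GENMASK(1, 0))
 *	byte 1: (0x0a << 2) | (0xbb >> 6)
 *
 * Since (nbits + bit_offset) % 8 == 6, bits 7..6 of the last byte already
 * in nvmem are then preserved via GENMASK(7, 6), so only the cell's own
 * bits are overwritten.
 */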

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/*
	 * Any cells which have a read_post_process hook are read-only because
	 * we cannot reverse the operation and it might affect other cells,
	 * too.
	 */
	if (cell->read_post_process)
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: number of bytes written or a negative errno on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
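
/*
 * Example usage (an illustrative sketch): updating a cell in place. The
 * cell name "calibration" is hypothetical; for byte-aligned cells the
 * buffer length must match the cell size exactly.
 *
 *	struct nvmem_cell *cell;
 *	u8 data[4] = { 0x00, 0x11, 0x22, 0x33 };
 *	int ret;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	ret = nvmem_cell_write(cell, data, sizeof(data));
 *	nvmem_cell_put(cell);
 *	return ret < 0 ? ret : 0;
 */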

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
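
/*
 * Example usage (an illustrative sketch): the fixed-width helpers wrap
 * get/read/put in a single call. The cell name "soc-revision" is
 * hypothetical, and the cell must be exactly as wide as the requested
 * type or -EINVAL is returned.
 *
 *	u32 rev;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "soc-revision", &rev);
 *	if (ret)
 *		return ret;
 */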

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32 bits of data as a little-endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64 bits of data as a little-endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (u64)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
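
/*
 * Example usage (an illustrative sketch): reading a value whose width is
 * only known from the cell definition, up to 64 bits. The cell name
 * "serial-number" is hypothetical; cells wider than the output type
 * fail with -ERANGE.
 *
 *	u64 serial;
 *	int ret;
 *
 *	ret = nvmem_cell_read_variable_le_u64(dev, "serial-number", &serial);
 *	if (ret)
 *		return ret;
 */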

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and a negative error code on
 * error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: number of bytes written or a negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
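
/*
 * Example usage (an illustrative sketch): ad-hoc cell access on a nvmem
 * device without a registered cell. The provider name "qfprom" and the
 * contents of the info struct are hypothetical; offset and size must
 * respect the device's stride and word size.
 *
 *	struct nvmem_device *nvmem;
 *	struct nvmem_cell_info info = {
 *		.name	= "scratch",
 *		.offset	= 0x10,
 *		.bytes	= 4,
 *	};
 *	u8 val[4];
 *	ssize_t ret;
 *
 *	nvmem = nvmem_device_get(dev, "qfprom");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_cell_read(nvmem, &info, val);
 *	if (ret == sizeof(val))
 *		ret = nvmem_device_cell_write(nvmem, &info, val);
 *	nvmem_device_put(nvmem);
 */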

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and a negative error code on
 * error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: number of bytes written or a negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
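
/*
 * Example usage (an illustrative sketch): raw access relative to the start
 * of the device. The offset 0x100 and the 16-byte size are hypothetical
 * and must satisfy the provider's stride and word-size constraints.
 *
 *	u8 blob[16];
 *	int ret;
 *
 *	ret = nvmem_device_read(nvmem, 0x100, sizeof(blob), blob);
 *	if (ret != sizeof(blob))
 *		return ret < 0 ? ret : -EIO;
 *
 *	... modify blob ...
 *	ret = nvmem_device_write(nvmem, 0x100, sizeof(blob), blob);
 */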

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
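
/*
 * Example usage (an illustrative sketch): board code describing one cell
 * on a provider registered as "at24-0". All names, offsets and sizes are
 * hypothetical; nvmem_del_cell_table() undoes the registration.
 *
 *	static const struct nvmem_cell_info board_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x0,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table board_cell_table = {
 *		.nvmem_name	= "at24-0",
 *		.cells		= board_cells,
 *		.ncells		= ARRAY_SIZE(board_cells),
 *	};
 *
 *	nvmem_add_cell_table(&board_cell_table);
 */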

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 *                            entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
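
/*
 * Example usage (an illustrative sketch): connecting the "mac-address"
 * cell on provider "at24-0" to the consumer device "f00b4r.1", so that
 * nvmem_cell_get(dev, "mac-address") resolves on that consumer. All
 * names are hypothetical.
 *
 *	static struct nvmem_cell_lookup board_lookups[] = {
 *		{
 *			.nvmem_name	= "at24-0",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "f00b4r.1",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(board_lookups, ARRAY_SIZE(board_lookups));
 */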

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");