1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * nvmem framework core.
4 *
5 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
6 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
7 */
8
9 #include <linux/device.h>
10 #include <linux/export.h>
11 #include <linux/fs.h>
12 #include <linux/idr.h>
13 #include <linux/init.h>
14 #include <linux/kref.h>
15 #include <linux/module.h>
16 #include <linux/nvmem-consumer.h>
17 #include <linux/nvmem-provider.h>
18 #include <linux/gpio/consumer.h>
19 #include <linux/of.h>
20 #include <linux/slab.h>
21
22 struct nvmem_device {
23 struct module *owner;
24 struct device dev;
25 int stride;
26 int word_size;
27 int id;
28 struct kref refcnt;
29 size_t size;
30 bool read_only;
31 bool root_only;
32 int flags;
33 enum nvmem_type type;
34 struct bin_attribute eeprom;
35 struct device *base_dev;
36 struct list_head cells;
37 const struct nvmem_keepout *keepout;
38 unsigned int nkeepout;
39 nvmem_reg_read_t reg_read;
40 nvmem_reg_write_t reg_write;
41 nvmem_cell_post_process_t cell_post_process;
42 struct gpio_desc *wp_gpio;
43 void *priv;
44 };
45
46 #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
47
48 #define FLAG_COMPAT BIT(0)
49 struct nvmem_cell_entry {
50 const char *name;
51 int offset;
52 int bytes;
53 int bit_offset;
54 int nbits;
55 struct device_node *np;
56 struct nvmem_device *nvmem;
57 struct list_head node;
58 };
59
60 struct nvmem_cell {
61 struct nvmem_cell_entry *entry;
62 const char *id;
63 int index;
64 };
65
66 static DEFINE_MUTEX(nvmem_mutex);
67 static DEFINE_IDA(nvmem_ida);
68
69 static DEFINE_MUTEX(nvmem_cell_mutex);
70 static LIST_HEAD(nvmem_cell_tables);
71
72 static DEFINE_MUTEX(nvmem_lookup_mutex);
73 static LIST_HEAD(nvmem_lookup_list);
74
75 static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
76
77 static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
78 void *val, size_t bytes)
79 {
80 if (nvmem->reg_read)
81 return nvmem->reg_read(nvmem->priv, offset, val, bytes);
82
83 return -EINVAL;
84 }
85
86 static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
87 void *val, size_t bytes)
88 {
89 int ret;
90
91 if (nvmem->reg_write) {
92 gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
93 ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
94 gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
95 return ret;
96 }
97
98 return -EINVAL;
99 }
100
101 static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
102 unsigned int offset, void *val,
103 size_t bytes, int write)
104 {
105
106 unsigned int end = offset + bytes;
107 unsigned int kend, ksize;
108 const struct nvmem_keepout *keepout = nvmem->keepout;
109 const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
110 int rc;
111
112 /*
113 * Skip all keepouts before the range being accessed.
114 * Keepouts are sorted.
115 */
116 while ((keepout < keepoutend) && (keepout->end <= offset))
117 keepout++;
118
119 while ((offset < end) && (keepout < keepoutend)) {
120 /* Access the valid portion before the keepout. */
121 if (offset < keepout->start) {
122 kend = min(end, keepout->start);
123 ksize = kend - offset;
124 if (write)
125 rc = __nvmem_reg_write(nvmem, offset, val, ksize);
126 else
127 rc = __nvmem_reg_read(nvmem, offset, val, ksize);
128
129 if (rc)
130 return rc;
131
132 offset += ksize;
133 val += ksize;
134 }
135
136 /*
137 * Now we're aligned to the start of this keepout zone. Go
138 * through it.
139 */
140 kend = min(end, keepout->end);
141 ksize = kend - offset;
142 if (!write)
143 memset(val, keepout->value, ksize);
144
145 val += ksize;
146 offset += ksize;
147 keepout++;
148 }
149
150 /*
151 * If we ran out of keepouts but there's still stuff to do, send it
152 * down directly
153 */
154 if (offset < end) {
155 ksize = end - offset;
156 if (write)
157 return __nvmem_reg_write(nvmem, offset, val, ksize);
158 else
159 return __nvmem_reg_read(nvmem, offset, val, ksize);
160 }
161
162 return 0;
163 }
164
165 static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
166 void *val, size_t bytes)
167 {
168 if (!nvmem->nkeepout)
169 return __nvmem_reg_read(nvmem, offset, val, bytes);
170
171 return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
172 }
173
174 static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
175 void *val, size_t bytes)
176 {
177 if (!nvmem->nkeepout)
178 return __nvmem_reg_write(nvmem, offset, val, bytes);
179
180 return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
181 }
182
183 #ifdef CONFIG_NVMEM_SYSFS
184 static const char * const nvmem_type_str[] = {
185 [NVMEM_TYPE_UNKNOWN] = "Unknown",
186 [NVMEM_TYPE_EEPROM] = "EEPROM",
187 [NVMEM_TYPE_OTP] = "OTP",
188 [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
189 [NVMEM_TYPE_FRAM] = "FRAM",
190 };
191
192 #ifdef CONFIG_DEBUG_LOCK_ALLOC
193 static struct lock_class_key eeprom_lock_key;
194 #endif
195
196 static ssize_t type_show(struct device *dev,
197 struct device_attribute *attr, char *buf)
198 {
199 struct nvmem_device *nvmem = to_nvmem_device(dev);
200
201 return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
202 }
203
204 static DEVICE_ATTR_RO(type);
205
206 static struct attribute *nvmem_attrs[] = {
207 &dev_attr_type.attr,
208 NULL,
209 };
210
211 static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
212 struct bin_attribute *attr, char *buf,
213 loff_t pos, size_t count)
214 {
215 struct device *dev;
216 struct nvmem_device *nvmem;
217 int rc;
218
219 if (attr->private)
220 dev = attr->private;
221 else
222 dev = kobj_to_dev(kobj);
223 nvmem = to_nvmem_device(dev);
224
225 /* Stop the user from reading */
226 if (pos >= nvmem->size)
227 return 0;
228
229 if (!IS_ALIGNED(pos, nvmem->stride))
230 return -EINVAL;
231
232 if (count < nvmem->word_size)
233 return -EINVAL;
234
235 if (pos + count > nvmem->size)
236 count = nvmem->size - pos;
237
238 count = round_down(count, nvmem->word_size);
239
240 if (!nvmem->reg_read)
241 return -EPERM;
242
243 rc = nvmem_reg_read(nvmem, pos, buf, count);
244
245 if (rc)
246 return rc;
247
248 return count;
249 }
250
251 static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
252 struct bin_attribute *attr, char *buf,
253 loff_t pos, size_t count)
254 {
255 struct device *dev;
256 struct nvmem_device *nvmem;
257 int rc;
258
259 if (attr->private)
260 dev = attr->private;
261 else
262 dev = kobj_to_dev(kobj);
263 nvmem = to_nvmem_device(dev);
264
265 /* Stop the user from writing */
266 if (pos >= nvmem->size)
267 return -EFBIG;
268
269 if (!IS_ALIGNED(pos, nvmem->stride))
270 return -EINVAL;
271
272 if (count < nvmem->word_size)
273 return -EINVAL;
274
275 if (pos + count > nvmem->size)
276 count = nvmem->size - pos;
277
278 count = round_down(count, nvmem->word_size);
279
280 if (!nvmem->reg_write)
281 return -EPERM;
282
283 rc = nvmem_reg_write(nvmem, pos, buf, count);
284
285 if (rc)
286 return rc;
287
288 return count;
289 }
290
291 static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
292 {
293 umode_t mode = 0400;
294
295 if (!nvmem->root_only)
296 mode |= 0044;
297
298 if (!nvmem->read_only)
299 mode |= 0200;
300
301 if (!nvmem->reg_write)
302 mode &= ~0200;
303
304 if (!nvmem->reg_read)
305 mode &= ~0444;
306
307 return mode;
308 }
309
310 static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
311 struct bin_attribute *attr, int i)
312 {
313 struct device *dev = kobj_to_dev(kobj);
314 struct nvmem_device *nvmem = to_nvmem_device(dev);
315
316 attr->size = nvmem->size;
317
318 return nvmem_bin_attr_get_umode(nvmem);
319 }
320
321 /* default read/write permissions */
322 static struct bin_attribute bin_attr_rw_nvmem = {
323 .attr = {
324 .name = "nvmem",
325 .mode = 0644,
326 },
327 .read = bin_attr_nvmem_read,
328 .write = bin_attr_nvmem_write,
329 };
330
331 static struct bin_attribute *nvmem_bin_attributes[] = {
332 &bin_attr_rw_nvmem,
333 NULL,
334 };
335
336 static const struct attribute_group nvmem_bin_group = {
337 .bin_attrs = nvmem_bin_attributes,
338 .attrs = nvmem_attrs,
339 .is_bin_visible = nvmem_bin_attr_is_visible,
340 };
341
342 static const struct attribute_group *nvmem_dev_groups[] = {
343 &nvmem_bin_group,
344 NULL,
345 };
346
347 static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
348 .attr = {
349 .name = "eeprom",
350 },
351 .read = bin_attr_nvmem_read,
352 .write = bin_attr_nvmem_write,
353 };
354
355 /*
356 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
357 * driver's sysfs directory, to stay backwards compatible with the older
358 * drivers/misc/eeprom drivers.
359 */
360 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
361 const struct nvmem_config *config)
362 {
363 int rval;
364
365 if (!config->compat)
366 return 0;
367
368 if (!config->base_dev)
369 return -EINVAL;
370
371 if (config->type == NVMEM_TYPE_FRAM)
372 bin_attr_nvmem_eeprom_compat.attr.name = "fram";
373
374 nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
375 nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
376 nvmem->eeprom.size = nvmem->size;
377 #ifdef CONFIG_DEBUG_LOCK_ALLOC
378 nvmem->eeprom.attr.key = &eeprom_lock_key;
379 #endif
380 nvmem->eeprom.private = &nvmem->dev;
381 nvmem->base_dev = config->base_dev;
382
383 rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
384 if (rval) {
385 dev_err(&nvmem->dev,
386 "Failed to create eeprom binary file %d\n", rval);
387 return rval;
388 }
389
390 nvmem->flags |= FLAG_COMPAT;
391
392 return 0;
393 }
394
395 static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
396 const struct nvmem_config *config)
397 {
398 if (config->compat)
399 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
400 }
401
402 #else /* CONFIG_NVMEM_SYSFS */
403
404 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
405 const struct nvmem_config *config)
406 {
407 return -ENOSYS;
408 }
409 static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
410 const struct nvmem_config *config)
411 {
412 }
413
414 #endif /* CONFIG_NVMEM_SYSFS */
415
416 static void nvmem_release(struct device *dev)
417 {
418 struct nvmem_device *nvmem = to_nvmem_device(dev);
419
420 ida_free(&nvmem_ida, nvmem->id);
421 gpiod_put(nvmem->wp_gpio);
422 kfree(nvmem);
423 }
424
425 static const struct device_type nvmem_provider_type = {
426 .release = nvmem_release,
427 };
428
429 static struct bus_type nvmem_bus_type = {
430 .name = "nvmem",
431 };
432
433 static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
434 {
435 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
436 mutex_lock(&nvmem_mutex);
437 list_del(&cell->node);
438 mutex_unlock(&nvmem_mutex);
439 of_node_put(cell->np);
440 kfree_const(cell->name);
441 kfree(cell);
442 }
443
444 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
445 {
446 struct nvmem_cell_entry *cell, *p;
447
448 list_for_each_entry_safe(cell, p, &nvmem->cells, node)
449 nvmem_cell_entry_drop(cell);
450 }
451
452 static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
453 {
454 mutex_lock(&nvmem_mutex);
455 list_add_tail(&cell->node, &cell->nvmem->cells);
456 mutex_unlock(&nvmem_mutex);
457 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
458 }
459
460 static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
461 const struct nvmem_cell_info *info,
462 struct nvmem_cell_entry *cell)
463 {
464 cell->nvmem = nvmem;
465 cell->offset = info->offset;
466 cell->bytes = info->bytes;
467 cell->name = info->name;
468
469 cell->bit_offset = info->bit_offset;
470 cell->nbits = info->nbits;
471 cell->np = info->np;
472
473 if (cell->nbits)
474 cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
475 BITS_PER_BYTE);
476
477 if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
478 dev_err(&nvmem->dev,
479 "cell %s unaligned to nvmem stride %d\n",
480 cell->name ?: "<unknown>", nvmem->stride);
481 return -EINVAL;
482 }
483
484 return 0;
485 }
486
487 static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
488 const struct nvmem_cell_info *info,
489 struct nvmem_cell_entry *cell)
490 {
491 int err;
492
493 err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
494 if (err)
495 return err;
496
497 cell->name = kstrdup_const(info->name, GFP_KERNEL);
498 if (!cell->name)
499 return -ENOMEM;
500
501 return 0;
502 }
503
504 /**
505 * nvmem_add_one_cell() - Add one cell information to an nvmem device
506 *
507 * @nvmem: nvmem device to add cells to.
508 * @info: nvmem cell info to add to the device
509 *
510 * Return: 0 or negative error code on failure.
511 */
512 int nvmem_add_one_cell(struct nvmem_device *nvmem,
513 const struct nvmem_cell_info *info)
514 {
515 struct nvmem_cell_entry *cell;
516 int rval;
517
518 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
519 if (!cell)
520 return -ENOMEM;
521
522 rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
523 if (rval) {
524 kfree(cell);
525 return rval;
526 }
527
528 nvmem_cell_entry_add(cell);
529
530 return 0;
531 }
532 EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
533
534 /**
535 * nvmem_add_cells() - Add cell information to an nvmem device
536 *
537 * @nvmem: nvmem device to add cells to.
538 * @info: nvmem cell info to add to the device
539 * @ncells: number of cells in info
540 *
541 * Return: 0 or negative error code on failure.
542 */
543 static int nvmem_add_cells(struct nvmem_device *nvmem,
544 const struct nvmem_cell_info *info,
545 int ncells)
546 {
547 int i, rval;
548
549 for (i = 0; i < ncells; i++) {
550 rval = nvmem_add_one_cell(nvmem, &info[i]);
551 if (rval)
552 return rval;
553 }
554
555 return 0;
556 }
557
558 /**
559 * nvmem_register_notifier() - Register a notifier block for nvmem events.
560 *
561 * @nb: notifier block to be called on nvmem events.
562 *
563 * Return: 0 on success, negative error number on failure.
564 */
565 int nvmem_register_notifier(struct notifier_block *nb)
566 {
567 return blocking_notifier_chain_register(&nvmem_notifier, nb);
568 }
569 EXPORT_SYMBOL_GPL(nvmem_register_notifier);
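
/*
 * Illustrative sketch (not part of this file): how a consumer might watch
 * providers and cells coming and going via the notifier chain above. The
 * callback and its name are hypothetical.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_info("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nvmem_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nvmem_nb);
 */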
570
571 /**
572 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
573 *
574 * @nb: notifier block to be unregistered.
575 *
576 * Return: 0 on success, negative error number on failure.
577 */
578 int nvmem_unregister_notifier(struct notifier_block *nb)
579 {
580 return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
581 }
582 EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
583
584 static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
585 {
586 const struct nvmem_cell_info *info;
587 struct nvmem_cell_table *table;
588 struct nvmem_cell_entry *cell;
589 int rval = 0, i;
590
591 mutex_lock(&nvmem_cell_mutex);
592 list_for_each_entry(table, &nvmem_cell_tables, node) {
593 if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
594 for (i = 0; i < table->ncells; i++) {
595 info = &table->cells[i];
596
597 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
598 if (!cell) {
599 rval = -ENOMEM;
600 goto out;
601 }
602
603 rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
604 if (rval) {
605 kfree(cell);
606 goto out;
607 }
608
609 nvmem_cell_entry_add(cell);
610 }
611 }
612 }
613
614 out:
615 mutex_unlock(&nvmem_cell_mutex);
616 return rval;
617 }
618
619 static struct nvmem_cell_entry *
620 nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
621 {
622 struct nvmem_cell_entry *iter, *cell = NULL;
623
624 mutex_lock(&nvmem_mutex);
625 list_for_each_entry(iter, &nvmem->cells, node) {
626 if (strcmp(cell_id, iter->name) == 0) {
627 cell = iter;
628 break;
629 }
630 }
631 mutex_unlock(&nvmem_mutex);
632
633 return cell;
634 }
635
636 static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
637 {
638 unsigned int cur = 0;
639 const struct nvmem_keepout *keepout = nvmem->keepout;
640 const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
641
642 while (keepout < keepoutend) {
643 /* Ensure keepouts are sorted and don't overlap. */
644 if (keepout->start < cur) {
645 dev_err(&nvmem->dev,
646 "Keepout regions aren't sorted or overlap.\n");
647
648 return -ERANGE;
649 }
650
651 if (keepout->end < keepout->start) {
652 dev_err(&nvmem->dev,
653 "Invalid keepout region.\n");
654
655 return -EINVAL;
656 }
657
658 /*
659 * Validate keepouts (and holes between) don't violate
660 * word_size constraints.
661 */
662 if ((keepout->end - keepout->start < nvmem->word_size) ||
663 ((keepout->start != cur) &&
664 (keepout->start - cur < nvmem->word_size))) {
665
666 dev_err(&nvmem->dev,
667 "Keepout regions violate word_size constraints.\n");
668
669 return -ERANGE;
670 }
671
672 /* Validate keepouts don't violate stride (alignment). */
673 if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
674 !IS_ALIGNED(keepout->end, nvmem->stride)) {
675
676 dev_err(&nvmem->dev,
677 "Keepout regions violate stride.\n");
678
679 return -EINVAL;
680 }
681
682 cur = keepout->end;
683 keepout++;
684 }
685
686 return 0;
687 }
688
689 static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
690 {
691 struct device *dev = &nvmem->dev;
692 struct device_node *child;
693 const __be32 *addr;
694 int len, ret;
695
696 for_each_child_of_node(dev->of_node, child) {
697 struct nvmem_cell_info info = {0};
698
699 addr = of_get_property(child, "reg", &len);
700 if (!addr)
701 continue;
702 if (len < 2 * sizeof(u32)) {
703 dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
704 of_node_put(child);
705 return -EINVAL;
706 }
707
708 info.offset = be32_to_cpup(addr++);
709 info.bytes = be32_to_cpup(addr);
710 info.name = kasprintf(GFP_KERNEL, "%pOFn", child);
711
712 addr = of_get_property(child, "bits", &len);
713 if (addr && len == (2 * sizeof(u32))) {
714 info.bit_offset = be32_to_cpup(addr++);
715 info.nbits = be32_to_cpup(addr);
716 }
717
718 info.np = of_node_get(child);
719
720 ret = nvmem_add_one_cell(nvmem, &info);
721 kfree(info.name);
722 if (ret) {
723 of_node_put(child);
724 return ret;
725 }
726 }
727
728 return 0;
729 }
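
/*
 * Illustrative sketch of the device-tree layout parsed above (not taken from
 * any real board file): each child node of the provider node supplies a
 * "reg" <offset size> pair and, optionally, a "bits" <bit_offset nbits> pair.
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		mac_address: mac-address@90 {
 *			reg = <0x90 0x6>;
 *		};
 *
 *		calib: calib@a0 {
 *			reg = <0xa0 0x2>;
 *			bits = <2 10>;
 *		};
 *	};
 */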
730
731 /**
732 * nvmem_register() - Register a nvmem device for given nvmem_config.
733 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
734 *
735 * @config: nvmem device configuration with which nvmem device is created.
736 *
737 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
738 * on success.
739 */
740
741 struct nvmem_device *nvmem_register(const struct nvmem_config *config)
742 {
743 struct nvmem_device *nvmem;
744 int rval;
745
746 if (!config->dev)
747 return ERR_PTR(-EINVAL);
748
749 if (!config->reg_read && !config->reg_write)
750 return ERR_PTR(-EINVAL);
751
752 nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
753 if (!nvmem)
754 return ERR_PTR(-ENOMEM);
755
756 rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
757 if (rval < 0) {
758 kfree(nvmem);
759 return ERR_PTR(rval);
760 }
761
762 nvmem->id = rval;
763
764 nvmem->dev.type = &nvmem_provider_type;
765 nvmem->dev.bus = &nvmem_bus_type;
766 nvmem->dev.parent = config->dev;
767
768 device_initialize(&nvmem->dev);
769
770 if (!config->ignore_wp)
771 nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
772 GPIOD_OUT_HIGH);
773 if (IS_ERR(nvmem->wp_gpio)) {
774 rval = PTR_ERR(nvmem->wp_gpio);
775 nvmem->wp_gpio = NULL;
776 goto err_put_device;
777 }
778
779 kref_init(&nvmem->refcnt);
780 INIT_LIST_HEAD(&nvmem->cells);
781
782 nvmem->owner = config->owner;
783 if (!nvmem->owner && config->dev->driver)
784 nvmem->owner = config->dev->driver->owner;
785 nvmem->stride = config->stride ?: 1;
786 nvmem->word_size = config->word_size ?: 1;
787 nvmem->size = config->size;
788 nvmem->root_only = config->root_only;
789 nvmem->priv = config->priv;
790 nvmem->type = config->type;
791 nvmem->reg_read = config->reg_read;
792 nvmem->reg_write = config->reg_write;
793 nvmem->cell_post_process = config->cell_post_process;
794 nvmem->keepout = config->keepout;
795 nvmem->nkeepout = config->nkeepout;
796 if (config->of_node)
797 nvmem->dev.of_node = config->of_node;
798 else if (!config->no_of_node)
799 nvmem->dev.of_node = config->dev->of_node;
800
801 switch (config->id) {
802 case NVMEM_DEVID_NONE:
803 rval = dev_set_name(&nvmem->dev, "%s", config->name);
804 break;
805 case NVMEM_DEVID_AUTO:
806 rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
807 break;
808 default:
809 rval = dev_set_name(&nvmem->dev, "%s%d",
810 config->name ? : "nvmem",
811 config->name ? config->id : nvmem->id);
812 break;
813 }
814
815 if (rval)
816 goto err_put_device;
817
818 nvmem->read_only = device_property_present(config->dev, "read-only") ||
819 config->read_only || !nvmem->reg_write;
820
821 #ifdef CONFIG_NVMEM_SYSFS
822 nvmem->dev.groups = nvmem_dev_groups;
823 #endif
824
825 if (nvmem->nkeepout) {
826 rval = nvmem_validate_keepouts(nvmem);
827 if (rval)
828 goto err_put_device;
829 }
830
831 if (config->compat) {
832 rval = nvmem_sysfs_setup_compat(nvmem, config);
833 if (rval)
834 goto err_put_device;
835 }
836
837 if (config->cells) {
838 rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
839 if (rval)
840 goto err_remove_cells;
841 }
842
843 rval = nvmem_add_cells_from_table(nvmem);
844 if (rval)
845 goto err_remove_cells;
846
847 rval = nvmem_add_cells_from_of(nvmem);
848 if (rval)
849 goto err_remove_cells;
850
851 dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
852
853 rval = device_add(&nvmem->dev);
854 if (rval)
855 goto err_remove_cells;
856
857 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
858
859 return nvmem;
860
861 err_remove_cells:
862 nvmem_device_remove_all_cells(nvmem);
863 if (config->compat)
864 nvmem_sysfs_remove_compat(nvmem, config);
865 err_put_device:
866 put_device(&nvmem->dev);
867
868 return ERR_PTR(rval);
869 }
870 EXPORT_SYMBOL_GPL(nvmem_register);
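
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file) of a
 * minimal read-only provider built from the config fields consumed above.
 * FOO_NVMEM_SIZE and the foo structure are assumptions for the example.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev = &pdev->dev,
 *		.name = "foo-nvmem",
 *		.id = NVMEM_DEVID_AUTO,
 *		.read_only = true,
 *		.word_size = 1,
 *		.stride = 1,
 *		.size = FOO_NVMEM_SIZE,
 *		.reg_read = foo_reg_read,
 *		.priv = foo,
 *	};
 *
 *	nvmem = nvmem_register(&config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */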
871
872 static void nvmem_device_release(struct kref *kref)
873 {
874 struct nvmem_device *nvmem;
875
876 nvmem = container_of(kref, struct nvmem_device, refcnt);
877
878 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
879
880 if (nvmem->flags & FLAG_COMPAT)
881 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
882
883 nvmem_device_remove_all_cells(nvmem);
884 device_unregister(&nvmem->dev);
885 }
886
887 /**
888 * nvmem_unregister() - Unregister previously registered nvmem device
889 *
890 * @nvmem: Pointer to previously registered nvmem device.
891 */
892 void nvmem_unregister(struct nvmem_device *nvmem)
893 {
894 if (nvmem)
895 kref_put(&nvmem->refcnt, nvmem_device_release);
896 }
897 EXPORT_SYMBOL_GPL(nvmem_unregister);
898
899 static void devm_nvmem_unregister(void *nvmem)
900 {
901 nvmem_unregister(nvmem);
902 }
903
904 /**
905 * devm_nvmem_register() - Register a managed nvmem device for given
906 * nvmem_config.
907 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
908 *
909 * @dev: Device that uses the nvmem device.
910 * @config: nvmem device configuration with which nvmem device is created.
911 *
912 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
913 * on success.
914 */
915 struct nvmem_device *devm_nvmem_register(struct device *dev,
916 const struct nvmem_config *config)
917 {
918 struct nvmem_device *nvmem;
919 int ret;
920
921 nvmem = nvmem_register(config);
922 if (IS_ERR(nvmem))
923 return nvmem;
924
925 ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
926 if (ret)
927 return ERR_PTR(ret);
928
929 return nvmem;
930 }
931 EXPORT_SYMBOL_GPL(devm_nvmem_register);
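
/*
 * Illustrative sketch: in a probe() path the managed variant above avoids an
 * explicit nvmem_unregister() on driver removal (hypothetical usage, same
 * config as in the earlier provider sketch).
 *
 *	nvmem = devm_nvmem_register(&pdev->dev, &config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */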
932
933 static struct nvmem_device *__nvmem_device_get(void *data,
934 int (*match)(struct device *dev, const void *data))
935 {
936 struct nvmem_device *nvmem = NULL;
937 struct device *dev;
938
939 mutex_lock(&nvmem_mutex);
940 dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
941 if (dev)
942 nvmem = to_nvmem_device(dev);
943 mutex_unlock(&nvmem_mutex);
944 if (!nvmem)
945 return ERR_PTR(-EPROBE_DEFER);
946
947 if (!try_module_get(nvmem->owner)) {
948 dev_err(&nvmem->dev,
949 "could not increase module refcount for cell %s\n",
950 nvmem_dev_name(nvmem));
951
952 put_device(&nvmem->dev);
953 return ERR_PTR(-EINVAL);
954 }
955
956 kref_get(&nvmem->refcnt);
957
958 return nvmem;
959 }
960
961 static void __nvmem_device_put(struct nvmem_device *nvmem)
962 {
963 put_device(&nvmem->dev);
964 module_put(nvmem->owner);
965 kref_put(&nvmem->refcnt, nvmem_device_release);
966 }
967
968 #if IS_ENABLED(CONFIG_OF)
969 /**
970 * of_nvmem_device_get() - Get nvmem device from a given id
971 *
972 * @np: Device tree node that uses the nvmem device.
973 * @id: nvmem name from nvmem-names property.
974 *
975 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
976 * on success.
977 */
978 struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
979 {
980
981 struct device_node *nvmem_np;
982 struct nvmem_device *nvmem;
983 int index = 0;
984
985 if (id)
986 index = of_property_match_string(np, "nvmem-names", id);
987
988 nvmem_np = of_parse_phandle(np, "nvmem", index);
989 if (!nvmem_np)
990 return ERR_PTR(-ENOENT);
991
992 nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
993 of_node_put(nvmem_np);
994 return nvmem;
995 }
996 EXPORT_SYMBOL_GPL(of_nvmem_device_get);
997 #endif
998
999 /**
1000 * nvmem_device_get() - Get nvmem device from a given id
1001 *
1002 * @dev: Device that uses the nvmem device.
1003 * @dev_name: name of the requested nvmem device.
1004 *
1005 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1006 * on success.
1007 */
1008 struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
1009 {
1010 if (dev->of_node) { /* try dt first */
1011 struct nvmem_device *nvmem;
1012
1013 nvmem = of_nvmem_device_get(dev->of_node, dev_name);
1014
1015 if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
1016 return nvmem;
1017
1018 }
1019
1020 return __nvmem_device_get((void *)dev_name, device_match_name);
1021 }
1022 EXPORT_SYMBOL_GPL(nvmem_device_get);
1023
1024 /**
1025 * nvmem_device_find() - Find nvmem device with matching function
1026 *
1027 * @data: Data to pass to match function
1028 * @match: Callback function to check device
1029 *
1030 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1031 * on success.
1032 */
1033 struct nvmem_device *nvmem_device_find(void *data,
1034 int (*match)(struct device *dev, const void *data))
1035 {
1036 return __nvmem_device_get(data, match);
1037 }
1038 EXPORT_SYMBOL_GPL(nvmem_device_find);
1039
1040 static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
1041 {
1042 struct nvmem_device **nvmem = res;
1043
1044 if (WARN_ON(!nvmem || !*nvmem))
1045 return 0;
1046
1047 return *nvmem == data;
1048 }
1049
1050 static void devm_nvmem_device_release(struct device *dev, void *res)
1051 {
1052 nvmem_device_put(*(struct nvmem_device **)res);
1053 }
1054
1055 /**
1056 * devm_nvmem_device_put() - put an already-obtained nvmem device
1057 *
1058 * @dev: Device that uses the nvmem device.
1059 * @nvmem: pointer to nvmem device obtained from devm_nvmem_device_get(),
1060 * that needs to be released.
1061 */
1062 void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
1063 {
1064 int ret;
1065
1066 ret = devres_release(dev, devm_nvmem_device_release,
1067 devm_nvmem_device_match, nvmem);
1068
1069 WARN_ON(ret);
1070 }
1071 EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
1072
1073 /**
1074 * nvmem_device_put() - put an already-obtained nvmem device
1075 *
1076 * @nvmem: pointer to nvmem device that needs to be released.
1077 */
1078 void nvmem_device_put(struct nvmem_device *nvmem)
1079 {
1080 __nvmem_device_put(nvmem);
1081 }
1082 EXPORT_SYMBOL_GPL(nvmem_device_put);
1083
1084 /**
1085 * devm_nvmem_device_get() - Get nvmem device from a given id
1086 *
1087 * @dev: Device that requests the nvmem device.
1088 * @id: name id for the requested nvmem device.
1089 *
1090 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1091 * on success. The nvmem_device will be released automatically once the
1092 * requesting device is freed.
1093 */
1094 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
1095 {
1096 struct nvmem_device **ptr, *nvmem;
1097
1098 ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
1099 if (!ptr)
1100 return ERR_PTR(-ENOMEM);
1101
1102 nvmem = nvmem_device_get(dev, id);
1103 if (!IS_ERR(nvmem)) {
1104 *ptr = nvmem;
1105 devres_add(dev, ptr);
1106 } else {
1107 devres_free(ptr);
1108 }
1109
1110 return nvmem;
1111 }
1112 EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
1113
1114 static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
1115 const char *id, int index)
1116 {
1117 struct nvmem_cell *cell;
1118 const char *name = NULL;
1119
1120 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
1121 if (!cell)
1122 return ERR_PTR(-ENOMEM);
1123
1124 if (id) {
1125 name = kstrdup_const(id, GFP_KERNEL);
1126 if (!name) {
1127 kfree(cell);
1128 return ERR_PTR(-ENOMEM);
1129 }
1130 }
1131
1132 cell->id = name;
1133 cell->entry = entry;
1134 cell->index = index;
1135
1136 return cell;
1137 }
1138
1139 static struct nvmem_cell *
1140 nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
1141 {
1142 struct nvmem_cell_entry *cell_entry;
1143 struct nvmem_cell *cell = ERR_PTR(-ENOENT);
1144 struct nvmem_cell_lookup *lookup;
1145 struct nvmem_device *nvmem;
1146 const char *dev_id;
1147
1148 if (!dev)
1149 return ERR_PTR(-EINVAL);
1150
1151 dev_id = dev_name(dev);
1152
1153 mutex_lock(&nvmem_lookup_mutex);
1154
1155 list_for_each_entry(lookup, &nvmem_lookup_list, node) {
1156 if ((strcmp(lookup->dev_id, dev_id) == 0) &&
1157 (strcmp(lookup->con_id, con_id) == 0)) {
1158 /* This is the right entry. */
1159 nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
1160 device_match_name);
1161 if (IS_ERR(nvmem)) {
1162 /* Provider may not be registered yet. */
1163 cell = ERR_CAST(nvmem);
1164 break;
1165 }
1166
1167 cell_entry = nvmem_find_cell_entry_by_name(nvmem,
1168 lookup->cell_name);
1169 if (!cell_entry) {
1170 __nvmem_device_put(nvmem);
1171 cell = ERR_PTR(-ENOENT);
1172 } else {
1173 cell = nvmem_create_cell(cell_entry, con_id, 0);
1174 if (IS_ERR(cell))
1175 __nvmem_device_put(nvmem);
1176 }
1177 break;
1178 }
1179 }
1180
1181 mutex_unlock(&nvmem_lookup_mutex);
1182 return cell;
1183 }
1184
1185 #if IS_ENABLED(CONFIG_OF)
1186 static struct nvmem_cell_entry *
1187 nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
1188 {
1189 struct nvmem_cell_entry *iter, *cell = NULL;
1190
1191 mutex_lock(&nvmem_mutex);
1192 list_for_each_entry(iter, &nvmem->cells, node) {
1193 if (np == iter->np) {
1194 cell = iter;
1195 break;
1196 }
1197 }
1198 mutex_unlock(&nvmem_mutex);
1199
1200 return cell;
1201 }
1202
1203 /**
1204 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
1205 *
1206 * @np: Device tree node that uses the nvmem cell.
1207 * @id: nvmem cell name from nvmem-cell-names property, or NULL
1208 * for the cell at index 0 (the lone cell with no accompanying
1209 * nvmem-cell-names property).
1210 *
1211 * Return: Will be an ERR_PTR() on error or a valid pointer
1212 * to a struct nvmem_cell. The nvmem_cell must be released with
1213 * nvmem_cell_put().
1214 */
1215 struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
1216 {
1217 struct device_node *cell_np, *nvmem_np;
1218 struct nvmem_device *nvmem;
1219 struct nvmem_cell_entry *cell_entry;
1220 struct nvmem_cell *cell;
1221 struct of_phandle_args cell_spec;
1222 int index = 0;
1223 int cell_index = 0;
1224 int ret;
1225
1226 /* if cell name exists, find index to the name */
1227 if (id)
1228 index = of_property_match_string(np, "nvmem-cell-names", id);
1229
1230 ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
1231 "#nvmem-cell-cells",
1232 index, &cell_spec);
1233 if (ret)
1234 return ERR_PTR(ret);
1235
1236 if (cell_spec.args_count > 1)
1237 return ERR_PTR(-EINVAL);
1238
1239 cell_np = cell_spec.np;
1240 if (cell_spec.args_count)
1241 cell_index = cell_spec.args[0];
1242
1243 nvmem_np = of_get_parent(cell_np);
1244 if (!nvmem_np) {
1245 of_node_put(cell_np);
1246 return ERR_PTR(-EINVAL);
1247 }
1248
1249 nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
1250 of_node_put(nvmem_np);
1251 if (IS_ERR(nvmem)) {
1252 of_node_put(cell_np);
1253 return ERR_CAST(nvmem);
1254 }
1255
1256 cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
1257 of_node_put(cell_np);
1258 if (!cell_entry) {
1259 __nvmem_device_put(nvmem);
1260 return ERR_PTR(-ENOENT);
1261 }
1262
1263 cell = nvmem_create_cell(cell_entry, id, cell_index);
1264 if (IS_ERR(cell))
1265 __nvmem_device_put(nvmem);
1266
1267 return cell;
1268 }
1269 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
1270 #endif
1271
1272 /**
1273 * nvmem_cell_get() - Get the nvmem cell of a device from a given cell name
1274 *
1275 * @dev: Device that requests the nvmem cell.
1276 * @id: nvmem cell name to get (this corresponds with the name from the
1277 * nvmem-cell-names property for DT systems and with the con_id from
1278 * the lookup entry for non-DT systems).
1279 *
1280 * Return: Will be an ERR_PTR() on error or a valid pointer
1281 * to a struct nvmem_cell. The nvmem_cell must be released with
1282 * nvmem_cell_put().
1283 */
1284 struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
1285 {
1286 struct nvmem_cell *cell;
1287
1288 if (dev->of_node) { /* try dt first */
1289 cell = of_nvmem_cell_get(dev->of_node, id);
1290 if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
1291 return cell;
1292 }
1293
1294 /* NULL cell id only allowed for device tree; invalid otherwise */
1295 if (!id)
1296 return ERR_PTR(-EINVAL);
1297
1298 return nvmem_cell_get_from_lookup(dev, id);
1299 }
1300 EXPORT_SYMBOL_GPL(nvmem_cell_get);
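
/*
 * Illustrative sketch (hypothetical consumer, not part of this file): the
 * usual get/read/put sequence for a cell named either in nvmem-cell-names
 * (DT) or in a registered lookup entry (non-DT).
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	// use the len bytes at mac, then release the buffer
 *	kfree(mac);
 */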
1301
1302 static void devm_nvmem_cell_release(struct device *dev, void *res)
1303 {
1304 nvmem_cell_put(*(struct nvmem_cell **)res);
1305 }
1306
1307 /**
1308 * devm_nvmem_cell_get() - Get the nvmem cell of a device from a given id
1309 *
1310 * @dev: Device that requests the nvmem cell.
1311 * @id: nvmem cell name id to get.
1312 *
1313 * Return: Will be an ERR_PTR() on error or a valid pointer
1314 * to a struct nvmem_cell. The nvmem_cell will be freed
1315 * automatically once the device is freed.
1316 */
1317 struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
1318 {
1319 struct nvmem_cell **ptr, *cell;
1320
1321 ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
1322 if (!ptr)
1323 return ERR_PTR(-ENOMEM);
1324
1325 cell = nvmem_cell_get(dev, id);
1326 if (!IS_ERR(cell)) {
1327 *ptr = cell;
1328 devres_add(dev, ptr);
1329 } else {
1330 devres_free(ptr);
1331 }
1332
1333 return cell;
1334 }
1335 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
1336
1337 static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
1338 {
1339 struct nvmem_cell **c = res;
1340
1341 if (WARN_ON(!c || !*c))
1342 return 0;
1343
1344 return *c == data;
1345 }
1346
1347 /**
1348 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
1349 * from devm_nvmem_cell_get.
1350 *
1351 * @dev: Device that requests the nvmem cell.
1352 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
1353 */
1354 void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
1355 {
1356 int ret;
1357
1358 ret = devres_release(dev, devm_nvmem_cell_release,
1359 devm_nvmem_cell_match, cell);
1360
1361 WARN_ON(ret);
1362 }
1363 EXPORT_SYMBOL(devm_nvmem_cell_put);
1364
1365 /**
1366 * nvmem_cell_put() - Release previously allocated nvmem cell.
1367 *
1368 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
1369 */
1370 void nvmem_cell_put(struct nvmem_cell *cell)
1371 {
1372 struct nvmem_device *nvmem = cell->entry->nvmem;
1373
1374 if (cell->id)
1375 kfree_const(cell->id);
1376
1377 kfree(cell);
1378 __nvmem_device_put(nvmem);
1379 }
1380 EXPORT_SYMBOL_GPL(nvmem_cell_put);
1381
1382 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
1383 {
1384 u8 *p, *b;
1385 int i, extra, bit_offset = cell->bit_offset;
1386
1387 p = b = buf;
1388 if (bit_offset) {
1389 /* First shift */
1390 *b++ >>= bit_offset;
1391
1392 /* setup rest of the bytes if any */
1393 for (i = 1; i < cell->bytes; i++) {
1394 /* Get bits from next byte and shift them towards msb */
1395 *p |= *b << (BITS_PER_BYTE - bit_offset);
1396
1397 p = b;
1398 *b++ >>= bit_offset;
1399 }
1400 } else {
1401 /* point to the msb */
1402 p += cell->bytes - 1;
1403 }
1404
1405 /* result fits in less bytes */
1406 extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
1407 while (--extra >= 0)
1408 *p-- = 0;
1409
1410 /* clear msb bits if any leftover in the last byte */
1411 if (cell->nbits % BITS_PER_BYTE)
1412 *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
1413 }
1414
1415 static int __nvmem_cell_read(struct nvmem_device *nvmem,
1416 struct nvmem_cell_entry *cell,
1417 void *buf, size_t *len, const char *id, int index)
1418 {
1419 int rc;
1420
1421 rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
1422
1423 if (rc)
1424 return rc;
1425
1426 /* shift bits in-place */
1427 if (cell->bit_offset || cell->nbits)
1428 nvmem_shift_read_buffer_in_place(cell, buf);
1429
1430 if (nvmem->cell_post_process) {
1431 rc = nvmem->cell_post_process(nvmem->priv, id, index,
1432 cell->offset, buf, cell->bytes);
1433 if (rc)
1434 return rc;
1435 }
1436
1437 if (len)
1438 *len = cell->bytes;
1439
1440 return 0;
1441 }
1442
1443 /**
1444 * nvmem_cell_read() - Read a given nvmem cell
1445 *
1446 * @cell: nvmem cell to be read.
1447 * @len: pointer to length of cell which will be populated on successful read;
1448 * can be NULL.
1449 *
1450 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
1451 * buffer should be freed by the consumer with a kfree().
1452 */
1453 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
1454 {
1455 struct nvmem_device *nvmem = cell->entry->nvmem;
1456 u8 *buf;
1457 int rc;
1458
1459 if (!nvmem)
1460 return ERR_PTR(-EINVAL);
1461
1462 buf = kzalloc(cell->entry->bytes, GFP_KERNEL);
1463 if (!buf)
1464 return ERR_PTR(-ENOMEM);
1465
1466 rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
1467 if (rc) {
1468 kfree(buf);
1469 return ERR_PTR(rc);
1470 }
1471
1472 return buf;
1473 }
1474 EXPORT_SYMBOL_GPL(nvmem_cell_read);
1475
1476 static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
1477 u8 *_buf, int len)
1478 {
1479 struct nvmem_device *nvmem = cell->nvmem;
1480 int i, rc, nbits, bit_offset = cell->bit_offset;
1481 u8 v, *p, *buf, *b, pbyte, pbits;
1482
1483 nbits = cell->nbits;
1484 buf = kzalloc(cell->bytes, GFP_KERNEL);
1485 if (!buf)
1486 return ERR_PTR(-ENOMEM);
1487
1488 memcpy(buf, _buf, len);
1489 p = b = buf;
1490
1491 if (bit_offset) {
1492 pbyte = *b;
1493 *b <<= bit_offset;
1494
1495 /* setup the first byte with lsb bits from nvmem */
1496 rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
1497 if (rc)
1498 goto err;
1499 *b++ |= GENMASK(bit_offset - 1, 0) & v;
1500
1501 /* setup rest of the byte if any */
1502 for (i = 1; i < cell->bytes; i++) {
1503 /* Get last byte bits and shift them towards lsb */
1504 pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
1505 pbyte = *b;
1506 p = b;
1507 *b <<= bit_offset;
1508 *b++ |= pbits;
1509 }
1510 }
1511
1512 /* if it's not end on byte boundary */
1513 if ((nbits + bit_offset) % BITS_PER_BYTE) {
1514 /* setup the last byte with msb bits from nvmem */
1515 rc = nvmem_reg_read(nvmem,
1516 cell->offset + cell->bytes - 1, &v, 1);
1517 if (rc)
1518 goto err;
1519 *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
1520
1521 }
1522
1523 return buf;
1524 err:
1525 kfree(buf);
1526 return ERR_PTR(rc);
1527 }
1528
1529 static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
1530 {
1531 struct nvmem_device *nvmem = cell->nvmem;
1532 int rc;
1533
1534 if (!nvmem || nvmem->read_only ||
1535 (cell->bit_offset == 0 && len != cell->bytes))
1536 return -EINVAL;
1537
1538 if (cell->bit_offset || cell->nbits) {
1539 buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
1540 if (IS_ERR(buf))
1541 return PTR_ERR(buf);
1542 }
1543
1544 rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
1545
1546 /* free the tmp buffer */
1547 if (cell->bit_offset || cell->nbits)
1548 kfree(buf);
1549
1550 if (rc)
1551 return rc;
1552
1553 return len;
1554 }
1555
1556 /**
1557 * nvmem_cell_write() - Write to a given nvmem cell
1558 *
1559 * @cell: nvmem cell to be written.
1560 * @buf: Buffer to be written.
1561 * @len: length of buffer to be written to nvmem cell.
1562 *
1563 * Return: length of bytes written or negative on failure.
1564 */
1565 int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
1566 {
1567 return __nvmem_cell_entry_write(cell->entry, buf, len);
1568 }
1569
1570 EXPORT_SYMBOL_GPL(nvmem_cell_write);
1571
1572 static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
1573 void *val, size_t count)
1574 {
1575 struct nvmem_cell *cell;
1576 void *buf;
1577 size_t len;
1578
1579 cell = nvmem_cell_get(dev, cell_id);
1580 if (IS_ERR(cell))
1581 return PTR_ERR(cell);
1582
1583 buf = nvmem_cell_read(cell, &len);
1584 if (IS_ERR(buf)) {
1585 nvmem_cell_put(cell);
1586 return PTR_ERR(buf);
1587 }
1588 if (len != count) {
1589 kfree(buf);
1590 nvmem_cell_put(cell);
1591 return -EINVAL;
1592 }
1593 memcpy(val, buf, count);
1594 kfree(buf);
1595 nvmem_cell_put(cell);
1596
1597 return 0;
1598 }
1599
1600 /**
1601 * nvmem_cell_read_u8() - Read a cell value as a u8
1602 *
1603 * @dev: Device that requests the nvmem cell.
1604 * @cell_id: Name of nvmem cell to read.
1605 * @val: pointer to output value.
1606 *
1607 * Return: 0 on success or negative errno.
1608 */
1609 int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
1610 {
1611 return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1612 }
1613 EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
1614
1615 /**
1616 * nvmem_cell_read_u16() - Read a cell value as a u16
1617 *
1618 * @dev: Device that requests the nvmem cell.
1619 * @cell_id: Name of nvmem cell to read.
1620 * @val: pointer to output value.
1621 *
1622 * Return: 0 on success or negative errno.
1623 */
1624 int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
1625 {
1626 return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1627 }
1628 EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
1629
1630 /**
1631 * nvmem_cell_read_u32() - Read a cell value as a u32
1632 *
1633 * @dev: Device that requests the nvmem cell.
1634 * @cell_id: Name of nvmem cell to read.
1635 * @val: pointer to output value.
1636 *
1637 * Return: 0 on success or negative errno.
1638 */
1639 int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
1640 {
1641 return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1642 }
1643 EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
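
/*
 * Illustrative sketch: the fixed-size helpers above fail with -EINVAL when
 * the cell length does not match exactly, so a 4-byte cell (hypothetical
 * name "calib") reads simply as:
 *
 *	u32 calib;
 *	int err;
 *
 *	err = nvmem_cell_read_u32(dev, "calib", &calib);
 *	if (err)
 *		return err;
 */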
1644
1645 /**
1646 * nvmem_cell_read_u64() - Read a cell value as a u64
1647 *
1648 * @dev: Device that requests the nvmem cell.
1649 * @cell_id: Name of nvmem cell to read.
1650 * @val: pointer to output value.
1651 *
1652 * Return: 0 on success or negative errno.
1653 */
1654 int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
1655 {
1656 return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1657 }
1658 EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
1659
1660 static const void *nvmem_cell_read_variable_common(struct device *dev,
1661 const char *cell_id,
1662 size_t max_len, size_t *len)
1663 {
1664 struct nvmem_cell *cell;
1665 int nbits;
1666 void *buf;
1667
1668 cell = nvmem_cell_get(dev, cell_id);
1669 if (IS_ERR(cell))
1670 return cell;
1671
1672 nbits = cell->entry->nbits;
1673 buf = nvmem_cell_read(cell, len);
1674 nvmem_cell_put(cell);
1675 if (IS_ERR(buf))
1676 return buf;
1677
1678 /*
1679 * If nbits is set then nvmem_cell_read() can significantly exaggerate
1680 * the length of the real data. Throw away the extra junk.
1681 */
1682 if (nbits)
1683 *len = DIV_ROUND_UP(nbits, 8);
1684
1685 if (*len > max_len) {
1686 kfree(buf);
1687 return ERR_PTR(-ERANGE);
1688 }
1689
1690 return buf;
1691 }
1692
1693 /**
1694 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
1695 *
1696 * @dev: Device that requests the nvmem cell.
1697 * @cell_id: Name of nvmem cell to read.
1698 * @val: pointer to output value.
1699 *
1700 * Return: 0 on success or negative errno.
1701 */
1702 int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
1703 u32 *val)
1704 {
1705 size_t len;
1706 const u8 *buf;
1707 int i;
1708
1709 buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
1710 if (IS_ERR(buf))
1711 return PTR_ERR(buf);
1712
1713 /* Copy w/ implicit endian conversion */
1714 *val = 0;
1715 for (i = 0; i < len; i++)
1716 *val |= buf[i] << (8 * i);
1717
1718 kfree(buf);
1719
1720 return 0;
1721 }
1722 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
1723
1724 /**
1725 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
1726 *
1727 * @dev: Device that requests the nvmem cell.
1728 * @cell_id: Name of nvmem cell to read.
1729 * @val: pointer to output value.
1730 *
1731 * Return: 0 on success or negative errno.
1732 */
1733 int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
1734 u64 *val)
1735 {
1736 size_t len;
1737 const u8 *buf;
1738 int i;
1739
1740 buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
1741 if (IS_ERR(buf))
1742 return PTR_ERR(buf);
1743
1744 /* Copy w/ implicit endian conversion */
1745 *val = 0;
1746 for (i = 0; i < len; i++)
1747 *val |= (uint64_t)buf[i] << (8 * i);
1748
1749 kfree(buf);
1750
1751 return 0;
1752 }
1753 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
1754
1755 /**
1756 * nvmem_device_cell_read() - Read a given nvmem device and cell
1757 *
1758 * @nvmem: nvmem device to read from.
1759 * @info: nvmem cell info to be read.
1760 * @buf: buffer pointer which will be populated on successful read.
1761 *
1762 * Return: number of bytes read on success and a negative error
1763 * code on failure.
1764 */
1765 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
1766 struct nvmem_cell_info *info, void *buf)
1767 {
1768 struct nvmem_cell_entry cell;
1769 int rc;
1770 ssize_t len;
1771
1772 if (!nvmem)
1773 return -EINVAL;
1774
1775 rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
1776 if (rc)
1777 return rc;
1778
1779 rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
1780 if (rc)
1781 return rc;
1782
1783 return len;
1784 }
1785 EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
1786
1787 /**
1788 * nvmem_device_cell_write() - Write cell to a given nvmem device
1789 *
1790 * @nvmem: nvmem device to be written to.
1791 * @info: nvmem cell info to be written.
1792 * @buf: buffer to be written to cell.
1793 *
1794 * Return: length of bytes written or negative error code on failure.
1795 */
1796 int nvmem_device_cell_write(struct nvmem_device *nvmem,
1797 struct nvmem_cell_info *info, void *buf)
1798 {
1799 struct nvmem_cell_entry cell;
1800 int rc;
1801
1802 if (!nvmem)
1803 return -EINVAL;
1804
1805 rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
1806 if (rc)
1807 return rc;
1808
1809 return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
1810 }
1811 EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
1812
1813 /**
1814 * nvmem_device_read() - Read from a given nvmem device
1815 *
1816 * @nvmem: nvmem device to read from.
1817 * @offset: offset in nvmem device.
1818 * @bytes: number of bytes to read.
1819 * @buf: buffer pointer which will be populated on successful read.
1820 *
1821 * Return: number of bytes read on success and a negative error
1822 * code on failure.
1823 */
1824 int nvmem_device_read(struct nvmem_device *nvmem,
1825 unsigned int offset,
1826 size_t bytes, void *buf)
1827 {
1828 int rc;
1829
1830 if (!nvmem)
1831 return -EINVAL;
1832
1833 rc = nvmem_reg_read(nvmem, offset, buf, bytes);
1834
1835 if (rc)
1836 return rc;
1837
1838 return bytes;
1839 }
1840 EXPORT_SYMBOL_GPL(nvmem_device_read);
1841
1842 /**
1843 * nvmem_device_write() - Write cell to a given nvmem device
1844 *
1845 * @nvmem: nvmem device to be written to.
1846 * @offset: offset in nvmem device.
1847 * @bytes: number of bytes to write.
1848 * @buf: buffer to be written.
1849 *
1850 * Return: length of bytes written or negative error code on failure.
1851 */
1852 int nvmem_device_write(struct nvmem_device *nvmem,
1853 unsigned int offset,
1854 size_t bytes, void *buf)
1855 {
1856 int rc;
1857
1858 if (!nvmem)
1859 return -EINVAL;
1860
1861 rc = nvmem_reg_write(nvmem, offset, buf, bytes);
1862
1863 if (rc)
1864 return rc;
1865
1866
1867 return bytes;
1868 }
1869 EXPORT_SYMBOL_GPL(nvmem_device_write);
1870
1871 /**
1872 * nvmem_add_cell_table() - register a table of cell info entries
1873 *
1874 * @table: table of cell info entries
1875 */
1876 void nvmem_add_cell_table(struct nvmem_cell_table *table)
1877 {
1878 mutex_lock(&nvmem_cell_mutex);
1879 list_add_tail(&table->node, &nvmem_cell_tables);
1880 mutex_unlock(&nvmem_cell_mutex);
1881 }
1882 EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
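
/*
 * Illustrative sketch (hypothetical board code, not part of this file):
 * describing cells for a provider registered elsewhere under the name
 * "foo-nvmem0".
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name = "serial-number",
 *			.offset = 0x10,
 *			.bytes = 8,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name = "foo-nvmem0",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */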
1883
1884 /**
1885 * nvmem_del_cell_table() - remove a previously registered cell info table
1886 *
1887 * @table: table of cell info entries
1888 */
1889 void nvmem_del_cell_table(struct nvmem_cell_table *table)
1890 {
1891 mutex_lock(&nvmem_cell_mutex);
1892 list_del(&table->node);
1893 mutex_unlock(&nvmem_cell_mutex);
1894 }
1895 EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
1896
1897 /**
1898 * nvmem_add_cell_lookups() - register a list of cell lookup entries
1899 *
1900 * @entries: array of cell lookup entries
1901 * @nentries: number of cell lookup entries in the array
1902 */
1903 void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1904 {
1905 int i;
1906
1907 mutex_lock(&nvmem_lookup_mutex);
1908 for (i = 0; i < nentries; i++)
1909 list_add_tail(&entries[i].node, &nvmem_lookup_list);
1910 mutex_unlock(&nvmem_lookup_mutex);
1911 }
1912 EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
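
/*
 * Illustrative sketch (hypothetical machine code): connecting the cell from
 * the table sketch above to a consumer device on a non-DT system, so that
 * nvmem_cell_get(dev, "serial") resolves through the lookup list.
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name = "foo-nvmem0",
 *			.cell_name = "serial-number",
 *			.dev_id = "foo-consumer.0",
 *			.con_id = "serial",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */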
1913
1914 /**
1915 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
1916 * entries
1917 *
1918 * @entries: array of cell lookup entries
1919 * @nentries: number of cell lookup entries in the array
1920 */
1921 void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1922 {
1923 int i;
1924
1925 mutex_lock(&nvmem_lookup_mutex);
1926 for (i = 0; i < nentries; i++)
1927 list_del(&entries[i].node);
1928 mutex_unlock(&nvmem_lookup_mutex);
1929 }
1930 EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
1931
1932 /**
1933 * nvmem_dev_name() - Get the name of a given nvmem device.
1934 *
1935 * @nvmem: nvmem device.
1936 *
1937 * Return: name of the nvmem device.
1938 */
1939 const char *nvmem_dev_name(struct nvmem_device *nvmem)
1940 {
1941 return dev_name(&nvmem->dev);
1942 }
1943 EXPORT_SYMBOL_GPL(nvmem_dev_name);
1944
1945 static int __init nvmem_init(void)
1946 {
1947 return bus_register(&nvmem_bus_type);
1948 }
1949
1950 static void __exit nvmem_exit(void)
1951 {
1952 bus_unregister(&nvmem_bus_type);
1953 }
1954
1955 subsys_initcall(nvmem_init);
1956 module_exit(nvmem_exit);
1957
1958 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
1959 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
1960 MODULE_DESCRIPTION("nvmem Driver Core");
1961 MODULE_LICENSE("GPL v2");
1962