// SPDX-License-Identifier: GPL-2.0
/*
 * Configfs interface for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kstrtox.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#ifdef CONFIG_NVME_TARGET_AUTH
#include <linux/nvme-auth.h>
#endif
#include <crypto/hash.h>
#include <crypto/kpp.h>

#include "nvmet.h"

static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;

struct nvmet_type_name_map {
	u8 type;
	const char *name;
};

static struct nvmet_type_name_map nvmet_transport[] = {
	{ NVMF_TRTYPE_RDMA, "rdma" },
	{ NVMF_TRTYPE_FC, "fc" },
	{ NVMF_TRTYPE_TCP, "tcp" },
	{ NVMF_TRTYPE_LOOP, "loop" },
};

static const struct nvmet_type_name_map nvmet_addr_family[] = {
	{ NVMF_ADDR_FAMILY_PCI, "pcie" },
	{ NVMF_ADDR_FAMILY_IP4, "ipv4" },
	{ NVMF_ADDR_FAMILY_IP6, "ipv6" },
	{ NVMF_ADDR_FAMILY_IB, "ib" },
	{ NVMF_ADDR_FAMILY_FC, "fc" },
	{ NVMF_ADDR_FAMILY_LOOP, "loop" },
};

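/*
 * Port address attributes must not change while a port is enabled, so the
 * store handlers below call this helper first and fail with -EACCES if the
 * port is live; @caller is only used for the log message.
 */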
static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
{
	if (p->enabled)
		pr_err("Disable port '%u' before changing attribute in %s\n",
		       le16_to_cpu(p->disc_addr.portid), caller);
	return p->enabled;
}

/*
 * nvmet_port Generic ConfigFS definitions.
 * Used in any place in the ConfigFS tree that refers to an address.
 */
static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
{
	u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
	int i;

	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
		if (nvmet_addr_family[i].type == adrfam)
			return snprintf(page, PAGE_SIZE, "%s\n",
					nvmet_addr_family[i].name);
	}

	return snprintf(page, PAGE_SIZE, "\n");
}

static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
		if (sysfs_streq(page, nvmet_addr_family[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for adrfam\n", page);
	return -EINVAL;

found:
	port->disc_addr.adrfam = nvmet_addr_family[i].type;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_adrfam);

static ssize_t nvmet_addr_portid_show(struct config_item *item,
		char *page)
{
	__le16 portid = to_nvmet_port(item)->disc_addr.portid;

	return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
}

static ssize_t nvmet_addr_portid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u16 portid = 0;

	if (kstrtou16(page, 0, &portid)) {
		pr_err("Invalid value '%s' for portid\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	port->disc_addr.portid = cpu_to_le16(portid);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_portid);

static ssize_t nvmet_addr_traddr_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
}

static ssize_t nvmet_addr_traddr_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRADDR_SIZE) {
		pr_err("Invalid value '%s' for traddr\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_traddr);

static const struct nvmet_type_name_map nvmet_addr_treq[] = {
	{ NVMF_TREQ_NOT_SPECIFIED, "not specified" },
	{ NVMF_TREQ_REQUIRED, "required" },
	{ NVMF_TREQ_NOT_REQUIRED, "not required" },
};

static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
{
	u8 treq = to_nvmet_port(item)->disc_addr.treq &
		NVME_TREQ_SECURE_CHANNEL_MASK;
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (treq == nvmet_addr_treq[i].type)
			return snprintf(page, PAGE_SIZE, "%s\n",
					nvmet_addr_treq[i].name);
	}

	return snprintf(page, PAGE_SIZE, "\n");
}

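/*
 * Only the secure channel bits of TREQ are exposed through this attribute:
 * the store below starts from the current value with those bits cleared and
 * ORs in the newly selected requirement, preserving the remaining bits.
 */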
static ssize_t nvmet_addr_treq_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (sysfs_streq(page, nvmet_addr_treq[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for treq\n", page);
	return -EINVAL;

found:
	treq |= nvmet_addr_treq[i].type;
	port->disc_addr.treq = treq;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_treq);

static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
}

static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRSVCID_SIZE) {
		pr_err("Invalid value '%s' for trsvcid\n", page);
		return -EINVAL;
	}
	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trsvcid);

static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
}

static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int ret;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;
	ret = kstrtoint(page, 0, &port->inline_data_size);
	if (ret) {
		pr_err("Invalid value '%s' for inline_data_size\n", page);
		return -EINVAL;
	}
	return count;
}

CONFIGFS_ATTR(nvmet_, param_inline_data_size);

#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
}

static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	port->pi_enable = val;
	return count;
}

CONFIGFS_ATTR(nvmet_, param_pi_enable);
#endif

static ssize_t nvmet_addr_trtype_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
		if (port->disc_addr.trtype == nvmet_transport[i].type)
			return snprintf(page, PAGE_SIZE,
					"%s\n", nvmet_transport[i].name);
	}

	return sprintf(page, "\n");
}

static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
{
	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
}

static ssize_t nvmet_addr_trtype_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
		if (sysfs_streq(page, nvmet_transport[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for trtype\n", page);
	return -EINVAL;

found:
	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
	port->disc_addr.trtype = nvmet_transport[i].type;
	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
		nvmet_port_init_tsas_rdma(port);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trtype);
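
/*
 * Example: a port is configured by writing the address attributes above
 * before it is enabled (a sketch, assuming the nvmet configfs is mounted at
 * the usual /sys/kernel/config and port ID 1 is free):
 *
 *	mkdir /sys/kernel/config/nvmet/ports/1
 *	echo tcp      > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *	echo ipv4     > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *	echo 10.0.0.1 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *	echo 4420     > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 *
 * The port only goes live once a subsystem is linked into its subsystems/
 * directory, see nvmet_port_subsys_allow_link() below.
 */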

/*
 * Namespace structures & file operation functions below
 */
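/*
 * Example: namespaces are created as numbered directories and pointed at a
 * backing device before being enabled (a sketch, assuming a subsystem named
 * "testnqn" already exists and /dev/nvme0n1 is the backing block device):
 *
 *	mkdir /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1
 *	echo /dev/nvme0n1 > .../testnqn/namespaces/1/device_path
 *	echo 1            > .../testnqn/namespaces/1/enable
 *
 * where "..." abbreviates /sys/kernel/config/nvmet/subsystems.
 */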
static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
{
	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
}

static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	kfree(ns->device_path);
	ret = -ENOMEM;
	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, device_path);

#ifdef CONFIG_PCI_P2PDMA
static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
}

static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	pci_dev_put(ns->p2p_dev);
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, p2pmem);
#endif /* CONFIG_PCI_P2PDMA */

static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
}

static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	if (uuid_parse(page, &ns->uuid))
		ret = -EINVAL;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_uuid);

static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
}

static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	u8 nguid[16];
	const char *p = page;
	int i;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	for (i = 0; i < 16; i++) {
		if (p + 2 > page + count) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
			ret = -EINVAL;
			goto out_unlock;
		}

		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
		p += 2;

		if (*p == '-' || *p == ':')
			p++;
	}

	memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_nguid);
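
/*
 * The NGUID parser above accepts 16 hex byte pairs, each pair optionally
 * followed by '-' or ':', so UUID-style input such as
 * "ef90689c-6c46-d44c-89c1-4067801309a8" parses fine.
 */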

static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
}

static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	u32 oldgrpid, newgrpid;
	int ret;

	ret = kstrtou32(page, 0, &newgrpid);
	if (ret)
		return ret;

	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
		return -EINVAL;

	down_write(&nvmet_ana_sem);
	oldgrpid = ns->anagrpid;
	nvmet_ana_group_enabled[newgrpid]++;
	ns->anagrpid = newgrpid;
	nvmet_ana_group_enabled[oldgrpid]--;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);

	nvmet_send_ana_event(ns->subsys, NULL);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, ana_grpid);

static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
}

static ssize_t nvmet_ns_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool enable;
	int ret = 0;

	if (kstrtobool(page, &enable))
		return -EINVAL;

	if (enable)
		ret = nvmet_ns_enable(ns);
	else
		nvmet_ns_disable(ns);

	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, enable);

static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
}

static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		pr_err("disable ns before setting buffered_io value.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}

	ns->buffered_io = val;
	mutex_unlock(&ns->subsys->lock);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, buffered_io);

static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	if (!val)
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (!ns->enabled) {
		pr_err("enable ns before revalidate.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}
	if (nvmet_ns_revalidate(ns))
		nvmet_ns_changed(ns->subsys, ns->nsid);
	mutex_unlock(&ns->subsys->lock);
	return count;
}

CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
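
/*
 * revalidate_size is write-only: writing "1" to an enabled namespace
 * re-reads the backing device size and, if it changed, signals a
 * changed-namespace event to connected controllers.
 */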

static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
	&nvmet_ns_attr_revalidate_size,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};

static void nvmet_ns_release(struct config_item *item)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	nvmet_ns_free(ns);
}

static struct configfs_item_operations nvmet_ns_item_ops = {
	.release = nvmet_ns_release,
};

static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops = &nvmet_ns_item_ops,
	.ct_attrs = nvmet_ns_attrs,
	.ct_owner = THIS_MODULE,
};

static struct config_group *nvmet_ns_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
	struct nvmet_ns *ns;
	int ret;
	u32 nsid;

	ret = kstrtou32(name, 0, &nsid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (nsid == 0 || nsid == NVME_NSID_ALL) {
		pr_err("invalid nsid %#x\n", nsid);
		goto out;
	}

	ret = -ENOMEM;
	ns = nvmet_ns_alloc(subsys, nsid);
	if (!ns)
		goto out;
	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);

	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);

	return &ns->group;
out:
	return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_namespaces_group_ops = {
	.make_group = nvmet_ns_make,
};

static const struct config_item_type nvmet_namespaces_type = {
	.ct_group_ops = &nvmet_namespaces_group_ops,
	.ct_owner = THIS_MODULE,
};

#ifdef CONFIG_NVME_TARGET_PASSTHRU

static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);

	return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
}

static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);

	ret = -EBUSY;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	kfree(subsys->passthru_ctrl_path);
	ret = -ENOMEM;
	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);

	return count;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
CONFIGFS_ATTR(nvmet_passthru_, device_path);

static ssize_t nvmet_passthru_enable_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);

	return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
}

static ssize_t nvmet_passthru_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	bool enable;
	int ret = 0;

	if (kstrtobool(page, &enable))
		return -EINVAL;

	if (enable)
		ret = nvmet_passthru_ctrl_enable(subsys);
	else
		nvmet_passthru_ctrl_disable(subsys);

	return ret ? ret : count;
}
CONFIGFS_ATTR(nvmet_passthru_, enable);
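
/*
 * Example: passthru mode hands the whole subsystem to an existing NVMe
 * controller character device (a sketch, assuming /dev/nvme1 is the
 * controller to be exported and a subsystem named "pt" exists):
 *
 *	echo /dev/nvme1 > /sys/kernel/config/nvmet/subsystems/pt/passthru/device_path
 *	echo 1          > /sys/kernel/config/nvmet/subsystems/pt/passthru/enable
 */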

static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
}

static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int timeout;

	if (kstrtouint(page, 0, &timeout))
		return -EINVAL;
	subsys->admin_timeout = timeout;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);

static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
}

static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int timeout;

	if (kstrtouint(page, 0, &timeout))
		return -EINVAL;
	subsys->io_timeout = timeout;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, io_timeout);

static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
}

static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int clear_ids;

	if (kstrtouint(page, 0, &clear_ids))
		return -EINVAL;
	subsys->clear_ids = clear_ids;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, clear_ids);

static struct configfs_attribute *nvmet_passthru_attrs[] = {
	&nvmet_passthru_attr_device_path,
	&nvmet_passthru_attr_enable,
	&nvmet_passthru_attr_admin_timeout,
	&nvmet_passthru_attr_io_timeout,
	&nvmet_passthru_attr_clear_ids,
	NULL,
};

static const struct config_item_type nvmet_passthru_type = {
	.ct_attrs = nvmet_passthru_attrs,
	.ct_owner = THIS_MODULE,
};

static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
	config_group_init_type_name(&subsys->passthru_group,
			"passthru", &nvmet_passthru_type);
	configfs_add_default_group(&subsys->passthru_group,
			&subsys->group);
}

#else /* CONFIG_NVME_TARGET_PASSTHRU */

static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
}

#endif /* CONFIG_NVME_TARGET_PASSTHRU */
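
/*
 * Exporting a subsystem through a port is done by symlinking it into the
 * port's subsystems/ directory.  The first link enables the port; removing
 * the last link disables it again (see the list_empty() checks below).
 */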

static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	if (list_empty(&port->subsystems)) {
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);
	nvmet_port_disc_changed(port, subsys);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}

static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_port_del_ctrls(port, subsys);
	nvmet_port_disc_changed(port, subsys);

	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link = nvmet_port_subsys_allow_link,
	.drop_link = nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops = &nvmet_port_subsys_item_ops,
	.ct_owner = THIS_MODULE,
};

static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	return 0;
out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}

static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host = to_host(target);
	struct nvmet_host_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link = nvmet_allowed_hosts_allow_link,
	.drop_link = nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops = &nvmet_allowed_hosts_item_ops,
	.ct_owner = THIS_MODULE,
};
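
/*
 * Example: host-based access control (a sketch, assuming allow_any_host is
 * 0, a subsystem named "testnqn" exists, and the initiator NQN is
 * "hostnqn"):
 *
 *	mkdir /sys/kernel/config/nvmet/hosts/hostnqn
 *	ln -s /sys/kernel/config/nvmet/hosts/hostnqn \
 *		/sys/kernel/config/nvmet/subsystems/testnqn/allowed_hosts/hostnqn
 */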

static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
			to_subsys(item)->allow_any_host);
}

static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool allow_any_host;
	int ret = 0;

	if (kstrtobool(page, &allow_any_host))
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (allow_any_host && !list_empty(&subsys->hosts)) {
		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (subsys->allow_any_host != allow_any_host) {
		subsys->allow_any_host = allow_any_host;
		nvmet_subsys_disc_changed(subsys, NULL);
	}

out_unlock:
	up_write(&nvmet_config_sem);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);

static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	if (NVME_TERTIARY(subsys->ver))
		return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
				NVME_MAJOR(subsys->ver),
				NVME_MINOR(subsys->ver),
				NVME_TERTIARY(subsys->ver));

	return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
			NVME_MAJOR(subsys->ver),
			NVME_MINOR(subsys->ver));
}

static ssize_t
nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int major, minor, tertiary = 0;
	int ret;

	if (subsys->subsys_discovered) {
		if (NVME_TERTIARY(subsys->ver))
			pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver),
			       NVME_TERTIARY(subsys->ver));
		else
			pr_err("Can't set version number. %llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver));
		return -EINVAL;
	}

	/* passthru subsystems use the underlying controller's version */
	if (nvmet_is_passthru_subsys(subsys))
		return -EINVAL;

	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
	if (ret != 2 && ret != 3)
		return -EINVAL;

	subsys->ver = NVME_VS(major, minor, tertiary);

	return count;
}

static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);

/* See Section 1.5 of NVMe 1.4 */
static bool nvmet_is_ascii(const char c)
{
	return c >= 0x20 && c <= 0x7e;
}

static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return snprintf(page, PAGE_SIZE, "%.*s\n",
			NVMET_SN_MAX_SIZE, subsys->serial);
}

static ssize_t
nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos, len = strcspn(page, "\n");

	if (subsys->subsys_discovered) {
		pr_err("Can't set serial number. %s is already assigned\n",
		       subsys->serial);
		return -EINVAL;
	}

	if (!len || len > NVMET_SN_MAX_SIZE) {
		pr_err("Serial Number cannot be empty or exceed %d Bytes\n",
		       NVMET_SN_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos])) {
			pr_err("Serial Number must contain only ASCII characters\n");
			return -EINVAL;
		}
	}

	memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');

	return count;
}

static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);

static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
}

static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
		const char *page, size_t cnt)
{
	u16 cntlid_min;

	if (sscanf(page, "%hu\n", &cntlid_min) != 1)
		return -EINVAL;

	if (cntlid_min == 0)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (cntlid_min >= to_subsys(item)->cntlid_max)
		goto out_unlock;
	to_subsys(item)->cntlid_min = cntlid_min;
	up_write(&nvmet_config_sem);
	return cnt;

out_unlock:
	up_write(&nvmet_config_sem);
	return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);

static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
}

static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
		const char *page, size_t cnt)
{
	u16 cntlid_max;

	if (sscanf(page, "%hu\n", &cntlid_max) != 1)
		return -EINVAL;

	if (cntlid_max == 0)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (cntlid_max <= to_subsys(item)->cntlid_min)
		goto out_unlock;
	to_subsys(item)->cntlid_max = cntlid_max;
	up_write(&nvmet_config_sem);
	return cnt;

out_unlock:
	up_write(&nvmet_config_sem);
	return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);

static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
}

static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos = 0, len;
	char *val;

	if (subsys->subsys_discovered) {
		pr_err("Can't set model number. %s is already assigned\n",
		       subsys->model_number);
		return -EINVAL;
	}

	len = strcspn(page, "\n");
	if (!len)
		return -EINVAL;

	if (len > NVMET_MN_MAX_SIZE) {
		pr_err("Model number size cannot exceed %d Bytes\n",
		       NVMET_MN_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos]))
			return -EINVAL;
	}

	val = kmemdup_nul(page, len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;
	kfree(subsys->model_number);
	subsys->model_number = val;
	return count;
}

static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);

static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui);
}

static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	uint32_t val = 0;
	int ret;

	if (subsys->subsys_discovered) {
		pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
		       subsys->ieee_oui);
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val >= 0x1000000)
		return -EINVAL;

	subsys->ieee_oui = val;

	return count;
}

static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui);

static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return sysfs_emit(page, "%s\n", subsys->firmware_rev);
}

static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos = 0, len;
	char *val;

	if (subsys->subsys_discovered) {
		pr_err("Can't set firmware revision. %s is already assigned\n",
		       subsys->firmware_rev);
		return -EINVAL;
	}

	len = strcspn(page, "\n");
	if (!len)
		return -EINVAL;

	if (len > NVMET_FR_MAX_SIZE) {
		pr_err("Firmware revision size cannot exceed %d Bytes\n",
		       NVMET_FR_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos]))
			return -EINVAL;
	}

	val = kmemdup_nul(page, len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	kfree(subsys->firmware_rev);

	subsys->firmware_rev = val;

	return count;
}

static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_firmware);

#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
}

static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool pi_enable;

	if (kstrtobool(page, &pi_enable))
		return -EINVAL;

	subsys->pi_support = pi_enable;
	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif

static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
}

static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
		const char *page, size_t cnt)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	struct nvmet_ctrl *ctrl;
	u16 qid_max;

	if (sscanf(page, "%hu\n", &qid_max) != 1)
		return -EINVAL;

	if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	subsys->max_qid = qid_max;

	/* Force reconnect */
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	up_write(&nvmet_config_sem);

	return cnt;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);

static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	&nvmet_subsys_attr_attr_cntlid_min,
	&nvmet_subsys_attr_attr_cntlid_max,
	&nvmet_subsys_attr_attr_model,
	&nvmet_subsys_attr_attr_qid_max,
	&nvmet_subsys_attr_attr_ieee_oui,
	&nvmet_subsys_attr_attr_firmware,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_subsys_attr_attr_pi_enable,
#endif
	NULL,
};

/*
 * Subsystem structures & folder operation functions below
 */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}

static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release = nvmet_subsys_release,
};

static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops = &nvmet_subsys_item_ops,
	.ct_attrs = nvmet_subsys_attrs,
	.ct_owner = THIS_MODULE,
};

static struct config_group *nvmet_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
		pr_err("can't create discovery subsystem through configfs\n");
		return ERR_PTR(-EINVAL);
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	nvmet_add_passthru_group(subsys);

	return &subsys->group;
}

static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group = nvmet_subsys_make,
};

static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops = &nvmet_subsystems_group_ops,
	.ct_owner = THIS_MODULE,
};

static ssize_t nvmet_referral_enable_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
}

static ssize_t nvmet_referral_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);
	bool enable;

	if (kstrtobool(page, &enable))
		goto inval;

	if (enable)
		nvmet_referral_enable(parent, port);
	else
		nvmet_referral_disable(parent, port);

	return count;
inval:
	pr_err("Invalid value '%s' for enable\n", page);
	return -EINVAL;
}

CONFIGFS_ATTR(nvmet_referral_, enable);
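
/*
 * Referrals point discovery clients at additional ports.  Example (a
 * sketch, assuming port 1 exists and a second target listens on 10.0.0.2):
 *
 *	mkdir /sys/kernel/config/nvmet/ports/1/referrals/peer
 *	echo tcp      > .../ports/1/referrals/peer/addr_trtype
 *	echo ipv4     > .../ports/1/referrals/peer/addr_adrfam
 *	echo 10.0.0.2 > .../ports/1/referrals/peer/addr_traddr
 *	echo 4420     > .../ports/1/referrals/peer/addr_trsvcid
 *	echo 1        > .../ports/1/referrals/peer/enable
 *
 * where "..." abbreviates /sys/kernel/config/nvmet.
 */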

/*
 * Discovery Service subsystem definitions
 */
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};

static void nvmet_referral_notify(struct config_group *group,
		struct config_item *item)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);

	nvmet_referral_disable(parent, port);
}

static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	kfree(port);
}

static struct configfs_item_operations nvmet_referral_item_ops = {
	.release = nvmet_referral_release,
};

static const struct config_item_type nvmet_referral_type = {
	.ct_owner = THIS_MODULE,
	.ct_attrs = nvmet_referral_attrs,
	.ct_item_ops = &nvmet_referral_item_ops,
};

static struct config_group *nvmet_referral_make(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&port->entry);
	config_group_init_type_name(&port->group, name, &nvmet_referral_type);

	return &port->group;
}

static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group = nvmet_referral_make,
	.disconnect_notify = nvmet_referral_notify,
};

static const struct config_item_type nvmet_referrals_type = {
	.ct_owner = THIS_MODULE,
	.ct_group_ops = &nvmet_referral_group_ops,
};

static struct nvmet_type_name_map nvmet_ana_state[] = {
	{ NVME_ANA_OPTIMIZED, "optimized" },
	{ NVME_ANA_NONOPTIMIZED, "non-optimized" },
	{ NVME_ANA_INACCESSIBLE, "inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS, "persistent-loss" },
	{ NVME_ANA_CHANGE, "change" },
};

static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
		char *page)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
		if (state == nvmet_ana_state[i].type)
			return sprintf(page, "%s\n", nvmet_ana_state[i].name);
	}

	return sprintf(page, "\n");
}

static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	enum nvme_ana_state *ana_state = grp->port->ana_state;
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
		if (sysfs_streq(page, nvmet_ana_state[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for ana_state\n", page);
	return -EINVAL;

found:
	down_write(&nvmet_ana_sem);
	ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);
	nvmet_port_send_ana_event(grp->port);
	return count;
}

CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
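
/*
 * Example: failing a path over is a matter of writing one of the state
 * names above (a sketch, assuming port 1 with an ANA group 2):
 *
 *	echo inaccessible > /sys/kernel/config/nvmet/ports/1/ana_groups/2/ana_state
 *
 * The store bumps the ANA change count and sends an ANA AEN to all
 * controllers on the port.
 */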

static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};

static void nvmet_ana_group_release(struct config_item *item)
{
	struct nvmet_ana_group *grp = to_ana_group(item);

	if (grp == &grp->port->ana_default_group)
		return;

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
	nvmet_ana_group_enabled[grp->grpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	kfree(grp);
}

static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release = nvmet_ana_group_release,
};

static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops = &nvmet_ana_group_item_ops,
	.ct_attrs = nvmet_ana_group_attrs,
	.ct_owner = THIS_MODULE,
};

static struct config_group *nvmet_ana_groups_make_group(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
	struct nvmet_ana_group *grp;
	u32 grpid;
	int ret;

	ret = kstrtou32(name, 0, &grpid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
		goto out;

	ret = -ENOMEM;
	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
	if (!grp)
		goto out;
	grp->port = port;
	grp->grpid = grpid;

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[grpid]++;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);

	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
	return &grp->group;
out:
	return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group = nvmet_ana_groups_make_group,
};

static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops = &nvmet_ana_groups_group_ops,
	.ct_owner = THIS_MODULE,
};

/*
 * Ports definitions.
 */
static void nvmet_port_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	/* Let in-flight controller teardown complete */
1731 flush_workqueue(nvmet_wq);
1732 list_del(&port->global_entry);
1733
1734 kfree(port->ana_state);
1735 kfree(port);
1736 }
1737
1738 static struct configfs_attribute *nvmet_port_attrs[] = {
1739 &nvmet_attr_addr_adrfam,
1740 &nvmet_attr_addr_treq,
1741 &nvmet_attr_addr_traddr,
1742 &nvmet_attr_addr_trsvcid,
1743 &nvmet_attr_addr_trtype,
1744 &nvmet_attr_param_inline_data_size,
1745 #ifdef CONFIG_BLK_DEV_INTEGRITY
1746 &nvmet_attr_param_pi_enable,
1747 #endif
1748 NULL,
1749 };
1750
1751 static struct configfs_item_operations nvmet_port_item_ops = {
1752 .release = nvmet_port_release,
1753 };
1754
1755 static const struct config_item_type nvmet_port_type = {
1756 .ct_attrs = nvmet_port_attrs,
1757 .ct_item_ops = &nvmet_port_item_ops,
1758 .ct_owner = THIS_MODULE,
1759 };
1760
nvmet_ports_make(struct config_group * group,const char * name)1761 static struct config_group *nvmet_ports_make(struct config_group *group,
1762 const char *name)
1763 {
1764 struct nvmet_port *port;
1765 u16 portid;
1766 u32 i;
1767
1768 if (kstrtou16(name, 0, &portid))
1769 return ERR_PTR(-EINVAL);
1770
1771 port = kzalloc(sizeof(*port), GFP_KERNEL);
1772 if (!port)
1773 return ERR_PTR(-ENOMEM);
1774
1775 port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
1776 sizeof(*port->ana_state), GFP_KERNEL);
1777 if (!port->ana_state) {
1778 kfree(port);
1779 return ERR_PTR(-ENOMEM);
1780 }
1781
1782 for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
1783 if (i == NVMET_DEFAULT_ANA_GRPID)
1784 port->ana_state[1] = NVME_ANA_OPTIMIZED;
1785 else
1786 port->ana_state[i] = NVME_ANA_INACCESSIBLE;
1787 }
1788
1789 list_add(&port->global_entry, &nvmet_ports_list);
1790
1791 INIT_LIST_HEAD(&port->entry);
1792 INIT_LIST_HEAD(&port->subsystems);
1793 INIT_LIST_HEAD(&port->referrals);
1794 port->inline_data_size = -1; /* < 0 == let the transport choose */
1795
1796 port->disc_addr.portid = cpu_to_le16(portid);
1797 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
1798 port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
1799 config_group_init_type_name(&port->group, name, &nvmet_port_type);
1800
1801 config_group_init_type_name(&port->subsys_group,
1802 "subsystems", &nvmet_port_subsys_type);
1803 configfs_add_default_group(&port->subsys_group, &port->group);
1804
1805 config_group_init_type_name(&port->referrals_group,
1806 "referrals", &nvmet_referrals_type);
1807 configfs_add_default_group(&port->referrals_group, &port->group);
1808
1809 config_group_init_type_name(&port->ana_groups_group,
1810 "ana_groups", &nvmet_ana_groups_type);
1811 configfs_add_default_group(&port->ana_groups_group, &port->group);
1812
1813 port->ana_default_group.port = port;
1814 port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
1815 config_group_init_type_name(&port->ana_default_group.group,
1816 __stringify(NVMET_DEFAULT_ANA_GRPID),
1817 &nvmet_ana_group_type);
1818 configfs_add_default_group(&port->ana_default_group.group,
1819 &port->ana_groups_group);
1820
1821 return &port->group;
1822 }

static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group = nvmet_ports_make,
};

static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops = &nvmet_ports_group_ops,
	.ct_owner = THIS_MODULE,
};

static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;

#ifdef CONFIG_NVME_TARGET_AUTH
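/*
 * DH-HMAC-CHAP (NVMe in-band authentication) attributes. These are only
 * compiled in with CONFIG_NVME_TARGET_AUTH and appear in each host
 * directory, where they select the secret(s), HMAC hash and DH group used
 * when that host authenticates against the target.
 */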
static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
		char *page)
{
	u8 *dhchap_secret = to_host(item)->dhchap_secret;

	if (!dhchap_secret)
		return sprintf(page, "\n");
	return sprintf(page, "%s\n", dhchap_secret);
}

static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int ret;

	ret = nvmet_auth_set_key(host, page, false);
	/*
	 * Re-authentication is a soft state, so keep the
	 * current authentication valid until the host
	 * requests re-authentication.
	 */
	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_key);
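
/*
 * Usage sketch (values are illustrative): secrets are typically generated
 * with nvme-cli ("nvme gen-dhchap-key") and written in the "DHHC-1:..."
 * transport format, e.g.:
 *
 *	echo "DHHC-1:00:<base64-encoded key>:" > hosts/<hostnqn>/dhchap_key
 *
 * dhchap_ctrl_key below takes the separate controller secret used for
 * bidirectional authentication; as noted above, writing a new secret does
 * not invalidate the current session until the host re-authenticates.
 */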

static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
		char *page)
{
	u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;

	if (!dhchap_secret)
		return sprintf(page, "\n");
	return sprintf(page, "%s\n", dhchap_secret);
}

static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int ret;

	ret = nvmet_auth_set_key(host, page, true);
	/*
	 * Re-authentication is a soft state, so keep the
	 * current authentication valid until the host
	 * requests re-authentication.
	 */
	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);

static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
		char *page)
{
	struct nvmet_host *host = to_host(item);
	const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);

	return sprintf(page, "%s\n", hash_name ? hash_name : "none");
}
static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	u8 hmac_id;

	hmac_id = nvme_auth_hmac_id(page);
	if (hmac_id == NVME_AUTH_HASH_INVALID)
		return -EINVAL;
	if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
		return -EOPNOTSUPP;
	host->dhchap_hash_id = hmac_id;
	return count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
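
/*
 * The hash is selected by its kernel crypto API name; with the HMACs
 * known to the NVMe auth library that is one of "hmac(sha256)",
 * "hmac(sha384)" or "hmac(sha512)", e.g.:
 *
 *	echo "hmac(sha384)" > hosts/<hostnqn>/dhchap_hash
 */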

static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
		char *page)
{
	struct nvmet_host *host = to_host(item);
	const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);

	return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
}

static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int dhgroup_id;

	dhgroup_id = nvme_auth_dhgroup_id(page);
	if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
		return -EINVAL;
	if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
		const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);

		if (!crypto_has_kpp(kpp, 0, 0))
			return -EINVAL;
	}
	host->dhchap_dhgroup_id = dhgroup_id;
	return count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
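
/*
 * Likewise the DH group is selected by name ("null" or an FFDHE group
 * such as "ffdhe2048" through "ffdhe8192"); the store fails unless the
 * kernel provides a matching KPP implementation, e.g.:
 *
 *	echo "ffdhe2048" > hosts/<hostnqn>/dhchap_dhgroup
 */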

static struct configfs_attribute *nvmet_host_attrs[] = {
	&nvmet_host_attr_dhchap_key,
	&nvmet_host_attr_dhchap_ctrl_key,
	&nvmet_host_attr_dhchap_hash,
	&nvmet_host_attr_dhchap_dhgroup,
	NULL,
};
#endif /* CONFIG_NVME_TARGET_AUTH */

static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

#ifdef CONFIG_NVME_TARGET_AUTH
	kfree(host->dhchap_secret);
	kfree(host->dhchap_ctrl_secret);
#endif
	kfree(host);
}

static struct configfs_item_operations nvmet_host_item_ops = {
	.release = nvmet_host_release,
};

static const struct config_item_type nvmet_host_type = {
	.ct_item_ops = &nvmet_host_item_ops,
#ifdef CONFIG_NVME_TARGET_AUTH
	.ct_attrs = nvmet_host_attrs,
#endif
	.ct_owner = THIS_MODULE,
};

static struct config_group *nvmet_hosts_make_group(struct config_group *group,
		const char *name)
{
	struct nvmet_host *host;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return ERR_PTR(-ENOMEM);

#ifdef CONFIG_NVME_TARGET_AUTH
	/* Default to SHA256 */
	host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
#endif

	config_group_init_type_name(&host->group, name, &nvmet_host_type);

	return &host->group;
}
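
/*
 * Host entries are created by mkdir with the host NQN as the directory
 * name (path assumes the standard configfs mount):
 *
 *	mkdir /sys/kernel/config/nvmet/hosts/<hostnqn>
 */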

static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group = nvmet_hosts_make_group,
};

static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops = &nvmet_hosts_group_ops,
	.ct_owner = THIS_MODULE,
};

static struct config_group nvmet_hosts_group;

static const struct config_item_type nvmet_root_type = {
	.ct_owner = THIS_MODULE,
};

static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nvmet",
			.ci_type = &nvmet_root_type,
		},
	},
};

int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret) {
		pr_err("configfs_register_subsystem: %d\n", ret);
		return ret;
	}

	return 0;
}
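
/*
 * After successful registration the top-level tree looks like this
 * (assuming the standard configfs mount point):
 *
 *	/sys/kernel/config/nvmet/
 *		subsystems/
 *		ports/
 *		hosts/
 */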

void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}