// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#include <generated/utsrelease.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"

struct kmem_cache *nvmet_bvec_cache;
struct workqueue_struct *buffered_io_wq;
struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 * - subsystems list
 * - per-subsystem allowed hosts list
 * - allow_any_host subsystem attribute
 * - nvmet_genctr
 * - the nvmet_transports array
 *
 * When updating any of those lists/structures write lock should be obtained,
 * while when reading (populating discovery log page or checking host-subsystem
 * link) read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);

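/*
 * Translate a Linux errno returned by a backend (block device or file) into
 * an NVMe status code, setting req->error_loc to the command field that is
 * most likely at fault.  Most mappings carry the DNR (Do Not Retry) bit,
 * since retrying the same command cannot succeed.
 */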
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	switch (errno) {
	case 0:
		return NVME_SC_SUCCESS;
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
		default:
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_ACCESS_DENIED;
	case -EIO:
		fallthrough;
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	}
}

u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
{
	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
		 req->sq->qid);

	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

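/*
 * xa_for_each() walks the namespaces xarray in ascending index order, so the
 * last nsid visited is the highest one currently configured.
 */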
static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *cur;
	unsigned long idx;
	u32 nsid = 0;

	xa_for_each(&subsys->namespaces, idx, cur)
		nsid = cur->nsid;

	return nsid;
}

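/*
 * Build completion dword 0 for an Asynchronous Event Request: event type in
 * bits 07:00, event information in bits 15:08 and the associated log page
 * identifier in bits 23:16.
 */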
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds) {
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

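/*
 * Pair each queued AEN with an outstanding AER command.  ctrl->lock is
 * dropped around nvmet_req_complete() so the transport's completion path is
 * never invoked while the controller lock is held.
 */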
static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
		aen = list_first_entry(&ctrl->async_events,
				       struct nvmet_async_event, entry);
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
		nvmet_req_complete(req, 0);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen, *tmp;

	mutex_lock(&ctrl->lock);
	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
		list_del(&aen->entry);
		kfree(aen);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);

	nvmet_async_events_process(ctrl);
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

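	/*
	 * Once more namespaces changed than the log can hold, collapse the
	 * list to the single 0xffffffff entry the Changed Namespace List log
	 * page uses to report "more than NVME_MAX_CHANGED_NAMESPACES
	 * namespaces changed", and use U32_MAX as a sentinel so further
	 * changes are ignored until the host reads the log.
	 */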
	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->port == port)
			ctrl->ops->delete_ctrl(ctrl);
	}
	mutex_unlock(&subsys->lock);
}

int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	/*
	 * If the user requested PI support and the transport isn't pi capable,
	 * don't enable the port.
	 */
	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
		pr_err("T10-PI is not supported by transport type %d\n",
		       port->disc_addr.trtype);
		ret = -EINVAL;
		goto out_put;
	}

	ret = ops->add_port(port);
	if (ret)
		goto out_put;

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	port->enabled = true;
	port->tr_ops = ops;
	return 0;

out_put:
	module_put(ops->owner);
	return ret;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;
	port->tr_ops = NULL;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

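/*
 * Traffic based keep-alive: reset_tbkas is set whenever the controller
 * processes a command (see nvmet_req_init()), so an expiring timer simply
 * rearms itself as long as the host keeps sending commands.  Only a host
 * that is idle and also failed to send a Keep Alive triggers a fatal error.
 */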
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool reset_tbkas = ctrl->reset_tbkas;

	ctrl->reset_tbkas = false;
	if (reset_tbkas) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

u16 nvmet_req_find_ns(struct nvmet_req *req)
{
	u32 nsid = le32_to_cpu(req->cmd->common.nsid);

	req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}

	percpu_ref_get(&req->ns->ref);
	return NVME_SC_SUCCESS;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we
		 * setup the controller when the port's device is available.
		 */

		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
				    struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}

bool nvmet_ns_revalidate(struct nvmet_ns *ns)
{
	loff_t oldsize = ns->size;

	if (ns->bdev)
		nvmet_bdev_ns_revalidate(ns);
	else
		nvmet_file_ns_revalidate(ns);

	return oldsize != ns->size;
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = 0;

	if (nvmet_is_passthru_subsys(subsys)) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
		goto out_unlock;
	}

	if (ns->enabled)
		goto out_unlock;

	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
	if (ret)
		goto out_restore_subsys_maxnsid;

	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;

out_restore_subsys_maxnsid:
	subsys->max_nsid = nvmet_max_nsid(subsys);
	percpu_ref_exit(&ns->ref);
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	xa_erase(&ns->subsys->namespaces, ns->nsid);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as a RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;
	ns->csi = NVME_CSI_NVM;

	return ns;
}

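/*
 * Advance the submission queue head lock-free: try_cmpxchg() retries until
 * the increment (modulo the queue size) lands, and the resulting head is
 * reported back to the host in the completion queue entry.
 */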
static void nvmet_update_sq_head(struct nvmet_req *req)
{
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		old_sqhd = READ_ONCE(req->sq->sqhd);
		do {
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
	}
	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}

static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->cqe->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_ns *ns = req->ns;

	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);

	trace_nvmet_req_complete(req);

	req->ops->queue_response(req);
	if (ns)
		nvmet_put_namespace(ns);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	struct nvmet_ctrl *ctrl = sq->ctrl;

	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
		nvmet_async_events_failall(ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);
	nvmet_auth_sq_free(sq);

	if (ctrl) {
		/*
		 * The teardown flow may take some time, and the host may not
		 * send us keep-alive during this period, hence reset the
		 * traffic based keep-alive timer so we don't trigger a
		 * controller teardown as a result of a keep-alive expiration.
		 */
		ctrl->reset_tbkas = true;
		sq->ctrl->sqs[sq->qid] = NULL;
		nvmet_ctrl_put(ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);
	nvmet_auth_sq_init(sq);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

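/*
 * Map the ANA state of the namespace's ANA group on this port to the
 * corresponding path-related status code; optimized and non-optimized states
 * let the I/O through.
 */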
static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_io_cmd(req);

	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_io_cmd(req);

	ret = nvmet_req_find_ns(req);
	if (unlikely(ret))
		return ret;

	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	switch (req->ns->csi) {
	case NVME_CSI_NVM:
		if (req->ns->file)
			return nvmet_file_parse_io_cmd(req);
		return nvmet_bdev_parse_io_cmd(req);
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
			return nvmet_bdev_zns_parse_io_cmd(req);
		return NVME_SC_INVALID_IO_CMD_SET;
	default:
		return NVME_SC_INVALID_IO_CMD_SET;
	}
}

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
	req->transfer_len = 0;
	req->metadata_len = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	trace_nvmet_req_init(req, req->cmd);

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (sq->ctrl)
		sq->ctrl->reset_tbkas = true;

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
	if (unlikely(len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);

bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
	if (unlikely(data_len > req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}

static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
	return req->transfer_len - req->metadata_len;
}

static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
		struct nvmet_req *req)
{
	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
			nvmet_data_transfer_len(req));
	if (!req->sg)
		goto out_err;

	if (req->metadata_len) {
		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
				&req->metadata_sg_cnt, req->metadata_len);
		if (!req->metadata_sg)
			goto out_free_sg;
	}

	req->p2p_dev = p2p_dev;

	return 0;
out_free_sg:
	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
out_err:
	return -ENOMEM;
}

static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
	    !req->sq->ctrl || !req->sq->qid || !req->ns)
		return NULL;
	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
}

int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);

	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
		return 0;

	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
			    &req->sg_cnt);
	if (unlikely(!req->sg))
		goto out;

	if (req->metadata_len) {
		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
					     &req->metadata_sg_cnt);
		if (unlikely(!req->metadata_sg))
			goto out_free;
	}

	return 0;
out_free:
	sgl_free(req->sg);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);

void nvmet_req_free_sgls(struct nvmet_req *req)
{
	if (req->p2p_dev) {
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
		if (req->metadata_sg)
			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
		req->p2p_dev = NULL;
	} else {
		sgl_free(req->sg);
		if (req->metadata_sg)
			sgl_free(req->metadata_sg);
	}

	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);

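/*
 * Helpers that extract individual fields (EN, CSS, MPS, AMS, SHN, IOSQES,
 * IOCQES) from the 32-bit Controller Configuration (CC) register value
 * written by the host.
 */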
static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

static inline bool nvmet_css_supported(u8 cc_css)
{
	switch (cc_css << NVME_CC_CSS_SHIFT) {
	case NVME_CC_CSS_NVM:
	case NVME_CC_CSS_CSI:
		return true;
	default:
		return false;
	}
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/*
	 * Only I/O controllers should verify iosqes,iocqes.
	 * Strictly speaking, the spec says a discovery controller
	 * should verify iosqes,iocqes are zeroed, however that
	 * would break backwards compatibility, so don't enforce it.
	 */
	if (!nvmet_is_disc_subsys(ctrl->subsys) &&
	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	if (nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller.  Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	if (ctrl->kato)
		mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

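/*
 * Build the Controller Capabilities (CAP) register.  CAP.MQES is a 0's based
 * value, hence the "- 1" on the maximum queue size below.
 */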
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* Controller supports one or more I/O Command Sets */
	ctrl->cap |= (1ULL << 43);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	if (ctrl->ops->get_max_queue_size)
		ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
	else
		ctrl->cap |= NVMET_QUEUE_SIZE - 1;

	if (nvmet_is_passthru_subsys(ctrl->subsys))
		nvmet_passthrough_override_cap(ctrl);
}

struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = NULL;
	struct nvmet_subsys *subsys;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			/* ctrl found */
			goto found;
		}
	}

	ctrl = NULL; /* ctrl not found */
	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);

found:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
out:
	return ctrl;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       req->cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       req->cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!nvmet_check_auth_status(req))) {
		pr_warn("qid %d not authenticated\n", req->sq->qid);
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
	}
	return 0;
}

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
	struct nvmet_host_link *p;

	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->allow_any_host)
		return true;

	if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	unsigned long idx;

	if (!req->p2p_client)
		return;

	ctrl->p2p_client = get_device(req->p2p_client);

	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
		pci_dev_put(radix_tree_deref_slot(slot));

	put_device(ctrl->p2p_client);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		goto out;
	}

	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	ctrl->port = req->port;
	ctrl->ops = req->ops;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	/* By default, set loop targets to clear IDs */
	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
		subsys->clear_ids = 1;
#endif

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	nvmet_init_cap(ctrl);
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_changed_ns_list;

	if (subsys->cntlid_min > subsys->cntlid_max)
		goto out_free_sqs;

	ret = ida_alloc_range(&cntlid_ida,
			     subsys->cntlid_min, subsys->cntlid_max,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	/*
	 * Discovery controllers may use some arbitrary high value
	 * in order to cleanup stale discovery sessions
	 */
	if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
		kato = NVMET_DISC_KATO_MS;

	/* keep-alive timeout in seconds */
	ctrl->kato = DIV_ROUND_UP(kato, 1000);

	ctrl->err_counter = 0;
	spin_lock_init(&ctrl->error_lock);

	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	nvmet_setup_p2p_ns_map(ctrl, req);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	nvmet_release_p2p_ns_map(ctrl);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	nvmet_destroy_auth(ctrl);

	ida_free(&cntlid_ida, ctrl->cntlid);

	nvmet_async_events_free(ctrl);
	kfree(ctrl->sqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		queue_work(nvmet_wq, &ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;
	char serial[NVMET_SN_MAX_SIZE / 2];
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return ERR_PTR(-ENOMEM);

	subsys->ver = NVMET_DEFAULT_VS;
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&serial, sizeof(serial));
	bin2hex(subsys->serial, &serial, sizeof(serial));

	subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
	if (!subsys->model_number) {
		ret = -ENOMEM;
		goto free_subsys;
	}

	subsys->ieee_oui = 0;

	subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL);
	if (!subsys->firmware_rev) {
		ret = -ENOMEM;
		goto free_mn;
	}

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
	case NVME_NQN_CURR:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		ret = -EINVAL;
		goto free_fr;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		ret = -ENOMEM;
		goto free_fr;
	}
	subsys->cntlid_min = NVME_CNTLID_MIN;
	subsys->cntlid_max = NVME_CNTLID_MAX;
	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	xa_init(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;

free_fr:
	kfree(subsys->firmware_rev);
free_mn:
	kfree(subsys->model_number);
free_subsys:
	kfree(subsys);
	return ERR_PTR(ret);
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));

	xa_destroy(&subsys->namespaces);
	nvmet_passthru_subsys_free(subsys);

	kfree(subsys->subsysnqn);
	kfree(subsys->model_number);
	kfree(subsys->firmware_rev);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error = -ENOMEM;

	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
			SLAB_HWCACHE_ALIGN, NULL);
	if (!nvmet_bvec_cache)
		return -ENOMEM;

	zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
	if (!zbd_wq)
		goto out_destroy_bvec_cache;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq)
		goto out_free_zbd_work_queue;

	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
	if (!nvmet_wq)
		goto out_free_buffered_work_queue;

	error = nvmet_init_discovery();
	if (error)
		goto out_free_nvmet_work_queue;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out_free_nvmet_work_queue:
	destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
	destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
	destroy_workqueue(zbd_wq);
out_destroy_bvec_cache:
	kmem_cache_destroy(nvmet_bvec_cache);
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(nvmet_wq);
	destroy_workqueue(buffered_io_wq);
	destroy_workqueue(zbd_wq);
	kmem_cache_destroy(nvmet_bvec_cache);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");