1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * NVMe over Fabrics TCP target.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
12 #include <net/sock.h>
13 #include <net/tcp.h>
14 #include <linux/inet.h>
15 #include <linux/llist.h>
16 #include <crypto/hash.h>
17
18 #include "nvmet.h"
19
20 #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
21
22 /* Define the socket priority to use for connections where it is desirable
23 * that the NIC consider performing optimized packet processing or filtering.
24 * A non-zero value is sufficient to indicate general consideration of any
25 * possible optimization. Making it a module param allows for alternative
26 * values that may be unique for some NIC implementations.
27 */
28 static int so_priority;
29 module_param(so_priority, int, 0644);
30 MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");
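/*
 * Example (hypothetical value): the priority can be set at module load time
 * or changed later through sysfs, e.g.:
 *
 *   modprobe nvmet-tcp so_priority=6
 *   echo 6 > /sys/module/nvmet_tcp/parameters/so_priority
 *
 * The value is applied to every accepted queue socket and to the listening
 * socket via sock_set_priority() (see nvmet_tcp_set_queue_sock() and
 * nvmet_tcp_add_port() below).
 */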
31
32 /* Define a time period (in usecs) during which io_work() shall sample an
33 * activated queue before determining it to be idle. This optional module behavior
34 * can enable NIC solutions that support socket optimized packet processing
35 * using advanced interrupt moderation techniques.
36 */
37 static int idle_poll_period_usecs;
38 module_param(idle_poll_period_usecs, int, 0644);
39 MODULE_PARM_DESC(idle_poll_period_usecs,
40 "nvmet tcp io_work poll till idle time period in usecs");
41
42 #define NVMET_TCP_RECV_BUDGET 8
43 #define NVMET_TCP_SEND_BUDGET 8
44 #define NVMET_TCP_IO_WORK_BUDGET 64
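/*
 * io_work() alternates a receive pass and a send pass, each capped by its
 * own budget above, and loops until neither pass makes progress or roughly
 * NVMET_TCP_IO_WORK_BUDGET operations have been completed, after which it
 * requeues itself if work (or an armed idle-poll deadline) remains. This
 * bounds the time one busy queue can monopolize a nvmet_tcp_wq worker.
 */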
45
46 enum nvmet_tcp_send_state {
47 NVMET_TCP_SEND_DATA_PDU,
48 NVMET_TCP_SEND_DATA,
49 NVMET_TCP_SEND_R2T,
50 NVMET_TCP_SEND_DDGST,
51 NVMET_TCP_SEND_RESPONSE
52 };
53
54 enum nvmet_tcp_recv_state {
55 NVMET_TCP_RECV_PDU,
56 NVMET_TCP_RECV_DATA,
57 NVMET_TCP_RECV_DDGST,
58 NVMET_TCP_RECV_ERR,
59 };
60
61 enum {
62 NVMET_TCP_F_INIT_FAILED = (1 << 0),
63 };
64
65 struct nvmet_tcp_cmd {
66 struct nvmet_tcp_queue *queue;
67 struct nvmet_req req;
68
69 struct nvme_tcp_cmd_pdu *cmd_pdu;
70 struct nvme_tcp_rsp_pdu *rsp_pdu;
71 struct nvme_tcp_data_pdu *data_pdu;
72 struct nvme_tcp_r2t_pdu *r2t_pdu;
73
74 u32 rbytes_done;
75 u32 wbytes_done;
76
77 u32 pdu_len;
78 u32 pdu_recv;
79 int sg_idx;
80 int nr_mapped;
81 struct msghdr recv_msg;
82 struct kvec *iov;
83 u32 flags;
84
85 struct list_head entry;
86 struct llist_node lentry;
87
88 /* send state */
89 u32 offset;
90 struct scatterlist *cur_sg;
91 enum nvmet_tcp_send_state state;
92
93 __le32 exp_ddgst;
94 __le32 recv_ddgst;
95 };
96
97 enum nvmet_tcp_queue_state {
98 NVMET_TCP_Q_CONNECTING,
99 NVMET_TCP_Q_LIVE,
100 NVMET_TCP_Q_DISCONNECTING,
101 };
102
103 struct nvmet_tcp_queue {
104 struct socket *sock;
105 struct nvmet_tcp_port *port;
106 struct work_struct io_work;
107 struct nvmet_cq nvme_cq;
108 struct nvmet_sq nvme_sq;
109
110 /* send state */
111 struct nvmet_tcp_cmd *cmds;
112 unsigned int nr_cmds;
113 struct list_head free_list;
114 struct llist_head resp_list;
115 struct list_head resp_send_list;
116 int send_list_len;
117 struct nvmet_tcp_cmd *snd_cmd;
118
119 /* recv state */
120 int offset;
121 int left;
122 enum nvmet_tcp_recv_state rcv_state;
123 struct nvmet_tcp_cmd *cmd;
124 union nvme_tcp_pdu pdu;
125
126 /* digest state */
127 bool hdr_digest;
128 bool data_digest;
129 struct ahash_request *snd_hash;
130 struct ahash_request *rcv_hash;
131
132 unsigned long poll_end;
133
134 spinlock_t state_lock;
135 enum nvmet_tcp_queue_state state;
136
137 struct sockaddr_storage sockaddr;
138 struct sockaddr_storage sockaddr_peer;
139 struct work_struct release_work;
140
141 int idx;
142 struct list_head queue_list;
143
144 struct nvmet_tcp_cmd connect;
145
146 struct page_frag_cache pf_cache;
147
148 void (*data_ready)(struct sock *);
149 void (*state_change)(struct sock *);
150 void (*write_space)(struct sock *);
151 };
152
153 struct nvmet_tcp_port {
154 struct socket *sock;
155 struct work_struct accept_work;
156 struct nvmet_port *nport;
157 struct sockaddr_storage addr;
158 void (*data_ready)(struct sock *);
159 };
160
161 static DEFINE_IDA(nvmet_tcp_queue_ida);
162 static LIST_HEAD(nvmet_tcp_queue_list);
163 static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
164
165 static struct workqueue_struct *nvmet_tcp_wq;
166 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
167 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
168 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
169 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
170 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
171
172 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
173 struct nvmet_tcp_cmd *cmd)
174 {
175 if (unlikely(!queue->nr_cmds)) {
176 /* We didn't allocate cmds yet, send 0xffff */
177 return USHRT_MAX;
178 }
179
180 return cmd - queue->cmds;
181 }
182
183 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
184 {
185 return nvme_is_write(cmd->req.cmd) &&
186 cmd->rbytes_done < cmd->req.transfer_len;
187 }
188
189 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
190 {
191 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
192 }
193
194 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
195 {
196 return !nvme_is_write(cmd->req.cmd) &&
197 cmd->req.transfer_len > 0 &&
198 !cmd->req.cqe->status;
199 }
200
201 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
202 {
203 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
204 !cmd->rbytes_done;
205 }
206
207 static inline struct nvmet_tcp_cmd *
208 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
209 {
210 struct nvmet_tcp_cmd *cmd;
211
212 cmd = list_first_entry_or_null(&queue->free_list,
213 struct nvmet_tcp_cmd, entry);
214 if (!cmd)
215 return NULL;
216 list_del_init(&cmd->entry);
217
218 cmd->rbytes_done = cmd->wbytes_done = 0;
219 cmd->pdu_len = 0;
220 cmd->pdu_recv = 0;
221 cmd->iov = NULL;
222 cmd->flags = 0;
223 return cmd;
224 }
225
226 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
227 {
228 if (unlikely(cmd == &cmd->queue->connect))
229 return;
230
231 list_add_tail(&cmd->entry, &cmd->queue->free_list);
232 }
233
234 static inline int queue_cpu(struct nvmet_tcp_queue *queue)
235 {
236 return queue->sock->sk->sk_incoming_cpu;
237 }
238
239 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
240 {
241 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
242 }
243
244 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
245 {
246 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
247 }
248
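/*
 * The header digest is a crc32c of the PDU header placed immediately after
 * it on the wire; nvmet_tcp_hdgst() therefore writes the ahash result to
 * pdu + len. nvmet_tcp_verify_hdgst() reuses this by saving the received
 * digest, recomputing it in place and comparing the two values.
 */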
249 static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
250 void *pdu, size_t len)
251 {
252 struct scatterlist sg;
253
254 sg_init_one(&sg, pdu, len);
255 ahash_request_set_crypt(hash, &sg, pdu + len, len);
256 crypto_ahash_digest(hash);
257 }
258
259 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
260 void *pdu, size_t len)
261 {
262 struct nvme_tcp_hdr *hdr = pdu;
263 __le32 recv_digest;
264 __le32 exp_digest;
265
266 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
267 pr_err("queue %d: header digest enabled but no header digest\n",
268 queue->idx);
269 return -EPROTO;
270 }
271
272 recv_digest = *(__le32 *)(pdu + hdr->hlen);
273 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
274 exp_digest = *(__le32 *)(pdu + hdr->hlen);
275 if (recv_digest != exp_digest) {
276 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
277 queue->idx, le32_to_cpu(recv_digest),
278 le32_to_cpu(exp_digest));
279 return -EPROTO;
280 }
281
282 return 0;
283 }
284
285 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
286 {
287 struct nvme_tcp_hdr *hdr = pdu;
288 u8 digest_len = nvmet_tcp_hdgst_len(queue);
289 u32 len;
290
291 len = le32_to_cpu(hdr->plen) - hdr->hlen -
292 (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
293
294 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
295 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
296 return -EPROTO;
297 }
298
299 return 0;
300 }
301
302 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
303 {
304 WARN_ON(unlikely(cmd->nr_mapped > 0));
305
306 kfree(cmd->iov);
307 sgl_free(cmd->req.sg);
308 cmd->iov = NULL;
309 cmd->req.sg = NULL;
310 }
311
312 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
313 {
314 struct scatterlist *sg;
315 int i;
316
317 sg = &cmd->req.sg[cmd->sg_idx];
318
319 for (i = 0; i < cmd->nr_mapped; i++)
320 kunmap(sg_page(&sg[i]));
321
322 cmd->nr_mapped = 0;
323 }
324
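/*
 * Build a kvec array (allocated in nvmet_tcp_map_data()) over the command's
 * scatterlist so inline or H2C data can be received straight into the data
 * buffer with sock_recvmsg(). The pages are kmap()ed here and released in
 * nvmet_tcp_unmap_pdu_iovec() above once the data (or the queue) is done.
 */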
325 static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
326 {
327 struct kvec *iov = cmd->iov;
328 struct scatterlist *sg;
329 u32 length, offset, sg_offset;
330
331 length = cmd->pdu_len;
332 cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
333 offset = cmd->rbytes_done;
334 cmd->sg_idx = offset / PAGE_SIZE;
335 sg_offset = offset % PAGE_SIZE;
336 sg = &cmd->req.sg[cmd->sg_idx];
337
338 while (length) {
339 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
340
341 iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
342 iov->iov_len = iov_len;
343
344 length -= iov_len;
345 sg = sg_next(sg);
346 iov++;
347 sg_offset = 0;
348 }
349
350 iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
351 cmd->nr_mapped, cmd->pdu_len);
352 }
353
354 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
355 {
356 queue->rcv_state = NVMET_TCP_RECV_ERR;
357 if (queue->nvme_sq.ctrl)
358 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
359 else
360 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
361 }
362
363 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
364 {
365 if (status == -EPIPE || status == -ECONNRESET)
366 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
367 else
368 nvmet_tcp_fatal_error(queue);
369 }
370
371 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
372 {
373 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
374 u32 len = le32_to_cpu(sgl->length);
375
376 if (!len)
377 return 0;
378
379 if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
380 NVME_SGL_FMT_OFFSET)) {
381 if (!nvme_is_write(cmd->req.cmd))
382 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
383
384 if (len > cmd->req.port->inline_data_size)
385 return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
386 cmd->pdu_len = len;
387 }
388 cmd->req.transfer_len += len;
389
390 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
391 if (!cmd->req.sg)
392 return NVME_SC_INTERNAL;
393 cmd->cur_sg = cmd->req.sg;
394
395 if (nvmet_tcp_has_data_in(cmd)) {
396 cmd->iov = kmalloc_array(cmd->req.sg_cnt,
397 sizeof(*cmd->iov), GFP_KERNEL);
398 if (!cmd->iov)
399 goto err;
400 }
401
402 return 0;
403 err:
404 nvmet_tcp_free_cmd_buffers(cmd);
405 return NVME_SC_INTERNAL;
406 }
407
408 static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
409 struct nvmet_tcp_cmd *cmd)
410 {
411 ahash_request_set_crypt(hash, cmd->req.sg,
412 (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
413 crypto_ahash_digest(hash);
414 }
415
416 static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
417 struct nvmet_tcp_cmd *cmd)
418 {
419 struct scatterlist sg;
420 struct kvec *iov;
421 int i;
422
423 crypto_ahash_init(hash);
424 for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
425 sg_init_one(&sg, iov->iov_base, iov->iov_len);
426 ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
427 crypto_ahash_update(hash);
428 }
429 ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
430 crypto_ahash_final(hash);
431 }
432
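/*
 * Wire layout of the C2H data PDU built below (digests present only when
 * negotiated during ICReq/ICResp):
 *
 *   [ hdr (hlen) ][ HDGST ][ data (transfer_len) ][ DDGST ]
 *
 * hence plen = hlen + hdgst + transfer_len + ddgst and pdo points at the
 * first data byte.
 */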
433 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
434 {
435 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
436 struct nvmet_tcp_queue *queue = cmd->queue;
437 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
438 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
439
440 cmd->offset = 0;
441 cmd->state = NVMET_TCP_SEND_DATA_PDU;
442
443 pdu->hdr.type = nvme_tcp_c2h_data;
444 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
445 NVME_TCP_F_DATA_SUCCESS : 0);
446 pdu->hdr.hlen = sizeof(*pdu);
447 pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
448 pdu->hdr.plen =
449 cpu_to_le32(pdu->hdr.hlen + hdgst +
450 cmd->req.transfer_len + ddgst);
451 pdu->command_id = cmd->req.cqe->command_id;
452 pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
453 pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
454
455 if (queue->data_digest) {
456 pdu->hdr.flags |= NVME_TCP_F_DDGST;
457 nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
458 }
459
460 if (cmd->queue->hdr_digest) {
461 pdu->hdr.flags |= NVME_TCP_F_HDGST;
462 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
463 }
464 }
465
466 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
467 {
468 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
469 struct nvmet_tcp_queue *queue = cmd->queue;
470 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
471
472 cmd->offset = 0;
473 cmd->state = NVMET_TCP_SEND_R2T;
474
475 pdu->hdr.type = nvme_tcp_r2t;
476 pdu->hdr.flags = 0;
477 pdu->hdr.hlen = sizeof(*pdu);
478 pdu->hdr.pdo = 0;
479 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
480
481 pdu->command_id = cmd->req.cmd->common.command_id;
482 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
483 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
484 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
485 if (cmd->queue->hdr_digest) {
486 pdu->hdr.flags |= NVME_TCP_F_HDGST;
487 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
488 }
489 }
490
491 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
492 {
493 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
494 struct nvmet_tcp_queue *queue = cmd->queue;
495 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
496
497 cmd->offset = 0;
498 cmd->state = NVMET_TCP_SEND_RESPONSE;
499
500 pdu->hdr.type = nvme_tcp_rsp;
501 pdu->hdr.flags = 0;
502 pdu->hdr.hlen = sizeof(*pdu);
503 pdu->hdr.pdo = 0;
504 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
505 if (cmd->queue->hdr_digest) {
506 pdu->hdr.flags |= NVME_TCP_F_HDGST;
507 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
508 }
509 }
510
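/*
 * Completions may be queued from any context via nvmet_tcp_queue_response(),
 * so they are first collected on the lock-free resp_list (llist) and then
 * spliced onto resp_send_list by io_work, which is the only sender and can
 * therefore walk that list without additional locking.
 */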
511 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
512 {
513 struct llist_node *node;
514 struct nvmet_tcp_cmd *cmd;
515
516 for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
517 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
518 list_add(&cmd->entry, &queue->resp_send_list);
519 queue->send_list_len++;
520 }
521 }
522
523 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
524 {
525 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
526 struct nvmet_tcp_cmd, entry);
527 if (!queue->snd_cmd) {
528 nvmet_tcp_process_resp_list(queue);
529 queue->snd_cmd =
530 list_first_entry_or_null(&queue->resp_send_list,
531 struct nvmet_tcp_cmd, entry);
532 if (unlikely(!queue->snd_cmd))
533 return NULL;
534 }
535
536 list_del_init(&queue->snd_cmd->entry);
537 queue->send_list_len--;
538
539 if (nvmet_tcp_need_data_out(queue->snd_cmd))
540 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
541 else if (nvmet_tcp_need_data_in(queue->snd_cmd))
542 nvmet_setup_r2t_pdu(queue->snd_cmd);
543 else
544 nvmet_setup_response_pdu(queue->snd_cmd);
545
546 return queue->snd_cmd;
547 }
548
549 static void nvmet_tcp_queue_response(struct nvmet_req *req)
550 {
551 struct nvmet_tcp_cmd *cmd =
552 container_of(req, struct nvmet_tcp_cmd, req);
553 struct nvmet_tcp_queue *queue = cmd->queue;
554 struct nvme_sgl_desc *sgl;
555 u32 len;
556
557 if (unlikely(cmd == queue->cmd)) {
558 sgl = &cmd->req.cmd->common.dptr.sgl;
559 len = le32_to_cpu(sgl->length);
560
561 /*
562 * Wait for inline data before processing the response.
563 * Avoid using helpers, this might happen before
564 * nvmet_req_init is completed.
565 */
566 if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
567 len && len <= cmd->req.port->inline_data_size &&
568 nvme_is_write(cmd->req.cmd))
569 return;
570 }
571
572 llist_add(&cmd->lentry, &queue->resp_list);
573 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
574 }
575
576 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
577 {
578 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
579 nvmet_tcp_queue_response(&cmd->req);
580 else
581 cmd->req.execute(&cmd->req);
582 }
583
584 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
585 {
586 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
587 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
588 int ret;
589
590 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
591 offset_in_page(cmd->data_pdu) + cmd->offset,
592 left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
593 if (ret <= 0)
594 return ret;
595
596 cmd->offset += ret;
597 left -= ret;
598
599 if (left)
600 return -EAGAIN;
601
602 cmd->state = NVMET_TCP_SEND_DATA;
603 cmd->offset = 0;
604 return 1;
605 }
606
607 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
608 {
609 struct nvmet_tcp_queue *queue = cmd->queue;
610 int ret;
611
612 while (cmd->cur_sg) {
613 struct page *page = sg_page(cmd->cur_sg);
614 u32 left = cmd->cur_sg->length - cmd->offset;
615 int flags = MSG_DONTWAIT;
616
617 if ((!last_in_batch && cmd->queue->send_list_len) ||
618 cmd->wbytes_done + left < cmd->req.transfer_len ||
619 queue->data_digest || !queue->nvme_sq.sqhd_disabled)
620 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
621
622 ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
623 left, flags);
624 if (ret <= 0)
625 return ret;
626
627 cmd->offset += ret;
628 cmd->wbytes_done += ret;
629
630 /* Done with sg? */
631 if (cmd->offset == cmd->cur_sg->length) {
632 cmd->cur_sg = sg_next(cmd->cur_sg);
633 cmd->offset = 0;
634 }
635 }
636
637 if (queue->data_digest) {
638 cmd->state = NVMET_TCP_SEND_DDGST;
639 cmd->offset = 0;
640 } else {
641 if (queue->nvme_sq.sqhd_disabled) {
642 cmd->queue->snd_cmd = NULL;
643 nvmet_tcp_put_cmd(cmd);
644 } else {
645 nvmet_setup_response_pdu(cmd);
646 }
647 }
648
649 if (queue->nvme_sq.sqhd_disabled)
650 nvmet_tcp_free_cmd_buffers(cmd);
651
652 return 1;
653
654 }
655
656 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
657 bool last_in_batch)
658 {
659 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
660 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
661 int flags = MSG_DONTWAIT;
662 int ret;
663
664 if (!last_in_batch && cmd->queue->send_list_len)
665 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
666 else
667 flags |= MSG_EOR;
668
669 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
670 offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
671 if (ret <= 0)
672 return ret;
673 cmd->offset += ret;
674 left -= ret;
675
676 if (left)
677 return -EAGAIN;
678
679 nvmet_tcp_free_cmd_buffers(cmd);
680 cmd->queue->snd_cmd = NULL;
681 nvmet_tcp_put_cmd(cmd);
682 return 1;
683 }
684
685 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
686 {
687 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
688 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
689 int flags = MSG_DONTWAIT;
690 int ret;
691
692 if (!last_in_batch && cmd->queue->send_list_len)
693 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
694 else
695 flags |= MSG_EOR;
696
697 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
698 offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
699 if (ret <= 0)
700 return ret;
701 cmd->offset += ret;
702 left -= ret;
703
704 if (left)
705 return -EAGAIN;
706
707 cmd->queue->snd_cmd = NULL;
708 return 1;
709 }
710
711 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
712 {
713 struct nvmet_tcp_queue *queue = cmd->queue;
714 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
715 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
716 struct kvec iov = {
717 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
718 .iov_len = left
719 };
720 int ret;
721
722 if (!last_in_batch && cmd->queue->send_list_len)
723 msg.msg_flags |= MSG_MORE;
724 else
725 msg.msg_flags |= MSG_EOR;
726
727 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
728 if (unlikely(ret <= 0))
729 return ret;
730
731 cmd->offset += ret;
732 left -= ret;
733
734 if (left)
735 return -EAGAIN;
736
737 if (queue->nvme_sq.sqhd_disabled) {
738 cmd->queue->snd_cmd = NULL;
739 nvmet_tcp_put_cmd(cmd);
740 } else {
741 nvmet_setup_response_pdu(cmd);
742 }
743 return 1;
744 }
745
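/*
 * Advance the per-command send state machine (DATA_PDU -> DATA -> DDGST ->
 * RESPONSE, or a lone R2T/RESPONSE), transmitting as much as the socket
 * accepts without blocking. Returns 1 if progress was made, 0 if there is
 * nothing to send or the socket is full, and a negative value on errors.
 */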
746 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
747 bool last_in_batch)
748 {
749 struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
750 int ret = 0;
751
752 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
753 cmd = nvmet_tcp_fetch_cmd(queue);
754 if (unlikely(!cmd))
755 return 0;
756 }
757
758 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
759 ret = nvmet_try_send_data_pdu(cmd);
760 if (ret <= 0)
761 goto done_send;
762 }
763
764 if (cmd->state == NVMET_TCP_SEND_DATA) {
765 ret = nvmet_try_send_data(cmd, last_in_batch);
766 if (ret <= 0)
767 goto done_send;
768 }
769
770 if (cmd->state == NVMET_TCP_SEND_DDGST) {
771 ret = nvmet_try_send_ddgst(cmd, last_in_batch);
772 if (ret <= 0)
773 goto done_send;
774 }
775
776 if (cmd->state == NVMET_TCP_SEND_R2T) {
777 ret = nvmet_try_send_r2t(cmd, last_in_batch);
778 if (ret <= 0)
779 goto done_send;
780 }
781
782 if (cmd->state == NVMET_TCP_SEND_RESPONSE)
783 ret = nvmet_try_send_response(cmd, last_in_batch);
784
785 done_send:
786 if (ret < 0) {
787 if (ret == -EAGAIN)
788 return 0;
789 return ret;
790 }
791
792 return 1;
793 }
794
795 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
796 int budget, int *sends)
797 {
798 int i, ret = 0;
799
800 for (i = 0; i < budget; i++) {
801 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
802 if (unlikely(ret < 0)) {
803 nvmet_tcp_socket_error(queue, ret);
804 goto done;
805 } else if (ret == 0) {
806 break;
807 }
808 (*sends)++;
809 }
810 done:
811 return ret;
812 }
813
814 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
815 {
816 queue->offset = 0;
817 queue->left = sizeof(struct nvme_tcp_hdr);
818 queue->cmd = NULL;
819 queue->rcv_state = NVMET_TCP_RECV_PDU;
820 }
821
822 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
823 {
824 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
825
826 ahash_request_free(queue->rcv_hash);
827 ahash_request_free(queue->snd_hash);
828 crypto_free_ahash(tfm);
829 }
830
831 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
832 {
833 struct crypto_ahash *tfm;
834
835 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
836 if (IS_ERR(tfm))
837 return PTR_ERR(tfm);
838
839 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
840 if (!queue->snd_hash)
841 goto free_tfm;
842 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
843
844 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
845 if (!queue->rcv_hash)
846 goto free_snd_hash;
847 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
848
849 return 0;
850 free_snd_hash:
851 ahash_request_free(queue->snd_hash);
852 free_tfm:
853 crypto_free_ahash(tfm);
854 return -ENOMEM;
855 }
856
857
858 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
859 {
860 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
861 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
862 struct msghdr msg = {};
863 struct kvec iov;
864 int ret;
865
866 if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
867 pr_err("bad nvme-tcp pdu length (%d)\n",
868 le32_to_cpu(icreq->hdr.plen));
869 nvmet_tcp_fatal_error(queue);
return -EPROTO;
870 }
871
872 if (icreq->pfv != NVME_TCP_PFV_1_0) {
873 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
874 return -EPROTO;
875 }
876
877 if (icreq->hpda != 0) {
878 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
879 icreq->hpda);
880 return -EPROTO;
881 }
882
883 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
884 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
885 if (queue->hdr_digest || queue->data_digest) {
886 ret = nvmet_tcp_alloc_crypto(queue);
887 if (ret)
888 return ret;
889 }
890
891 memset(icresp, 0, sizeof(*icresp));
892 icresp->hdr.type = nvme_tcp_icresp;
893 icresp->hdr.hlen = sizeof(*icresp);
894 icresp->hdr.pdo = 0;
895 icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
896 icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
897 icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
898 icresp->cpda = 0;
899 if (queue->hdr_digest)
900 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
901 if (queue->data_digest)
902 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
903
904 iov.iov_base = icresp;
905 iov.iov_len = sizeof(*icresp);
906 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
907 if (ret < 0)
908 goto free_crypto;
909
910 queue->state = NVMET_TCP_Q_LIVE;
911 nvmet_prepare_receive_pdu(queue);
912 return 0;
913 free_crypto:
914 if (queue->hdr_digest || queue->data_digest)
915 nvmet_tcp_free_crypto(queue);
916 return ret;
917 }
918
919 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
920 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
921 {
922 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
923 int ret;
924
925 /*
926 * This command has not been processed yet, hence we are trying to
927 * figure out if there is still pending data left to receive. If
928 * there is not, we can simply prepare for the next pdu and bail out,
929 * otherwise we will need to prepare a buffer and receive the
930 * stale data before continuing forward.
931 */
932 if (!nvme_is_write(cmd->req.cmd) || !data_len ||
933 data_len > cmd->req.port->inline_data_size) {
934 nvmet_prepare_receive_pdu(queue);
935 return;
936 }
937
938 ret = nvmet_tcp_map_data(cmd);
939 if (unlikely(ret)) {
940 pr_err("queue %d: failed to map data\n", queue->idx);
941 nvmet_tcp_fatal_error(queue);
942 return;
943 }
944
945 queue->rcv_state = NVMET_TCP_RECV_DATA;
946 nvmet_tcp_map_pdu_iovec(cmd);
947 cmd->flags |= NVMET_TCP_F_INIT_FAILED;
948 }
949
950 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
951 {
952 struct nvme_tcp_data_pdu *data = &queue->pdu.data;
953 struct nvmet_tcp_cmd *cmd;
954
955 if (likely(queue->nr_cmds))
956 cmd = &queue->cmds[data->ttag];
957 else
958 cmd = &queue->connect;
959
960 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
961 pr_err("ttag %u unexpected data offset %u (expected %u)\n",
962 data->ttag, le32_to_cpu(data->data_offset),
963 cmd->rbytes_done);
964 /* FIXME: use path and transport errors */
965 nvmet_req_complete(&cmd->req,
966 NVME_SC_INVALID_FIELD | NVME_SC_DNR);
967 return -EPROTO;
968 }
969
970 cmd->pdu_len = le32_to_cpu(data->data_length);
971 cmd->pdu_recv = 0;
972 nvmet_tcp_map_pdu_iovec(cmd);
973 queue->cmd = cmd;
974 queue->rcv_state = NVMET_TCP_RECV_DATA;
975
976 return 0;
977 }
978
979 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
980 {
981 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
982 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
983 struct nvmet_req *req;
984 int ret;
985
986 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
987 if (hdr->type != nvme_tcp_icreq) {
988 pr_err("unexpected pdu type (%d) before icreq\n",
989 hdr->type);
990 nvmet_tcp_fatal_error(queue);
991 return -EPROTO;
992 }
993 return nvmet_tcp_handle_icreq(queue);
994 }
995
996 if (hdr->type == nvme_tcp_h2c_data) {
997 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
998 if (unlikely(ret))
999 return ret;
1000 return 0;
1001 }
1002
1003 queue->cmd = nvmet_tcp_get_cmd(queue);
1004 if (unlikely(!queue->cmd)) {
1005 /* This should never happen */
1006 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
1007 queue->idx, queue->nr_cmds, queue->send_list_len,
1008 nvme_cmd->common.opcode);
1009 nvmet_tcp_fatal_error(queue);
1010 return -ENOMEM;
1011 }
1012
1013 req = &queue->cmd->req;
1014 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
1015
1016 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
1017 &queue->nvme_sq, &nvmet_tcp_ops))) {
1018 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
1019 req->cmd, req->cmd->common.command_id,
1020 req->cmd->common.opcode,
1021 le32_to_cpu(req->cmd->common.dptr.sgl.length));
1022
1023 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1024 return 0;
1025 }
1026
1027 ret = nvmet_tcp_map_data(queue->cmd);
1028 if (unlikely(ret)) {
1029 pr_err("queue %d: failed to map data\n", queue->idx);
1030 if (nvmet_tcp_has_inline_data(queue->cmd))
1031 nvmet_tcp_fatal_error(queue);
1032 else
1033 nvmet_req_complete(req, ret);
1034 ret = -EAGAIN;
1035 goto out;
1036 }
1037
1038 if (nvmet_tcp_need_data_in(queue->cmd)) {
1039 if (nvmet_tcp_has_inline_data(queue->cmd)) {
1040 queue->rcv_state = NVMET_TCP_RECV_DATA;
1041 nvmet_tcp_map_pdu_iovec(queue->cmd);
1042 return 0;
1043 }
1044 /* send back R2T */
1045 nvmet_tcp_queue_response(&queue->cmd->req);
1046 goto out;
1047 }
1048
1049 queue->cmd->req.execute(&queue->cmd->req);
1050 out:
1051 nvmet_prepare_receive_pdu(queue);
1052 return ret;
1053 }
1054
1055 static const u8 nvme_tcp_pdu_sizes[] = {
1056 [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu),
1057 [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu),
1058 [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu),
1059 };
1060
1061 static inline u8 nvmet_tcp_pdu_size(u8 type)
1062 {
1063 size_t idx = type;
1064
1065 return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
1066 nvme_tcp_pdu_sizes[idx]) ?
1067 nvme_tcp_pdu_sizes[idx] : 0;
1068 }
1069
1070 static inline bool nvmet_tcp_pdu_valid(u8 type)
1071 {
1072 switch (type) {
1073 case nvme_tcp_icreq:
1074 case nvme_tcp_cmd:
1075 case nvme_tcp_h2c_data:
1076 /* fallthru */
1077 return true;
1078 }
1079
1080 return false;
1081 }
1082
1083 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1084 {
1085 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1086 int len;
1087 struct kvec iov;
1088 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1089
1090 recv:
1091 iov.iov_base = (void *)&queue->pdu + queue->offset;
1092 iov.iov_len = queue->left;
1093 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1094 iov.iov_len, msg.msg_flags);
1095 if (unlikely(len < 0))
1096 return len;
1097
1098 queue->offset += len;
1099 queue->left -= len;
1100 if (queue->left)
1101 return -EAGAIN;
1102
1103 if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1104 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1105
1106 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
1107 pr_err("unexpected pdu type %d\n", hdr->type);
1108 nvmet_tcp_fatal_error(queue);
1109 return -EIO;
1110 }
1111
1112 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1113 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1114 return -EIO;
1115 }
1116
1117 queue->left = hdr->hlen - queue->offset + hdgst;
1118 goto recv;
1119 }
1120
1121 if (queue->hdr_digest &&
1122 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
1123 nvmet_tcp_fatal_error(queue); /* fatal */
1124 return -EPROTO;
1125 }
1126
1127 if (queue->data_digest &&
1128 nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1129 nvmet_tcp_fatal_error(queue); /* fatal */
1130 return -EPROTO;
1131 }
1132
1133 return nvmet_tcp_done_recv_pdu(queue);
1134 }
1135
1136 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1137 {
1138 struct nvmet_tcp_queue *queue = cmd->queue;
1139
1140 nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
1141 queue->offset = 0;
1142 queue->left = NVME_TCP_DIGEST_LENGTH;
1143 queue->rcv_state = NVMET_TCP_RECV_DDGST;
1144 }
1145
1146 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1147 {
1148 struct nvmet_tcp_cmd *cmd = queue->cmd;
1149 int ret;
1150
1151 while (msg_data_left(&cmd->recv_msg)) {
1152 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1153 cmd->recv_msg.msg_flags);
1154 if (ret <= 0)
1155 return ret;
1156
1157 cmd->pdu_recv += ret;
1158 cmd->rbytes_done += ret;
1159 }
1160
1161 nvmet_tcp_unmap_pdu_iovec(cmd);
1162 if (queue->data_digest) {
1163 nvmet_tcp_prep_recv_ddgst(cmd);
1164 return 0;
1165 }
1166
1167 if (cmd->rbytes_done == cmd->req.transfer_len)
1168 nvmet_tcp_execute_request(cmd);
1169
1170 nvmet_prepare_receive_pdu(queue);
1171 return 0;
1172 }
1173
1174 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1175 {
1176 struct nvmet_tcp_cmd *cmd = queue->cmd;
1177 int ret;
1178 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1179 struct kvec iov = {
1180 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1181 .iov_len = queue->left
1182 };
1183
1184 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1185 iov.iov_len, msg.msg_flags);
1186 if (unlikely(ret < 0))
1187 return ret;
1188
1189 queue->offset += ret;
1190 queue->left -= ret;
1191 if (queue->left)
1192 return -EAGAIN;
1193
1194 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1195 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1196 queue->idx, cmd->req.cmd->common.command_id,
1197 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1198 le32_to_cpu(cmd->exp_ddgst));
1199 nvmet_tcp_finish_cmd(cmd);
1200 nvmet_tcp_fatal_error(queue);
1201 ret = -EPROTO;
1202 goto out;
1203 }
1204
1205 if (cmd->rbytes_done == cmd->req.transfer_len)
1206 nvmet_tcp_execute_request(cmd);
1207
1208 ret = 0;
1209 out:
1210 nvmet_prepare_receive_pdu(queue);
1211 return ret;
1212 }
1213
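/*
 * Receive-side counterpart of the send path: consume a PDU header, then any
 * inline/H2C data, then the data digest, with each step selected by
 * queue->rcv_state. Returns 1 on progress, 0 when no more data is available
 * (or the queue is in error), and a negative value on fatal errors.
 */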
1214 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1215 {
1216 int result = 0;
1217
1218 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1219 return 0;
1220
1221 if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1222 result = nvmet_tcp_try_recv_pdu(queue);
1223 if (result != 0)
1224 goto done_recv;
1225 }
1226
1227 if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1228 result = nvmet_tcp_try_recv_data(queue);
1229 if (result != 0)
1230 goto done_recv;
1231 }
1232
1233 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1234 result = nvmet_tcp_try_recv_ddgst(queue);
1235 if (result != 0)
1236 goto done_recv;
1237 }
1238
1239 done_recv:
1240 if (result < 0) {
1241 if (result == -EAGAIN)
1242 return 0;
1243 return result;
1244 }
1245 return 1;
1246 }
1247
1248 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1249 int budget, int *recvs)
1250 {
1251 int i, ret = 0;
1252
1253 for (i = 0; i < budget; i++) {
1254 ret = nvmet_tcp_try_recv_one(queue);
1255 if (unlikely(ret < 0)) {
1256 nvmet_tcp_socket_error(queue, ret);
1257 goto done;
1258 } else if (ret == 0) {
1259 break;
1260 }
1261 (*recvs)++;
1262 }
1263 done:
1264 return ret;
1265 }
1266
1267 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1268 {
1269 spin_lock(&queue->state_lock);
1270 if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1271 queue->state = NVMET_TCP_Q_DISCONNECTING;
1272 schedule_work(&queue->release_work);
1273 }
1274 spin_unlock(&queue->state_lock);
1275 }
1276
1277 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
1278 {
1279 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
1280 }
1281
1282 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
1283 int ops)
1284 {
1285 if (!idle_poll_period_usecs)
1286 return false;
1287
1288 if (ops)
1289 nvmet_tcp_arm_queue_deadline(queue);
1290
1291 return !time_after(jiffies, queue->poll_end);
1292 }
1293
1294 static void nvmet_tcp_io_work(struct work_struct *w)
1295 {
1296 struct nvmet_tcp_queue *queue =
1297 container_of(w, struct nvmet_tcp_queue, io_work);
1298 bool pending;
1299 int ret, ops = 0;
1300
1301 do {
1302 pending = false;
1303
1304 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1305 if (ret > 0)
1306 pending = true;
1307 else if (ret < 0)
1308 return;
1309
1310 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1311 if (ret > 0)
1312 pending = true;
1313 else if (ret < 0)
1314 return;
1315
1316 } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1317
1318 /*
1319 * Requeue the worker if the idle deadline period is in progress or any
1320 * ops activity was recorded during the do-while loop above.
1321 */
1322 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
1323 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1324 }
1325
1326 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1327 struct nvmet_tcp_cmd *c)
1328 {
1329 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1330
1331 c->queue = queue;
1332 c->req.port = queue->port->nport;
1333
1334 c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1335 sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1336 if (!c->cmd_pdu)
1337 return -ENOMEM;
1338 c->req.cmd = &c->cmd_pdu->cmd;
1339
1340 c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1341 sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1342 if (!c->rsp_pdu)
1343 goto out_free_cmd;
1344 c->req.cqe = &c->rsp_pdu->cqe;
1345
1346 c->data_pdu = page_frag_alloc(&queue->pf_cache,
1347 sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1348 if (!c->data_pdu)
1349 goto out_free_rsp;
1350
1351 c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1352 sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1353 if (!c->r2t_pdu)
1354 goto out_free_data;
1355
1356 c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1357
1358 list_add_tail(&c->entry, &queue->free_list);
1359
1360 return 0;
1361 out_free_data:
1362 page_frag_free(c->data_pdu);
1363 out_free_rsp:
1364 page_frag_free(c->rsp_pdu);
1365 out_free_cmd:
1366 page_frag_free(c->cmd_pdu);
1367 return -ENOMEM;
1368 }
1369
1370 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1371 {
1372 page_frag_free(c->r2t_pdu);
1373 page_frag_free(c->data_pdu);
1374 page_frag_free(c->rsp_pdu);
1375 page_frag_free(c->cmd_pdu);
1376 }
1377
1378 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1379 {
1380 struct nvmet_tcp_cmd *cmds;
1381 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1382
1383 cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
1384 if (!cmds)
1385 goto out;
1386
1387 for (i = 0; i < nr_cmds; i++) {
1388 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1389 if (ret)
1390 goto out_free;
1391 }
1392
1393 queue->cmds = cmds;
1394
1395 return 0;
1396 out_free:
1397 while (--i >= 0)
1398 nvmet_tcp_free_cmd(cmds + i);
1399 kfree(cmds);
1400 out:
1401 return ret;
1402 }
1403
1404 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1405 {
1406 struct nvmet_tcp_cmd *cmds = queue->cmds;
1407 int i;
1408
1409 for (i = 0; i < queue->nr_cmds; i++)
1410 nvmet_tcp_free_cmd(cmds + i);
1411
1412 nvmet_tcp_free_cmd(&queue->connect);
1413 kfree(cmds);
1414 }
1415
1416 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1417 {
1418 struct socket *sock = queue->sock;
1419
1420 write_lock_bh(&sock->sk->sk_callback_lock);
1421 sock->sk->sk_data_ready = queue->data_ready;
1422 sock->sk->sk_state_change = queue->state_change;
1423 sock->sk->sk_write_space = queue->write_space;
1424 sock->sk->sk_user_data = NULL;
1425 write_unlock_bh(&sock->sk->sk_callback_lock);
1426 }
1427
1428 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
1429 {
1430 nvmet_req_uninit(&cmd->req);
1431 nvmet_tcp_unmap_pdu_iovec(cmd);
1432 nvmet_tcp_free_cmd_buffers(cmd);
1433 }
1434
1435 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1436 {
1437 struct nvmet_tcp_cmd *cmd = queue->cmds;
1438 int i;
1439
1440 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1441 if (nvmet_tcp_need_data_in(cmd))
1442 nvmet_req_uninit(&cmd->req);
1443
1444 nvmet_tcp_unmap_pdu_iovec(cmd);
1445 nvmet_tcp_free_cmd_buffers(cmd);
1446 }
1447
1448 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1449 /* failed in connect */
1450 nvmet_tcp_finish_cmd(&queue->connect);
1451 }
1452 }
1453
1454 static void nvmet_tcp_release_queue_work(struct work_struct *w)
1455 {
1456 struct page *page;
1457 struct nvmet_tcp_queue *queue =
1458 container_of(w, struct nvmet_tcp_queue, release_work);
1459
1460 mutex_lock(&nvmet_tcp_queue_mutex);
1461 list_del_init(&queue->queue_list);
1462 mutex_unlock(&nvmet_tcp_queue_mutex);
1463
1464 nvmet_tcp_restore_socket_callbacks(queue);
1465 cancel_work_sync(&queue->io_work);
1466 /* stop accepting incoming data */
1467 queue->rcv_state = NVMET_TCP_RECV_ERR;
1468
1469 nvmet_tcp_uninit_data_in_cmds(queue);
1470 nvmet_sq_destroy(&queue->nvme_sq);
1471 cancel_work_sync(&queue->io_work);
1472 sock_release(queue->sock);
1473 nvmet_tcp_free_cmds(queue);
1474 if (queue->hdr_digest || queue->data_digest)
1475 nvmet_tcp_free_crypto(queue);
1476 ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
1477
1478 page = virt_to_head_page(queue->pf_cache.va);
1479 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1480 kfree(queue);
1481 }
1482
1483 static void nvmet_tcp_data_ready(struct sock *sk)
1484 {
1485 struct nvmet_tcp_queue *queue;
1486
1487 read_lock_bh(&sk->sk_callback_lock);
1488 queue = sk->sk_user_data;
1489 if (likely(queue))
1490 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1491 read_unlock_bh(&sk->sk_callback_lock);
1492 }
1493
1494 static void nvmet_tcp_write_space(struct sock *sk)
1495 {
1496 struct nvmet_tcp_queue *queue;
1497
1498 read_lock_bh(&sk->sk_callback_lock);
1499 queue = sk->sk_user_data;
1500 if (unlikely(!queue))
1501 goto out;
1502
1503 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1504 queue->write_space(sk);
1505 goto out;
1506 }
1507
1508 if (sk_stream_is_writeable(sk)) {
1509 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1510 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1511 }
1512 out:
1513 read_unlock_bh(&sk->sk_callback_lock);
1514 }
1515
1516 static void nvmet_tcp_state_change(struct sock *sk)
1517 {
1518 struct nvmet_tcp_queue *queue;
1519
1520 read_lock_bh(&sk->sk_callback_lock);
1521 queue = sk->sk_user_data;
1522 if (!queue)
1523 goto done;
1524
1525 switch (sk->sk_state) {
1526 case TCP_FIN_WAIT1:
1527 case TCP_CLOSE_WAIT:
1528 case TCP_CLOSE:
1529 /* FALLTHRU */
1530 nvmet_tcp_schedule_release_queue(queue);
1531 break;
1532 default:
1533 pr_warn("queue %d unhandled state %d\n",
1534 queue->idx, sk->sk_state);
1535 }
1536 done:
1537 read_unlock_bh(&sk->sk_callback_lock);
1538 }
1539
1540 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1541 {
1542 struct socket *sock = queue->sock;
1543 struct inet_sock *inet = inet_sk(sock->sk);
1544 int ret;
1545
1546 ret = kernel_getsockname(sock,
1547 (struct sockaddr *)&queue->sockaddr);
1548 if (ret < 0)
1549 return ret;
1550
1551 ret = kernel_getpeername(sock,
1552 (struct sockaddr *)&queue->sockaddr_peer);
1553 if (ret < 0)
1554 return ret;
1555
1556 /*
1557 * Clean up whatever is sitting in the TCP transmit queue on socket
1558 * close. This is done to prevent stale data from being sent should
1559 * the network connection be restored before TCP times out.
1560 */
1561 sock_no_linger(sock->sk);
1562
1563 if (so_priority > 0)
1564 sock_set_priority(sock->sk, so_priority);
1565
1566 /* Set socket type of service */
1567 if (inet->rcv_tos > 0)
1568 ip_sock_set_tos(sock->sk, inet->rcv_tos);
1569
1570 ret = 0;
1571 write_lock_bh(&sock->sk->sk_callback_lock);
1572 if (sock->sk->sk_state != TCP_ESTABLISHED) {
1573 /*
1574 * If the socket is already closing, don't even start
1575 * consuming it
1576 */
1577 ret = -ENOTCONN;
1578 } else {
1579 sock->sk->sk_user_data = queue;
1580 queue->data_ready = sock->sk->sk_data_ready;
1581 sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1582 queue->state_change = sock->sk->sk_state_change;
1583 sock->sk->sk_state_change = nvmet_tcp_state_change;
1584 queue->write_space = sock->sk->sk_write_space;
1585 sock->sk->sk_write_space = nvmet_tcp_write_space;
1586 if (idle_poll_period_usecs)
1587 nvmet_tcp_arm_queue_deadline(queue);
1588 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1589 }
1590 write_unlock_bh(&sock->sk->sk_callback_lock);
1591
1592 return ret;
1593 }
1594
1595 static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1596 struct socket *newsock)
1597 {
1598 struct nvmet_tcp_queue *queue;
1599 int ret;
1600
1601 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1602 if (!queue)
1603 return -ENOMEM;
1604
1605 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1606 INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1607 queue->sock = newsock;
1608 queue->port = port;
1609 queue->nr_cmds = 0;
1610 spin_lock_init(&queue->state_lock);
1611 queue->state = NVMET_TCP_Q_CONNECTING;
1612 INIT_LIST_HEAD(&queue->free_list);
1613 init_llist_head(&queue->resp_list);
1614 INIT_LIST_HEAD(&queue->resp_send_list);
1615
1616 queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
1617 if (queue->idx < 0) {
1618 ret = queue->idx;
1619 goto out_free_queue;
1620 }
1621
1622 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1623 if (ret)
1624 goto out_ida_remove;
1625
1626 ret = nvmet_sq_init(&queue->nvme_sq);
1627 if (ret)
1628 goto out_free_connect;
1629
1630 nvmet_prepare_receive_pdu(queue);
1631
1632 mutex_lock(&nvmet_tcp_queue_mutex);
1633 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1634 mutex_unlock(&nvmet_tcp_queue_mutex);
1635
1636 ret = nvmet_tcp_set_queue_sock(queue);
1637 if (ret)
1638 goto out_destroy_sq;
1639
1640 return 0;
1641 out_destroy_sq:
1642 mutex_lock(&nvmet_tcp_queue_mutex);
1643 list_del_init(&queue->queue_list);
1644 mutex_unlock(&nvmet_tcp_queue_mutex);
1645 nvmet_sq_destroy(&queue->nvme_sq);
1646 out_free_connect:
1647 nvmet_tcp_free_cmd(&queue->connect);
1648 out_ida_remove:
1649 ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
1650 out_free_queue:
1651 kfree(queue);
1652 return ret;
1653 }
1654
1655 static void nvmet_tcp_accept_work(struct work_struct *w)
1656 {
1657 struct nvmet_tcp_port *port =
1658 container_of(w, struct nvmet_tcp_port, accept_work);
1659 struct socket *newsock;
1660 int ret;
1661
1662 while (true) {
1663 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
1664 if (ret < 0) {
1665 if (ret != -EAGAIN)
1666 pr_warn("failed to accept err=%d\n", ret);
1667 return;
1668 }
1669 ret = nvmet_tcp_alloc_queue(port, newsock);
1670 if (ret) {
1671 pr_err("failed to allocate queue\n");
1672 sock_release(newsock);
1673 }
1674 }
1675 }
1676
1677 static void nvmet_tcp_listen_data_ready(struct sock *sk)
1678 {
1679 struct nvmet_tcp_port *port;
1680
1681 read_lock_bh(&sk->sk_callback_lock);
1682 port = sk->sk_user_data;
1683 if (!port)
1684 goto out;
1685
1686 if (sk->sk_state == TCP_LISTEN)
1687 schedule_work(&port->accept_work);
1688 out:
1689 read_unlock_bh(&sk->sk_callback_lock);
1690 }
1691
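/*
 * Example (hypothetical addresses): a TCP port is created through the nvmet
 * configfs interface, which ends up calling nvmet_tcp_add_port() once the
 * port is enabled (i.e. when the first subsystem is linked under it):
 *
 *   mkdir /sys/kernel/config/nvmet/ports/1
 *   echo tcp      > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *   echo ipv4     > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *   echo 10.0.0.1 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *   echo 4420     > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 */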
1692 static int nvmet_tcp_add_port(struct nvmet_port *nport)
1693 {
1694 struct nvmet_tcp_port *port;
1695 __kernel_sa_family_t af;
1696 int ret;
1697
1698 port = kzalloc(sizeof(*port), GFP_KERNEL);
1699 if (!port)
1700 return -ENOMEM;
1701
1702 switch (nport->disc_addr.adrfam) {
1703 case NVMF_ADDR_FAMILY_IP4:
1704 af = AF_INET;
1705 break;
1706 case NVMF_ADDR_FAMILY_IP6:
1707 af = AF_INET6;
1708 break;
1709 default:
1710 pr_err("address family %d not supported\n",
1711 nport->disc_addr.adrfam);
1712 ret = -EINVAL;
1713 goto err_port;
1714 }
1715
1716 ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1717 nport->disc_addr.trsvcid, &port->addr);
1718 if (ret) {
1719 pr_err("malformed ip/port passed: %s:%s\n",
1720 nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1721 goto err_port;
1722 }
1723
1724 port->nport = nport;
1725 INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
1726 if (port->nport->inline_data_size < 0)
1727 port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
1728
1729 ret = sock_create(port->addr.ss_family, SOCK_STREAM,
1730 IPPROTO_TCP, &port->sock);
1731 if (ret) {
1732 pr_err("failed to create a socket\n");
1733 goto err_port;
1734 }
1735
1736 port->sock->sk->sk_user_data = port;
1737 port->data_ready = port->sock->sk->sk_data_ready;
1738 port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
1739 sock_set_reuseaddr(port->sock->sk);
1740 tcp_sock_set_nodelay(port->sock->sk);
1741 if (so_priority > 0)
1742 sock_set_priority(port->sock->sk, so_priority);
1743
1744 ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
1745 sizeof(port->addr));
1746 if (ret) {
1747 pr_err("failed to bind port socket %d\n", ret);
1748 goto err_sock;
1749 }
1750
1751 ret = kernel_listen(port->sock, 128);
1752 if (ret) {
1753 pr_err("failed to listen %d on port sock\n", ret);
1754 goto err_sock;
1755 }
1756
1757 nport->priv = port;
1758 pr_info("enabling port %d (%pISpc)\n",
1759 le16_to_cpu(nport->disc_addr.portid), &port->addr);
1760
1761 return 0;
1762
1763 err_sock:
1764 sock_release(port->sock);
1765 err_port:
1766 kfree(port);
1767 return ret;
1768 }
1769
1770 static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
1771 {
1772 struct nvmet_tcp_queue *queue;
1773
1774 mutex_lock(&nvmet_tcp_queue_mutex);
1775 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1776 if (queue->port == port)
1777 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1778 mutex_unlock(&nvmet_tcp_queue_mutex);
1779 }
1780
1781 static void nvmet_tcp_remove_port(struct nvmet_port *nport)
1782 {
1783 struct nvmet_tcp_port *port = nport->priv;
1784
1785 write_lock_bh(&port->sock->sk->sk_callback_lock);
1786 port->sock->sk->sk_data_ready = port->data_ready;
1787 port->sock->sk->sk_user_data = NULL;
1788 write_unlock_bh(&port->sock->sk->sk_callback_lock);
1789 cancel_work_sync(&port->accept_work);
1790 /*
1791 * Destroy the remaining queues, which do not belong to any
1792 * controller yet.
1793 */
1794 nvmet_tcp_destroy_port_queues(port);
1795
1796 sock_release(port->sock);
1797 kfree(port);
1798 }
1799
1800 static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
1801 {
1802 struct nvmet_tcp_queue *queue;
1803
1804 mutex_lock(&nvmet_tcp_queue_mutex);
1805 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1806 if (queue->nvme_sq.ctrl == ctrl)
1807 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1808 mutex_unlock(&nvmet_tcp_queue_mutex);
1809 }
1810
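/*
 * The per-queue command pool below is sized to twice the SQ depth,
 * presumably so that commands whose C2H data or responses are still being
 * transmitted do not block admission of newly received command PDUs; the
 * exact factor is a transport implementation choice, not mandated by the
 * NVMe/TCP specification.
 */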
1811 static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
1812 {
1813 struct nvmet_tcp_queue *queue =
1814 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
1815
1816 if (sq->qid == 0) {
1817 /* Let inflight controller teardown complete */
1818 flush_scheduled_work();
1819 }
1820
1821 queue->nr_cmds = sq->size * 2;
1822 if (nvmet_tcp_alloc_cmds(queue))
1823 return NVME_SC_INTERNAL;
1824 return 0;
1825 }
1826
1827 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
1828 struct nvmet_port *nport, char *traddr)
1829 {
1830 struct nvmet_tcp_port *port = nport->priv;
1831
1832 if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
1833 struct nvmet_tcp_cmd *cmd =
1834 container_of(req, struct nvmet_tcp_cmd, req);
1835 struct nvmet_tcp_queue *queue = cmd->queue;
1836
1837 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
1838 } else {
1839 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
1840 }
1841 }
1842
1843 static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
1844 .owner = THIS_MODULE,
1845 .type = NVMF_TRTYPE_TCP,
1846 .msdbd = 1,
1847 .add_port = nvmet_tcp_add_port,
1848 .remove_port = nvmet_tcp_remove_port,
1849 .queue_response = nvmet_tcp_queue_response,
1850 .delete_ctrl = nvmet_tcp_delete_ctrl,
1851 .install_queue = nvmet_tcp_install_queue,
1852 .disc_traddr = nvmet_tcp_disc_port_addr,
1853 };
1854
1855 static int __init nvmet_tcp_init(void)
1856 {
1857 int ret;
1858
1859 nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
1860 if (!nvmet_tcp_wq)
1861 return -ENOMEM;
1862
1863 ret = nvmet_register_transport(&nvmet_tcp_ops);
1864 if (ret)
1865 goto err;
1866
1867 return 0;
1868 err:
1869 destroy_workqueue(nvmet_tcp_wq);
1870 return ret;
1871 }
1872
1873 static void __exit nvmet_tcp_exit(void)
1874 {
1875 struct nvmet_tcp_queue *queue;
1876
1877 nvmet_unregister_transport(&nvmet_tcp_ops);
1878
1879 flush_scheduled_work();
1880 mutex_lock(&nvmet_tcp_queue_mutex);
1881 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1882 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1883 mutex_unlock(&nvmet_tcp_queue_mutex);
1884 flush_scheduled_work();
1885
1886 destroy_workqueue(nvmet_tcp_wq);
1887 }
1888
1889 module_init(nvmet_tcp_init);
1890 module_exit(nvmet_tcp_exit);
1891
1892 MODULE_LICENSE("GPL v2");
1893 MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
1894