Lines matching refs: queue
(Identifier cross-reference over the Linux NVMe/TCP host driver, drivers/nvme/host/tcp.c. Each entry gives the source line number, the matching source line, and the enclosing function; the trailing tag notes how "queue" is used there: member, argument, or local.)

43 	struct nvme_tcp_queue	*queue;  member
140 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
147 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) in nvme_tcp_queue_id() argument
149 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
152 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) in nvme_tcp_tagset() argument
154 u32 queue_idx = nvme_tcp_queue_id(queue); in nvme_tcp_tagset()
157 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
158 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
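Taken together, fragments 147-158 give the two indexing helpers almost in full. A reconstruction follows; the admin-queue branch (the "if (queue_idx == 0)" test) is not visible in the listing and is an assumption based on queue 0 being the admin queue:

	static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
	{
		/* all queues live in one array on the ctrl, so index = pointer difference */
		return queue - queue->ctrl->queues;
	}

	static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
	{
		u32 queue_idx = nvme_tcp_queue_id(queue);

		if (queue_idx == 0)	/* assumed: queue 0 is the admin queue */
			return queue->ctrl->admin_tag_set.tags[queue_idx];
		return queue->ctrl->tag_set.tags[queue_idx - 1];
	}
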
161 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue) in nvme_tcp_hdgst_len() argument
163 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
166 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue) in nvme_tcp_ddgst_len() argument
168 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
171 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue) in nvme_tcp_inline_data_size() argument
173 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
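For scale, a worked example (the numbers are illustrative, not from the listing): struct nvme_command is 64 bytes, and cmnd_capsule_len is set at queue allocation (lines 1417-1419 below) to ioccsz * 16 on I/O queues. A controller advertising ioccsz = 4 therefore leaves 4 * 16 - 64 = 0 inline bytes, while ioccsz = 260 leaves 4160 - 64 = 4096 bytes, one page of write data carried inside the command capsule. On the admin queue the capsule is sized as sizeof(struct nvme_command) + PAGE_SIZE, i.e. exactly one page of inline data.
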
178 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
191 req->data_len <= nvme_tcp_inline_data_size(req->queue); in nvme_tcp_has_inline_data()
267 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) in nvme_tcp_send_all() argument
273 ret = nvme_tcp_try_send(queue); in nvme_tcp_send_all()
277 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) in nvme_tcp_queue_more() argument
279 return !list_empty(&queue->send_list) || in nvme_tcp_queue_more()
280 !llist_empty(&queue->req_list) || queue->more_requests; in nvme_tcp_queue_more()
286 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request() local
289 empty = llist_add(&req->lentry, &queue->req_list) && in nvme_tcp_queue_request()
290 list_empty(&queue->send_list) && !queue->request; in nvme_tcp_queue_request()
297 if (queue->io_cpu == raw_smp_processor_id() && in nvme_tcp_queue_request()
298 sync && empty && mutex_trylock(&queue->send_mutex)) { in nvme_tcp_queue_request()
299 queue->more_requests = !last; in nvme_tcp_queue_request()
300 nvme_tcp_send_all(queue); in nvme_tcp_queue_request()
301 queue->more_requests = false; in nvme_tcp_queue_request()
302 mutex_unlock(&queue->send_mutex); in nvme_tcp_queue_request()
305 if (last && nvme_tcp_queue_more(queue)) in nvme_tcp_queue_request()
306 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
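Fragments 286-306 cover nearly all of nvme_tcp_queue_request(). A reconstruction, with only the prototype and local declarations filled in: a submitter pushes onto the lock-free req_list, then opportunistically sends inline when conditions allow, otherwise leaving the send to io_work.

	static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
			bool sync, bool last)
	{
		struct nvme_tcp_queue *queue = req->queue;
		bool empty;

		empty = llist_add(&req->lentry, &queue->req_list) &&
			list_empty(&queue->send_list) && !queue->request;

		/*
		 * Send directly only when running on the queue's io_cpu (so no
		 * cross-CPU contention is introduced), the caller allows a
		 * synchronous send, the queue was empty, and the send_mutex is
		 * free; otherwise kick the io_work worker.
		 */
		if (queue->io_cpu == raw_smp_processor_id() &&
		    sync && empty && mutex_trylock(&queue->send_mutex)) {
			queue->more_requests = !last;
			nvme_tcp_send_all(queue);
			queue->more_requests = false;
			mutex_unlock(&queue->send_mutex);
		}

		if (last && nvme_tcp_queue_more(queue))
			queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
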
309 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue) in nvme_tcp_process_req_list() argument
314 for (node = llist_del_all(&queue->req_list); node; node = node->next) { in nvme_tcp_process_req_list()
316 list_add(&req->entry, &queue->send_list); in nvme_tcp_process_req_list()
321 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue) in nvme_tcp_fetch_request() argument
325 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
328 nvme_tcp_process_req_list(queue); in nvme_tcp_fetch_request()
329 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
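The producer/consumer split above (a lock-free llist fed by submitters, spliced into a private send_list by the single consumer) reconstructs as below. One subtlety worth a comment: llist_del_all() returns nodes in reverse insertion order and list_add() prepends, so the two reversals cancel and send_list ends up in FIFO order. The final list_del() is not visible in the listing and is an assumption:

	static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
	{
		struct nvme_tcp_request *req;
		struct llist_node *node;

		for (node = llist_del_all(&queue->req_list); node; node = node->next) {
			req = llist_entry(node, struct nvme_tcp_request, lentry);
			list_add(&req->entry, &queue->send_list);
		}
	}

	static inline struct nvme_tcp_request *
	nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
	{
		struct nvme_tcp_request *req;

		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (!req) {
			/* send_list empty: splice in whatever submitters queued */
			nvme_tcp_process_req_list(queue);
			req = list_first_entry_or_null(&queue->send_list,
					struct nvme_tcp_request, entry);
			if (!req)
				return NULL;
		}

		list_del(&req->entry);	/* assumed: dequeue before handing back */
		return req;
	}
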
367 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue, in nvme_tcp_verify_hdgst() argument
375 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
377 nvme_tcp_queue_id(queue)); in nvme_tcp_verify_hdgst()
382 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); in nvme_tcp_verify_hdgst()
385 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
394 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu) in nvme_tcp_check_ddgst() argument
397 u8 digest_len = nvme_tcp_hdgst_len(queue); in nvme_tcp_check_ddgst()
404 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
406 nvme_tcp_queue_id(queue)); in nvme_tcp_check_ddgst()
409 crypto_ahash_init(queue->rcv_hash); in nvme_tcp_check_ddgst()
430 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request() local
431 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_init_request()
433 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
440 req->queue = queue; in nvme_tcp_init_request()
451 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx() local
453 hctx->driver_data = queue; in nvme_tcp_init_hctx()
461 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx() local
463 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
468 nvme_tcp_recv_state(struct nvme_tcp_queue *queue) in nvme_tcp_recv_state() argument
470 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
471 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
475 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue) in nvme_tcp_init_recv_ctx() argument
477 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
478 nvme_tcp_hdgst_len(queue); in nvme_tcp_init_recv_ctx()
479 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
480 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
481 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
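Lines 468-481 encode the receive-side state machine: the current state is implicit in which "remaining" counter is nonzero, and resetting the context arms the queue to expect a fresh response PDU (plus header digest, if negotiated). Reconstructed; the NVME_TCP_RECV_* enumerators do not appear in the listed fragments and are assumed from the switch in nvme_tcp_recv_skb() further down:

	static inline enum nvme_tcp_recv_state
	nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
	{
		return  (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
			(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
			NVME_TCP_RECV_DATA;
	}

	static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
	{
		queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
					nvme_tcp_hdgst_len(queue);
		queue->pdu_offset = 0;
		queue->data_remaining = -1;
		queue->ddgst_remaining = 0;
	}
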
493 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, in nvme_tcp_process_nvme_cqe() argument
499 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
501 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
503 cqe->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_process_nvme_cqe()
504 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
514 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
519 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue, in nvme_tcp_handle_c2h_data() argument
524 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
526 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
528 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_c2h_data()
533 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
535 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
539 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
543 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
545 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
546 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
553 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, in nvme_tcp_handle_comp() argument
565 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue), in nvme_tcp_handle_comp()
567 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
570 ret = nvme_tcp_process_nvme_cqe(queue, cqe); in nvme_tcp_handle_comp()
579 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu() local
581 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_setup_h2c_data_pdu()
582 u8 ddgst = nvme_tcp_ddgst_len(queue); in nvme_tcp_setup_h2c_data_pdu()
592 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
594 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
606 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, in nvme_tcp_handle_r2t() argument
613 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
615 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
617 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_r2t()
623 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
630 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
637 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
649 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, in nvme_tcp_recv_pdu() argument
653 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
654 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
658 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
662 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
663 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
666 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
669 hdr = queue->pdu; in nvme_tcp_recv_pdu()
670 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
671 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
677 if (queue->data_digest) { in nvme_tcp_recv_pdu()
678 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
685 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
687 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_pdu()
688 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
690 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_pdu()
691 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
693 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
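Fragments 685-693 are the tail of nvme_tcp_recv_pdu(): once the full PDU header is buffered and its digest verified, the handler dispatches on the PDU type. A sketch of that switch; the case labels are assumptions based on the standard nvme_tcp PDU type constants:

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
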
707 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, in nvme_tcp_recv_data() argument
710 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
712 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
718 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
730 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
732 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
733 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_data()
743 if (queue->data_digest) in nvme_tcp_recv_data()
745 &req->iter, recv_len, queue->rcv_hash); in nvme_tcp_recv_data()
750 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
752 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
758 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
761 if (!queue->data_remaining) { in nvme_tcp_recv_data()
762 if (queue->data_digest) { in nvme_tcp_recv_data()
763 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); in nvme_tcp_recv_data()
764 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
769 queue->nr_cqe++; in nvme_tcp_recv_data()
771 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_data()
778 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue, in nvme_tcp_recv_ddgst() argument
781 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
782 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
783 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
784 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
791 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
794 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
797 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
798 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), in nvme_tcp_recv_ddgst()
804 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
806 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
807 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
811 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), in nvme_tcp_recv_ddgst()
816 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
819 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_ddgst()
826 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb() local
831 switch (nvme_tcp_recv_state(queue)) { in nvme_tcp_recv_skb()
833 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
836 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
839 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
845 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
847 queue->rd_enabled = false; in nvme_tcp_recv_skb()
848 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
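nvme_tcp_recv_skb() is the tcp_read_sock() callback that drives the state machine above over each incoming skb. A reconstruction under the usual read_descriptor_t contract (returning the bytes consumed); the loop skeleton and the "consumed" bookkeeping are assumptions beyond the listed fragments:

	static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
				     unsigned int offset, size_t len)
	{
		struct nvme_tcp_queue *queue = desc->arg.data;
		size_t consumed = len;
		int result;

		while (len) {
			switch (nvme_tcp_recv_state(queue)) {
			case NVME_TCP_RECV_PDU:
				result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
				break;
			case NVME_TCP_RECV_DATA:
				result = nvme_tcp_recv_data(queue, skb, &offset, &len);
				break;
			case NVME_TCP_RECV_DDGST:
				result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
				break;
			default:
				result = -EFAULT;
			}
			if (result) {
				dev_err(queue->ctrl->ctrl.device,
					"receive failed: %d\n", result);
				queue->rd_enabled = false;
				nvme_tcp_error_recovery(&queue->ctrl->ctrl);
				return result;
			}
		}

		return consumed;
	}
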
858 struct nvme_tcp_queue *queue; in nvme_tcp_data_ready() local
861 queue = sk->sk_user_data; in nvme_tcp_data_ready()
862 if (likely(queue && queue->rd_enabled) && in nvme_tcp_data_ready()
863 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) in nvme_tcp_data_ready()
864 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
870 struct nvme_tcp_queue *queue; in nvme_tcp_write_space() local
873 queue = sk->sk_user_data; in nvme_tcp_write_space()
874 if (likely(queue && sk_stream_is_writeable(sk))) { in nvme_tcp_write_space()
876 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
883 struct nvme_tcp_queue *queue; in nvme_tcp_state_change() local
886 queue = sk->sk_user_data; in nvme_tcp_state_change()
887 if (!queue) in nvme_tcp_state_change()
896 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
899 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
901 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
904 queue->state_change(sk); in nvme_tcp_state_change()
909 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) in nvme_tcp_done_send_req() argument
911 queue->request = NULL; in nvme_tcp_done_send_req()
921 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data() local
932 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_data()
938 ret = kernel_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
941 ret = sock_no_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
947 if (queue->data_digest) in nvme_tcp_try_send_data()
948 nvme_tcp_ddgst_update(queue->snd_hash, page, in nvme_tcp_try_send_data()
961 if (queue->data_digest) { in nvme_tcp_try_send_data()
962 nvme_tcp_ddgst_final(queue->snd_hash, in nvme_tcp_try_send_data()
967 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_data()
977 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu() local
980 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_try_send_cmd_pdu()
985 if (inline_data || nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_cmd_pdu()
990 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
991 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_cmd_pdu()
993 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_cmd_pdu()
1002 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
1003 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_cmd_pdu()
1005 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_cmd_pdu()
1016 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu() local
1018 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_try_send_data_pdu()
1022 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
1023 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_data_pdu()
1025 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_data_pdu()
1034 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
1035 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_data_pdu()
1045 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst() local
1054 if (nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_ddgst()
1059 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
1064 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_ddgst()
1072 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue) in nvme_tcp_try_send() argument
1077 if (!queue->request) { in nvme_tcp_try_send()
1078 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
1079 if (!queue->request) in nvme_tcp_try_send()
1082 req = queue->request; in nvme_tcp_try_send()
1110 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_try_send()
1113 nvme_tcp_fail_request(queue->request); in nvme_tcp_try_send()
1114 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send()
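nvme_tcp_try_send() walks one request through its send states (command PDU, then H2C data PDU, data, and finally data digest), stopping whenever the socket pushes back. The listing elides the middle of the function; the req->state cascade below is an assumption modeled on the NVME_TCP_SEND_* states used elsewhere in the driver:

	static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
	{
		struct nvme_tcp_request *req;
		int ret = 1;

		if (!queue->request) {
			queue->request = nvme_tcp_fetch_request(queue);
			if (!queue->request)
				return 0;
		}
		req = queue->request;

		if (req->state == NVME_TCP_SEND_CMD_PDU) {
			ret = nvme_tcp_try_send_cmd_pdu(req);
			if (ret <= 0)
				goto done;
			if (!nvme_tcp_has_inline_data(req))
				return ret;
		}
		if (req->state == NVME_TCP_SEND_H2C_PDU) {
			ret = nvme_tcp_try_send_data_pdu(req);
			if (ret <= 0)
				goto done;
		}
		if (req->state == NVME_TCP_SEND_DATA) {
			ret = nvme_tcp_try_send_data(req);
			if (ret <= 0)
				goto done;
		}
		if (req->state == NVME_TCP_SEND_DDGST)
			ret = nvme_tcp_try_send_ddgst(req);
	done:
		if (ret == -EAGAIN) {
			ret = 0;	/* socket full: retry later, not an error */
		} else if (ret < 0) {
			dev_err(queue->ctrl->ctrl.device,
				"failed to send request %d\n", ret);
			nvme_tcp_fail_request(queue->request);
			nvme_tcp_done_send_req(queue);
		}
		return ret;
	}
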
1119 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue) in nvme_tcp_try_recv() argument
1121 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1126 rd_desc.arg.data = queue; in nvme_tcp_try_recv()
1129 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1137 struct nvme_tcp_queue *queue = in nvme_tcp_io_work() local
1145 if (mutex_trylock(&queue->send_mutex)) { in nvme_tcp_io_work()
1146 result = nvme_tcp_try_send(queue); in nvme_tcp_io_work()
1147 mutex_unlock(&queue->send_mutex); in nvme_tcp_io_work()
1154 result = nvme_tcp_try_recv(queue); in nvme_tcp_io_work()
1165 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
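The io_work handler alternates between sending (under send_mutex, which it shares with the inline-send path in nvme_tcp_queue_request()) and receiving, and yields after a time quota so it does not monopolize the CPU. A simplified sketch; the one-millisecond deadline, the "pending" bookkeeping, and the error handling are assumptions beyond the listed fragments:

	static void nvme_tcp_io_work(struct work_struct *w)
	{
		struct nvme_tcp_queue *queue =
			container_of(w, struct nvme_tcp_queue, io_work);
		unsigned long deadline = jiffies + msecs_to_jiffies(1);

		do {
			bool pending = false;
			int result;

			if (mutex_trylock(&queue->send_mutex)) {
				result = nvme_tcp_try_send(queue);
				mutex_unlock(&queue->send_mutex);
				if (result > 0)
					pending = true;
			}

			result = nvme_tcp_try_recv(queue);
			if (result > 0)
				pending = true;

			if (!pending)
				return;
		} while (!time_after(jiffies, deadline));	/* quota exhausted */

		/* more work remains: requeue ourselves on the same CPU */
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
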
1168 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue) in nvme_tcp_free_crypto() argument
1170 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvme_tcp_free_crypto()
1172 ahash_request_free(queue->rcv_hash); in nvme_tcp_free_crypto()
1173 ahash_request_free(queue->snd_hash); in nvme_tcp_free_crypto()
1177 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue) in nvme_tcp_alloc_crypto() argument
1185 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1186 if (!queue->snd_hash) in nvme_tcp_alloc_crypto()
1188 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1190 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1191 if (!queue->rcv_hash) in nvme_tcp_alloc_crypto()
1193 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1197 ahash_request_free(queue->snd_hash); in nvme_tcp_alloc_crypto()
1212 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req() local
1214 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_alloc_async_req()
1216 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1222 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1230 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue() local
1232 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1235 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_free_queue()
1236 nvme_tcp_free_crypto(queue); in nvme_tcp_free_queue()
1238 if (queue->pf_cache.va) { in nvme_tcp_free_queue()
1239 page = virt_to_head_page(queue->pf_cache.va); in nvme_tcp_free_queue()
1240 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); in nvme_tcp_free_queue()
1241 queue->pf_cache.va = NULL; in nvme_tcp_free_queue()
1243 sock_release(queue->sock); in nvme_tcp_free_queue()
1244 kfree(queue->pdu); in nvme_tcp_free_queue()
1245 mutex_destroy(&queue->send_mutex); in nvme_tcp_free_queue()
1246 mutex_destroy(&queue->queue_lock); in nvme_tcp_free_queue()
1249 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) in nvme_tcp_init_connection() argument
1275 if (queue->hdr_digest) in nvme_tcp_init_connection()
1277 if (queue->data_digest) in nvme_tcp_init_connection()
1282 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1289 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1297 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1303 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1309 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1314 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1315 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1317 nvme_tcp_queue_id(queue), in nvme_tcp_init_connection()
1318 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1324 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1325 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1327 nvme_tcp_queue_id(queue), in nvme_tcp_init_connection()
1328 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1335 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
1347 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue) in nvme_tcp_admin_queue() argument
1349 return nvme_tcp_queue_id(queue) == 0; in nvme_tcp_admin_queue()
1352 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue) in nvme_tcp_default_queue() argument
1354 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_default_queue()
1355 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_default_queue()
1357 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_default_queue()
1361 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue) in nvme_tcp_read_queue() argument
1363 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_read_queue()
1364 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_read_queue()
1366 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_read_queue()
1367 !nvme_tcp_default_queue(queue) && in nvme_tcp_read_queue()
1372 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) in nvme_tcp_poll_queue() argument
1374 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_poll_queue()
1375 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_poll_queue()
1377 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_poll_queue()
1378 !nvme_tcp_default_queue(queue) && in nvme_tcp_poll_queue()
1379 !nvme_tcp_read_queue(queue) && in nvme_tcp_poll_queue()
1385 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) in nvme_tcp_set_queue_io_cpu() argument
1387 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_set_queue_io_cpu()
1388 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_set_queue_io_cpu()
1391 if (nvme_tcp_default_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1393 else if (nvme_tcp_read_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1395 else if (nvme_tcp_poll_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1398 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); in nvme_tcp_set_queue_io_cpu()
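Queue IDs are banded by hctx type, and each band is renumbered from zero before being mapped onto an online CPU, so queues of the same type spread independently. For illustration (counts are made up): with io_queues[HCTX_TYPE_DEFAULT] = 4, [HCTX_TYPE_READ] = 2 and [HCTX_TYPE_POLL] = 2, qids 1-4 are default, 5-6 read and 7-8 poll. Reconstructed; the io_queues[] offsets are assumptions consistent with the predicates at lines 1352-1379:

	static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
	{
		struct nvme_tcp_ctrl *ctrl = queue->ctrl;
		int qid = nvme_tcp_queue_id(queue);
		int n = 0;

		if (nvme_tcp_default_queue(queue))
			n = qid - 1;
		else if (nvme_tcp_read_queue(queue))
			n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
		else if (nvme_tcp_poll_queue(queue))
			n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
					ctrl->io_queues[HCTX_TYPE_READ] - 1;
		queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
	}
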
1405 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue() local
1408 mutex_init(&queue->queue_lock); in nvme_tcp_alloc_queue()
1409 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1410 init_llist_head(&queue->req_list); in nvme_tcp_alloc_queue()
1411 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1412 mutex_init(&queue->send_mutex); in nvme_tcp_alloc_queue()
1413 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1414 queue->queue_size = queue_size; in nvme_tcp_alloc_queue()
1417 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1419 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1423 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1431 tcp_sock_set_syncnt(queue->sock->sk, 1); in nvme_tcp_alloc_queue()
1434 tcp_sock_set_nodelay(queue->sock->sk); in nvme_tcp_alloc_queue()
1441 sock_no_linger(queue->sock->sk); in nvme_tcp_alloc_queue()
1444 sock_set_priority(queue->sock->sk, so_priority); in nvme_tcp_alloc_queue()
1448 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); in nvme_tcp_alloc_queue()
1451 queue->sock->sk->sk_rcvtimeo = 10 * HZ; in nvme_tcp_alloc_queue()
1453 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1454 nvme_tcp_set_queue_io_cpu(queue); in nvme_tcp_alloc_queue()
1455 queue->request = NULL; in nvme_tcp_alloc_queue()
1456 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1457 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1458 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1459 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1460 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
1463 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1477 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, in nvme_tcp_alloc_queue()
1487 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1488 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1489 if (queue->hdr_digest || queue->data_digest) { in nvme_tcp_alloc_queue()
1490 ret = nvme_tcp_alloc_crypto(queue); in nvme_tcp_alloc_queue()
1499 nvme_tcp_hdgst_len(queue); in nvme_tcp_alloc_queue()
1500 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1501 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1507 nvme_tcp_queue_id(queue)); in nvme_tcp_alloc_queue()
1509 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, in nvme_tcp_alloc_queue()
1517 ret = nvme_tcp_init_connection(queue); in nvme_tcp_alloc_queue()
1521 queue->rd_enabled = true; in nvme_tcp_alloc_queue()
1522 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1523 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_alloc_queue()
1525 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_alloc_queue()
1526 queue->sock->sk->sk_user_data = queue; in nvme_tcp_alloc_queue()
1527 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_alloc_queue()
1528 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_alloc_queue()
1529 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_alloc_queue()
1530 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_alloc_queue()
1531 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_alloc_queue()
1532 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_alloc_queue()
1534 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_alloc_queue()
1536 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_alloc_queue()
1541 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1543 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1545 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_alloc_queue()
1546 nvme_tcp_free_crypto(queue); in nvme_tcp_alloc_queue()
1548 sock_release(queue->sock); in nvme_tcp_alloc_queue()
1549 queue->sock = NULL; in nvme_tcp_alloc_queue()
1551 mutex_destroy(&queue->send_mutex); in nvme_tcp_alloc_queue()
1552 mutex_destroy(&queue->queue_lock); in nvme_tcp_alloc_queue()
1556 static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue) in nvme_tcp_restore_sock_calls() argument
1558 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_calls()
1562 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_calls()
1563 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_calls()
1564 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_calls()
1568 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) in __nvme_tcp_stop_queue() argument
1570 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1571 nvme_tcp_restore_sock_calls(queue); in __nvme_tcp_stop_queue()
1572 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1578 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue() local
1580 mutex_lock(&queue->queue_lock); in nvme_tcp_stop_queue()
1581 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue()
1582 __nvme_tcp_stop_queue(queue); in nvme_tcp_stop_queue()
1583 mutex_unlock(&queue->queue_lock); in nvme_tcp_stop_queue()
2186 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue, in nvme_tcp_set_sg_inline() argument
2191 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
2210 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event() local
2213 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_submit_async_event()
2217 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2238 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_complete_timed_out()
2240 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); in nvme_tcp_complete_timed_out()
2251 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_timeout()
2256 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); in nvme_tcp_timeout()
2284 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, in nvme_tcp_map_data() argument
2296 req->data_len <= nvme_tcp_inline_data_size(queue)) in nvme_tcp_map_data()
2297 nvme_tcp_set_sg_inline(queue, c, req->data_len); in nvme_tcp_map_data()
2309 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_cmd_pdu() local
2310 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0; in nvme_tcp_setup_cmd_pdu()
2330 req->data_len <= nvme_tcp_inline_data_size(queue)) in nvme_tcp_setup_cmd_pdu()
2335 if (queue->hdr_digest) in nvme_tcp_setup_cmd_pdu()
2337 if (queue->data_digest && req->pdu_len) { in nvme_tcp_setup_cmd_pdu()
2339 ddgst = nvme_tcp_ddgst_len(queue); in nvme_tcp_setup_cmd_pdu()
2346 ret = nvme_tcp_map_data(queue, rq); in nvme_tcp_setup_cmd_pdu()
2349 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_cmd_pdu()
2359 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_commit_rqs() local
2361 if (!llist_empty(&queue->req_list)) in nvme_tcp_commit_rqs()
2362 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_commit_rqs()
2368 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_tcp_queue_rq()
2369 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_queue_rq() local
2372 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_queue_rq()
2375 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_tcp_queue_rq()
2376 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_tcp_queue_rq()
2436 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_poll() local
2437 struct sock *sk = queue->sock->sk; in nvme_tcp_poll()
2439 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_poll()
2442 set_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2445 nvme_tcp_try_recv(queue); in nvme_tcp_poll()
2446 clear_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2447 return queue->nr_cqe; in nvme_tcp_poll()
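nvme_tcp_poll() closes the loop with the NVME_TCP_Q_POLLING flag checked in nvme_tcp_data_ready() (lines 858-864): while the flag is set, data_ready does not kick io_work, and the poller reaps completions itself via nvme_tcp_try_recv(). Reconstructed; the sk_busy_loop() line is an assumption from the surrounding kernel code, not visible in the listing:

	static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
	{
		struct nvme_tcp_queue *queue = hctx->driver_data;
		struct sock *sk = queue->sock->sk;

		if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
			return 0;

		set_bit(NVME_TCP_Q_POLLING, &queue->flags);
		if (sk_can_busy_loop(sk) &&
		    skb_queue_empty_lockless(&sk->sk_receive_queue))
			sk_busy_loop(sk, true);
		nvme_tcp_try_recv(queue);
		clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
		return queue->nr_cqe;
	}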