
Searched refs:cqe (Results 1 – 25 of 209) sorted by relevance

/linux/drivers/infiniband/hw/mthca/
mthca_cq.c 176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; in cqe_sw()
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), in dump_cqe()
196 be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), in dump_cqe()
197 be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); in dump_cqe()
304 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in mthca_cq_clean()
336 cq->ibcq.cqe < cq->resize_buf->cqe) { in mthca_cq_resize_copy_cqes()
383 be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe), in handle_error_cqe()
494 if (!cqe) in mthca_poll_one()
512 is_send = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80; in mthca_poll_one()
646 set_cqe_hw(cqe); in mthca_poll_one()
[all …]
/linux/drivers/infiniband/hw/mlx4/
cq.c 81 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
85 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
362 cqe = get_cqe(cq, i & cq->ibcq.cqe); in mlx4_ib_cq_resize_copy_cqes()
373 cqe = get_cqe(cq, ++i & cq->ibcq.cqe); in mlx4_ib_cq_resize_copy_cqes()
430 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
447 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
674 if (!cqe) in mlx4_ib_poll_one()
678 cqe++; in mlx4_ib_poll_one()
699 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_poll_one()
939 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx4_ib_cq_clean()
[all …]
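
The mthca and mlx4 hits above both poll for software-owned CQEs via an ownership bit that the hardware flips on every pass around the ring; in the mlx4 variant that bit is compared against the lap parity of the consumer index (the `n & (cq->ibcq.cqe + 1)` term), which suggests ibcq.cqe holds the power-of-two ring size minus one. A standalone sketch of that idiom, with hypothetical names and a cut-down CQE struct:

/*
 * Conceptual model of the ownership-bit polling seen in cqe_sw() and
 * get_sw_cqe() above. Struct and names are hypothetical; real drivers
 * also need a read barrier before using the rest of the CQE (not shown).
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct fake_cqe {
	uint8_t owner;		/* low bit written by "hardware" */
	uint32_t payload;
};

/* ring_size must be a power of two; cons_index counts consumed CQEs
 * monotonically, so (cons_index & ring_size) is the current lap parity. */
static struct fake_cqe *sw_owned_cqe(struct fake_cqe *ring,
				     unsigned int ring_size,
				     unsigned int cons_index)
{
	struct fake_cqe *cqe = &ring[cons_index & (ring_size - 1)];
	bool hw_parity = cqe->owner & 1;
	bool sw_parity = !!(cons_index & ring_size);

	/* parities disagree: hardware has not filled this slot on the
	 * current lap, so there is nothing to poll yet */
	return hw_parity == sw_parity ? cqe : NULL;
}
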
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c 105 int entries = attr->cqe; in pvrdma_create_cq()
132 cq->ibcq.cqe = entries; in pvrdma_create_cq()
186 cmd->cqe = entries; in pvrdma_create_cq()
195 cq->ibcq.cqe = resp->cqe; in pvrdma_create_cq()
293 cq->ibcq.cqe); in _pvrdma_flush_cqe()
309 *cqe = *curr_cqe; in _pvrdma_flush_cqe()
315 cq->ibcq.cqe); in _pvrdma_flush_cqe()
329 struct pvrdma_cqe *cqe; in pvrdma_poll_one() local
358 wc->wr_id = cqe->wr_id; in pvrdma_poll_one()
365 wc->slid = cqe->slid; in pvrdma_poll_one()
[all …]
/linux/tools/io_uring/
io_uring-cp.c 126 struct io_uring_cqe *cqe; in copy_file() local
179 cqe = NULL; in copy_file()
188 if (!cqe) in copy_file()
192 if (cqe->res < 0) { in copy_file()
193 if (cqe->res == -EAGAIN) { in copy_file()
199 strerror(-cqe->res)); in copy_file()
205 data->offset += cqe->res; in copy_file()
207 io_uring_cqe_seen(ring, cqe); in copy_file()
224 io_uring_cqe_seen(ring, cqe); in copy_file()
237 if (cqe->res < 0) { in copy_file()
[all …]
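
The io_uring-cp.c hits show the userspace side of CQE handling: wait for a completion, treat a negative cqe->res as a negated errno, and retire the entry with io_uring_cqe_seen(). A minimal, self-contained consumer in the same spirit, written against the standalone liburing API (which the kernel's tools/io_uring carries its own copy of); error handling is abbreviated:

/* Submit one read, wait for its CQE, check cqe->res, mark it seen. */
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	int fd, ret;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret < 0) {
		fprintf(stderr, "wait_cqe: %s\n", strerror(-ret));
		return 1;
	}
	if (cqe->res < 0)	/* negative errno, as copy_file() checks above */
		fprintf(stderr, "read failed: %s\n", strerror(-cqe->res));
	else
		printf("read %d bytes\n", cqe->res);

	io_uring_cqe_seen(&ring, cqe);	/* retire the completion */
	io_uring_queue_exit(&ring);
	return 0;
}
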
/linux/drivers/infiniband/sw/rxe/
rxe_cq.c 12 int cqe, int comp_vector) in rxe_cq_chk_attr() argument
16 if (cqe <= 0) { in rxe_cq_chk_attr()
17 pr_warn("cqe(%d) <= 0\n", cqe); in rxe_cq_chk_attr()
21 if (cqe > rxe->attr.max_cqe) { in rxe_cq_chk_attr()
23 cqe, rxe->attr.max_cqe); in rxe_cq_chk_attr()
29 if (cqe < count) { in rxe_cq_chk_attr()
31 cqe, count); in rxe_cq_chk_attr()
65 cq->queue = rxe_queue_init(rxe, &cqe, in rxe_cq_from_init()
87 cq->ibcq.cqe = cqe; in rxe_cq_from_init()
101 cq->ibcq.cqe = cqe; in rxe_cq_resize_queue()
[all …]
/linux/drivers/infiniband/sw/siw/
siw_cq.c 50 struct siw_cqe *cqe; in siw_reap_cqe() local
55 cqe = &cq->queue[cq->cq_get % cq->num_cqe]; in siw_reap_cqe()
56 if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) { in siw_reap_cqe()
58 wc->wr_id = cqe->id; in siw_reap_cqe()
60 wc->opcode = map_wc_opcode[cqe->opcode]; in siw_reap_cqe()
61 wc->byte_len = cqe->bytes; in siw_reap_cqe()
69 if (cqe->flags & SIW_WQE_REM_INVAL) { in siw_reap_cqe()
70 wc->ex.invalidate_rkey = cqe->inval_stag; in siw_reap_cqe()
73 wc->qp = cqe->base_qp; in siw_reap_cqe()
77 cqe->flags, (void *)(uintptr_t)cqe->id); in siw_reap_cqe()
[all …]
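
siw signals completions differently from the ownership-bit drivers: the producer marks a slot with SIW_WQE_VALID, and the consumer at cq_get % num_cqe copies the entry out and hands the slot back. A portable C11 sketch of that valid-flag handshake; siw itself uses READ_ONCE() and kernel barriers, and the type and field names below are illustrative only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define WQE_VALID 0x1u

struct soft_cqe {
	_Atomic uint32_t flags;
	uint64_t wr_id;
	uint32_t bytes;
};

static bool reap_cqe(struct soft_cqe *queue, unsigned int num_cqe,
		     unsigned int *cq_get, uint64_t *wr_id, uint32_t *bytes)
{
	struct soft_cqe *cqe = &queue[*cq_get % num_cqe];

	/* acquire pairs with the producer's release store of WQE_VALID */
	if (!(atomic_load_explicit(&cqe->flags, memory_order_acquire) & WQE_VALID))
		return false;

	*wr_id = cqe->wr_id;
	*bytes = cqe->bytes;

	/* hand the slot back to the producer and advance the get index */
	atomic_store_explicit(&cqe->flags, 0, memory_order_release);
	(*cq_get)++;
	return true;
}
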
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c 121 cqe->op_own = op_own; in mlx5e_cqes_update_owner()
129 cqe->op_own = op_own; in mlx5e_cqes_update_owner()
857 struct mlx5_cqe64 *cqe; in mlx5e_poll_ico_cq() local
865 if (likely(!cqe)) in mlx5e_poll_ico_cq()
1435 if (cqe_has_vlan(cqe)) { in mlx5e_build_rx_skb()
1618 trigger_report(rq, cqe); in mlx5e_handle_rx_cqe()
1721 trigger_report(rq, cqe); in mlx5e_handle_rx_cqe_mpwrq_rep()
1969 u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size; in mlx5e_handle_rx_cqe_mpwrq_shampo()
2106 struct mlx5_cqe64 *cqe; in mlx5e_poll_rx_cq() local
2119 if (!cqe) { in mlx5e_poll_rx_cq()
[all …]
/linux/drivers/infiniband/hw/cxgb4/
cq.c 190 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
224 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
432 if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe)) in cqe_completes_wr()
435 if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe)) in cqe_completes_wr()
438 if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq)) in cqe_completes_wr()
783 CQE_TYPE(&cqe), CQE_OPCODE(&cqe), in __c4iw_poll_cq_one()
784 CQE_STATUS(&cqe), CQE_LEN(&cqe), in __c4iw_poll_cq_one()
785 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), in __c4iw_poll_cq_one()
812 CQE_OPCODE(&cqe), CQE_QPID(&cqe)); in __c4iw_poll_cq_one()
849 CQE_OPCODE(&cqe), CQE_QPID(&cqe)); in __c4iw_poll_cq_one()
[all …]
/linux/drivers/net/ethernet/mellanox/mlxsw/
pci_hw.h 119 return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
121 return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
123 return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
127 char *cqe, u32 val) \
132 mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
135 mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
138 mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
156 MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
167 MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
174 MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
[all …]
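
The pci_hw.h hits are the field-accessor machinery: a wrapper macro dispatches on CQE version (v0/v1/v2), and each MLXSW_ITEM32(pci, cqe, name, offset, shift, width) line declares accessors for one packed field. Roughly what such a getter boils down to, assuming the big-endian word layout these descriptors normally use and with made-up helper names:

#include <arpa/inet.h>	/* ntohl() stands in for be32_to_cpu() */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Extract `width` bits starting at `shift` from the big-endian 32-bit
 * word at byte offset `ofs` of the raw CQE buffer. */
static inline uint32_t cqe_item32_get(const char *cqe, size_t ofs,
				      unsigned int shift, unsigned int width)
{
	uint32_t word;

	memcpy(&word, cqe + ofs, sizeof(word));	/* avoid unaligned loads */
	word = ntohl(word);
	return (word >> shift) &
	       (width == 32 ? 0xffffffffu : (1u << width) - 1);
}

/* e.g. byte_count: 14 bits at bit 0 of the word at offset 0x04 */
static inline uint32_t cqe_byte_count_get(const char *cqe)
{
	return cqe_item32_get(cqe, 0x04, 0, 14);
}
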
/linux/drivers/infiniband/hw/mlx5/
cq.c 81 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
88 return cqe; in get_sw_cqe()
453 void *cqe; in mlx5_poll_one() local
458 if (!cqe) in mlx5_poll_one()
461 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one()
859 void *cqe; in init_cq_frag_buf() local
864 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64; in init_cq_frag_buf()
1064 void *cqe, *dest; in __mlx5_ib_cq_clean() local
1086 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
[all …]
/linux/drivers/infiniband/hw/bnxt_re/
qplib_fp.c 2145 memset(cqe, 0, sizeof(*cqe)); in __flush_sq()
2152 cqe++; in __flush_sq()
2194 memset(cqe, 0, sizeof(*cqe)); in __flush_rq()
2200 cqe++; in __flush_rq()
2364 memset(cqe, 0, sizeof(*cqe)); in bnxt_qplib_cq_process_req()
2382 sq->swq_last, cqe->wr_id, cqe->status); in bnxt_qplib_cq_process_req()
2480 cqe++; in bnxt_qplib_cq_process_res_rc()
2497 cqe++; in bnxt_qplib_cq_process_res_rc()
2567 cqe++; in bnxt_qplib_cq_process_res_ud()
2585 cqe++; in bnxt_qplib_cq_process_res_ud()
[all …]
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c 242 hash = cqe->hdr.flow_tag; in otx2_set_rxhash()
326 if (cqe->sg.segs) in otx2_check_rcv_errors()
383 struct nix_cqe_rx_s *cqe; in otx2_rx_napi_handler() local
396 !cqe->sg.seg_addr) { in otx2_rx_napi_handler()
407 cqe->sg.seg_addr = 0x00; in otx2_rx_napi_handler()
436 struct nix_cqe_tx_s *cqe; in otx2_tx_napi_handler() local
448 if (unlikely(!cqe)) { in otx2_tx_napi_handler()
456 cqe); in otx2_tx_napi_handler()
1031 if (!cqe) in otx2_cleanup_rx_cqes()
1033 if (cqe->sg.segs > 1) { in otx2_cleanup_rx_cqes()
[all …]
/linux/drivers/infiniband/sw/rdmavt/
cq.c 55 head = cq->ibcq.cqe; in rvt_cq_enter()
249 cq->ibcq.cqe = entries; in rvt_create_cq()
350 if (cqe < 1 || cqe > rdi->dparms.props.max_cqe) in rvt_resize_cq()
393 if (head > (u32)cq->ibcq.cqe) in rvt_resize_cq()
394 head = (u32)cq->ibcq.cqe; in rvt_resize_cq()
395 if (tail > (u32)cq->ibcq.cqe) in rvt_resize_cq()
396 tail = (u32)cq->ibcq.cqe; in rvt_resize_cq()
401 if (unlikely((u32)cqe < n)) { in rvt_resize_cq()
415 cq->ibcq.cqe = cqe; in rvt_resize_cq()
493 tail = (u32)cq->ibcq.cqe; in rvt_poll_cq()
[all …]
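
The rvt_resize_cq() hits clamp head and tail to the old ibcq.cqe before copying, reflecting the rdmavt convention that the ring has cqe + 1 slots with indices running 0..cqe. A simplified version of the copy-and-check it performs; the entry type and return convention here are illustrative, not rdmavt's:

#include <errno.h>
#include <stdint.h>

struct wc_entry { uint64_t wr_id; uint32_t status; };

/* Copy the outstanding entries (tail..head) of the old ring into the new
 * ring starting at index 0; fail if the new CQ cannot hold them all. */
static int resize_copy(const struct wc_entry *src, uint32_t old_cqe,
		       uint32_t head, uint32_t tail,
		       struct wc_entry *dst, uint32_t new_cqe)
{
	uint32_t n = 0;

	/* defend against stale indices larger than the old ring bound */
	if (head > old_cqe)
		head = old_cqe;
	if (tail > old_cqe)
		tail = old_cqe;

	while (tail != head) {
		if (n >= new_cqe)
			return -EOVERFLOW;	/* too many outstanding entries */
		dst[n++] = src[tail];
		if (tail == old_cqe)		/* ring has old_cqe + 1 slots */
			tail = 0;
		else
			tail++;
	}
	return (int)n;	/* number of entries copied */
}
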
/linux/drivers/net/ethernet/qlogic/qede/
qede_fp.c 662 cqe->header_len; in qede_set_gro_params()
869 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); in qede_tpa_start()
875 if (likely(cqe->bw_ext_bd_len_list[0])) in qede_tpa_start()
961 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_cont()
963 le16_to_cpu(cqe->len_list[i])); in qede_tpa_cont()
986 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_end()
988 le16_to_cpu(cqe->len_list[i])); in qede_tpa_end()
1224 union eth_rx_cqe *cqe, in qede_rx_process_tpa_cqe() argument
1249 union eth_rx_cqe *cqe; in qede_rx_process_cqe() local
1278 fp_cqe = &cqe->fast_path_regular; in qede_rx_process_cqe()
[all …]
/linux/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c 326 if (!rq->cqe) in alloc_rq_cqe()
336 sizeof(*rq->cqe[i]), in alloc_rq_cqe()
338 if (!rq->cqe[i]) in alloc_rq_cqe()
346 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j], in alloc_rq_cqe()
352 vfree(rq->cqe); in alloc_rq_cqe()
368 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i], in free_rq_cqe()
372 vfree(rq->cqe); in free_rq_cqe()
853 cqe = rq->cqe[*cons_idx]; in hinic_rq_read_wqe()
905 struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; in hinic_rq_put_wqe() local
928 struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; in hinic_rq_get_sge() local
[all …]
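
The hinic hits trace the usual allocate-then-unwind pattern for per-entry DMA buffers: a vzalloc'ed pointer array, one dma_alloc_coherent() per CQE, and on failure a reverse unwind of everything already allocated before the array itself is freed. A condensed, hypothetical kernel-style sketch of just the per-entry loop:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int alloc_cqe_array(struct device *dev, void **cqe, dma_addr_t *dma,
			   int num, size_t size)
{
	int i, j;

	for (i = 0; i < num; i++) {
		cqe[i] = dma_alloc_coherent(dev, size, &dma[i], GFP_KERNEL);
		if (!cqe[i])
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* release everything allocated so far, in reverse */
	for (j = 0; j < i; j++)
		dma_free_coherent(dev, size, cqe[j], dma[j]);
	return -ENOMEM;
}
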
/linux/drivers/infiniband/ulp/iser/
iscsi_iser.h 248 struct ib_cqe cqe; member
275 struct ib_cqe cqe; member
295 struct ib_cqe cqe; member
577 iser_rx(struct ib_cqe *cqe) in iser_rx() argument
579 return container_of(cqe, struct iser_rx_desc, cqe); in iser_rx()
583 iser_tx(struct ib_cqe *cqe) in iser_tx() argument
585 return container_of(cqe, struct iser_tx_desc, cqe); in iser_tx()
589 iser_login(struct ib_cqe *cqe) in iser_login() argument
591 return container_of(cqe, struct iser_login_desc, cqe); in iser_login()
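
The three iser helpers above are the standard container_of() pattern: each descriptor embeds a struct ib_cqe, the completion handler is handed a pointer to that embedded member, and container_of() recovers the enclosing descriptor. A self-contained illustration with made-up types; the kernel macro does the same offsetof() arithmetic:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_cqe_like {
	void (*done)(struct ib_cqe_like *cqe);
};

struct rx_desc {
	char payload[64];
	struct ib_cqe_like cqe;	/* embedded completion hook */
};

static struct rx_desc *to_rx_desc(struct ib_cqe_like *cqe)
{
	return container_of(cqe, struct rx_desc, cqe);
}

int main(void)
{
	struct rx_desc desc;

	/* only &desc.cqe is passed around; the descriptor comes back */
	printf("%d\n", to_rx_desc(&desc.cqe) == &desc);
	return 0;
}
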
/linux/drivers/scsi/qedi/
qedi_fw.c 31 union iscsi_cqe *cqe, in qedi_process_logout_resp() argument
82 union iscsi_cqe *cqe, in qedi_process_text_resp() argument
178 union iscsi_cqe *cqe, in qedi_process_tmf_resp() argument
331 idx = cqe->rqe_opaque; in qedi_get_rq_bdq_buf()
366 idx = cqe->rqe_opaque; in qedi_put_rq_bdq_buf()
409 union iscsi_cqe *cqe, in qedi_process_nopin_mesg() argument
576 union iscsi_cqe *cqe, in qedi_scsi_completion() argument
674 union iscsi_cqe *cqe, in qedi_mtask_completion() argument
734 u32 proto_itt = cqe->itid; in qedi_process_cmd_cleanup_resp()
743 iscsi_cid = cqe->conn_id; in qedi_process_cmd_cleanup_resp()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
tls_rxtx.h 68 struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) in mlx5e_tls_handle_rx_skb() argument
70 if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */ in mlx5e_tls_handle_rx_skb()
71 return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt); in mlx5e_tls_handle_rx_skb()
80 mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; } in mlx5e_accel_is_tls() argument
83 struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {} in mlx5e_tls_handle_rx_skb() argument
ipsec_rxtx.h 72 struct mlx5_cqe64 *cqe);
78 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) in mlx5_ipsec_is_rx_flow() argument
80 return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata)); in mlx5_ipsec_is_rx_flow()
153 struct mlx5_cqe64 *cqe) in mlx5e_ipsec_offload_handle_rx_skb() argument
161 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; } in mlx5_ipsec_is_rx_flow() argument
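
Both en_accel headers pair a real handler with a trivial static inline stub of the same signature, so callers compile unchanged whether or not the offload is configured in. The generic shape of that compile-out pattern, with a made-up CONFIG symbol and handler name:

#include <linux/types.h>

struct sk_buff;
struct mlx5_cqe64;

#ifdef CONFIG_MY_OFFLOAD
bool my_offload_is_rx_flow(struct mlx5_cqe64 *cqe);
#else
static inline bool my_offload_is_rx_flow(struct mlx5_cqe64 *cqe)
{
	return false;	/* offload not built in: never claim the flow */
}
#endif
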
/linux/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c 967 int entries = attr->cqe; in ocrdma_create_cq()
1021 ibcq->cqe = new_cnt; in ocrdma_resize_cq()
1034 cqe = cq->va; in ocrdma_flush_cq()
1044 cqe++; in ocrdma_flush_cq()
1592 struct ocrdma_cqe *cqe; in ocrdma_discard_cqes() local
1611 cqe = cq->va + cur_getp; in ocrdma_discard_cqes()
1643 cqe->cmn.qpn = 0; in ocrdma_discard_cqes()
2444 if (is_cqe_for_sq(cqe)) { in ocrdma_set_cqe_status_flushed()
2698 if (is_cqe_imm(cqe)) { in ocrdma_poll_success_rcqe()
2764 struct ocrdma_cqe *cqe; in ocrdma_poll_hwcq() local
[all …]
ocrdma.h 496 static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) in is_cqe_valid() argument
499 cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; in is_cqe_valid()
503 static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) in is_cqe_for_sq() argument
505 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_for_sq()
509 static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe) in is_cqe_invalidated() argument
511 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_invalidated()
515 static inline int is_cqe_imm(struct ocrdma_cqe *cqe) in is_cqe_imm() argument
517 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_imm()
521 static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe) in is_cqe_wr_imm() argument
523 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_wr_imm()
/linux/include/linux/mlx5/
device.h 879 return (cqe->op_own >> 2) & 0x3; in mlx5_get_cqe_format()
884 return cqe->op_own >> 4; in get_cqe_opcode()
889 return (cqe->lro.tcppsh_abort_dupack >> 6) & 1; in get_cqe_lro_tcppsh()
894 return (cqe->l4_l3_hdr_type >> 4) & 0x7; in get_cqe_l4_hdr_type()
899 return (cqe->l4_l3_hdr_type >> 2) & 0x3; in get_cqe_l3_hdr_type()
904 return cqe->tls_outer_l3_tunneled & 0x1; in cqe_is_tunneled()
914 return cqe->l4_l3_hdr_type & 0x1; in cqe_has_vlan()
921 hi = be32_to_cpu(cqe->timestamp_h); in get_cqe_ts()
922 lo = be32_to_cpu(cqe->timestamp_l); in get_cqe_ts()
929 return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF; in get_cqe_flow_tag()
[all …]
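
The device.h hits are all small decoders over the packed mlx5_cqe64 layout: single-bit and few-bit fields pulled out of op_own and l4_l3_hdr_type, plus a 64-bit timestamp rebuilt from two big-endian halves, as the hi/lo split in get_cqe_ts() suggests. A userspace sketch of two of them over a hypothetical cut-down struct:

#include <arpa/inet.h>	/* ntohl() in place of be32_to_cpu() */
#include <stdint.h>

struct cqe64_like {
	uint8_t op_own;
	uint32_t timestamp_h;	/* big-endian as written by hardware */
	uint32_t timestamp_l;
};

static inline uint8_t cqe_opcode(const struct cqe64_like *cqe)
{
	return cqe->op_own >> 4;	/* mirrors get_cqe_opcode() above */
}

static inline uint64_t cqe_timestamp(const struct cqe64_like *cqe)
{
	uint64_t hi = ntohl(cqe->timestamp_h);
	uint64_t lo = ntohl(cqe->timestamp_l);

	return (hi << 32) | lo;
}
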
/linux/drivers/nvme/target/
fabrics-cmd.c 81 req->cqe->result.u64 = cpu_to_le64(val); in nvmet_execute_prop_get()
118 req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); in nvmet_install_queue()
133 req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); in nvmet_install_queue()
150 req->cqe->sq_head = cpu_to_le16(0xffff); in nvmet_install_queue()
191 req->cqe->result.u32 = 0; in nvmet_execute_admin_connect()
205 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); in nvmet_execute_admin_connect()
228 req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid); in nvmet_execute_admin_connect()
258 req->cqe->result.u32 = 0; in nvmet_execute_io_connect()
277 req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid); in nvmet_execute_io_connect()
286 req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid); in nvmet_execute_io_connect()
/linux/drivers/scsi/qedf/
qedf.h 250 struct fcoe_cqe cqe; member
486 extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
489 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
491 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
496 extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
509 extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
516 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
518 extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
520 extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
529 struct fcoe_cqe *cqe);
[all …]
/linux/drivers/scsi/bnx2i/
bnx2i.h 506 struct cqe { struct
650 struct cqe *cq_virt;
654 struct cqe *cq_prod_qe;
655 struct cqe *cq_cons_qe;
656 struct cqe *cq_first_qe;
657 struct cqe *cq_last_qe;
774 struct cqe cqe; member
881 struct cqe *cqe);
