| /linux/drivers/net/ethernet/mellanox/mlx4/ |
| cq.c |
|   110  cqn & (dev->caps.num_cqs - 1));  in mlx4_cq_completion() |
|   223  *cqn = mlx4_bitmap_alloc(&cq_table->bitmap);  in __mlx4_cq_alloc_icm() |
|   224  if (*cqn == -1)  in __mlx4_cq_alloc_icm() |
|   257  *cqn = get_param_l(&out_param);  in mlx4_cq_alloc_icm() |
|   261  return __mlx4_cq_alloc_icm(dev, cqn);  in mlx4_cq_alloc_icm() |
|   270  mlx4_table_put(dev, &cq_table->table, cqn);  in __mlx4_cq_free_icm() |
|   280  set_param_l(&in_param, cqn);  in mlx4_cq_free_icm() |
|   287  __mlx4_cq_free_icm(dev, cqn);  in mlx4_cq_free_icm() |
|   430  mlx4_cq_free_icm(dev, cq->cqn);  in mlx4_cq_alloc() |
|   442  err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);  in mlx4_cq_free() |
|   [all …] |
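
The cq.c hits above trace the mlx4 CQN life cycle: a number is pulled from a bitmap allocator (mlx4_bitmap_alloc() returns -1 when the pool is exhausted), backing context memory is reserved, and both are released again on free. Below is a minimal userspace sketch of the bitmap-allocator pattern only; the names cq_bitmap, cq_bitmap_alloc() and NUM_CQS are hypothetical stand-ins, not the driver's API.

    /*
     * Minimal sketch of a bitmap-based number allocator, loosely modelled
     * on the pattern visible above.  All names are hypothetical.
     */
    #include <stdint.h>

    #define NUM_CQS 256

    struct cq_bitmap {
        uint64_t word[NUM_CQS / 64];
    };

    /* Find a clear bit, set it, and return its index; -1 if the map is full. */
    static int cq_bitmap_alloc(struct cq_bitmap *bm)
    {
        for (int i = 0; i < NUM_CQS; i++) {
            uint64_t mask = 1ULL << (i % 64);

            if (!(bm->word[i / 64] & mask)) {
                bm->word[i / 64] |= mask;
                return i;       /* this index becomes the CQN */
            }
        }
        return -1;              /* no free CQN left */
    }

    static void cq_bitmap_free(struct cq_bitmap *bm, int cqn)
    {
        bm->word[cqn / 64] &= ~(1ULL << (cqn % 64));
    }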
|
| en_resources.c |
|   41  int is_tx, int rss, int qpn, int cqn,  in mlx4_en_fill_qp_context() argument |
|   72  context->cqn_send = cpu_to_be32(cqn);  in mlx4_en_fill_qp_context() |
|   73  context->cqn_recv = cpu_to_be32(cqn);  in mlx4_en_fill_qp_context() |
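
Lines 72-73 point the QP's send and receive completions at the same CQ by writing one big-endian CQN into both context fields. A hedged userspace sketch of that idea follows; the structure layout and names are invented for illustration and bear no relation to the real mlx4 QP context.

    #include <stdint.h>
    #include <endian.h>

    /* Toy context: the real hardware context has many more fields. */
    struct toy_qp_context {
        uint32_t cqn_send;      /* big-endian on the wire */
        uint32_t cqn_recv;      /* big-endian on the wire */
    };

    /* Direct both send and receive completions to the same CQ. */
    static void fill_qp_context(struct toy_qp_context *ctx, uint32_t cqn)
    {
        ctx->cqn_send = htobe32(cqn);
        ctx->cqn_recv = htobe32(cqn);
    }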
|
| resource_tracker.c |
|   1938  int cqn;  in cq_alloc_res() local |
|   1960  set_param_l(out_param, cqn);  in cq_alloc_res() |
|   2464  int cqn;  in cq_free_res() local |
|   2469  cqn = get_param_l(&in_param);  in cq_free_res() |
|   3441  int cqn = vhcr->in_modifier;  in mlx4_SW2HW_CQ_wrapper() local |
|   3479  int cqn = vhcr->in_modifier;  in mlx4_HW2SW_CQ_wrapper() local |
|   3503  int cqn = vhcr->in_modifier;  in mlx4_QUERY_CQ_wrapper() local |
|   3575  int cqn = vhcr->in_modifier;  in mlx4_MODIFY_CQ_wrapper() local |
|   4808  int cqn;  in rem_slave_cqs() local |
|   4820  cqn = cq->com.res_id;  in rem_slave_cqs() |
|   [all …] |
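
Under SR-IOV, cq_alloc_res() hands the allocated CQN back in the low 32 bits of a 64-bit command parameter and cq_free_res() reads it back the same way, while the *_CQ_wrapper() handlers take the CQN from vhcr->in_modifier. The helpers below restate the set_param_l()/get_param_l() idea; they are a userspace re-implementation for illustration, not the driver's code.

    #include <stdint.h>

    /* Store a 32-bit value (here: the CQN) in the low half of a 64-bit
     * mailbox parameter, leaving the high half untouched. */
    static void set_param_l(uint64_t *param, uint32_t val)
    {
        *param = (*param & 0xffffffff00000000ULL) | val;
    }

    static uint32_t get_param_l(const uint64_t *param)
    {
        return (uint32_t)(*param & 0xffffffff);
    }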
|
| srq.c |
|   162  int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,  in mlx4_srq_alloc() argument |
|   192  srq_context->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff);  in mlx4_srq_alloc() |
|
| /linux/drivers/infiniband/hw/hns/ |
| hns_roce_cq.c |
|   87  return (u8)(cqn & GENMASK(1, 0));  in get_cq_bankid() |
|   124  hr_cq->cqn, ret);  in alloc_cqc() |
|   150  hr_cq->cqn, ret);  in alloc_cqc() |
|   183  hr_cq->cqn);  in free_cqc() |
|   419  resp.cqn = hr_cq->cqn;  in hns_roce_create_cq() |
|   431  free_cqn(hr_dev, hr_cq->cqn);  in hns_roce_create_cq() |
|   448  free_cqn(hr_dev, hr_cq->cqn);  in hns_roce_destroy_cq() |
|   461  cqn & (hr_dev->caps.num_cqs - 1));  in hns_roce_cq_completion() |
|   464  cqn);  in hns_roce_cq_completion() |
|   482  cqn & (hr_dev->caps.num_cqs - 1));  in hns_roce_cq_event() |
|   [all …] |
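
get_cq_bankid() at line 87 derives a bank index from the two low bits of the CQN (GENMASK(1, 0) is simply 0x3), so consecutive CQNs are spread across four allocation banks. A small userspace restatement of that masking; the bank count of four is inferred from the mask width and the helper name is hypothetical.

    #include <stdint.h>

    #define CQ_BANK_NUM 4       /* selected by the two lowest CQN bits */

    static uint8_t cq_bankid(uint32_t cqn)
    {
        return (uint8_t)(cqn & (CQ_BANK_NUM - 1));  /* == cqn & GENMASK(1, 0) */
    }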
|
| hns_roce_hw_v2_dfx.c |
|   9  int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,  in hns_roce_v2_query_cqc_info() argument |
|   21  ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,  in hns_roce_v2_query_cqc_info() |
|
| hns_roce_hw_v1.c |
|   2645  to_hr_cq(ibqp->recv_cq)->cqn);  in hns_roce_v1_m_sqp() |
|   2649  to_hr_cq(ibqp->send_cq)->cqn);  in hns_roce_v1_m_sqp() |
|   2819  to_hr_cq(ibqp->send_cq)->cqn);  in hns_roce_v1_m_qp() |
|   2823  to_hr_cq(ibqp->recv_cq)->cqn);  in hns_roce_v1_m_qp() |
|   2884  to_hr_cq(ibqp->send_cq)->cqn);  in hns_roce_v1_m_qp() |
|   2888  to_hr_cq(ibqp->recv_cq)->cqn);  in hns_roce_v1_m_qp() |
|   3667  hr_cq->cqn);  in hns_roce_v1_destroy_cq() |
|   3790  u32 cqn;  in hns_roce_v1_cq_err_handle() local |
|   3964  u32 cqn;  in hns_roce_v1_ceq_int() local |
|   3972  cqn = roce_get_field(ceqe->comp,  in hns_roce_v1_ceq_int() |
|   [all …] |
|
| hns_roce_device.h |
|   459  unsigned long cqn;  member |
|   484  u32 cqn;  member |
|   889  int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn, |
|   1271  void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn); |
|   1272  void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type); |
|
| hns_roce_restrack.c | 95  ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context);  in hns_roce_fill_res_cq_entry() |
|
| /linux/drivers/infiniband/hw/mthca/ |
| mthca_cq.c |
|   76  __be32 cqn;  member |
|   384  cq->cqn, cq->cons_index);  in handle_error_cqe() |
|   728  to_mcq(cq)->cqn;  in mthca_tavor_arm_cq() |
|   759  MTHCA_ARBEL_CQ_DB_REQ_NOT) | cq->cqn;  in mthca_arbel_arm_cq() |
|   780  if (cq->cqn == -1)  in mthca_init_cq() |
|   794  cq->cqn, &cq->set_ci_db);  in mthca_init_cq() |
|   799  cq->cqn, &cq->arm_db);  in mthca_init_cq() |
|   837  cq_context->cqn = cpu_to_be32(cq->cqn);  in mthca_init_cq() |
|   884  mthca_free(&dev->cq_table.alloc, cq->cqn);  in mthca_init_cq() |
|   921  cq->cqn, cq->cons_index,  in mthca_free_cq() |
|   [all …] |
|
| mthca_eq.c |
|   132  __be32 cqn;  member |
|   149  __be32 cqn;  member |
|   219  static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)  in disarm_cq() argument |
|   222  mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn,  in disarm_cq() |
|   276  disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;  in mthca_eq_int() |
|   343  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);  in mthca_eq_int() |
|   344  mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),  in mthca_eq_int() |
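
Both the completion and the CQ-error event entries carry the CQN as a big-endian 32-bit field of which only the low 24 bits are meaningful, hence the be32_to_cpu(...) & 0xffffff in mthca_eq_int(). Below is a sketch of that decode step; the EQE layout shown is invented purely for illustration.

    #include <stdint.h>
    #include <endian.h>

    /* Toy event-queue entry: the CQN sits in the low 24 bits of a
     * big-endian word (real EQEs carry several more fields). */
    struct toy_comp_eqe {
        uint32_t cqn_be;
    };

    static uint32_t eqe_to_cqn(const struct toy_comp_eqe *eqe)
    {
        return be32toh(eqe->cqn_be) & 0xffffff;
    }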
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| cq.c |
|   108  cq->cqn = MLX5_GET(create_cq_out, out, cqn);  in mlx5_core_create_cq() |
|   135  cq->cqn);  in mlx5_core_create_cq() |
|   146  MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);  in mlx5_core_create_cq() |
|   164  MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);  in mlx5_core_destroy_cq() |
|   184  MLX5_SET(query_cq_in, in, cqn, cq->cqn);  in mlx5_core_query_cq() |
|   208  MLX5_SET(modify_cq_in, in, cqn, cq->cqn);  in mlx5_core_modify_cq_moderation() |
|
| eq.c |
|   95  cq = radix_tree_lookup(&table->tree, cqn);  in mlx5_eq_cq_get() |
|   112  u32 cqn = -1;  in mlx5_eq_comp_int() local |
|   126  cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;  in mlx5_eq_comp_int() |
|   128  cq = mlx5_eq_cq_get(eq, cqn);  in mlx5_eq_comp_int() |
|   145  if (cqn != -1)  in mlx5_eq_comp_int() |
|   432  eq->eqn, cq->cqn);  in mlx5_eq_del_cq() |
|   438  eq->eqn, cq->cqn);  in mlx5_eq_del_cq() |
|   500  u32 cqn;  in cq_err_event_notifier() local |
|   508  cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;  in cq_err_event_notifier() |
|   510  cqn, eqe->data.cq_err.syndrome);  in cq_err_event_notifier() |
|   [all …] |
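
mlx5_eq_comp_int() decodes the CQN from each completion EQE, looks the CQ up through mlx5_eq_cq_get() (a radix-tree lookup keyed by CQN), and invokes the CQ's completion handler. The sketch below follows that flow with a plain array standing in for the radix tree; every name in it is a hypothetical stand-in.

    #include <stdint.h>
    #include <stdio.h>
    #include <endian.h>

    #define MAX_CQS 1024

    struct toy_cq {
        uint32_t cqn;
        void (*comp)(struct toy_cq *cq);        /* completion callback */
    };

    static struct toy_cq *cq_table[MAX_CQS];    /* indexed by CQN */

    static void handle_comp_eqe(uint32_t cqn_be)
    {
        uint32_t cqn = be32toh(cqn_be) & 0xffffff;
        struct toy_cq *cq = cqn < MAX_CQS ? cq_table[cqn] : NULL;

        if (!cq) {
            fprintf(stderr, "completion event for unknown CQN 0x%x\n", cqn);
            return;
        }
        cq->comp(cq);
    }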
|
| /linux/drivers/infiniband/hw/mlx4/ |
| srq.c |
|   82  u32 cqn;  in mlx4_ib_create_srq() local |
|   180  cqn = ib_srq_has_cq(init_attr->srq_type) ?  in mlx4_ib_create_srq() |
|   181  to_mcq(init_attr->ext.cq)->mcq.cqn : 0;  in mlx4_ib_create_srq() |
|   185  err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn,  in mlx4_ib_create_srq() |
|
| /linux/include/linux/mlx5/ |
| cq.h |
|   40  u32 cqn;  member |
|   170  doorbell[1] = cpu_to_be32(cq->cqn);  in mlx5_cq_arm() |
|
| /linux/drivers/infiniband/hw/mlx5/ |
| cq.c |
|   57  type, mcq->cqn);  in mlx5_ib_cq_event() |
|   520  "Requestor" : "Responder", cq->mcq.cqn);  in mlx5_poll_one() |
|   557  cq->mcq.cqn, sig->err_item.key,  in mlx5_poll_one() |
|   583  cq->mcq.cqn);  in poll_soft_wc() |
|   1008  mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);  in mlx5_ib_create_cq() |
|   1018  if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {  in mlx5_ib_create_cq() |
|   1137  mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);  in mlx5_ib_modify_cq() |
|   1244  cq->mcq.cqn);  in copy_resize_cqes() |
|   1351  MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);  in mlx5_ib_resize_cq() |
|
| srq.c |
|   275  in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;  in mlx5_ib_create_srq() |
|   277  in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;  in mlx5_ib_create_srq() |
|
| srq.h | 25  u32 cqn;  member |
|
| qp.c |
|   1301  MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));  in create_raw_packet_qp_sq() |
|   2089  to_mcq(init_attr->send_cq)->mcq.cqn);  in create_dci() |
|   2093  to_mcq(init_attr->recv_cq)->mcq.cqn);  in create_dci() |
|   2480  if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {  in mlx5_ib_lock_cqs() |
|   2484  } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {  in mlx5_ib_lock_cqs() |
|   2510  if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {  in mlx5_ib_unlock_cqs() |
|   2513  } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {  in mlx5_ib_unlock_cqs() |
|   2659  MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);  in create_dct() |
|   4145  MLX5_SET(qpc, qpc, cqn_snd, send_cq->mcq.cqn);  in __mlx5_ib_modify_qp() |
|   4147  MLX5_SET(qpc, qpc, cqn_rcv, recv_cq->mcq.cqn);  in __mlx5_ib_modify_qp() |
|   [all …] |
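
The mlx5_ib_lock_cqs()/mlx5_ib_unlock_cqs() hits use the CQN as a lock-ordering key: when a QP must hold both its send and receive CQ locks, the CQ with the lower CQN is always taken first, and a CQ shared by both directions is locked only once, which rules out ABBA deadlocks. A userspace sketch of that ordering rule, with pthread mutexes standing in for the kernel spinlocks (mutex initialization omitted).

    #include <stdint.h>
    #include <pthread.h>

    struct toy_cq {
        uint32_t cqn;
        pthread_mutex_t lock;
    };

    /* Always take the lock of the CQ with the lower CQN first. */
    static void lock_cqs(struct toy_cq *send_cq, struct toy_cq *recv_cq)
    {
        if (send_cq->cqn < recv_cq->cqn) {
            pthread_mutex_lock(&send_cq->lock);
            pthread_mutex_lock(&recv_cq->lock);
        } else if (send_cq->cqn == recv_cq->cqn) {
            pthread_mutex_lock(&send_cq->lock);     /* same CQ: lock once */
        } else {
            pthread_mutex_lock(&recv_cq->lock);
            pthread_mutex_lock(&send_cq->lock);
        }
    }

    static void unlock_cqs(struct toy_cq *send_cq, struct toy_cq *recv_cq)
    {
        pthread_mutex_unlock(&send_cq->lock);
        if (send_cq->cqn != recv_cq->cqn)
            pthread_mutex_unlock(&recv_cq->lock);
    }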
|
| /linux/include/uapi/rdma/ |
| hns-abi.h | 51  __aligned_u64 cqn; /* Only 32 bits used, 64 for compat */  member |
|
| mthca-abi.h | 84  __u32 cqn;  member |
|
| mlx4-abi.h | 85  __u32 cqn;  member |
|
| /linux/drivers/infiniband/hw/efa/ |
| efa_main.c |
|   72  u16 cqn = eqe->u.comp_event.cqn;  in efa_process_comp_eqe() local |
|   76  cq = xa_load(&dev->cqs_xa, cqn);  in efa_process_comp_eqe() |
|   80  cqn);  in efa_process_comp_eqe() |
|
| efa_admin_defs.h | 128  u16 cqn;  member |
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
| dr_send.c |
|   45  u32 cqn;  member |
|   169  MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);  in dr_create_rc_qp() |
|   170  MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);  in dr_create_rc_qp() |
|   752  pr_err("CQ completion CQ: #%u\n", mcq->cqn);  in dr_cq_complete() |
|   935  init_attr.cqn = dmn->send_ring->cq->mcq.cqn;  in mlx5dr_send_ring_alloc() |
|