Home
last modified time | relevance | path

Searched refs:cm_id (Results 1 – 25 of 49) sorted by relevance

12

/linux/drivers/infiniband/core/
A D iwcm.c 402 cm_id->device->ops.iw_destroy_listen(cm_id); in destroy_cm_id()
425 cm_id->device->ops.iw_reject(cm_id, NULL, 0); in destroy_cm_id()
439 iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr); in destroy_cm_id()
519 cm_id->m_local_addr = cm_id->local_addr; in iw_cm_map()
520 cm_id->m_remote_addr = cm_id->remote_addr; in iw_cm_map()
583 ret = cm_id->device->ops.iw_create_listen(cm_id, in iw_cm_listen()
624 ret = cm_id->device->ops.iw_reject(cm_id, private_data, in iw_cm_reject()
660 qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn); in iw_cm_accept()
671 ret = cm_id->device->ops.iw_accept(cm_id, iw_param); in iw_cm_accept()
731 ret = cm_id->device->ops.iw_connect(cm_id, iw_param); in iw_cm_connect()
[all …]
A D ucma.c 93 struct rdma_cm_id *cm_id; member
167 if (!ctx->cm_id->device) { in ucma_get_ctx_dev()
188 ctx->cm_id = NULL; in ucma_close_id()
217 ctx->cm_id = cm_id; in ucma_set_ctx_cm_id()
462 if (IS_ERR(cm_id)) { in ucma_create_id()
463 ret = PTR_ERR(cm_id); in ucma_create_id()
850 if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) in ucma_query_route()
852 else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num)) in ucma_query_route()
854 else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num)) in ucma_query_route()
870 if (!cm_id->device) in ucma_query_device_addr()
[all …]
A D cm_trace.h 139 const struct ib_cm_id *cm_id
142 TP_ARGS(cm_id),
153 __entry->cm_id = cm_id;
156 __entry->state = cm_id->state;
173 TP_ARGS(cm_id))
188 const struct ib_cm_id *cm_id,
192 TP_ARGS(cm_id, reason),
195 __field(const void *, cm_id)
203 __entry->cm_id = cm_id;
223 TP_ARGS(cm_id))
[all …]
A D cma_trace.h 29 __field(u32, cm_id)
36 __entry->cm_id = id_priv->res.id;
75 __field(u32, cm_id)
82 __entry->cm_id = id_priv->res.id;
104 __field(u32, cm_id)
112 __entry->cm_id = id_priv->res.id;
182 __field(u32, cm_id)
195 __entry->cm_id = id_priv->res.id;
232 __field(u32, cm_id)
264 __field(u32, cm_id)
[all …]
A D cm.c 1043 switch (cm_id->state) { in cm_destroy_id()
1054 cm_id->state = IB_CM_IDLE; in cm_destroy_id()
1060 cm_id->state = IB_CM_IDLE; in cm_destroy_id()
1068 cm_id->state = IB_CM_IDLE; in cm_destroy_id()
1121 cm_id->state = IB_CM_IDLE; in cm_destroy_id()
1157 cm_destroy_id(cm_id, 0); in ib_destroy_cm_id()
2287 trace_icm_send_rep(cm_id); in ib_send_cm_rep()
2362 trace_icm_send_rtu(cm_id); in ib_send_cm_rtu()
3132 cm_id->state = cm_state; in ib_send_cm_mra()
3912 switch (cm_id->state) { in cm_establish()
[all …]
A D cma.c 140 return id_priv->cm_id.iw; in rdma_iw_cm_id()
2269 conn_id->cm_id.ib = cm_id; in cma_ib_req_handler()
2313 if (!cm_id->device) { in rdma_read_gids()
2321 if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) { in rdma_read_gids()
2446 conn_id->cm_id.iw = cm_id; in iw_conn_req_handler()
2481 id_priv->cm_id.ib = id; in cma_ib_listen()
2502 id_priv->cm_id.iw = id; in cma_iw_listen()
4065 id_priv->cm_id.ib = id; in cma_resolve_ib_udp()
4117 id_priv->cm_id.ib = id; in cma_connect_ib()
4169 if (IS_ERR(cm_id)) in cma_connect_iw()
[all …]
/linux/net/rds/
A D rdma_transport.c 92 ret = rdma_resolve_route(cm_id, in rds_rdma_cm_event_handler_cmn()
105 cm_id->route.path_rec[0].sl = in rds_rdma_cm_event_handler_cmn()
199 struct rdma_cm_id *cm_id; in rds_rdma_listen_init_common() local
204 if (IS_ERR(cm_id)) { in rds_rdma_listen_init_common()
205 ret = PTR_ERR(cm_id); in rds_rdma_listen_init_common()
215 ret = rdma_bind_addr(cm_id, sa); in rds_rdma_listen_init_common()
222 ret = rdma_listen(cm_id, 128); in rds_rdma_listen_init_common()
231 *ret_cm_id = cm_id; in rds_rdma_listen_init_common()
232 cm_id = NULL; in rds_rdma_listen_init_common()
234 if (cm_id) in rds_rdma_listen_init_common()
[all …]
A D ib.c 410 struct rdma_cm_id *cm_id; in rds_ib_laddr_check() local
422 cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, in rds_ib_laddr_check()
424 if (IS_ERR(cm_id)) in rds_ib_laddr_check()
425 return PTR_ERR(cm_id); in rds_ib_laddr_check()
475 ret = rdma_bind_addr(cm_id, sa); in rds_ib_laddr_check()
478 if (ret || !cm_id->device || in rds_ib_laddr_check()
479 cm_id->device->node_type != RDMA_NODE_IB_CA) in rds_ib_laddr_check()
484 cm_id->device ? cm_id->device->node_type : -1); in rds_ib_laddr_check()
487 rdma_destroy_id(cm_id); in rds_ib_laddr_check()
/linux/drivers/infiniband/hw/qedr/
A D qedr_iw_cm.c 98 ep->cm_id->rem_ref(ep->cm_id); in qedr_iw_free_ep()
167 ep->cm_id->event_handler(ep->cm_id, &event); in qedr_iw_issue_event()
240 ep->cm_id->event_handler(ep->cm_id, &event); in qedr_iw_disconnect_worker()
576 cm_id->add_ref(cm_id); in qedr_iw_connect()
577 ep->cm_id = cm_id; in qedr_iw_connect()
679 cm_id->add_ref(cm_id); in qedr_iw_create_listen()
680 listener->cm_id = cm_id; in qedr_iw_create_listen()
716 cm_id->rem_ref(cm_id); in qedr_iw_create_listen()
731 cm_id->rem_ref(cm_id); in qedr_iw_destroy_listen()
753 cm_id->add_ref(cm_id); in qedr_iw_accept()
[all …]
A D qedr_iw_cm.h 34 int qedr_iw_connect(struct iw_cm_id *cm_id,
37 int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
39 int qedr_iw_destroy_listen(struct iw_cm_id *cm_id);
41 int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
43 int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
/linux/include/rdma/
A D iw_cm.h 42 typedef int (*iw_cm_handler)(struct iw_cm_id *cm_id,
53 typedef int (*iw_event_handler)(struct iw_cm_id *cm_id,
115 void iw_destroy_cm_id(struct iw_cm_id *cm_id);
128 void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
149 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog);
165 int iw_cm_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param);
179 int iw_cm_reject(struct iw_cm_id *cm_id, const void *private_data,
194 int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param);
206 int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt);
217 int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr,
A D ib_cm.h 326 void ib_destroy_cm_id(struct ib_cm_id *cm_id);
384 int ib_send_cm_req(struct ib_cm_id *cm_id,
409 int ib_send_cm_rep(struct ib_cm_id *cm_id,
420 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
433 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
448 int ib_send_cm_drep(struct ib_cm_id *cm_id,
480 int ib_send_cm_rej(struct ib_cm_id *cm_id,
500 int ib_send_cm_mra(struct ib_cm_id *cm_id,
521 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
542 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
[all …]
/linux/drivers/nvme/target/
A D rdma.c 636 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_init() local
655 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_destroy() local
664 rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num, in nvmet_rdma_rw_ctx_destroy()
720 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_queue_response() local
791 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_write_data_done() local
1450 queue->cm_id = cm_id; in nvmet_rdma_alloc_queue()
1742 if (xchg(&port->cm_id, NULL) != cm_id) in nvmet_rdma_device_removal()
1841 struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL); in nvmet_rdma_disable_port() local
1843 if (cm_id) in nvmet_rdma_disable_port()
1889 port->cm_id = cm_id; in nvmet_rdma_enable_port()
[all …]
/linux/net/9p/
A D trans_rdma.c 77 struct rdma_cm_id *cm_id; member
281 rdma_disconnect(rdma->cm_id); in p9_cm_event_handler()
349 ib_dma_unmap_single(rdma->cm_id->device, in send_done()
377 if (rdma->cm_id && !IS_ERR(rdma->cm_id)) in rdma_destroy_trans()
378 rdma_destroy_id(rdma->cm_id); in rdma_destroy_trans()
537 rdma_disconnect(rdma->cm_id); in rdma_request()
555 rdma_disconnect(rdma->cm_id); in rdma_close()
651 if (IS_ERR(rdma->cm_id)) in rdma_create_trans()
671 err = rdma_resolve_addr(rdma->cm_id, NULL, in rdma_create_trans()
715 rdma->qp = rdma->cm_id->qp; in rdma_create_trans()
[all …]
/linux/fs/ksmbd/
A D transport_rdma.c 83 struct rdma_cm_id *cm_id; member
102 struct rdma_cm_id *cm_id; member
355 t->cm_id = cm_id; in alloc_transport()
356 cm_id->context = t; in alloc_transport()
441 if (t->cm_id) in free_transport()
1563 t->cm_id->device->ops.get_port_immutable(t->cm_id->device, in smb_direct_accept_client()
1853 t->qp = t->cm_id->qp; in smb_direct_create_qpair()
1962 cm_id); in smb_direct_listen_handler()
1985 if (IS_ERR(cm_id)) { in smb_direct_listen()
1987 return PTR_ERR(cm_id); in smb_direct_listen()
[all …]
/linux/drivers/infiniband/hw/irdma/
A D cm.c 3467 cm_id = iwqp->cm_id; in irdma_cm_disconn_true()
3687 iwqp->cm_id = cm_id; in irdma_accept()
3688 cm_node->cm_id = cm_id; in irdma_accept()
3696 cm_id->add_ref(cm_id); in irdma_accept()
3835 cm_info.cm_id = cm_id; in irdma_connect()
3873 iwqp->cm_id = cm_id; in irdma_connect()
3875 cm_id->add_ref(cm_id); in irdma_connect()
3965 cm_info.cm_id = cm_id; in irdma_create_listen()
4003 cm_id->add_ref(cm_id); in irdma_create_listen()
4036 cm_id->rem_ref(cm_id); in irdma_destroy_listen()
[all …]
A D cm.h 245 struct iw_cm_id *cm_id; member
296 struct iw_cm_id *cm_id; member
330 struct iw_cm_id *cm_id; member
387 int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
388 int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
389 int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
390 int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
391 int irdma_destroy_listen(struct iw_cm_id *cm_id);
A D trace_cm.h 201 TP_PROTO(struct irdma_cm_node *cm_node, struct iw_cm_id *cm_id,
203 TP_ARGS(cm_node, cm_id, type, status, caller),
206 __field(struct iw_cm_id *, cm_id)
222 __entry->cm_id = cm_id;
241 __entry->cm_id,
257 TP_PROTO(struct iw_cm_id *cm_id, enum iw_cm_event_type type,
259 TP_ARGS(cm_id, type, status, caller),
260 TP_STRUCT__entry(__field(struct iw_cm_id *, cm_id)
265 TP_fast_assign(__entry->cm_id = cm_id;
271 __entry->cm_id,
/linux/drivers/infiniband/hw/cxgb4/
A D cm.c 152 epc->cm_id->rem_ref(epc->cm_id); in deref_cm_id()
153 epc->cm_id = NULL; in deref_cm_id()
160 epc->cm_id->add_ref(epc->cm_id); in ref_cm_id()
1278 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in close_complete_upcall()
1294 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in peer_close_upcall()
1310 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in peer_abort_upcall()
1353 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in connect_reply_upcall()
1408 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in established_upcall()
3184 ep->com.cm_id = cm_id; in c4iw_accept_cr()
3338 ep->com.cm_id = cm_id; in c4iw_connect()
[all …]
/linux/drivers/infiniband/ulp/ipoib/
A D ipoib_cm.c 439 return ib_send_cm_rep(cm_id, &rep); in ipoib_cm_send_rep()
445 struct net_device *dev = cm_id->context; in ipoib_cm_req_handler()
456 p->id = cm_id; in ipoib_cm_req_handler()
457 cm_id->context = p; in ipoib_cm_req_handler()
512 return ipoib_cm_req_handler(cm_id, event); in ipoib_cm_rx_handler()
514 ib_send_cm_drep(cm_id, NULL, 0); in ipoib_cm_rx_handler()
517 p = cm_id->context; in ipoib_cm_rx_handler()
988 struct ipoib_cm_tx *p = cm_id->context; in ipoib_cm_rep_handler()
1049 ret = ib_send_cm_rtu(cm_id, NULL, 0); in ipoib_cm_rep_handler()
1252 struct ipoib_cm_tx *tx = cm_id->context; in ipoib_cm_tx_handler()
[all …]
/linux/drivers/infiniband/sw/siw/
A D siw_cm.c 323 id = cep->cm_id; in siw_cm_upcall()
396 if (cep->cm_id) { in siw_qp_cm_drop()
416 cep->cm_id->rem_ref(cep->cm_id); in siw_qp_cm_drop()
1061 if (cep->cm_id) in siw_cm_work_handler()
1117 if (cep->cm_id) in siw_cm_work_handler()
1169 cep->cm_id->rem_ref(cep->cm_id); in siw_cm_work_handler()
1402 cep->cm_id = id; in siw_connect()
1640 cep->cm_id = id; in siw_accept()
1834 cep->cm_id = id; in siw_create_listen()
1880 cep->cm_id->rem_ref(cep->cm_id); in siw_create_listen()
[all …]
/linux/drivers/infiniband/ulp/rtrs/
A D rtrs-srv.c 1528 rdma_disconnect(con->c.cm_id); in rtrs_srv_close_work()
1552 rdma_destroy_id(con->c.cm_id); in rtrs_srv_close_work()
1632 struct rdma_cm_id *cm_id, in create_con() argument
1650 con->c.cm_id = cm_id; in create_con()
1709 cm_id->context = &con->c; in create_con()
1986 struct rdma_cm_id *cm_id; in rtrs_srv_cm_init() local
1991 if (IS_ERR(cm_id)) { in rtrs_srv_cm_init()
1992 ret = PTR_ERR(cm_id); in rtrs_srv_cm_init()
2002 ret = rdma_listen(cm_id, 64); in rtrs_srv_cm_init()
2009 return cm_id; in rtrs_srv_cm_init()
[all …]
A D rtrs.c 216 rdma_notify(con->cm_id, IB_EVENT_COMM_EST); in qp_event_handler()
233 struct rdma_cm_id *cm_id = con->cm_id; in create_cq() local
237 cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector, in create_cq()
240 cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx); in create_cq()
257 struct rdma_cm_id *cm_id = con->cm_id; in create_qp() local
272 ret = rdma_create_qp(cm_id, pd, &init_attr); in create_qp()
277 con->qp = cm_id->qp; in create_qp()
319 rdma_destroy_qp(con->cm_id); in rtrs_cq_qp_destroy()
/linux/drivers/infiniband/ulp/isert/
A D ib_isert.c 439 isert_conn->cm_id = cma_id; in isert_connect_request()
496 if (isert_conn->cm_id && in isert_connect_release()
609 isert_np->cm_id = NULL; in isert_np_cma_handler()
613 if (IS_ERR(isert_np->cm_id)) { in isert_np_cma_handler()
616 isert_np->cm_id = NULL; in isert_np_cma_handler()
662 isert_conn->cm_id = NULL; in isert_connect_error()
679 if (isert_np->cm_id == cma_id) in isert_cma_handler()
2294 isert_np->cm_id = isert_lid; in isert_setup_np()
2308 struct rdma_cm_id *cm_id = isert_conn->cm_id; in isert_rdma_accept() local
2374 struct rdma_cm_id *cm_id = isert_conn->cm_id; in isert_set_conn_info() local
[all …]
/linux/drivers/infiniband/ulp/srpt/
A D ib_srpt.c 2225 ch->ib_cm.cm_id = ib_cm_id; in srpt_cm_req_recv()
2532 return srpt_cm_req_recv(cm_id->context, cm_id, NULL, param->port, in srpt_ib_cm_req_recv()
2568 return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num, in srpt_rdma_cm_req_recv()
3142 if (IS_ERR(sdev->cm_id)) { in srpt_add_one()
3144 PTR_ERR(sdev->cm_id)); in srpt_add_one()
3145 ret = PTR_ERR(sdev->cm_id); in srpt_add_one()
3146 sdev->cm_id = NULL; in srpt_add_one()
3161 ret = sdev->cm_id ? in srpt_add_one()
3166 sdev->cm_id->state); in srpt_add_one()
3211 if (sdev->cm_id) in srpt_add_one()
[all …]

Completed in 95 milliseconds

12