/linux/drivers/md/
  dm-writecache.c
    104   #define WC_MODE_PMEM(wc) ((wc)->pmem_mode)   [argument]
    500   wc,   in ssd_commit_flushed()
    544   writecache_disk_flush(wc, wc->ssd_dev);   in ssd_commit_flushed()
    785   writecache_flush_region(wc, memory_data(wc, e), wc->block_size);   in writecache_flush_entry()
    1638  struct dm_writecache *wc = wb->wc;   in writecache_writeback_endio() [local]
    1651  struct dm_writecache *wc = c->wc;   in writecache_copy_endio() [local]
    1779  struct dm_writecache *wc = wb->wc;   in wc_add_block() [local]
    1826  wb->wc = wc;   in __writecache_writeback_pmem()
    1893  c->wc = wc;   in __writecache_writeback_ssd()
    2152  writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);   in init_memory()
    [all …]
/linux/include/math-emu/
  op-common.h
    123   _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \
    129   _FP_FRAC_SET_##wc(X, _FP_MAXFRAC_##wc); \
    146   _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \
    155   _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \
    174   _FP_FRAC_SET_##wc(X, _FP_MINFRAC_##wc); \
    185   _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \
    190   _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \
    244   _FP_FRAC_SET_##wc(X, _FP_MINFRAC_##wc); \
    634   _FP_FRAC_SET_##wc(S, _FP_ZEROFRAC_##wc); \
    635   _FP_FRAC_SET_##wc(R, _FP_ZEROFRAC_##wc); \
    [all …]

  soft-fp.h
    135   #define _FP_ROUND_NEAREST(wc, X) \   [argument]
    141   #define _FP_ROUND_ZERO(wc, X) (void)0   [argument]
    143   #define _FP_ROUND_PINF(wc, X) \   [argument]
    146   _FP_FRAC_ADDI_##wc(X, _FP_WORK_LSB); \
    149   #define _FP_ROUND_MINF(wc, X) \   [argument]
    155   #define _FP_ROUND(wc, X) \   [argument]
    157   if (_FP_FRAC_LOW_##wc(X) & 7) \
    162   _FP_ROUND_NEAREST(wc,X); \
    165   _FP_ROUND_ZERO(wc,X); \
    168   _FP_ROUND_PINF(wc,X); \
    [all …]
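The soft-fp.h hits are the rounding-mode dispatch of the kernel's soft-float emulation: _FP_ROUND() only does work when the three low "work" bits of the fraction are non-zero, and then defers to _FP_ROUND_NEAREST/ZERO/PINF/MINF depending on the current rounding mode. The sketch below is an illustrative plain-C rendering of that dispatch on a single 64-bit fraction; the names, constants and the tie-to-even detail are assumptions for illustration, not the actual macro expansions.

#include <stdint.h>

enum round_mode { RM_NEAREST, RM_ZERO, RM_PINF, RM_MINF };

#define WORK_BITS 7u          /* the three low guard/round/sticky bits */
#define WORK_LSB  (1u << 3)   /* lowest bit of the fraction proper */

static uint64_t fp_round(uint64_t frac, int sign, enum round_mode rm)
{
	uint64_t work = frac & WORK_BITS;

	if (!work)                       /* exact result: nothing to round */
		return frac;

	switch (rm) {
	case RM_NEAREST:
		/* round to nearest, ties broken toward an even kept LSB */
		if (work > 4 || (work == 4 && (frac & WORK_LSB)))
			frac += WORK_LSB;
		break;
	case RM_ZERO:                    /* truncate, like _FP_ROUND_ZERO */
		break;
	case RM_PINF:                    /* round up only for positive values */
		if (!sign)
			frac += WORK_LSB;
		break;
	case RM_MINF:                    /* round toward -inf bumps negatives */
		if (sign)
			frac += WORK_LSB;
		break;
	}
	return frac;
}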
/linux/fs/ocfs2/
  aops.c
    803   if (wc->w_target_page == wc->w_pages[i]) {   in ocfs2_unlock_pages()
    847   if (!wc)   in ocfs2_alloc_write_ctxt()
    853   wc->w_clen = cend - wc->w_cpos + 1;   in ocfs2_alloc_write_ctxt()
    1104  wc->w_target_page = wc->w_pages[i];   in ocfs2_grab_pages_for_write()
    1140  wc->w_di_bh, wc->w_handle,   in ocfs2_write_cluster()
    1275  wc->w_target_to = wc->w_target_from + len;   in ocfs2_set_target_boundaries()
    1303  desc = &wc->w_desc[wc->w_clen - 1];   in ocfs2_set_target_boundaries()
    1501  wc->w_pages[0] = wc->w_target_page = page;   in ocfs2_write_begin_inline()
    1628  if (wc)   in ocfs2_expand_nonsparse_inode()
    1704  wc->w_cpos, wc->w_clen, UINT_MAX);   in ocfs2_write_begin_nolock()
    [all …]
/linux/drivers/infiniband/hw/mlx4/
  cq.c
    593   wc->dlid_path_bits = 0;   in use_tunnel_data()
    596   wc->slid = 0;   in use_tunnel_data()
    626   wc->qp = &qp->ibqp;   in mlx4_ib_qp_sw_comp()
    627   wc++;   in mlx4_ib_qp_sw_comp()
    764   wc->wc_flags = 0;   in mlx4_ib_poll_one()
    785   wc->byte_len = 8;   in mlx4_ib_poll_one()
    789   wc->byte_len = 8;   in mlx4_ib_poll_one()
    793   wc->byte_len = 8;   in mlx4_ib_poll_one()
    797   wc->byte_len = 8;   in mlx4_ib_poll_one()
    825   wc->wc_flags = 0;   in mlx4_ib_poll_one()
    [all …]
/linux/drivers/infiniband/hw/mlx5/
  cq.c
    120   wc->wc_flags = 0;   in handle_good_req()
    141   wc->byte_len = 8;   in handle_good_req()
    145   wc->byte_len = 8;   in handle_good_req()
    149   wc->byte_len = 8;   in handle_good_req()
    153   wc->byte_len = 8;   in handle_good_req()
    213   wc->wc_flags = 0;   in handle_responder()
    245   wc->slid = 0;   in handle_responder()
    253   wc->sl = 0;   in handle_responder()
    416   wc++;   in sw_comp()
    589   wc[npolled++] = soft_wc->wc;   in poll_soft_wc()
    [all …]

  gsi.c
    37    struct ib_wc wc;   [member]
    79    wr_id = wr->wc.wr_id;   in handle_single_completion()
    80    wr->wc = *wc;   in handle_single_completion()
    81    wr->wc.wr_id = wr_id;   in handle_single_completion()
    82    wr->wc.qp = &mqp->ibqp;   in handle_single_completion()
    375   if (!wc) {   in mlx5_ib_add_outstanding_wr()
    376   memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));   in mlx5_ib_add_outstanding_wr()
    377   gsi_wr->wc.pkey_index = wr->pkey_index;   in mlx5_ib_add_outstanding_wr()
    378   gsi_wr->wc.wr_id = wr->wr.wr_id;   in mlx5_ib_add_outstanding_wr()
    380   gsi_wr->wc = *wc;   in mlx5_ib_add_outstanding_wr()
    [all …]
/linux/drivers/net/ethernet/brocade/bna/
  bfa_cs.h
    73    bfa_wc_up(struct bfa_wc *wc)   in bfa_wc_up() [argument]
    75    wc->wc_count++;   in bfa_wc_up()
    79    bfa_wc_down(struct bfa_wc *wc)   in bfa_wc_down() [argument]
    81    wc->wc_count--;   in bfa_wc_down()
    82    if (wc->wc_count == 0)   in bfa_wc_down()
    83    wc->wc_resume(wc->wc_cbarg);   in bfa_wc_down()
    90    wc->wc_resume = wc_resume;   in bfa_wc_init()
    91    wc->wc_cbarg = wc_cbarg;   in bfa_wc_init()
    92    wc->wc_count = 0;   in bfa_wc_init()
    93    bfa_wc_up(wc);   in bfa_wc_init()
    [all …]
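Both copies of bfa_cs.h (here and under /linux/drivers/scsi/bfa/ further down) implement the same tiny "waiting counter": bfa_wc_init() records a resume callback and takes an initial count via bfa_wc_up(), every outstanding sub-operation bumps the count with bfa_wc_up(), and bfa_wc_down() runs the callback once the count drops to zero. A minimal sketch of a hypothetical consumer follows; struct my_port, cleanup_done() and the pending flags are illustrative, only the bfa_wc calls come from the header hits above.

#include <linux/completion.h>
#include "bfa_cs.h"                     /* struct bfa_wc, bfa_wc_init/up/down */

struct my_port {                        /* hypothetical consumer context */
	struct bfa_wc wc;
	struct completion cleanup_comp;
	bool rx_pending;
	bool tx_pending;
};

static void cleanup_done(void *cbarg)   /* runs when the count hits zero */
{
	struct my_port *port = cbarg;

	complete(&port->cleanup_comp);
}

static void start_cleanup(struct my_port *port)
{
	init_completion(&port->cleanup_comp);
	bfa_wc_init(&port->wc, cleanup_done, port);     /* count is now 1 */

	if (port->rx_pending) {
		bfa_wc_up(&port->wc);
		/* kick off async RX stop; its handler calls bfa_wc_down() */
	}
	if (port->tx_pending) {
		bfa_wc_up(&port->wc);
		/* kick off async TX stop; its handler calls bfa_wc_down() */
	}

	bfa_wc_down(&port->wc);         /* drop the reference taken by init */
	wait_for_completion(&port->cleanup_comp);
}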
/linux/drivers/infiniband/hw/qib/
  qib_ud.c
    62    struct ib_wc wc;   in qib_ud_loopback() [local]
    125   memset(&wc, 0, sizeof(wc));   in qib_ud_loopback()
    201   wc.wr_id = qp->r_wr_id;   in qib_ud_loopback()
    203   wc.opcode = IB_WC_RECV;   in qib_ud_loopback()
    204   wc.qp = &qp->ibqp;   in qib_ud_loopback()
    434   struct ib_wc wc;   in qib_ud_rcv() [local]
    514   wc.ex.imm_data = 0;   in qib_ud_rcv()
    515   wc.wc_flags = 0;   in qib_ud_rcv()
    563   wc.vendor_err = 0;   in qib_ud_rcv()
    564   wc.qp = &qp->ibqp;   in qib_ud_rcv()
    [all …]

  qib_uc.c
    246   struct ib_wc wc;   in qib_uc_rcv() [local]
    374   wc.ex.imm_data = 0;   in qib_uc_rcv()
    375   wc.wc_flags = 0;   in qib_uc_rcv()
    388   wc.opcode = IB_WC_RECV;   in qib_uc_rcv()
    392   wc.wr_id = qp->r_wr_id;   in qib_uc_rcv()
    394   wc.qp = &qp->ibqp;   in qib_uc_rcv()
    399   wc.vendor_err = 0;   in qib_uc_rcv()
    400   wc.pkey_index = 0;   in qib_uc_rcv()
    401   wc.dlid_path_bits = 0;   in qib_uc_rcv()
    402   wc.port_num = 0;   in qib_uc_rcv()
    [all …]
/linux/drivers/infiniband/sw/siw/
  siw_cq.c
    48    int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)   in siw_reap_cqe() [argument]
    57    memset(wc, 0, sizeof(*wc));   in siw_reap_cqe()
    58    wc->wr_id = cqe->id;   in siw_reap_cqe()
    59    wc->status = map_cqe_status[cqe->status].ib;   in siw_reap_cqe()
    60    wc->opcode = map_wc_opcode[cqe->opcode];   in siw_reap_cqe()
    61    wc->byte_len = cqe->bytes;   in siw_reap_cqe()
    70    wc->ex.invalidate_rkey = cqe->inval_stag;   in siw_reap_cqe()
    71    wc->wc_flags = IB_WC_WITH_INVALIDATE;   in siw_reap_cqe()
    73    wc->qp = cqe->base_qp;   in siw_reap_cqe()
    98    struct ib_wc wc;   in siw_cq_flush() [local]
    [all …]
/linux/drivers/infiniband/sw/rdmavt/
  trace_cq.h
    70    TP_PROTO(struct rvt_cq *cq, struct ib_wc *wc, u32 idx),
    71    TP_ARGS(cq, wc, idx),
    85    __entry->wr_id = wc->wr_id;
    86    __entry->status = wc->status;
    87    __entry->opcode = wc->opcode;
    88    __entry->length = wc->byte_len;
    89    __entry->qpn = wc->qp->qp_num;
    91    __entry->flags = wc->wc_flags;
    92    __entry->imm = be32_to_cpu(wc->ex.imm_data);
    111   TP_ARGS(cq, wc, idx));
    [all …]
/linux/drivers/infiniband/hw/hfi1/
  ud.c
    41    struct ib_wc wc;   in ud_loopback() [local]
    107   memset(&wc, 0, sizeof(wc));   in ud_loopback()
    196   wc.qp = &qp->ibqp;   in ud_loopback()
    205   wc.pkey_index = 0;   in ud_loopback()
    811   struct ib_wc wc;   in hfi1_ud_rcv() [local]
    926   wc.wc_flags = 0;   in hfi1_ud_rcv()
    987   wc.vendor_err = 0;   in hfi1_ud_rcv()
    988   wc.qp = &qp->ibqp;   in hfi1_ud_rcv()
    989   wc.src_qp = src_qp;   in hfi1_ud_rcv()
    1004  wc.pkey_index = 0;   in hfi1_ud_rcv()
    [all …]

  uc.c
    270   struct ib_wc wc;   in hfi1_uc_rcv() [local]
    394   wc.ex.imm_data = 0;   in hfi1_uc_rcv()
    395   wc.wc_flags = 0;   in hfi1_uc_rcv()
    406   wc.opcode = IB_WC_RECV;   in hfi1_uc_rcv()
    410   wc.wr_id = qp->r_wr_id;   in hfi1_uc_rcv()
    412   wc.qp = &qp->ibqp;   in hfi1_uc_rcv()
    428   wc.vendor_err = 0;   in hfi1_uc_rcv()
    429   wc.pkey_index = 0;   in hfi1_uc_rcv()
    430   wc.dlid_path_bits = 0;   in hfi1_uc_rcv()
    431   wc.port_num = 0;   in hfi1_uc_rcv()
    [all …]
/linux/fs/ntfs/
  unistr.c
    250   wchar_t wc;   in ntfs_nlstoucs() [local]
    259   &wc);   in ntfs_nlstoucs()
    262   if (likely(wc)) {   in ntfs_nlstoucs()
    327   int i, o, ns_len, wc;   in ntfs_ucstonls() [local]
    334   wc = -ENAMETOOLONG;   in ntfs_ucstonls()
    346   if (wc > 0) {   in ntfs_ucstonls()
    347   o += wc;   in ntfs_ucstonls()
    349   } else if (!wc)   in ntfs_ucstonls()
    378   if (wc != -ENAMETOOLONG)   in ntfs_ucstonls()
    379   wc = -EILSEQ;   in ntfs_ucstonls()
    [all …]
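The unistr.c hits are the NLS codepage conversions: the nls_table char2uni/uni2char callbacks return the number of bytes consumed or produced, zero, or a negative errno, and ntfs_ucstonls() keeps -ENAMETOOLONG as-is while folding other failures into -EILSEQ. A hedged sketch of the per-character loop those return conventions imply; the function name and buffer handling are illustrative, only the struct nls_table interface is the real kernel API.

#include <linux/nls.h>
#include <linux/errno.h>

/* Illustrative Unicode-to-NLS loop; the caller provides room in "out"
 * for out_len bytes plus a terminating NUL. */
static int ucs_to_nls(const struct nls_table *nls, const __le16 *ins,
		      int in_len, unsigned char *out, int out_len)
{
	int i, o, wc;

	for (i = o = 0; i < in_len; i++) {
		wc = nls->uni2char(le16_to_cpu(ins[i]), out + o, out_len - o);
		if (wc > 0) {
			o += wc;                /* wrote wc bytes */
			continue;
		}
		if (!wc)
			break;                  /* nothing converted: stop */
		/* wc < 0: keep -ENAMETOOLONG, report other errors as -EILSEQ */
		if (wc != -ENAMETOOLONG)
			wc = -EILSEQ;
		return wc;
	}
	out[o] = 0;
	return o;
}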
/linux/drivers/infiniband/hw/vmw_pvrdma/
  pvrdma_cq.c
    323   struct ib_wc *wc)   in pvrdma_poll_one() [argument]
    358   wc->wr_id = cqe->wr_id;   in pvrdma_poll_one()
    359   wc->qp = &(*cur_qp)->ibqp;   in pvrdma_poll_one()
    360   wc->byte_len = cqe->byte_len;   in pvrdma_poll_one()
    361   wc->ex.imm_data = cqe->imm_data;   in pvrdma_poll_one()
    362   wc->src_qp = cqe->src_qp;   in pvrdma_poll_one()
    364   wc->pkey_index = cqe->pkey_index;   in pvrdma_poll_one()
    365   wc->slid = cqe->slid;   in pvrdma_poll_one()
    366   wc->sl = cqe->sl;   in pvrdma_poll_one()
    368   wc->port_num = cqe->port_num;   in pvrdma_poll_one()
    [all …]
/linux/drivers/scsi/bfa/
  bfa_cs.h
    253   bfa_wc_up(struct bfa_wc_s *wc)   in bfa_wc_up() [argument]
    255   wc->wc_count++;   in bfa_wc_up()
    259   bfa_wc_down(struct bfa_wc_s *wc)   in bfa_wc_down() [argument]
    261   wc->wc_count--;   in bfa_wc_down()
    262   if (wc->wc_count == 0)   in bfa_wc_down()
    263   wc->wc_resume(wc->wc_cbarg);   in bfa_wc_down()
    272   wc->wc_resume = wc_resume;   in bfa_wc_init()
    273   wc->wc_cbarg = wc_cbarg;   in bfa_wc_init()
    274   wc->wc_count = 0;   in bfa_wc_init()
    275   bfa_wc_up(wc);   in bfa_wc_init()
    [all …]
/linux/drivers/infiniband/ulp/iser/
  iser_initiator.c
    572   iser_err_comp(wc, "login_rsp");   in iser_login_rsp()
    582   length = wc->byte_len - ISER_HEADERS_LEN;   in iser_login_rsp()
    612   struct ib_wc *wc,   in iser_check_remote_inv() [argument]
    617   u32 rkey = wc->ex.invalidate_rkey;   in iser_check_remote_inv()
    664   iser_err_comp(wc, "task_rsp");   in iser_task_rsp()
    673   length = wc->byte_len - ISER_HEADERS_LEN;   in iser_task_rsp()
    708   if (unlikely(wc->status != IB_WC_SUCCESS))   in iser_cmd_comp()
    709   iser_err_comp(wc, "command");   in iser_cmd_comp()
    718   iser_err_comp(wc, "control");   in iser_ctrl_comp()
    734   if (unlikely(wc->status != IB_WC_SUCCESS))   in iser_dataout_comp()
    [all …]
/linux/sound/isa/wavefront/
  wavefront_synth.c
    1548  wc->status = 0;   in wavefront_synth_control()
    1552  i = wc->wbuf[0] | (wc->wbuf[1] << 7);   in wavefront_synth_control()
    1560  wc->status = 0;   in wavefront_synth_control()
    1588  wc->status = snd_wavefront_cmd (dev, wc->cmd, wc->rbuf, wc->wbuf);   in wavefront_synth_control()
    1608  demunge_buf (wc->rbuf, wc->rbuf, WF_PATCH_BYTES);   in wavefront_synth_control()
    1612  demunge_buf (wc->rbuf, wc->rbuf, WF_PROGRAM_BYTES);   in wavefront_synth_control()
    1616  demunge_buf (wc->rbuf, wc->rbuf, WF_DRUM_BYTES - 1);   in wavefront_synth_control()
    1690  wc = memdup_user(argp, sizeof(*wc));   in snd_wavefront_synth_ioctl()
    1691  if (IS_ERR(wc))   in snd_wavefront_synth_ioctl()
    1696  else if (copy_to_user (argp, wc, sizeof (*wc)))   in snd_wavefront_synth_ioctl()
    [all …]
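The snd_wavefront_synth_ioctl() hits (lines 1690-1696) show the standard round trip for an ioctl payload: memdup_user() allocates a kernel copy of the user buffer and returns an ERR_PTR on failure, the request is processed in place, and copy_to_user() writes the result back before the copy is freed. A generic sketch of that pattern follows; struct my_req, my_ioctl() and the processing step are placeholders, not the wavefront API.

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>

struct my_req {                 /* placeholder payload layout */
	int cmd;
	int status;
};

static long my_ioctl(void __user *argp)
{
	struct my_req *req;
	long err = 0;

	req = memdup_user(argp, sizeof(*req));  /* kmalloc + copy_from_user */
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* ... act on req->cmd and fill req->status here ... */

	if (copy_to_user(argp, req, sizeof(*req)))
		err = -EFAULT;

	kfree(req);
	return err;
}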
/linux/drivers/infiniband/hw/cxgb4/
  cq.c
    769   wc->wr_id = cookie;   in __c4iw_poll_cq_one()
    772   wc->wc_flags = 0;   in __c4iw_poll_cq_one()
    790   wc->byte_len = CQE_LEN(&cqe);   in __c4iw_poll_cq_one()
    792   wc->byte_len = 0;   in __c4iw_poll_cq_one()
    796   wc->opcode = IB_WC_RECV;   in __c4iw_poll_cq_one()
    800   wc->opcode = IB_WC_RECV;   in __c4iw_poll_cq_one()
    823   wc->opcode = IB_WC_RDMA_READ;   in __c4iw_poll_cq_one()
    828   wc->opcode = IB_WC_SEND;   in __c4iw_poll_cq_one()
    833   wc->opcode = IB_WC_SEND;   in __c4iw_poll_cq_one()
    840   wc->opcode = IB_WC_REG_MR;   in __c4iw_poll_cq_one()
    [all …]
/linux/net/sunrpc/xprtrdma/
  frwr_ops.c
    368   struct ib_cqe *cqe = wc->wr_cqe;   in frwr_wc_fastreg()
    372   trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);   in frwr_wc_fastreg()
    451   if (likely(wc->status == IB_WC_SUCCESS))   in frwr_mr_done()
    463   struct ib_cqe *cqe = wc->wr_cqe;   in frwr_wc_localinv()
    467   trace_xprtrdma_wc_li(wc, &mr->mr_cid);   in frwr_wc_localinv()
    468   frwr_mr_done(wc, mr);   in frwr_wc_localinv()
    482   struct ib_cqe *cqe = wc->wr_cqe;   in frwr_wc_localinv_wake()
    486   trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid);   in frwr_wc_localinv_wake()
    487   frwr_mr_done(wc, mr);   in frwr_wc_localinv_wake()
    579   struct ib_cqe *cqe = wc->wr_cqe;   in frwr_wc_localinv_done()
    [all …]
/linux/net/smc/
  smc_wr.c
    87    link = wc->qp->qp_context;   in smc_wr_tx_process_cqe()
    89    if (wc->opcode == IB_WC_REG_MR) {   in smc_wr_tx_process_cqe()
    90    if (wc->status)   in smc_wr_tx_process_cqe()
    125   if (wc->status) {   in smc_wr_tx_process_cqe()
    150   memset(&wc, 0, sizeof(wc));   in smc_wr_tx_tasklet_fn()
    160   smc_wr_tx_process_cqe(&wc[i]);   in smc_wr_tx_tasklet_fn()
    445   temp_wr_id = wc->wr_id;   in smc_wr_rx_demultiplex()
    450   handler->handler(wc, wr_rx);   in smc_wr_rx_demultiplex()
    460   link = wc[i].qp->qp_context;   in smc_wr_rx_process_cqes()
    467   switch (wc[i].status) {   in smc_wr_rx_process_cqes()
    [all …]
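smc_wr.c consumes completions in batches: the tasklet zeroes an array of struct ib_wc, drains the CQ with ib_poll_cq(), and routes each entry via wc->qp->qp_context and wc->status / wc->opcode to the owning link and handler. A stripped-down sketch of such a polling loop is below; only ib_poll_cq() and the ib_wc fields are the real API, while the function name, batch size and dispatch comments are illustrative.

#include <linux/string.h>
#include <rdma/ib_verbs.h>

#define MY_WC_BATCH 16                  /* illustrative batch size */

static void drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[MY_WC_BATCH];
	int polled, i;

	do {
		memset(wc, 0, sizeof(wc));
		polled = ib_poll_cq(cq, MY_WC_BATCH, wc);
		for (i = 0; i < polled; i++) {
			/* wc[i].qp->qp_context identifies the owner (the
			 * SMC link in smc_wr.c); dispatch from there */
			if (wc[i].status != IB_WC_SUCCESS)
				continue;       /* e.g. flush on teardown */
			/* handle wc[i].opcode / wc[i].wr_id here */
		}
	} while (polled > 0);
}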
/linux/drivers/infiniband/ulp/ipoib/
  ipoib_ib.c
    183   wr_id, wc->status);   in ipoib_ib_handle_rx_wc()
    197   wc->status, wr_id, wc->vendor_err);   in ipoib_ib_handle_rx_wc()
    217   wc->byte_len, wc->slid);   in ipoib_ib_handle_rx_wc()
    221   skb_put(skb, wc->byte_len);   in ipoib_ib_handle_rx_wc()
    239   if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {   in ipoib_ib_handle_rx_wc()
    392   wr_id, wc->status);   in ipoib_ib_handle_tx_wc()
    423   wc->status, wr_id, wc->vendor_err);   in ipoib_ib_handle_tx_wc()
    437   struct ib_wc *wc;   in poll_tx() [local]
    441   wc = priv->send_wc + i;   in poll_tx()
    504   struct ib_wc *wc;   in ipoib_tx_poll() [local]
    [all …]
/linux/fs/btrfs/
  extent-tree.c
    4992  if (path->slots[wc->level] < wc->reada_slot) {   in reada_walk_down()
    4993  wc->reada_count = wc->reada_count * 2 / 3;   in reada_walk_down()
    4994  wc->reada_count = max(wc->reada_count, 2);   in reada_walk_down()
    4996  wc->reada_count = wc->reada_count * 3 / 2;   in reada_walk_down()
    4997  wc->reada_count = min_t(int, wc->reada_count,   in reada_walk_down()
    5590  wc = kzalloc(sizeof(*wc), GFP_NOFS);   in btrfs_drop_snapshot()
    5591  if (!wc) {   in btrfs_drop_snapshot()
    5705  wc->drop_level = wc->level;   in btrfs_drop_snapshot()
    5794  kfree(wc);   in btrfs_drop_snapshot()
    5834  wc = kzalloc(sizeof(*wc), GFP_NOFS);   in btrfs_drop_subtree()
    [all …]
/linux/drivers/infiniband/core/
  cq.c
    88    rc = ib_poll_cq(cq, num_entries, wc);   in __poll_cq()
    108   struct ib_wc *wc = &wcs[i];   in __ib_process_cq() [local]
    110   if (wc->wr_cqe)   in __ib_process_cq()
    111   wc->wr_cqe->done(cq, wc);   in __ib_process_cq()
    113   WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);   in __ib_process_cq()
    158   completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);   in ib_poll_handler()
    184   completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,   in ib_cq_poll_work()
    234   cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);   in __ib_alloc_cq()
    235   if (!cq->wc)   in __ib_alloc_cq()
    279   kfree(cq->wc);   in __ib_alloc_cq()
    [all …]
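The core cq.c hits show the polling machinery behind the per-WR callback API: __ib_alloc_cq() sizes a cq->wc array of IB_POLL_BATCH entries, and __ib_process_cq() calls wc->wr_cqe->done(cq, wc) for every polled completion, so a consumer embeds a struct ib_cqe in its request and recovers its context with container_of(). A minimal sketch of such a handler; struct my_request and my_send_done() are illustrative, while the ib_cqe/ib_wc usage is the real verbs API.

#include <linux/completion.h>
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

struct my_request {                     /* illustrative consumer context */
	struct ib_cqe cqe;              /* embedded, so ->done can find us */
	struct completion done;
};

static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_request *req =
		container_of(wc->wr_cqe, struct my_request, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_debug("send completed with status %d\n", wc->status);

	complete(&req->done);
}

/*
 * When posting, the consumer sets req->cqe.done = my_send_done and points
 * the work request's wr_cqe at &req->cqe; __ib_process_cq() then invokes
 * my_send_done() for that completion instead of leaving it to raw
 * ib_poll_cq() callers.
 */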