Home
last modified time | relevance | path

Searched refs:blk_rq_pos (Results 1 – 25 of 50) sorted by relevance

12

/linux/block/
A D elevator.c 54 #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
264 if (blk_rq_pos(rq) < blk_rq_pos(__rq)) in elv_rb_add()
266 else if (blk_rq_pos(rq) >= blk_rq_pos(__rq)) in elv_rb_add()
291 if (sector < blk_rq_pos(rq)) in elv_rb_find()
293 else if (sector > blk_rq_pos(rq)) in elv_rb_find()
384 __rq = elv_rqhash_find(q, blk_rq_pos(rq)); in elv_attempt_insert_merge()
A D blk-merge.c 633 blk_rq_get_max_sectors(req, blk_rq_pos(req))) { in ll_back_merge_fn()
668 blk_rq_get_max_sectors(req, blk_rq_pos(req))) in req_attempt_discard_merge()
690 blk_rq_get_max_sectors(req, blk_rq_pos(req))) in ll_merge_requests_fn()
752 else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next)) in blk_try_req_merge()
940 else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) in blk_try_merge()
942 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) in blk_try_merge()
1021 blk_rq_get_max_sectors(req, blk_rq_pos(req))) in bio_attempt_discard_merge()
A D bfq-iosched.c 504 s1 = blk_rq_pos(rq1); in bfq_choose_req()
505 s2 = blk_rq_pos(rq2); in bfq_choose_req()
610 if (sector > blk_rq_pos(bfqq->next_rq)) in bfq_rq_pos_tree_lookup()
612 else if (sector < blk_rq_pos(bfqq->next_rq)) in bfq_rq_pos_tree_lookup()
676 blk_rq_pos(bfqq->next_rq), &parent, &p); in bfq_pos_tree_add_move()
954 return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last)); in bfq_find_next_rq()
2241 return abs(blk_rq_pos(rq) - last_pos); in get_sdist()
2380 blk_rq_pos(req) < in bfq_request_merged()
2381 blk_rq_pos(container_of(rb_prev(&req->rb_node), in bfq_request_merged()
2536 return blk_rq_pos(io_struct); in bfq_io_struct_pos()
[all …]
A D blk-core.c 231 blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)), in blk_print_req_error()
244 (unsigned long long)blk_rq_pos(rq), in blk_dump_rq_flags()
1455 rq->__sector = blk_rq_pos(rq_src); in blk_rq_prep_clone()
/linux/include/linux/
A D blktrace_api.h 128 if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1) in blk_rq_trace_sector()
130 return blk_rq_pos(rq); in blk_rq_trace_sector()
A D t10-pi.h 48 return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff; in t10_pi_ref_tag()
A D blk-mq.h 955 static inline sector_t blk_rq_pos(const struct request *rq) in blk_rq_pos() function
1077 return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); in blk_rq_zone_no()
1082 return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); in blk_rq_zone_is_seq()
/linux/drivers/s390/block/
A D dasd_fba.c 338 first_rec = blk_rq_pos(req) >> block->s2b_shift; in dasd_fba_build_cp_discard()
340 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; in dasd_fba_build_cp_discard()
455 first_rec = blk_rq_pos(req) >> block->s2b_shift; in dasd_fba_build_cp_regular()
457 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; in dasd_fba_build_cp_regular()
493 block->bp_block, blk_rq_pos(req), blk_rq_sectors(req)); in dasd_fba_build_cp_regular()
A D dasd_diag.c 530 first_rec = blk_rq_pos(req) >> block->s2b_shift; in dasd_diag_build_cp()
532 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; in dasd_diag_build_cp()
A D dasd_eckd.c 3156 first_trk = blk_rq_pos(req) >> block->s2b_shift; in dasd_eckd_ese_format()
3159 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; in dasd_eckd_ese_format()
3239 first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift; in dasd_eckd_ese_read()
3242 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; in dasd_eckd_ese_read()
4607 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift; in dasd_eckd_build_cp()
4610 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; in dasd_eckd_build_cp()
4689 start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK; in dasd_eckd_build_cp_raw()
4690 end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) % in dasd_eckd_build_cp_raw()
4703 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK; in dasd_eckd_build_cp_raw()
4704 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) / in dasd_eckd_build_cp_raw()
[all …]
/linux/include/scsi/
A D scsi_cmnd.h 233 return blk_rq_pos(scsi_cmd_to_rq(scmd)); in scsi_get_sector()
240 return blk_rq_pos(scsi_cmd_to_rq(scmd)) >> shift; in scsi_get_lba()
/linux/drivers/block/
A D z2ram.c 72 unsigned long start = blk_rq_pos(req) << 9; in z2_queue_rq()
80 (unsigned long long)blk_rq_pos(req), in z2_queue_rq()
A D swim3.c 348 fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl; in swim3_queue_rq()
349 x = ((long)blk_rq_pos(req)) % fs->secpercyl; in swim3_queue_rq()
650 (long)blk_rq_pos(fs->cur_req)); in xfer_timeout()
768 (long)blk_rq_pos(req), err); in swim3_interrupt()
A D virtio_blk.c 155 range[0].sector = cpu_to_le64(blk_rq_pos(req)); in virtblk_setup_discard_write_zeroes()
224 blk_rq_pos(req)); in virtblk_setup_cmd()
229 blk_rq_pos(req)); in virtblk_setup_cmd()
A D floppy.c 2296 block = current_count_sectors + blk_rq_pos(req); in request_done()
2307 write_errors[current_drive].first_error_sector = blk_rq_pos(req); in request_done()
2310 write_errors[current_drive].last_error_sector = blk_rq_pos(req); in request_done()
2571 raw_cmd->cmd[TRACK] = (int)blk_rq_pos(current_req) / max_sector; in make_raw_rw_request()
2572 fsector_t = (int)blk_rq_pos(current_req) % max_sector; in make_raw_rw_request()
2865 current_req, (long)blk_rq_pos(current_req), in floppy_queue_rq()
A D ps3disk.c 116 start_sector = blk_rq_pos(req) * priv->blocking_factor; in ps3disk_submit_request_sg()
/linux/drivers/md/
A D dm-rq.c 133 blk_rq_pos(orig), tio->n_sectors, true, in rq_end_stats()
400 blk_rq_pos(rq)); in map_request()
450 blk_rq_pos(orig), tio->n_sectors, false, 0, in dm_start_request()
/linux/include/trace/events/
A D block.h 132 __entry->sector = blk_rq_pos(rq);
516 __entry->sector = blk_rq_pos(rq);
/linux/drivers/scsi/
A D sd.c 876 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); in sd_setup_unmap_cmnd()
912 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); in sd_setup_write_same16_cmnd()
944 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); in sd_setup_write_same10_cmnd()
975 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); in sd_setup_write_zeroes_cmnd()
1073 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); in sd_setup_write_same_cmnd()
1219 sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq)); in sd_setup_read_write_cmnd()
1239 if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) { in sd_setup_read_write_cmnd()
1244 if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) { in sd_setup_read_write_cmnd()
1311 (unsigned long long)blk_rq_pos(rq), in sd_setup_read_write_cmnd()
2003 start_lba = sectors_to_logical(sdev, blk_rq_pos(req)); in sd_completed_bytes()
A D sr.c 367 good_bytes = (error_sector - blk_rq_pos(rq)) << 9; in sr_done()
466 if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) || in sr_init_command()
482 block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9); in sr_init_command()
A D sd_zbc.c 248 sector_t sector = blk_rq_pos(rq); in sd_zbc_cmnd_checks()
390 sector_t sector = blk_rq_pos(rq); in sd_zbc_setup_zone_mgmt_cmnd()
/linux/drivers/mtd/
A D mtd_blkdevs.c 52 block = blk_rq_pos(req) << 9 >> tr->blkshift; in do_blktrans_request()
61 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > in do_blktrans_request()
/linux/drivers/nvme/host/
A D zns.c 243 c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); in nvme_setup_zone_mgmt_send()
/linux/drivers/mmc/core/
A D block.c 1106 from = blk_rq_pos(req); in mmc_blk_issue_discard_rq()
1144 from = blk_rq_pos(req); in mmc_blk_issue_secdiscard_rq()
1223 if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors)) in mmc_apply_rel_rw()
1325 brq->data.blk_addr = blk_rq_pos(req); in mmc_blk_data_prep()
1349 (blk_rq_pos(req) + blk_rq_sectors(req) == in mmc_blk_data_prep()
1590 brq->cmd.arg = blk_rq_pos(req); in mmc_blk_rw_rq_prep()
/linux/drivers/mtd/ubi/
A D block.c 192 pos = blk_rq_pos(req) << 9; in ubiblock_read()

Completed in 118 milliseconds

12