block: remove unused parameter 'q' in __blk_rq_map_sg()

The request_queue parameter is no longer used by blk_rq_map_sg() and
__blk_rq_map_sg(), so drop it from both.

Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20250313035322.243239-1-anuj20.g@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 75618ac6e9 (parent 017ff379b6)
18 changed files with 22 additions and 26 deletions
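The change is purely mechanical at each call site: a struct request already carries a pointer to its queue, so callers simply drop the leading argument. As a hedged sketch of how a blk-mq driver consumes the new two-argument blk_rq_map_sg() (the my_* names are hypothetical; the blk/sg/dma calls are the kernel APIs this patch touches):

#include <linux/blk-mq.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical per-command state; real drivers embed this in their pdu. */
struct my_cmd {
	struct scatterlist sgl[128];
	int nents;
};

static blk_status_t my_map_data(struct device *dma_dev, struct request *req,
				struct my_cmd *cmd)
{
	int nents;

	sg_init_table(cmd->sgl, blk_rq_nr_phys_segments(req));

	/* Coalesce the request's bio segments into the scatterlist;
	 * note: no request_queue argument anymore. */
	nents = blk_rq_map_sg(req, cmd->sgl);
	if (!nents)
		return BLK_STS_IOERR;

	/* Hand the scatterlist to the DMA/IOMMU layer, as before. */
	nents = dma_map_sg(dma_dev, cmd->sgl, nents, rq_dma_dir(req));
	if (!nents)
		return BLK_STS_RESOURCE;

	cmd->nents = nents;
	return BLK_STS_OK;
}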
block/blk-merge.c
@@ -551,8 +551,8 @@ static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
  * Map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries.
  */
-int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
-		struct scatterlist *sglist, struct scatterlist **last_sg)
+int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
+		struct scatterlist **last_sg)
 {
 	struct req_iterator iter = {
 		.bio = rq->bio,
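The implementation walks the request's bios with a req_iterator, as the context lines above show; drivers that need per-segment access use the same machinery through the rq_for_each_segment() macro. A minimal, hypothetical sketch:

/* Sketch: count the bio_vec entries of a request by walking it with
 * the same iterator machinery __blk_rq_map_sg() uses internally.
 * (This counts raw bvecs, before any segment coalescing.) */
static unsigned int my_count_bvecs(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int n = 0;

	rq_for_each_segment(bvec, rq, iter)
		n++;

	return n;
}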
block/bsg-lib.c
@@ -219,7 +219,7 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
 	if (!buf->sg_list)
 		return -ENOMEM;
 	sg_init_table(buf->sg_list, req->nr_phys_segments);
-	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
+	buf->sg_cnt = blk_rq_map_sg(req, buf->sg_list);
 	buf->payload_len = blk_rq_bytes(req);
 	return 0;
 }
drivers/block/mtip32xx/mtip32xx.c
@@ -2056,7 +2056,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	unsigned int nents;
 
 	/* Map the scatter list for DMA access */
-	nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
+	nents = blk_rq_map_sg(rq, command->sg);
 	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
 
 	prefetch(&port->flags);
drivers/block/rnbd/rnbd-clt.c
@@ -1010,7 +1010,7 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
 	 * See queue limits.
 	 */
 	if ((req_op(rq) != REQ_OP_DISCARD) && (req_op(rq) != REQ_OP_WRITE_ZEROES))
-		sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
+		sg_cnt = blk_rq_map_sg(rq, iu->sgt.sgl);
 
 	if (sg_cnt == 0)
 		sg_mark_end(&iu->sgt.sgl[0]);
drivers/block/sunvdc.c
@@ -485,7 +485,7 @@ static int __send_request(struct request *req)
 	}
 
 	sg_init_table(sg, port->ring_cookies);
-	nsg = blk_rq_map_sg(req->q, req, sg);
+	nsg = blk_rq_map_sg(req, sg);
 
 	len = 0;
 	for (i = 0; i < nsg; i++)
drivers/block/virtio_blk.c
@@ -226,7 +226,7 @@ static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
 	if (unlikely(err))
 		return -ENOMEM;
 
-	return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
+	return blk_rq_map_sg(req, vbr->sg_table.sgl);
 }
 
 static void virtblk_cleanup_cmd(struct request *req)
drivers/block/xen-blkfront.c
@@ -751,7 +751,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	id = blkif_ring_get_request(rinfo, req, &final_ring_req);
 	ring_req = &rinfo->shadow[id].req;
 
-	num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
+	num_sg = blk_rq_map_sg(req, rinfo->shadow[id].sg);
 	num_grant = 0;
 	/* Calculate the number of grant used */
 	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
drivers/memstick/core/ms_block.c
@@ -1904,7 +1904,7 @@ static void msb_io_work(struct work_struct *work)
 
 		/* process the request */
 		dbg_verbose("IO: processing new request");
-		blk_rq_map_sg(msb->queue, req, sg);
+		blk_rq_map_sg(req, sg);
 
 		lba = blk_rq_pos(req);
 
drivers/memstick/core/mspro_block.c
@@ -627,9 +627,7 @@ static int mspro_block_issue_req(struct memstick_dev *card)
 	while (true) {
 		msb->current_page = 0;
 		msb->current_seg = 0;
-		msb->seg_count = blk_rq_map_sg(msb->block_req->q,
-					       msb->block_req,
-					       msb->req_sg);
+		msb->seg_count = blk_rq_map_sg(msb->block_req, msb->req_sg);
 
 		if (!msb->seg_count) {
 			unsigned int bytes = blk_rq_cur_bytes(msb->block_req);
drivers/mmc/core/queue.c
@@ -523,5 +523,5 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
 	struct request *req = mmc_queue_req_to_req(mqrq);
 
-	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
+	return blk_rq_map_sg(req, mqrq->sg);
 }
drivers/mtd/ubi/block.c
@@ -199,7 +199,7 @@ static blk_status_t ubiblock_read(struct request *req)
 	 * and ubi_read_sg() will check that limit.
 	 */
 	ubi_sgl_init(&pdu->usgl);
-	blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+	blk_rq_map_sg(req, pdu->usgl.sg);
 
 	while (bytes_left) {
 		/*
drivers/nvme/host/apple.c
@@ -525,7 +525,7 @@ static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
 	if (!iod->sg)
 		return BLK_STS_RESOURCE;
 	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
-	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
+	iod->nents = blk_rq_map_sg(req, iod->sg);
 	if (!iod->nents)
 		goto out_free_sg;
 
drivers/nvme/host/fc.c
@@ -2620,7 +2620,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 	if (ret)
 		return -ENOMEM;
 
-	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
+	op->nents = blk_rq_map_sg(rq, freq->sg_table.sgl);
 	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
 				     op->nents, rq_dma_dir(rq));
drivers/nvme/host/pci.c
@@ -812,7 +812,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 	if (!iod->sgt.sgl)
 		return BLK_STS_RESOURCE;
 	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
-	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+	iod->sgt.orig_nents = blk_rq_map_sg(req, iod->sgt.sgl);
 	if (!iod->sgt.orig_nents)
 		goto out_free_sg;
 
drivers/nvme/host/rdma.c
@@ -1476,8 +1476,7 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
 	if (ret)
 		return -ENOMEM;
 
-	req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
-			req->data_sgl.sg_table.sgl);
+	req->data_sgl.nents = blk_rq_map_sg(rq, req->data_sgl.sg_table.sgl);
 
 	*count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
 			req->data_sgl.nents, rq_dma_dir(rq));
drivers/nvme/target/loop.c
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	}
 
 	iod->req.sg = iod->sg_table.sgl;
-	iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
+	iod->req.sg_cnt = blk_rq_map_sg(req, iod->sg_table.sgl);
 	iod->req.transfer_len = blk_rq_payload_bytes(req);
 }
 
drivers/scsi/scsi_lib.c
@@ -1149,7 +1149,7 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
 	 * Next, walk the list, and fill in the addresses and sizes of
 	 * each segment.
 	 */
-	count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
+	count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg);
 
 	if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
 		unsigned int pad_len =
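scsi_alloc_sgtables() is the one caller that needs the underscored variant: the last_sg out-parameter lets it grow the final scatterlist entry when the queue requires DMA padding. A hedged sketch of that pattern, condensed from the context lines above (error handling omitted):

	struct scatterlist *last_sg = NULL;
	int count = __blk_rq_map_sg(rq, sgl, &last_sg);

	/* Round the transfer up to the queue's DMA pad boundary, if any,
	 * by extending the last mapped entry in place. */
	if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
		unsigned int pad_len =
			(rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		last_sg->length += pad_len;
	}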
include/linux/blk-mq.h
@@ -1155,14 +1155,13 @@ static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
 	return max_t(unsigned short, rq->nr_phys_segments, 1);
 }
 
-int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
-		struct scatterlist *sglist, struct scatterlist **last_sg);
-static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-		struct scatterlist *sglist)
+int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
+		struct scatterlist **last_sg);
+static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
 {
 	struct scatterlist *last_sg = NULL;
 
-	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
+	return __blk_rq_map_sg(rq, sglist, &last_sg);
 }
 void blk_dump_rq_flags(struct request *, char *);
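The header hunk makes the design explicit: blk_rq_map_sg() is a thin inline wrapper that supplies a throwaway last_sg, and since every struct request carries rq->q, no caller ever needed to pass the queue separately. Once mapped, drivers commonly walk the resulting entries with for_each_sg(), as the xen-blkfront hunk above does; a minimal, hypothetical sketch:

/* Sketch: walk the entries produced by blk_rq_map_sg() and sum their
 * lengths; nents is the value the mapping call returned. */
static unsigned int my_total_mapped(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	unsigned int total = 0;
	int i;

	for_each_sg(sgl, sg, nents, i)
		total += sg->length;

	return total;
}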