blk-ioc: don't hold queue_lock for ioc_lookup_icq()
Currently, IO issue can grab the queue_lock up to three times, from
bfq_bio_merge(), bfq_limit_depth() and bfq_prepare_request(). The
queue_lock is not necessary if the icq is already created, because
neither the queue nor the ioc can be freed before IO issuing is done.
Hence remove the unnecessary queue_lock and use RCU to protect the
radix tree lookup.

Note that this is also a preparatory patch for supporting request batch
dispatching [1].

[1] https://lore.kernel.org/all/20250722072431.610354-1-yukuai1@huaweicloud.com/

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20250729023229.2944898-1-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
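The change leans on the standard RCU pattern: readers perform lock-free
lookups inside an RCU read-side critical section, while writers unpublish
the object first and defer the actual free until every pre-existing reader
has finished. Below is a minimal, self-contained userspace sketch of that
pattern. It uses liburcu rather than kernel RCU, and all names (struct obj,
lookup_data, remove_obj, table) are illustrative only; it assumes liburcu
is installed (compile with -lurcu).

/* Illustrative userspace analogue of an RCU-protected lookup. */
#include <urcu.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int key;
	int data;
};

static struct obj *table;	/* RCU-protected published pointer */

/* Reader: lock-free lookup, no spinlock needed. */
static int lookup_data(int key, int *out)
{
	struct obj *o;
	int found = 0;

	rcu_read_lock();
	o = rcu_dereference(table);
	if (o && o->key == key) {
		*out = o->data;	/* safe: writer cannot free 'o' yet */
		found = 1;
	}
	rcu_read_unlock();
	return found;
}

/* Writer: unpublish first, then wait for readers before freeing. */
static void remove_obj(void)
{
	struct obj *o = table;

	rcu_assign_pointer(table, NULL);
	synchronize_rcu();	/* all readers that saw 'o' are done */
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	int v;

	o->key = 1;
	o->data = 42;
	rcu_register_thread();
	rcu_assign_pointer(table, o);

	if (lookup_data(1, &v))
		printf("found %d\n", v);

	remove_obj();
	rcu_unregister_thread();
	return 0;
}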
parent 1da67b5b17
commit 5421681bc3
2 changed files with 8 additions and 26 deletions
block/bfq-iosched.c
@@ -454,17 +454,10 @@ static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
  */
 static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
 {
-	struct bfq_io_cq *icq;
-	unsigned long flags;
-
 	if (!current->io_context)
 		return NULL;
 
-	spin_lock_irqsave(&q->queue_lock, flags);
-	icq = icq_to_bic(ioc_lookup_icq(q));
-	spin_unlock_irqrestore(&q->queue_lock, flags);
-
-	return icq;
+	return icq_to_bic(ioc_lookup_icq(q));
 }
 
 /*
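For reference, bfq_bic_lookup() as it reads after this hunk, reassembled
from the context and added lines above (the comment is added here for
explanation and is not part of the patch):

static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
{
	if (!current->io_context)
		return NULL;

	/* Lock-free: ioc_lookup_icq() is now protected by RCU internally. */
	return icq_to_bic(ioc_lookup_icq(q));
}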
@@ -2457,15 +2450,8 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs)
 {
 	struct bfq_data *bfqd = q->elevator->elevator_data;
-	struct request *free = NULL;
-	/*
-	 * bfq_bic_lookup grabs the queue_lock: invoke it now and
-	 * store its return value for later use, to avoid nesting
-	 * queue_lock inside the bfqd->lock. We assume that the bic
-	 * returned by bfq_bic_lookup does not go away before
-	 * bfqd->lock is taken.
-	 */
 	struct bfq_io_cq *bic = bfq_bic_lookup(q);
+	struct request *free = NULL;
 	bool ret;
 
 	spin_lock_irq(&bfqd->lock);
block/blk-ioc.c
@@ -308,24 +308,23 @@ int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
 
 #ifdef CONFIG_BLK_ICQ
 /**
- * ioc_lookup_icq - lookup io_cq from ioc
+ * ioc_lookup_icq - lookup io_cq from ioc in io issue path
  * @q: the associated request_queue
  *
- * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
- * with @q->queue_lock held.
+ * Look up io_cq associated with @ioc - @q pair from @ioc, must be called
+ * from io issue path, either return NULL if current issue io to @q for the
+ * first time, or return a valid icq.
  */
 struct io_cq *ioc_lookup_icq(struct request_queue *q)
 {
 	struct io_context *ioc = current->io_context;
 	struct io_cq *icq;
 
-	lockdep_assert_held(&q->queue_lock);
-
 	/*
 	 * icq's are indexed from @ioc using radix tree and hint pointer,
-	 * both of which are protected with RCU. All removals are done
-	 * holding both q and ioc locks, and we're holding q lock - if we
-	 * find a icq which points to us, it's guaranteed to be valid.
+	 * both of which are protected with RCU, io issue path ensures that
+	 * both request_queue and current task are valid, the found icq
+	 * is guaranteed to be valid until the io is done.
 	 */
 	rcu_read_lock();
 	icq = rcu_dereference(ioc->icq_hint);
@@ -419,10 +418,7 @@ struct io_cq *ioc_find_get_icq(struct request_queue *q)
 		task_unlock(current);
 	} else {
 		get_io_context(ioc);
-
-		spin_lock_irq(&q->queue_lock);
 		icq = ioc_lookup_icq(q);
-		spin_unlock_irq(&q->queue_lock);
 	}
 
 	if (!icq) {
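Dropping the queue_lock on the read side is only safe because icq removal
defers the actual free behind an RCU grace period, so a lookup that races
with removal never touches freed memory. A minimal kernel-style sketch of
that writer-side discipline follows; the struct and helper names here are
hypothetical, not the actual blk-ioc teardown code:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object looked up locklessly under rcu_read_lock(). */
struct lookup_obj {
	struct rcu_head rcu;
	int key;
};

static void lookup_obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct lookup_obj, rcu));
}

/*
 * Removal: unlink under the writer-side locks, then defer the free
 * until every reader that could still see the object has left its
 * RCU read-side critical section.
 */
static void lookup_obj_destroy(struct lookup_obj *obj)
{
	/* ... unlink obj from the index under the writer locks ... */
	call_rcu(&obj->rcu, lookup_obj_free_rcu);
}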