block: protect hctx attributes/params using q->elevator_lock

Currently, hctx attributes (nr_tags, nr_reserved_tags, and cpu_list)
are protected using q->sysfs_lock. However, these attributes can be
updated in multiple scenarios:
  - During the driver's probe method.
  - When updating nr_hw_queues.
  - When writing to the sysfs attribute nr_requests,
    which can modify nr_tags.
The nr_requests attribute is already protected using q->elevator_lock,
but none of the update paths actually use q->sysfs_lock to protect hctx
attributes. So to ensure proper synchronization, replace q->sysfs_lock
with q->elevator_lock when reading hctx attributes through sysfs.
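
As a reference for the read side, this is a condensed sketch of
blk_mq_hw_sysfs_show() after the change (see the first hunk below);
declarations and the attribute-entry lookup are abbreviated:

	static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
					    struct attribute *attr, char *page)
	{
		struct blk_mq_hw_ctx_sysfs_entry *entry =
			container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
		struct blk_mq_hw_ctx *hctx =
			container_of(kobj, struct blk_mq_hw_ctx, kobj);
		struct request_queue *q = hctx->queue;
		ssize_t res;

		if (!entry->show)
			return -EIO;

		/* Serialize against nr_requests and nr_hw_queues updates. */
		mutex_lock(&q->elevator_lock);
		res = entry->show(hctx, page);
		mutex_unlock(&q->elevator_lock);
		return res;
	}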

Additionally, blk_mq_update_nr_hw_queues allocates and updates hctx.
The allocation of hctx is protected using q->elevator_lock; however,
updating hctx params happens without any protection, so safeguard the
hctx param update path by also using q->elevator_lock.
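
Note the ordering on the update side: the queue is frozen first, and
q->elevator_lock is taken only afterwards, around the hctx updates
(see the blk_mq_map_swqueue() hunks below). A minimal sketch of the
rule; the exact freeze/unfreeze signatures vary across kernel versions:

	blk_mq_freeze_queue(q);		/* 1: freeze the queue   */
	mutex_lock(&q->elevator_lock);	/* 2: then take the lock */

	/* ... allocate hctxs, remap sw queues, update hctx params ... */

	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue(q);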

Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
Link: https://lore.kernel.org/r/20250306093956.2818808-1-nilay@linux.ibm.com
[axboe: wrap comment at 80 chars]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Nilay Shroff <nilay@linux.ibm.com>
Date:   2025-03-06 15:09:53 +05:30
Committer: Jens Axboe <axboe@kernel.dk>
Commit: 5abba4cebe (parent: 5e40f4452d)

3 changed files with 14 additions and 8 deletions

diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c

@@ -61,9 +61,9 @@ static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
 
 	if (!entry->show)
 		return -EIO;
 
-	mutex_lock(&q->sysfs_lock);
+	mutex_lock(&q->elevator_lock);
 	res = entry->show(hctx, page);
-	mutex_unlock(&q->sysfs_lock);
+	mutex_unlock(&q->elevator_lock);
 	return res;
 }

diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c

@@ -4094,6 +4094,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
 
+	mutex_lock(&q->elevator_lock);
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		cpumask_clear(hctx->cpumask);
 		hctx->nr_ctx = 0;
@@ -4198,6 +4200,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 	}
+
+	mutex_unlock(&q->elevator_lock);
 }
 
 /*

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h

@@ -561,12 +561,14 @@ struct request_queue {
 	struct list_head	flush_list;
 
 	/*
-	 * Protects against I/O scheduler switching, particularly when
-	 * updating q->elevator. Since the elevator update code path may
-	 * also modify q->nr_requests and wbt latency, this lock also
-	 * protects the sysfs attributes nr_requests and wbt_lat_usec.
-	 * To ensure proper locking order during an elevator update, first
-	 * freeze the queue, then acquire ->elevator_lock.
+	 * Protects against I/O scheduler switching, particularly when updating
+	 * q->elevator. Since the elevator update code path may also modify q->
+	 * nr_requests and wbt latency, this lock also protects the sysfs attrs
+	 * nr_requests and wbt_lat_usec. Additionally the nr_hw_queues update
+	 * may modify hctx tags, reserved-tags and cpumask, so this lock also
+	 * helps protect the hctx attrs. To ensure proper locking order during
+	 * an elevator or nr_hw_queue update, first freeze the queue, then
+	 * acquire ->elevator_lock.
 	 */
 	struct mutex		elevator_lock;
 
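
As a usage note on the expanded comment: an individual hctx attribute
show method does not take the lock itself; its caller,
blk_mq_hw_sysfs_show(), already holds q->elevator_lock. A sketch of
the nr_tags reader under this convention:

	static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
						    char *page)
	{
		/* q->elevator_lock is held by blk_mq_hw_sysfs_show(). */
		return sprintf(page, "%u\n", hctx->tags->nr_tags);
	}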