mirror of https://github.com/torvalds/linux.git
locking/mutex: implement mutex_lock_killable_nest_lock
KVM's SEV intra-host migration code needs to lock all vCPUs of the source and the target VM before it proceeds with the migration.

The number of vCPUs that belong to each VM is not bounded by anything except a self-imposed KVM limit of CONFIG_KVM_MAX_NR_VCPUS vCPUs, which is significantly larger than the depth of lockdep's lock stack.

Luckily, the locks in both of these cases are held under the 'kvm->lock' of each VM, which means that we can use the little-known lockdep feature called a "nest_lock" to support this use case in a cleaner way than is currently done.

Implement and expose 'mutex_lock_killable_nest_lock' for this purpose.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Message-ID: <20250512180407.659015-3-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c5b6ababd2
commit fb49f07ba1

2 changed files with 17 additions and 7 deletions
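For readers unfamiliar with nest_lock, here is a minimal usage sketch of the new helper. The 'group'/'member' structures and the function below are invented for illustration (they are not part of this commit or of KVM); the pattern mirrors the use case described above: many same-class mutexes taken while an outer mutex is held, with the outer mutex passed as the nest_lock.

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct member {
	struct mutex lock;
};

struct group {
	struct mutex lock;		/* outer lock, protects the member list */
	struct member members[64];	/* arbitrary size for the example */
	int nr_members;
};

/* Lock every member mutex; the caller must already hold grp->lock. */
static int group_lock_all_members(struct group *grp)
{
	int i, err;

	lockdep_assert_held(&grp->lock);

	for (i = 0; i < grp->nr_members; i++) {
		/* Returns 0, or -EINTR if a fatal signal arrives while waiting. */
		err = mutex_lock_killable_nest_lock(&grp->members[i].lock,
						    &grp->lock);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		mutex_unlock(&grp->members[i].lock);
	return err;
}

Because lockdep folds all acquisitions annotated with the same nest_lock into a single held-lock entry, the number of member mutexes is no longer limited by the depth of lockdep's lock stack, and no per-lock subclass annotation is needed.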
include/linux/mutex.h

@@ -156,16 +156,15 @@ static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
-
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
-extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
-					unsigned int subclass);
+extern int __must_check _mutex_lock_killable(struct mutex *lock,
+					unsigned int subclass, struct lockdep_map *nest_lock);
 extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
 
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
-#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+#define mutex_lock_killable(lock) _mutex_lock_killable(lock, 0, NULL)
 #define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
 
 #define mutex_lock_nest_lock(lock, nest_lock)				\
@@ -174,6 +173,15 @@ do {									\
 	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
 } while (0)
 
+#define mutex_lock_killable_nest_lock(lock, nest_lock)			\
+(									\
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map),	\
+	_mutex_lock_killable(lock, 0, &(nest_lock)->dep_map)		\
+)
+
+#define mutex_lock_killable_nested(lock, subclass) \
+	_mutex_lock_killable(lock, subclass, NULL)
+
 #else
 extern void mutex_lock(struct mutex *lock);
 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
@@ -183,6 +191,7 @@ extern void mutex_lock_io(struct mutex *lock);
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+# define mutex_lock_killable_nest_lock(lock, nest_lock) mutex_lock_killable(lock)
 # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
 # define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
 #endif

kernel/locking/mutex.c

@@ -808,11 +808,12 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 
 int __sched
-mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+_mutex_lock_killable(struct mutex *lock, unsigned int subclass,
+		     struct lockdep_map *nest)
 {
-	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
+	return __mutex_lock(lock, TASK_KILLABLE, subclass, nest, _RET_IP_);
 }
-EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+EXPORT_SYMBOL_GPL(_mutex_lock_killable);
 
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)