genirq: Retain disable depth for managed interrupts across CPU hotplug
Affinity-managed interrupts can be shut down and restarted during CPU hotunplug/plug. As a result, the interrupt may be left in an unexpected state. Specifically:

 1. Interrupt is affine to CPU N
 2. disable_irq() -> depth is 1
 3. CPU N goes offline
 4. irq_shutdown() -> depth is set to 1 (again)
 5. CPU N goes online
 6. irq_startup() -> depth is set to 0 (BUG! The driver expects that the interrupt is still disabled)
 7. enable_irq() -> depth underflow / unbalanced enable_irq() warning

This is only a problem for managed interrupts and CPU hotplug; all other cases, like request()/free()/request(), truly need to reset a possibly stale disable depth value.

Provide a startup function which takes the disable depth into account, and invoke it for the managed interrupts in the CPU hotplug path.

This requires changing irq_shutdown() to do a depth increment instead of setting it to 1, which allows retaining the disable depth, but is harmless for the other code paths using irq_startup(), which still reset the disable depth unconditionally to keep the original correct behaviour.

A kunit test will be added separately to cover some of these aspects.

[ tglx: Massaged changelog ]

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Brian Norris <briannorris@chromium.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250514201353.3481400-2-briannorris@chromium.org
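To make the depth bookkeeping concrete, here is a minimal, self-contained C sketch of the sequence above. The struct fake_desc, the simplified disable_irq()/enable_irq() helpers and the old_*/new_* hotplug hooks are illustrative stand-ins invented for this example, not kernel code; only the order of operations follows the changelog.

/*
 * Illustrative model of the disable-depth bookkeeping from the changelog.
 * All types and helpers here are simplified stand-ins, not kernel code.
 */
#include <stdio.h>

struct fake_desc {
	int depth;	/* 0 = enabled, >0 = disabled that many times */
	int started;	/* interrupt delivery active */
};

/* driver side */
static void disable_irq(struct fake_desc *d) { d->depth++; }
static void enable_irq(struct fake_desc *d)
{
	if (d->depth == 0) {
		printf("WARNING: unbalanced enable_irq()\n");
		return;
	}
	if (--d->depth == 0)
		d->started = 1;
}

/* hotplug side, old behaviour: reset the depth unconditionally */
static void old_shutdown(struct fake_desc *d) { d->depth = 1; d->started = 0; }
static void old_startup(struct fake_desc *d)  { d->depth = 0; d->started = 1; }

/* hotplug side, new behaviour: preserve the driver's disable depth */
static void new_shutdown(struct fake_desc *d) { d->depth++; d->started = 0; }
static void new_startup_managed(struct fake_desc *d)
{
	/* only start when the last disable reference is dropped */
	if (--d->depth == 0)
		d->started = 1;
}

int main(void)
{
	struct fake_desc d = { .depth = 0, .started = 1 };

	/* steps 1-7 from the changelog, old behaviour */
	disable_irq(&d);	 /* depth 1 */
	old_shutdown(&d);	 /* CPU offline: depth forced back to 1 */
	old_startup(&d);	 /* CPU online: depth 0, enabled too early */
	printf("old: started=%d depth=%d (driver still expects it disabled)\n",
	       d.started, d.depth);
	enable_irq(&d);		 /* depth underflow -> warning */

	/* the same sequence with the fixed bookkeeping */
	d = (struct fake_desc){ .depth = 0, .started = 1 };
	disable_irq(&d);	 /* depth 1 */
	new_shutdown(&d);	 /* CPU offline: depth 2 */
	new_startup_managed(&d); /* CPU online: depth 1, stays disabled */
	enable_irq(&d);		 /* depth 0, interrupt started exactly once */
	printf("new: started=%d depth=%d\n", d.started, d.depth);
	return 0;
}

With the old hooks the sketch reports an unbalanced enable_irq(); with the new ones the interrupt is started exactly when the last disable reference is dropped, which mirrors what irq_startup_managed() does in the patch below.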
This commit is contained in:

parent a4a39c81e1
commit 788019eb55

3 changed files with 23 additions and 2 deletions
kernel/irq/chip.c

@@ -202,6 +202,19 @@ __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
 		return IRQ_STARTUP_ABORT;
 	return IRQ_STARTUP_MANAGED;
 }
+
+void irq_startup_managed(struct irq_desc *desc)
+{
+	/*
+	 * Only start it up when the disable depth is 1, so that a disable,
+	 * hotunplug, hotplug sequence does not end up enabling it during
+	 * hotplug unconditionally.
+	 */
+	desc->depth--;
+	if (!desc->depth)
+		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+}
+
 #else
 static __always_inline int
 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,

@@ -269,6 +282,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
 			ret = __irq_startup(desc);
 			break;
 		case IRQ_STARTUP_ABORT:
+			desc->depth = 1;
 			irqd_set_managed_shutdown(d);
 			return 0;
 		}

@@ -301,7 +315,13 @@ void irq_shutdown(struct irq_desc *desc)
 {
 	if (irqd_is_started(&desc->irq_data)) {
 		clear_irq_resend(desc);
-		desc->depth = 1;
+		/*
+		 * Increment disable depth, so that a managed shutdown on
+		 * CPU hotunplug preserves the actual disabled state when the
+		 * CPU comes back online. See irq_startup_managed().
+		 */
+		desc->depth++;
+
 		if (desc->irq_data.chip->irq_shutdown) {
 			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
 			irq_state_set_disabled(desc);

kernel/irq/cpuhotplug.c

@@ -218,7 +218,7 @@ static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
 		return;
 
 	if (irqd_is_managed_and_shutdown(data))
-		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+		irq_startup_managed(desc);
 
 	/*
 	 * If the interrupt can only be directed to a single target

kernel/irq/internals.h

@@ -87,6 +87,7 @@ extern void __enable_irq(struct irq_desc *desc);
 extern int irq_activate(struct irq_desc *desc);
 extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
 extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
+extern void irq_startup_managed(struct irq_desc *desc);
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_shutdown_and_deactivate(struct irq_desc *desc);