linux/kernel/irq/irq_test.c
Brian Norris 5b65258229 genirq/test: Resolve irq lock inversion warnings
irq_shutdown_and_deactivate() is normally called with the descriptor lock
held and interrupts disabled. Nested a few levels down, it grabs the
global irq_resend_lock. Lockdep rightfully complains when interrupts are
not disabled:

       CPU0                    CPU1
       ----                    ----
  lock(irq_resend_lock);
                               local_irq_disable();
                               lock(&irq_desc_lock_class);
                               lock(irq_resend_lock);
  <Interrupt>
    lock(&irq_desc_lock_class);

...
   _raw_spin_lock+0x2b/0x40
   clear_irq_resend+0x14/0x70
   irq_shutdown_and_deactivate+0x29/0x80
   irq_shutdown_depth_test+0x1ce/0x600
   kunit_try_run_case+0x90/0x120

Grab the descriptor lock and disable interrupts to resolve the
problem.
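
The guarded call in the test now looks like this:

   scoped_guard(raw_spinlock_irqsave, &desc->lock)
           irq_shutdown_and_deactivate(desc);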

Fixes: 66067c3c8a ("genirq: Add kunit tests for depth counts")
Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Brian Norris <briannorris@chromium.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Link: https://lore.kernel.org/all/aJJONEIoIiTSDMqc@google.com
Closes: https://lore.kernel.org/lkml/31a761e4-8f81-40cf-aaf5-d220ba11911c@roeck-us.net/
2025-08-06 10:29:48 +02:00

// SPDX-License-Identifier: LGPL-2.1+
#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/nodemask.h>
#include <kunit/test.h>

#include "internals.h"

static irqreturn_t noop_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static void noop(struct irq_data *data) { }
static unsigned int noop_ret(struct irq_data *data) { return 0; }

static int noop_affinity(struct irq_data *data, const struct cpumask *dest,
			 bool force)
{
	irq_data_update_effective_affinity(data, dest);
	return 0;
}
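
/*
 * A chip whose callbacks all do nothing, so the tests can drive the core
 * irq state machine without any real hardware behind the descriptor.
 */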
static struct irq_chip fake_irq_chip = {
	.name = "fake",
	.irq_startup = noop_ret,
	.irq_shutdown = noop,
	.irq_enable = noop,
	.irq_disable = noop,
	.irq_ack = noop,
	.irq_mask = noop,
	.irq_unmask = noop,
	.irq_set_affinity = noop_affinity,
	.flags = IRQCHIP_SKIP_SET_WAKE,
};
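
/* disable_irq()/enable_irq() must balance the descriptor depth count. */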
static void irq_disable_depth_test(struct kunit *test)
{
	struct irq_desc *desc;
	int virq, ret;

	virq = irq_domain_alloc_descs(-1, 1, 0, NUMA_NO_NODE, NULL);
	KUNIT_ASSERT_GE(test, virq, 0);

	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);

	desc = irq_to_desc(virq);
	KUNIT_ASSERT_PTR_NE(test, desc, NULL);

	ret = request_irq(virq, noop_handler, 0, "test_irq", NULL);
	KUNIT_EXPECT_EQ(test, ret, 0);

	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	disable_irq(virq);
	KUNIT_EXPECT_EQ(test, desc->depth, 1);

	enable_irq(virq);
	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	free_irq(virq, NULL);
}
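
/*
 * Freeing a disabled interrupt keeps its depth elevated; a fresh
 * request_irq() starts it up again and resets the depth to 0.
 */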
static void irq_free_disabled_test(struct kunit *test)
{
	struct irq_desc *desc;
	int virq, ret;

	virq = irq_domain_alloc_descs(-1, 1, 0, NUMA_NO_NODE, NULL);
	KUNIT_ASSERT_GE(test, virq, 0);

	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);

	desc = irq_to_desc(virq);
	KUNIT_ASSERT_PTR_NE(test, desc, NULL);

	ret = request_irq(virq, noop_handler, 0, "test_irq", NULL);
	KUNIT_EXPECT_EQ(test, ret, 0);

	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	disable_irq(virq);
	KUNIT_EXPECT_EQ(test, desc->depth, 1);

	free_irq(virq, NULL);
	KUNIT_EXPECT_GE(test, desc->depth, 1);

	ret = request_irq(virq, noop_handler, 0, "test_irq", NULL);
	KUNIT_EXPECT_EQ(test, ret, 0);
	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	free_irq(virq, NULL);
}
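
/*
 * A managed interrupt that is shut down while disabled must retain its
 * disable depth across deactivate/activate/startup, so that enable_irq()
 * brings it back to depth 0.
 */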
static void irq_shutdown_depth_test(struct kunit *test)
{
	struct irq_desc *desc;
	struct irq_data *data;
	int virq, ret;
	struct irq_affinity_desc affinity = {
		.is_managed = 1,
		.mask = CPU_MASK_ALL,
	};

	if (!IS_ENABLED(CONFIG_SMP))
		kunit_skip(test, "requires CONFIG_SMP for managed shutdown");

	virq = irq_domain_alloc_descs(-1, 1, 0, NUMA_NO_NODE, &affinity);
	KUNIT_ASSERT_GE(test, virq, 0);

	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);

	desc = irq_to_desc(virq);
	KUNIT_ASSERT_PTR_NE(test, desc, NULL);

	data = irq_desc_get_irq_data(desc);
	KUNIT_ASSERT_PTR_NE(test, data, NULL);

	ret = request_irq(virq, noop_handler, 0, "test_irq", NULL);
	KUNIT_EXPECT_EQ(test, ret, 0);

	KUNIT_EXPECT_TRUE(test, irqd_is_activated(data));
	KUNIT_EXPECT_TRUE(test, irqd_is_started(data));
	KUNIT_EXPECT_TRUE(test, irqd_affinity_is_managed(data));

	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	disable_irq(virq);
	KUNIT_EXPECT_EQ(test, desc->depth, 1);
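
	/*
	 * irq_shutdown_and_deactivate() takes the global irq_resend_lock
	 * internally, so it must be called with the descriptor lock held
	 * and interrupts disabled, or lockdep reports a lock inversion.
	 */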
	scoped_guard(raw_spinlock_irqsave, &desc->lock)
		irq_shutdown_and_deactivate(desc);

	KUNIT_EXPECT_FALSE(test, irqd_is_activated(data));
	KUNIT_EXPECT_FALSE(test, irqd_is_started(data));

	KUNIT_EXPECT_EQ(test, irq_activate(desc), 0);
#ifdef CONFIG_SMP
	irq_startup_managed(desc);
#endif

	KUNIT_EXPECT_EQ(test, desc->depth, 1);

	enable_irq(virq);
	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	free_irq(virq, NULL);
}
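
/*
 * Offlining the only CPU in a managed interrupt's affinity mask shuts the
 * interrupt down; it must stay shut down (while keeping its depth) until
 * enable_irq() restarts it after the CPU comes back.
 */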
static void irq_cpuhotplug_test(struct kunit *test)
{
	struct irq_desc *desc;
	struct irq_data *data;
	int virq, ret;
	struct irq_affinity_desc affinity = {
		.is_managed = 1,
	};

	if (!IS_ENABLED(CONFIG_SMP))
		kunit_skip(test, "requires CONFIG_SMP for CPU hotplug");
	if (!get_cpu_device(1))
		kunit_skip(test, "requires more than 1 CPU for CPU hotplug");
	if (!cpu_is_hotpluggable(1))
		kunit_skip(test, "CPU 1 must be hotpluggable");

	cpumask_copy(&affinity.mask, cpumask_of(1));

	virq = irq_domain_alloc_descs(-1, 1, 0, NUMA_NO_NODE, &affinity);
	KUNIT_ASSERT_GE(test, virq, 0);

	irq_set_chip_and_handler(virq, &fake_irq_chip, handle_simple_irq);

	desc = irq_to_desc(virq);
	KUNIT_ASSERT_PTR_NE(test, desc, NULL);

	data = irq_desc_get_irq_data(desc);
	KUNIT_ASSERT_PTR_NE(test, data, NULL);

	ret = request_irq(virq, noop_handler, 0, "test_irq", NULL);
	KUNIT_EXPECT_EQ(test, ret, 0);

	KUNIT_EXPECT_TRUE(test, irqd_is_activated(data));
	KUNIT_EXPECT_TRUE(test, irqd_is_started(data));
	KUNIT_EXPECT_TRUE(test, irqd_affinity_is_managed(data));

	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	disable_irq(virq);
	KUNIT_EXPECT_EQ(test, desc->depth, 1);

	KUNIT_EXPECT_EQ(test, remove_cpu(1), 0);
	KUNIT_EXPECT_FALSE(test, irqd_is_activated(data));
	KUNIT_EXPECT_FALSE(test, irqd_is_started(data));
	KUNIT_EXPECT_GE(test, desc->depth, 1);

	KUNIT_EXPECT_EQ(test, add_cpu(1), 0);
	KUNIT_EXPECT_FALSE(test, irqd_is_activated(data));
	KUNIT_EXPECT_FALSE(test, irqd_is_started(data));
	KUNIT_EXPECT_EQ(test, desc->depth, 1);

	enable_irq(virq);
	KUNIT_EXPECT_TRUE(test, irqd_is_activated(data));
	KUNIT_EXPECT_TRUE(test, irqd_is_started(data));
	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	free_irq(virq, NULL);
}

static struct kunit_case irq_test_cases[] = {
	KUNIT_CASE(irq_disable_depth_test),
	KUNIT_CASE(irq_free_disabled_test),
	KUNIT_CASE(irq_shutdown_depth_test),
	KUNIT_CASE(irq_cpuhotplug_test),
	{}
};

static struct kunit_suite irq_test_suite = {
	.name = "irq_test_cases",
	.test_cases = irq_test_cases,
};

kunit_test_suite(irq_test_suite);

MODULE_DESCRIPTION("IRQ unit test suite");
MODULE_LICENSE("GPL");