sched/psi: Optimize psi_group_change() cpu_clock() usage
Dietmar reported that commit 3840cbe24c ("sched: psi: fix bogus
pressure spikes from aggregation race") caused a regression for him on
a high context switch rate benchmark (schbench) due to the now
repeating cpu_clock() calls.

In particular the problem is that get_recent_times() will extrapolate
the current state to 'now'. But if an update uses a timestamp from
before the start of the update, it is possible to get two reads with
inconsistent results. It is effectively back-dating an update.

(Note that this all hard-relies on the clock being synchronized across
CPUs -- if this is not the case, all bets are off.)

Combine this problem with the fact that there are per-group-per-cpu
seqcounts, and the commit in question pushed the clock read into the
group iteration, causing tree-depth cpu_clock() calls. On
architectures where cpu_clock() has appreciable overhead, this hurts.

Instead move to a per-cpu seqcount, which allows us to have a single
clock read for all group updates, increasing internal consistency and
lowering update overhead. This comes at the cost of a longer update
side (proportional to the tree depth) which can cause the read side to
retry more often.

Fixes: 3840cbe24c ("sched: psi: fix bogus pressure spikes from aggregation race")
Reported-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Link: https://lkml.kernel.org/20250522084844.GC31726@noisy.programming.kicks-ass.net
This commit is contained in:
  parent 155213a2ae
  commit 570c8efd5e

2 changed files with 68 additions and 59 deletions
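The back-dating race described above is easy to model outside the kernel. In the following minimal userspace sketch (all names and numbers illustrative, not kernel code), a reader extrapolates an active state's time to 'now'; if an update then closes the state out with a timestamp taken before that read, a later reader sees the total shrink:

#include <stdio.h>

static unsigned long long accumulated;  /* time banked by past updates */
static unsigned long long state_start;  /* when the current state began */
static int active = 1;                  /* is the state still in effect? */

/* What the reader effectively computes: extrapolate to 'now'. */
static unsigned long long read_total(unsigned long long now)
{
        return accumulated + (active ? now - state_start : 0);
}

/* An update that ends the state, stamped at 'stamp'. */
static void state_stop(unsigned long long stamp)
{
        accumulated += stamp - state_start;
        active = 0;
}

int main(void)
{
        state_start = 100;

        unsigned long long a = read_total(110); /* extrapolates: 10 */
        state_stop(105);        /* update back-dated to before that read */
        unsigned long long b = read_total(120); /* later read: only 5 */

        printf("first=%llu later=%llu -- the total went backwards\n", a, b);
        return 0;
}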
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -84,11 +84,9 @@ enum psi_aggregators {
 struct psi_group_cpu {
         /* 1st cacheline updated by the scheduler */
 
-        /* Aggregator needs to know of concurrent changes */
-        seqcount_t seq ____cacheline_aligned_in_smp;
-
         /* States of the tasks belonging to this group */
-        unsigned int tasks[NR_PSI_TASK_COUNTS];
+        unsigned int tasks[NR_PSI_TASK_COUNTS]
+                ____cacheline_aligned_in_smp;
 
         /* Aggregate pressure state derived from the tasks */
         u32 state_mask;
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -176,6 +176,28 @@ struct psi_group psi_system = {
         .pcpu = &system_group_pcpu,
 };
 
+static DEFINE_PER_CPU(seqcount_t, psi_seq);
+
+static inline void psi_write_begin(int cpu)
+{
+        write_seqcount_begin(per_cpu_ptr(&psi_seq, cpu));
+}
+
+static inline void psi_write_end(int cpu)
+{
+        write_seqcount_end(per_cpu_ptr(&psi_seq, cpu));
+}
+
+static inline u32 psi_read_begin(int cpu)
+{
+        return read_seqcount_begin(per_cpu_ptr(&psi_seq, cpu));
+}
+
+static inline bool psi_read_retry(int cpu, u32 seq)
+{
+        return read_seqcount_retry(per_cpu_ptr(&psi_seq, cpu), seq);
+}
+
 static void psi_avgs_work(struct work_struct *work);
 
 static void poll_timer_fn(struct timer_list *t);
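The four helpers above replace the old per-group-per-cpu seqcounts with a single seqcount per CPU, so one write section can cover every group touched by an update. They follow the standard seqcount protocol: writers make the count odd while modifying data, and readers retry if the count was odd or changed across their critical section. A simplified userspace analogue (C11 atomics; this deliberately omits the memory-ordering details the kernel's seqcount_t handles):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int seq;        /* even: idle, odd: writer active */

static void write_begin(void) { atomic_fetch_add(&seq, 1); }    /* -> odd  */
static void write_end(void)   { atomic_fetch_add(&seq, 1); }    /* -> even */

static unsigned int read_begin(void)
{
        unsigned int s;

        while ((s = atomic_load(&seq)) & 1)
                ;       /* a write is in progress: wait for an even count */
        return s;
}

static bool read_retry(unsigned int s)
{
        /* True if a writer started or finished since read_begin(). */
        return atomic_load(&seq) != s;
}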
@@ -186,7 +208,7 @@ static void group_init(struct psi_group *group)
 
         group->enabled = true;
         for_each_possible_cpu(cpu)
-                seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
+                seqcount_init(per_cpu_ptr(&psi_seq, cpu));
         group->avg_last_update = sched_clock();
         group->avg_next_update = group->avg_last_update + psi_period;
         mutex_init(&group->avgs_lock);
@@ -266,14 +288,14 @@ static void get_recent_times(struct psi_group *group, int cpu,
 
         /* Snapshot a coherent view of the CPU state */
         do {
-                seq = read_seqcount_begin(&groupc->seq);
+                seq = psi_read_begin(cpu);
                 now = cpu_clock(cpu);
                 memcpy(times, groupc->times, sizeof(groupc->times));
                 state_mask = groupc->state_mask;
                 state_start = groupc->state_start;
                 if (cpu == current_cpu)
                         memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
-        } while (read_seqcount_retry(&groupc->seq, seq));
+        } while (psi_read_retry(cpu, seq));
 
         /* Calculate state time deltas against the previous snapshot */
         for (s = 0; s < NR_PSI_STATES; s++) {
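Continuing the userspace analogue from the previous sketch, the reader side mirrors the rewritten loop in get_recent_times(): begin, copy out, and retry until the snapshot was not torn by a concurrent writer. Note that in the kernel loop the cpu_clock() read stays inside the retry loop, so the 'now' used for extrapolation is always consistent with the seq-protected fields:

/* Builds on read_begin()/read_retry() from the previous sketch. */
#define NR_STATES 4

static unsigned long long times[NR_STATES];     /* written under write_begin/end */

static void snapshot(unsigned long long out[NR_STATES])
{
        unsigned int s;

        do {
                s = read_begin();
                for (int i = 0; i < NR_STATES; i++)
                        out[i] = times[i];
        } while (read_retry(s));        /* a writer ran: copy again */
}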
@@ -772,30 +794,20 @@ static void record_times(struct psi_group_cpu *groupc, u64 now)
                 groupc->times[PSI_NONIDLE] += delta;
 }
 
+#define for_each_group(iter, group) \
+        for (typeof(group) iter = group; iter; iter = iter->parent)
+
 static void psi_group_change(struct psi_group *group, int cpu,
                              unsigned int clear, unsigned int set,
-                             bool wake_clock)
+                             u64 now, bool wake_clock)
 {
         struct psi_group_cpu *groupc;
         unsigned int t, m;
         u32 state_mask;
-        u64 now;
 
         lockdep_assert_rq_held(cpu_rq(cpu));
         groupc = per_cpu_ptr(group->pcpu, cpu);
 
-        /*
-         * First we update the task counts according to the state
-         * change requested through the @clear and @set bits.
-         *
-         * Then if the cgroup PSI stats accounting enabled, we
-         * assess the aggregate resource states this CPU's tasks
-         * have been in since the last change, and account any
-         * SOME and FULL time these may have resulted in.
-         */
-        write_seqcount_begin(&groupc->seq);
-        now = cpu_clock(cpu);
-
         /*
          * Start with TSK_ONCPU, which doesn't have a corresponding
          * task count - it's just a boolean flag directly encoded in
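The new for_each_group() macro declares its own iterator with typeof() and walks from a group up through all of its ancestors, replacing the repeated do { ... } while ((group = group->parent)) loops rewritten below. A self-contained sketch of the same idiom (struct layout and names simplified for illustration; typeof() is the GNU C extension the kernel already relies on):

#include <stdio.h>

struct psi_group {
        struct psi_group *parent;
        const char *name;       /* illustrative only */
};

#define for_each_group(iter, group) \
        for (typeof(group) iter = group; iter; iter = iter->parent)

int main(void)
{
        struct psi_group root = { NULL,  "root" };
        struct psi_group mid  = { &root, "mid"  };
        struct psi_group leaf = { &mid,  "leaf" };

        for_each_group(g, &leaf)        /* visits leaf, then mid, then root */
                printf("%s\n", g->name);
        return 0;
}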
@@ -847,7 +859,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
 
                 groupc->state_mask = state_mask;
 
-                write_seqcount_end(&groupc->seq);
                 return;
         }
 
@@ -868,8 +879,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
 
         groupc->state_mask = state_mask;
 
-        write_seqcount_end(&groupc->seq);
-
         if (state_mask & group->rtpoll_states)
                 psi_schedule_rtpoll_work(group, 1, false);
 
@@ -904,24 +913,29 @@ static void psi_flags_change(struct task_struct *task, int clear, int set)
 void psi_task_change(struct task_struct *task, int clear, int set)
 {
         int cpu = task_cpu(task);
-        struct psi_group *group;
+        u64 now;
 
         if (!task->pid)
                 return;
 
         psi_flags_change(task, clear, set);
 
-        group = task_psi_group(task);
-        do {
-                psi_group_change(group, cpu, clear, set, true);
-        } while ((group = group->parent));
+        psi_write_begin(cpu);
+        now = cpu_clock(cpu);
+        for_each_group(group, task_psi_group(task))
+                psi_group_change(group, cpu, clear, set, now, true);
+        psi_write_end(cpu);
 }
 
 void psi_task_switch(struct task_struct *prev, struct task_struct *next,
                      bool sleep)
 {
-        struct psi_group *group, *common = NULL;
+        struct psi_group *common = NULL;
         int cpu = task_cpu(prev);
+        u64 now;
+
+        psi_write_begin(cpu);
+        now = cpu_clock(cpu);
 
         if (next->pid) {
                 psi_flags_change(next, 0, TSK_ONCPU);
@@ -930,16 +944,15 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
                  * ancestors with @prev, those will already have @prev's
                  * TSK_ONCPU bit set, and we can stop the iteration there.
                  */
-                group = task_psi_group(next);
-                do {
-                        if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
-                            PSI_ONCPU) {
+                for_each_group(group, task_psi_group(next)) {
+                        struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
+
+                        if (groupc->state_mask & PSI_ONCPU) {
                                 common = group;
                                 break;
                         }
-
-                        psi_group_change(group, cpu, 0, TSK_ONCPU, true);
-                } while ((group = group->parent));
+                        psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
+                }
         }
 
         if (prev->pid) {
@@ -972,12 +985,11 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
 
                 psi_flags_change(prev, clear, set);
 
-                group = task_psi_group(prev);
-                do {
+                for_each_group(group, task_psi_group(prev)) {
                         if (group == common)
                                 break;
-                        psi_group_change(group, cpu, clear, set, wake_clock);
-                } while ((group = group->parent));
+                        psi_group_change(group, cpu, clear, set, now, wake_clock);
+                }
 
                 /*
                  * TSK_ONCPU is handled up to the common ancestor. If there are
@@ -987,20 +999,21 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
                  */
                 if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
                         clear &= ~TSK_ONCPU;
-                        for (; group; group = group->parent)
-                                psi_group_change(group, cpu, clear, set, wake_clock);
+                        for_each_group(group, common)
+                                psi_group_change(group, cpu, clear, set, now, wake_clock);
                 }
         }
+        psi_write_end(cpu);
 }
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
 {
         int cpu = task_cpu(curr);
-        struct psi_group *group;
         struct psi_group_cpu *groupc;
         s64 delta;
         u64 irq;
+        u64 now;
 
         if (static_branch_likely(&psi_disabled) || !irqtime_enabled())
                 return;
@@ -1009,8 +1022,7 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
                 return;
 
         lockdep_assert_rq_held(rq);
-        group = task_psi_group(curr);
-        if (prev && task_psi_group(prev) == group)
+        if (prev && task_psi_group(prev) == task_psi_group(curr))
                 return;
 
         irq = irq_time_read(cpu);
@@ -1019,25 +1031,22 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
                 return;
         rq->psi_irq_time = irq;
 
-        do {
-                u64 now;
+        psi_write_begin(cpu);
+        now = cpu_clock(cpu);
 
+        for_each_group(group, task_psi_group(curr)) {
                 if (!group->enabled)
                         continue;
 
                 groupc = per_cpu_ptr(group->pcpu, cpu);
 
-                write_seqcount_begin(&groupc->seq);
-                now = cpu_clock(cpu);
-
                 record_times(groupc, now);
                 groupc->times[PSI_IRQ_FULL] += delta;
 
-                write_seqcount_end(&groupc->seq);
-
                 if (group->rtpoll_states & (1 << PSI_IRQ_FULL))
                         psi_schedule_rtpoll_work(group, 1, false);
-        } while ((group = group->parent));
+        }
+        psi_write_end(cpu);
 }
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
@@ -1225,12 +1234,14 @@ void psi_cgroup_restart(struct psi_group *group)
                 return;
 
         for_each_possible_cpu(cpu) {
-                struct rq *rq = cpu_rq(cpu);
-                struct rq_flags rf;
+                u64 now;
 
-                rq_lock_irq(rq, &rf);
-                psi_group_change(group, cpu, 0, 0, true);
-                rq_unlock_irq(rq, &rf);
+                guard(rq_lock_irq)(cpu_rq(cpu));
+
+                psi_write_begin(cpu);
+                now = cpu_clock(cpu);
+                psi_group_change(group, cpu, 0, 0, now, true);
+                psi_write_end(cpu);
         }
 }
 #endif /* CONFIG_CGROUPS */
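This last hunk also converts the explicit rq_lock_irq()/rq_unlock_irq() pair to the kernel's scope-based guard() helper (linux/cleanup.h), which drops the lock automatically when the enclosing scope ends, on every exit path. A rough userspace approximation (assuming GCC/Clang's cleanup attribute; all names illustrative, not the kernel API):

#include <pthread.h>
#include <stdio.h>

static void mutex_unlocker(pthread_mutex_t **m)
{
        pthread_mutex_unlock(*m);
}

/* Take the lock now; release it automatically when 'scope_guard'
 * goes out of scope, however the scope is left. */
#define guard_mutex(lock)                                        \
        pthread_mutex_t *scope_guard                             \
                __attribute__((cleanup(mutex_unlocker))) =       \
                (pthread_mutex_lock(lock), (lock))

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;

static void critical(void)
{
        guard_mutex(&lk);
        puts("holding lk");
}       /* lk is released here */

int main(void)
{
        critical();
        critical();
        return 0;
}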