Revert "Revert "sched/psi: Stop relying on timer_pending() for poll_work rescheduling""

This reverts commit 02bdd918e6. It was preserving the ABI, but that is
no longer needed at this point in time.

Change-Id: I486cebed8ec0f91985d117eed3e1069d6160e267
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Authored by Greg Kroah-Hartman on 2023-02-28 12:57:37 +00:00, committed by Todd Kjos
parent a0bdc392de
commit 9154eb052f
2 changed files with 53 additions and 10 deletions

include/linux/psi_types.h

@@ -177,6 +177,7 @@ struct psi_group {
 	struct timer_list poll_timer;
 	wait_queue_head_t poll_wait;
 	atomic_t poll_wakeup;
+	atomic_t poll_scheduled;
 	/* Protects data used by the monitor */
 	struct mutex trigger_lock;

kernel/sched/psi.c

@@ -189,6 +189,7 @@ static void group_init(struct psi_group *group)
 	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
 	mutex_init(&group->avgs_lock);
 	/* Init trigger-related members */
+	atomic_set(&group->poll_scheduled, 0);
 	mutex_init(&group->trigger_lock);
 	INIT_LIST_HEAD(&group->triggers);
 	group->poll_min_period = U32_MAX;
@@ -565,18 +566,17 @@ static u64 update_triggers(struct psi_group *group, u64 now)
 	return now + group->poll_min_period;
 }
 
-/* Schedule polling if it's not already scheduled. */
-static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
+/* Schedule polling if it's not already scheduled or forced. */
+static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
+				   bool force)
 {
 	struct task_struct *task;
 
 	/*
-	 * Do not reschedule if already scheduled.
-	 * Possible race with a timer scheduled after this check but before
-	 * mod_timer below can be tolerated because group->polling_next_update
-	 * will keep updates on schedule.
+	 * atomic_xchg should be called even when !force to provide a
+	 * full memory barrier (see the comment inside psi_poll_work).
 	 */
-	if (timer_pending(&group->poll_timer))
+	if (atomic_xchg(&group->poll_scheduled, 1) && !force)
 		return;
 
 	rcu_read_lock();
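
For illustration only: a minimal userspace sketch of the guard introduced above, assuming C11 atomics, where schedule_poll() and arm_poll_timer() are hypothetical stand-ins for psi_schedule_poll_work() and mod_timer(), and atomic_exchange() plays the role of atomic_xchg().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int poll_scheduled;	/* 0 = idle, 1 = poll work already armed */

/* Hypothetical stand-in for mod_timer(&group->poll_timer, ...). */
static void arm_poll_timer(unsigned long delay)
{
	printf("poll timer armed, delay=%lu\n", delay);
}

/* Same shape as the patched psi_schedule_poll_work(): the exchange runs
 * even when force is set, so every caller gets the full barrier. */
static void schedule_poll(unsigned long delay, bool force)
{
	/* A previous value of 1 means the work is already scheduled. */
	if (atomic_exchange(&poll_scheduled, 1) && !force)
		return;

	arm_poll_timer(delay);
}

int main(void)
{
	schedule_poll(1, false);	/* arms the timer */
	schedule_poll(1, false);	/* returns early: already scheduled */
	schedule_poll(1, true);		/* force path re-arms it regardless */
	return 0;
}

Only the first caller (or a forced one) arms the timer; everyone else backs out after the exchange, which is what replaces the old timer_pending() check.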
@@ -588,12 +588,15 @@ static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
 	 */
 	if (likely(task))
 		mod_timer(&group->poll_timer, jiffies + delay);
+	else
+		atomic_set(&group->poll_scheduled, 0);
 
 	rcu_read_unlock();
 }
 
 static void psi_poll_work(struct psi_group *group)
 {
+	bool force_reschedule = false;
 	u32 changed_states;
 	u64 now;
@@ -601,6 +604,43 @@ static void psi_poll_work(struct psi_group *group)
 	now = sched_clock();
 
+	if (now > group->polling_until) {
+		/*
+		 * We are either about to start or might stop polling if no
+		 * state change was recorded. Resetting poll_scheduled leaves
+		 * a small window for psi_group_change to sneak in and schedule
+		 * an immediate poll_work before we get to rescheduling. One
+		 * potential extra wakeup at the end of the polling window
+		 * should be negligible and polling_next_update still keeps
+		 * updates correctly on schedule.
+		 */
+		atomic_set(&group->poll_scheduled, 0);
+		/*
+		 * A task change can race with the poll worker that is supposed to
+		 * report on it. To avoid missing events, ensure ordering between
+		 * poll_scheduled and the task state accesses, such that if the poll
+		 * worker misses the state update, the task change is guaranteed to
+		 * reschedule the poll worker:
+		 *
+		 * poll worker:
+		 *   atomic_set(poll_scheduled, 0)
+		 *   smp_mb()
+		 *   LOAD states
+		 *
+		 * task change:
+		 *   STORE states
+		 *   if atomic_xchg(poll_scheduled, 1) == 0:
+		 *     schedule poll worker
+		 *
+		 * The atomic_xchg() implies a full barrier.
+		 */
+		smp_mb();
+	} else {
+		/* Polling window is not over, keep rescheduling */
+		force_reschedule = true;
+	}
+
 	collect_percpu_times(group, PSI_POLL, &changed_states);
 
 	if (changed_states & group->poll_states) {
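
The ordering spelled out in the comment above is the classic store-buffering pattern; a small pthread sketch, assuming C11 atomics, can make it concrete. Here atomic_thread_fence(memory_order_seq_cst) stands in for smp_mb(), atomic_exchange() for atomic_xchg(), and task_state, seen_by_worker and rescheduled are hypothetical names: either the poll worker observes the new state, or the task-change side finds poll_scheduled cleared and re-arms the worker, so the event is never lost.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int poll_scheduled;	/* 1 while poll work is armed */
static atomic_int task_state;		/* the "STORE states" side */
static atomic_int seen_by_worker;	/* worker observed the new state */
static atomic_int rescheduled;		/* task change re-armed the worker */

/* Poll worker side: clear the flag, full barrier, then load the states. */
static void *poll_worker(void *arg)
{
	(void)arg;
	atomic_store(&poll_scheduled, 0);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	if (atomic_load(&task_state))
		atomic_store(&seen_by_worker, 1);
	return NULL;
}

/* Task change side: store the states, then try to schedule the worker. */
static void *task_change(void *arg)
{
	(void)arg;
	atomic_store(&task_state, 1);
	if (atomic_exchange(&poll_scheduled, 1) == 0)
		atomic_store(&rescheduled, 1);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	atomic_store(&poll_scheduled, 1);	/* poll work was armed earlier */

	pthread_create(&a, NULL, poll_worker, NULL);
	pthread_create(&b, NULL, task_change, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* The barriers rule out "0 0": at least one of these prints as 1. */
	printf("seen_by_worker=%d rescheduled=%d\n",
	       atomic_load(&seen_by_worker), atomic_load(&rescheduled));
	return 0;
}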
@@ -626,7 +666,8 @@ static void psi_poll_work(struct psi_group *group)
 		group->polling_next_update = update_triggers(group, now);
 
 	psi_schedule_poll_work(group,
-		nsecs_to_jiffies(group->polling_next_update - now) + 1);
+		nsecs_to_jiffies(group->polling_next_update - now) + 1,
+		force_reschedule);
 
 out:
 	mutex_unlock(&group->trigger_lock);
@@ -787,7 +828,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
 	write_seqcount_end(&groupc->seq);
 
 	if (state_mask & group->poll_states)
-		psi_schedule_poll_work(group, 1);
+		psi_schedule_poll_work(group, 1, false);
 
 	if (wake_clock && !delayed_work_pending(&group->avgs_work))
 		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
@@ -941,7 +982,7 @@ void psi_account_irqtime(struct task_struct *task, u32 delta)
 		write_seqcount_end(&groupc->seq);
 
 		if (group->poll_states & (1 << PSI_IRQ_FULL))
-			psi_schedule_poll_work(group, 1);
+			psi_schedule_poll_work(group, 1, false);
 	} while ((group = group->parent));
 }
 #endif
@@ -1328,6 +1369,7 @@ void psi_trigger_destroy(struct psi_trigger *t)
 		 * can no longer be found through group->poll_task.
 		 */
 		kthread_stop(task_to_destroy);
+		atomic_set(&group->poll_scheduled, 0);
 	}
 	kfree(t);
 }