rcu/rcuscale: Move rcu_scale_*() after kfree_scale_cleanup()
[ Upstream commit bf5ddd736509a7d9077c0b6793e6f0852214dbea ] This code-movement-only commit moves the rcu_scale_cleanup() and rcu_scale_shutdown() functions to follow kfree_scale_cleanup(). This code movement is in preparation for a bug-fix patch that invokes kfree_scale_cleanup() from rcu_scale_cleanup(). Signed-off-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com> Signed-off-by: Paul E. McKenney <paulmck@kernel.org> Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org> Stable-dep-of: 23fc8df26dea ("rcu/rcuscale: Stop kfree_scale_thread thread(s) after unloading rcuscale") Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent
7a34922194
commit
3506e64ec1
@ -521,89 +521,6 @@ rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
|
|||||||
scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
|
scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
|
||||||
rcu_scale_cleanup(void)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
int j;
|
|
||||||
int ngps = 0;
|
|
||||||
u64 *wdp;
|
|
||||||
u64 *wdpp;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Would like warning at start, but everything is expedited
|
|
||||||
* during the mid-boot phase, so have to wait till the end.
|
|
||||||
*/
|
|
||||||
if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
|
|
||||||
SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
|
|
||||||
if (rcu_gp_is_normal() && gp_exp)
|
|
||||||
SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
|
|
||||||
if (gp_exp && gp_async)
|
|
||||||
SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
|
|
||||||
|
|
||||||
if (torture_cleanup_begin())
|
|
||||||
return;
|
|
||||||
if (!cur_ops) {
|
|
||||||
torture_cleanup_end();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (reader_tasks) {
|
|
||||||
for (i = 0; i < nrealreaders; i++)
|
|
||||||
torture_stop_kthread(rcu_scale_reader,
|
|
||||||
reader_tasks[i]);
|
|
||||||
kfree(reader_tasks);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (writer_tasks) {
|
|
||||||
for (i = 0; i < nrealwriters; i++) {
|
|
||||||
torture_stop_kthread(rcu_scale_writer,
|
|
||||||
writer_tasks[i]);
|
|
||||||
if (!writer_n_durations)
|
|
||||||
continue;
|
|
||||||
j = writer_n_durations[i];
|
|
||||||
pr_alert("%s%s writer %d gps: %d\n",
|
|
||||||
scale_type, SCALE_FLAG, i, j);
|
|
||||||
ngps += j;
|
|
||||||
}
|
|
||||||
pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
|
|
||||||
scale_type, SCALE_FLAG,
|
|
||||||
t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
|
|
||||||
t_rcu_scale_writer_finished -
|
|
||||||
t_rcu_scale_writer_started,
|
|
||||||
ngps,
|
|
||||||
rcuscale_seq_diff(b_rcu_gp_test_finished,
|
|
||||||
b_rcu_gp_test_started));
|
|
||||||
for (i = 0; i < nrealwriters; i++) {
|
|
||||||
if (!writer_durations)
|
|
||||||
break;
|
|
||||||
if (!writer_n_durations)
|
|
||||||
continue;
|
|
||||||
wdpp = writer_durations[i];
|
|
||||||
if (!wdpp)
|
|
||||||
continue;
|
|
||||||
for (j = 0; j < writer_n_durations[i]; j++) {
|
|
||||||
wdp = &wdpp[j];
|
|
||||||
pr_alert("%s%s %4d writer-duration: %5d %llu\n",
|
|
||||||
scale_type, SCALE_FLAG,
|
|
||||||
i, j, *wdp);
|
|
||||||
if (j % 100 == 0)
|
|
||||||
schedule_timeout_uninterruptible(1);
|
|
||||||
}
|
|
||||||
kfree(writer_durations[i]);
|
|
||||||
}
|
|
||||||
kfree(writer_tasks);
|
|
||||||
kfree(writer_durations);
|
|
||||||
kfree(writer_n_durations);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Do torture-type-specific cleanup operations. */
|
|
||||||
if (cur_ops->cleanup != NULL)
|
|
||||||
cur_ops->cleanup();
|
|
||||||
|
|
||||||
torture_cleanup_end();
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Return the number if non-negative. If -1, the number of CPUs.
|
* Return the number if non-negative. If -1, the number of CPUs.
|
||||||
* If less than -1, that much less than the number of CPUs, but
|
* If less than -1, that much less than the number of CPUs, but
|
||||||
@ -623,20 +540,6 @@ static int compute_real(int n)
|
|||||||
return nr;
|
return nr;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* RCU scalability shutdown kthread. Just waits to be awakened, then shuts
|
|
||||||
* down system.
|
|
||||||
*/
|
|
||||||
static int
|
|
||||||
rcu_scale_shutdown(void *arg)
|
|
||||||
{
|
|
||||||
wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
|
|
||||||
smp_mb(); /* Wake before output. */
|
|
||||||
rcu_scale_cleanup();
|
|
||||||
kernel_power_off();
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for number
|
* kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for number
|
||||||
* of iterations and measure total time and number of GP for all iterations to complete.
|
* of iterations and measure total time and number of GP for all iterations to complete.
|
||||||
@ -811,6 +714,103 @@ kfree_scale_init(void)
|
|||||||
return firsterr;
|
return firsterr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
rcu_scale_cleanup(void)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
int j;
|
||||||
|
int ngps = 0;
|
||||||
|
u64 *wdp;
|
||||||
|
u64 *wdpp;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Would like warning at start, but everything is expedited
|
||||||
|
* during the mid-boot phase, so have to wait till the end.
|
||||||
|
*/
|
||||||
|
if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
|
||||||
|
SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
|
||||||
|
if (rcu_gp_is_normal() && gp_exp)
|
||||||
|
SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
|
||||||
|
if (gp_exp && gp_async)
|
||||||
|
SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
|
||||||
|
|
||||||
|
if (torture_cleanup_begin())
|
||||||
|
return;
|
||||||
|
if (!cur_ops) {
|
||||||
|
torture_cleanup_end();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (reader_tasks) {
|
||||||
|
for (i = 0; i < nrealreaders; i++)
|
||||||
|
torture_stop_kthread(rcu_scale_reader,
|
||||||
|
reader_tasks[i]);
|
||||||
|
kfree(reader_tasks);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (writer_tasks) {
|
||||||
|
for (i = 0; i < nrealwriters; i++) {
|
||||||
|
torture_stop_kthread(rcu_scale_writer,
|
||||||
|
writer_tasks[i]);
|
||||||
|
if (!writer_n_durations)
|
||||||
|
continue;
|
||||||
|
j = writer_n_durations[i];
|
||||||
|
pr_alert("%s%s writer %d gps: %d\n",
|
||||||
|
scale_type, SCALE_FLAG, i, j);
|
||||||
|
ngps += j;
|
||||||
|
}
|
||||||
|
pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
|
||||||
|
scale_type, SCALE_FLAG,
|
||||||
|
t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
|
||||||
|
t_rcu_scale_writer_finished -
|
||||||
|
t_rcu_scale_writer_started,
|
||||||
|
ngps,
|
||||||
|
rcuscale_seq_diff(b_rcu_gp_test_finished,
|
||||||
|
b_rcu_gp_test_started));
|
||||||
|
for (i = 0; i < nrealwriters; i++) {
|
||||||
|
if (!writer_durations)
|
||||||
|
break;
|
||||||
|
if (!writer_n_durations)
|
||||||
|
continue;
|
||||||
|
wdpp = writer_durations[i];
|
||||||
|
if (!wdpp)
|
||||||
|
continue;
|
||||||
|
for (j = 0; j < writer_n_durations[i]; j++) {
|
||||||
|
wdp = &wdpp[j];
|
||||||
|
pr_alert("%s%s %4d writer-duration: %5d %llu\n",
|
||||||
|
scale_type, SCALE_FLAG,
|
||||||
|
i, j, *wdp);
|
||||||
|
if (j % 100 == 0)
|
||||||
|
schedule_timeout_uninterruptible(1);
|
||||||
|
}
|
||||||
|
kfree(writer_durations[i]);
|
||||||
|
}
|
||||||
|
kfree(writer_tasks);
|
||||||
|
kfree(writer_durations);
|
||||||
|
kfree(writer_n_durations);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Do torture-type-specific cleanup operations. */
|
||||||
|
if (cur_ops->cleanup != NULL)
|
||||||
|
cur_ops->cleanup();
|
||||||
|
|
||||||
|
torture_cleanup_end();
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* RCU scalability shutdown kthread. Just waits to be awakened, then shuts
|
||||||
|
* down system.
|
||||||
|
*/
|
||||||
|
static int
|
||||||
|
rcu_scale_shutdown(void *arg)
|
||||||
|
{
|
||||||
|
wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
|
||||||
|
smp_mb(); /* Wake before output. */
|
||||||
|
rcu_scale_cleanup();
|
||||||
|
kernel_power_off();
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
static int __init
|
static int __init
|
||||||
rcu_scale_init(void)
|
rcu_scale_init(void)
|
||||||
{
|
{
|
||||||
|
Loading…
Reference in New Issue
Block a user