cpumask: convert kernel/workqueue.c

Impact: reduce memory usage, use new cpumask API.

cpu_populated_map becomes a cpumask_var_t, and cpu_singlethread_map is
now a plain cpumask pointer: it is just the cpumask containing the first
possible CPU anyway.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
commit e7577c50f2
parent a45185d2d7
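As background for the diff below, the new API hides the NR_CPUS-sized
storage behind cpumask_var_t and manipulates masks through pointers. A
minimal sketch of the idiom this commit adopts; example_mask and
example_init are illustrative names, not part of the patch:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/init.h>

static cpumask_var_t example_mask;

static int __init example_init(void)
{
	/* With CONFIG_CPUMASK_OFFSTACK=y this kmallocs a bitmap sized to
	 * nr_cpu_ids; otherwise cpumask_var_t is a one-element array and
	 * the "allocation" is a no-op that always succeeds. */
	if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(example_mask, cpu_online_mask);	/* old: mask = cpu_online_map */
	cpumask_set_cpu(1, example_mask);		/* old: cpu_set(1, mask) */
	cpumask_clear_cpu(1, example_mask);		/* old: cpu_clear(1, mask) */

	free_cpumask_var(example_mask);
	return 0;
}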
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -73,7 +73,7 @@ static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
-static cpumask_t cpu_singlethread_map __read_mostly;
+static const struct cpumask *cpu_singlethread_map __read_mostly;
 /*
  * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
  * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
@@ -81,7 +81,7 @@ static cpumask_t cpu_singlethread_map __read_mostly;
  * use cpu_possible_map, the cpumask below is more a documentation
  * than optimization.
  */
-static cpumask_t cpu_populated_map __read_mostly;
+static cpumask_var_t cpu_populated_map __read_mostly;
 
 /* If it's single threaded, it isn't in the list of workqueues. */
 static inline int is_wq_single_threaded(struct workqueue_struct *wq)
@@ -89,10 +89,10 @@ static inline int is_wq_single_threaded(struct workqueue_struct *wq)
 	return wq->singlethread;
 }
 
-static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
+static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
 {
 	return is_wq_single_threaded(wq)
-		? &cpu_singlethread_map : &cpu_populated_map;
+		? cpu_singlethread_map : cpu_populated_map;
 }
 
 static
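The dropped '&' in wq_cpu_map() falls out of the type changes:
cpumask_of() already yields a const struct cpumask *, and a
cpumask_var_t (a pointer, or an array that decays to one) is usable
directly wherever a mask pointer is expected. A hedged stand-alone
illustration, with pick_map and populated_example as invented names:

static cpumask_var_t populated_example;

static const struct cpumask *pick_map(bool single)
{
	/* cpumask_of() points into the kernel's shared constant one-CPU
	 * masks; neither arm of the ternary needs '&' any more. */
	return single ? cpumask_of(0) : populated_example;
}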
@@ -410,7 +410,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void flush_workqueue(struct workqueue_struct *wq)
 {
-	const cpumask_t *cpu_map = wq_cpu_map(wq);
+	const struct cpumask *cpu_map = wq_cpu_map(wq);
 	int cpu;
 
 	might_sleep();
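This flush_workqueue() hunk, and the wait_on_work() and
destroy_workqueue() hunks below, all take the same mechanical change to
their cpu_map local; the map is then walked with the pointer-based
iterator. A hypothetical sketch of the consumption pattern as it would
look inside kernel/workqueue.c (the pr_info() body is a placeholder,
not the real per-CPU work):

static void walk_map_example(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	for_each_cpu(cpu, cpu_map)	/* old spelling: for_each_cpu_mask() */
		pr_info("wq %p has a cwq on cpu %d\n", wq, cpu);
}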
@@ -532,7 +532,7 @@ static void wait_on_work(struct work_struct *work)
 {
 	struct cpu_workqueue_struct *cwq;
 	struct workqueue_struct *wq;
-	const cpumask_t *cpu_map;
+	const struct cpumask *cpu_map;
 	int cpu;
 
 	might_sleep();
@@ -903,7 +903,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
-	const cpumask_t *cpu_map = wq_cpu_map(wq);
+	const struct cpumask *cpu_map = wq_cpu_map(wq);
 	int cpu;
 
 	cpu_maps_update_begin();
@@ -933,7 +933,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
-		cpu_set(cpu, cpu_populated_map);
+		cpumask_set_cpu(cpu, cpu_populated_map);
 	}
 undo:
 	list_for_each_entry(wq, &workqueues, list) {
@@ -964,7 +964,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_UP_CANCELED:
 	case CPU_POST_DEAD:
-		cpu_clear(cpu, cpu_populated_map);
+		cpumask_clear_cpu(cpu, cpu_populated_map);
 	}
 
 	return ret;
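The hotplug callback shows the accessor renames in isolation. For
reference, the old-to-new mapping used throughout this patch, as a
comment sketch:

/*
 * Old API (cpumask_t taken by value)    New API (struct cpumask pointer)
 *   cpu_set(cpu, mask)               -> cpumask_set_cpu(cpu, &mask)
 *   cpu_clear(cpu, mask)             -> cpumask_clear_cpu(cpu, &mask)
 *   first_cpu(mask)                  -> cpumask_first(&mask)
 *   cpumask_of_cpu(cpu), by value    -> cpumask_of(cpu), by pointer
 */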
@@ -1017,9 +1017,11 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 
 void __init init_workqueues(void)
 {
-	cpu_populated_map = cpu_online_map;
-	singlethread_cpu = first_cpu(cpu_possible_map);
-	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
+	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);
+
+	cpumask_copy(cpu_populated_map, cpu_online_mask);
+	singlethread_cpu = cpumask_first(cpu_possible_mask);
+	cpu_singlethread_map = cpumask_of(singlethread_cpu);
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);
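The memory saving claimed in the log comes from the two globals. A
before/after footprint sketch, under the usual assumption that a
cpumask_t statically reserves NR_CPUS bits while the offstack variant
allocates only nr_cpu_ids bits at boot:

/*
 * Before: two static NR_CPUS-bit bitmaps, always resident:
 *   static cpumask_t cpu_singlethread_map;  (NR_CPUS / 8 bytes, one bit set)
 *   static cpumask_t cpu_populated_map;     (NR_CPUS / 8 bytes)
 *
 * After: a pointer into the kernel's shared constant one-CPU masks,
 * plus a bitmap sized at boot (CONFIG_CPUMASK_OFFSTACK=y) or a small
 * one-element array (CONFIG_CPUMASK_OFFSTACK=n):
 */
static const struct cpumask *cpu_singlethread_map;
static cpumask_var_t cpu_populated_map;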