Merge branch 'for-5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue updates from Tejun Heo:
 "Mostly cleanups and other trivial changes. The only interesting
  change is Sebastian's rcuwait conversion for RT"

* 'for-5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: use BUILD_BUG_ON() for compile time test instead of WARN_ON()
  workqueue: fix a piece of comment about reserved bits for work flags
  workqueue: remove useless unlock() and lock() in series
  workqueue: void unneeded requeuing the pwq in rescuer thread
  workqueue: Convert the pool::lock and wq_mayday_lock to raw_spinlock_t
  workqueue: Use rcuwait for wq_manager_wait
  workqueue: Remove unnecessary kfree() call in rcu_free_wq()
  workqueue: Fix an use after free in init_rescuer()
  workqueue: Use IS_ERR and PTR_ERR instead of PTR_ERR_OR_ZERO.
commit fe3bc8a988
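For context on the rcuwait conversion called out above: the wait queue head wq_manager_wait is replaced by an rcuwait, and the waiter's condition check moves into wq_manager_inactive(), which takes the now-raw pool lock itself and keeps it held when the condition holds. A minimal sketch of the resulting wait/wake pairing, condensed from the hunks below (not a standalone program; all names are as in the patch):

    /* Waiter side, put_unbound_pool(): sleep until no manager is active.
     * wq_manager_inactive() acquires pool->lock and returns true only while
     * POOL_MANAGER_ACTIVE is clear, so the wait returns with the lock held. */
    rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool),
                       TASK_UNINTERRUPTIBLE);
    pool->flags |= POOL_MANAGER_ACTIVE;

    /* Waker side, manage_workers(): drop the manager role, then wake the
     * rcuwait so a pending put_unbound_pool() can proceed. */
    pool->flags &= ~POOL_MANAGER_ACTIVE;
    rcuwait_wake_up(&manager_wait);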
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -62,7 +62,7 @@ enum {
         WORK_CPU_UNBOUND        = NR_CPUS,
 
         /*
-         * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
+         * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
          * This makes pwqs aligned to 256 bytes and allows 15 workqueue
          * flush colors.
          */
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -145,7 +145,7 @@ enum {
 /* struct worker is defined in workqueue_internal.h */
 
 struct worker_pool {
-        spinlock_t              lock;           /* the pool lock */
+        raw_spinlock_t          lock;           /* the pool lock */
         int                     cpu;            /* I: the associated cpu */
         int                     node;           /* I: the associated node ID */
         int                     id;             /* I: pool ID */
@@ -300,8 +300,9 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
-static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
-static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+/* wait for manager to go away */
+static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
 
 static LIST_HEAD(workqueues);           /* PR: list of all workqueues */
 static bool workqueue_freezing;         /* PL: have wqs started freezing? */
@@ -826,7 +827,7 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
  * Wake up the first idle worker of @pool.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void wake_up_worker(struct worker_pool *pool)
 {
@@ -881,7 +882,7 @@ void wq_worker_sleeping(struct task_struct *task)
                 return;
 
         worker->sleeping = 1;
-        spin_lock_irq(&pool->lock);
+        raw_spin_lock_irq(&pool->lock);
 
         /*
          * The counterpart of the following dec_and_test, implied mb,
@@ -900,7 +901,7 @@ void wq_worker_sleeping(struct task_struct *task)
                 if (next)
                         wake_up_process(next->task);
         }
-        spin_unlock_irq(&pool->lock);
+        raw_spin_unlock_irq(&pool->lock);
 }
 
 /**
@@ -911,7 +912,7 @@ void wq_worker_sleeping(struct task_struct *task)
  * the scheduler to get a worker's last known identity.
  *
  * CONTEXT:
- * spin_lock_irq(rq->lock)
+ * raw_spin_lock_irq(rq->lock)
  *
  * This function is called during schedule() when a kworker is going
  * to sleep. It's used by psi to identify aggregation workers during
@@ -942,7 +943,7 @@ work_func_t wq_worker_last_func(struct task_struct *task)
  * Set @flags in @worker->flags and adjust nr_running accordingly.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock)
+ * raw_spin_lock_irq(pool->lock)
  */
 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 {
@@ -967,7 +968,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
  * Clear @flags in @worker->flags and adjust nr_running accordingly.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock)
+ * raw_spin_lock_irq(pool->lock)
  */
 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 {
@@ -1015,7 +1016,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
  * actually occurs, it should be easy to locate the culprit work function.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  *
  * Return:
  * Pointer to worker which is executing @work if found, %NULL
@@ -1050,7 +1051,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
  * nested inside outer list_for_each_entry_safe().
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void move_linked_works(struct work_struct *work, struct list_head *head,
                               struct work_struct **nextp)
@@ -1128,9 +1129,9 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
                  * As both pwqs and pools are RCU protected, the
                  * following lock operations are safe.
                  */
-                spin_lock_irq(&pwq->pool->lock);
+                raw_spin_lock_irq(&pwq->pool->lock);
                 put_pwq(pwq);
-                spin_unlock_irq(&pwq->pool->lock);
+                raw_spin_unlock_irq(&pwq->pool->lock);
         }
 }
 
@@ -1163,7 +1164,7 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
  * decrement nr_in_flight of its pwq and handle workqueue flushing.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
 {
@@ -1262,7 +1263,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
         if (!pool)
                 goto fail;
 
-        spin_lock(&pool->lock);
+        raw_spin_lock(&pool->lock);
         /*
          * work->data is guaranteed to point to pwq only while the work
          * item is queued on pwq->wq, and both updating work->data to point
@@ -1291,11 +1292,11 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                 /* work->data points to pwq iff queued, point to pool */
                 set_work_pool_and_keep_pending(work, pool->id);
 
-                spin_unlock(&pool->lock);
+                raw_spin_unlock(&pool->lock);
                 rcu_read_unlock();
                 return 1;
         }
-        spin_unlock(&pool->lock);
+        raw_spin_unlock(&pool->lock);
 fail:
         rcu_read_unlock();
         local_irq_restore(*flags);
@@ -1316,7 +1317,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
  * work_struct flags.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
                         struct list_head *head, unsigned int extra_flags)
@@ -1433,7 +1434,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         if (last_pool && last_pool != pwq->pool) {
                 struct worker *worker;
 
-                spin_lock(&last_pool->lock);
+                raw_spin_lock(&last_pool->lock);
 
                 worker = find_worker_executing_work(last_pool, work);
 
@@ -1441,11 +1442,11 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
                         pwq = worker->current_pwq;
                 } else {
                         /* meh... not running there, queue here */
-                        spin_unlock(&last_pool->lock);
-                        spin_lock(&pwq->pool->lock);
+                        raw_spin_unlock(&last_pool->lock);
+                        raw_spin_lock(&pwq->pool->lock);
                 }
         } else {
-                spin_lock(&pwq->pool->lock);
+                raw_spin_lock(&pwq->pool->lock);
         }
 
         /*
@@ -1458,7 +1459,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
          */
         if (unlikely(!pwq->refcnt)) {
                 if (wq->flags & WQ_UNBOUND) {
-                        spin_unlock(&pwq->pool->lock);
+                        raw_spin_unlock(&pwq->pool->lock);
                         cpu_relax();
                         goto retry;
                 }
@@ -1490,7 +1491,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         insert_work(pwq, work, worklist, work_flags);
 
 out:
-        spin_unlock(&pwq->pool->lock);
+        raw_spin_unlock(&pwq->pool->lock);
         rcu_read_unlock();
 }
 
@@ -1759,7 +1760,7 @@ EXPORT_SYMBOL(queue_rcu_work);
  * necessary.
  *
  * LOCKING:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void worker_enter_idle(struct worker *worker)
 {
@@ -1799,7 +1800,7 @@ static void worker_enter_idle(struct worker *worker)
  * @worker is leaving idle state. Update stats.
  *
  * LOCKING:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void worker_leave_idle(struct worker *worker)
 {
@@ -1937,11 +1938,11 @@ static struct worker *create_worker(struct worker_pool *pool)
         worker_attach_to_pool(worker, pool);
 
         /* start the newly created worker */
-        spin_lock_irq(&pool->lock);
+        raw_spin_lock_irq(&pool->lock);
         worker->pool->nr_workers++;
         worker_enter_idle(worker);
         wake_up_process(worker->task);
-        spin_unlock_irq(&pool->lock);
+        raw_spin_unlock_irq(&pool->lock);
 
         return worker;
 
@@ -1960,7 +1961,7 @@ static struct worker *create_worker(struct worker_pool *pool)
  * be idle.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void destroy_worker(struct worker *worker)
 {
@@ -1986,7 +1987,7 @@ static void idle_worker_timeout(struct timer_list *t)
 {
         struct worker_pool *pool = from_timer(pool, t, idle_timer);
 
-        spin_lock_irq(&pool->lock);
+        raw_spin_lock_irq(&pool->lock);
 
         while (too_many_workers(pool)) {
                 struct worker *worker;
@@ -2004,7 +2005,7 @@ static void idle_worker_timeout(struct timer_list *t)
                 destroy_worker(worker);
         }
 
-        spin_unlock_irq(&pool->lock);
+        raw_spin_unlock_irq(&pool->lock);
 }
 
 static void send_mayday(struct work_struct *work)
@@ -2035,8 +2036,8 @@ static void pool_mayday_timeout(struct timer_list *t)
         struct worker_pool *pool = from_timer(pool, t, mayday_timer);
         struct work_struct *work;
 
-        spin_lock_irq(&pool->lock);
-        spin_lock(&wq_mayday_lock);             /* for wq->maydays */
+        raw_spin_lock_irq(&pool->lock);
+        raw_spin_lock(&wq_mayday_lock);         /* for wq->maydays */
 
         if (need_to_create_worker(pool)) {
                 /*
@@ -2049,8 +2050,8 @@ static void pool_mayday_timeout(struct timer_list *t)
                         send_mayday(work);
         }
 
-        spin_unlock(&wq_mayday_lock);
-        spin_unlock_irq(&pool->lock);
+        raw_spin_unlock(&wq_mayday_lock);
+        raw_spin_unlock_irq(&pool->lock);
 
         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
@@ -2069,7 +2070,7 @@ static void pool_mayday_timeout(struct timer_list *t)
  * may_start_working() %true.
  *
  * LOCKING:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.  Called only from
  * manager.
  */
@@ -2078,7 +2079,7 @@ __releases(&pool->lock)
 __acquires(&pool->lock)
 {
 restart:
-        spin_unlock_irq(&pool->lock);
+        raw_spin_unlock_irq(&pool->lock);
 
         /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
@@ -2094,7 +2095,7 @@ __acquires(&pool->lock)
         }
 
         del_timer_sync(&pool->mayday_timer);
-        spin_lock_irq(&pool->lock);
+        raw_spin_lock_irq(&pool->lock);
         /*
          * This is necessary even after a new worker was just successfully
          * created as @pool->lock was dropped and the new worker might have
@@ -2117,7 +2118,7 @@ __acquires(&pool->lock)
  * and may_start_working() is true.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.
 *
  * Return:
@@ -2140,7 +2141,7 @@ static bool manage_workers(struct worker *worker)
 
         pool->manager = NULL;
         pool->flags &= ~POOL_MANAGER_ACTIVE;
-        wake_up(&wq_manager_wait);
+        rcuwait_wake_up(&manager_wait);
         return true;
 }
 
@@ -2156,7 +2157,7 @@ static bool manage_workers(struct worker *worker)
  * call this function to process a work.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock) which is released and regrabbed.
+ * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
 __releases(&pool->lock)
@@ -2238,7 +2239,7 @@ __acquires(&pool->lock)
          */
         set_work_pool_and_clear_pending(work, pool->id);
 
-        spin_unlock_irq(&pool->lock);
+        raw_spin_unlock_irq(&pool->lock);
 
         lock_map_acquire(&pwq->wq->lockdep_map);
         lock_map_acquire(&lockdep_map);
@@ -2293,7 +2294,7 @@ __acquires(&pool->lock)
          */
         cond_resched();
 
-        spin_lock_irq(&pool->lock);
+        raw_spin_lock_irq(&pool->lock);
 
         /* clear cpu intensive status */
         if (unlikely(cpu_intensive))
@@ -2319,7 +2320,7 @@ __acquires(&pool->lock)
  * fetches a work from the top and executes it.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.
  */
 static void process_scheduled_works(struct worker *worker)
@@ -2361,11 +2362,11 @@ static int worker_thread(void *__worker)
         /* tell the scheduler that this is a workqueue worker */
         set_pf_worker(true);
 woke_up:
-        spin_lock_irq(&pool->lock);
+        raw_spin_lock_irq(&pool->lock);
 
         /* am I supposed to die? */
         if (unlikely(worker->flags & WORKER_DIE)) {
-                spin_unlock_irq(&pool->lock);
+                raw_spin_unlock_irq(&pool->lock);
                 WARN_ON_ONCE(!list_empty(&worker->entry));
                 set_pf_worker(false);
 
@@ -2431,7 +2432,7 @@ static int worker_thread(void *__worker)
          */
         worker_enter_idle(worker);
         __set_current_state(TASK_IDLE);
-        spin_unlock_irq(&pool->lock);
+        raw_spin_unlock_irq(&pool->lock);
         schedule();
         goto woke_up;
 }
@@ -2485,7 +2486,7 @@ static int rescuer_thread(void *__rescuer)
         should_stop = kthread_should_stop();
 
         /* see whether any pwq is asking for help */
-        spin_lock_irq(&wq_mayday_lock);
+        raw_spin_lock_irq(&wq_mayday_lock);
 
         while (!list_empty(&wq->maydays)) {
                 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
@@ -2497,11 +2498,11 @@ static int rescuer_thread(void *__rescuer)
                 __set_current_state(TASK_RUNNING);
                 list_del_init(&pwq->mayday_node);
 
-                spin_unlock_irq(&wq_mayday_lock);
+                raw_spin_unlock_irq(&wq_mayday_lock);
 
                 worker_attach_to_pool(rescuer, pool);
 
-                spin_lock_irq(&pool->lock);
+                raw_spin_lock_irq(&pool->lock);
 
                 /*
                  * Slurp in all works issued via this workqueue and
@@ -2529,8 +2530,8 @@ static int rescuer_thread(void *__rescuer)
                  * being used to relieve memory pressure, don't
                  * incur MAYDAY_INTERVAL delay inbetween.
                  */
-                if (need_to_create_worker(pool)) {
-                        spin_lock(&wq_mayday_lock);
+                if (pwq->nr_active && need_to_create_worker(pool)) {
+                        raw_spin_lock(&wq_mayday_lock);
                         /*
                          * Queue iff we aren't racing destruction
                          * and somebody else hasn't queued it already.
@@ -2539,7 +2540,7 @@ static int rescuer_thread(void *__rescuer)
                                 get_pwq(pwq);
                                 list_add_tail(&pwq->mayday_node, &wq->maydays);
                         }
-                        spin_unlock(&wq_mayday_lock);
+                        raw_spin_unlock(&wq_mayday_lock);
                 }
         }
 
@@ -2557,14 +2558,14 @@ static int rescuer_thread(void *__rescuer)
                 if (need_more_worker(pool))
                         wake_up_worker(pool);
 
-                spin_unlock_irq(&pool->lock);
+                raw_spin_unlock_irq(&pool->lock);
 
                 worker_detach_from_pool(rescuer);
 
-                spin_lock_irq(&wq_mayday_lock);
+                raw_spin_lock_irq(&wq_mayday_lock);
         }
 
-        spin_unlock_irq(&wq_mayday_lock);
+        raw_spin_unlock_irq(&wq_mayday_lock);
 
         if (should_stop) {
                 __set_current_state(TASK_RUNNING);
@@ -2644,7 +2645,7 @@ static void wq_barrier_func(struct work_struct *work)
  * underneath us, so we can't reliably determine pwq from @target.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void insert_wq_barrier(struct pool_workqueue *pwq,
                               struct wq_barrier *barr,
@@ -2731,7 +2732,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
         for_each_pwq(pwq, wq) {
                 struct worker_pool *pool = pwq->pool;
 
-                spin_lock_irq(&pool->lock);
+                raw_spin_lock_irq(&pool->lock);
 
                 if (flush_color >= 0) {
                         WARN_ON_ONCE(pwq->flush_color != -1);
@@ -2748,7 +2749,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
                         pwq->work_color = work_color;
                 }
 
-                spin_unlock_irq(&pool->lock);
+                raw_spin_unlock_irq(&pool->lock);
         }
 
         if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
@@ -2948,9 +2949,9 @@ void drain_workqueue(struct workqueue_struct *wq)
         for_each_pwq(pwq, wq) {
                 bool drained;
 
-                spin_lock_irq(&pwq->pool->lock);
+                raw_spin_lock_irq(&pwq->pool->lock);
                 drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
-                spin_unlock_irq(&pwq->pool->lock);
+                raw_spin_unlock_irq(&pwq->pool->lock);
 
                 if (drained)
                         continue;
@@ -2986,7 +2987,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
                 return false;
         }
 
-        spin_lock_irq(&pool->lock);
+        raw_spin_lock_irq(&pool->lock);
         /* see the comment in try_to_grab_pending() with the same code */
         pwq = get_work_pwq(work);
         if (pwq) {
@@ -3002,7 +3003,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
         check_flush_dependency(pwq->wq, work);
 
         insert_wq_barrier(pwq, barr, work, worker);
-        spin_unlock_irq(&pool->lock);
+        raw_spin_unlock_irq(&pool->lock);
 
         /*
          * Force a lock recursion deadlock when using flush_work() inside a
@@ -3021,7 +3022,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
         rcu_read_unlock();
         return true;
 already_gone:
-        spin_unlock_irq(&pool->lock);
+        raw_spin_unlock_irq(&pool->lock);
         rcu_read_unlock();
         return false;
 }
@@ -3414,7 +3415,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
  */
 static int init_worker_pool(struct worker_pool *pool)
 {
-        spin_lock_init(&pool->lock);
+        raw_spin_lock_init(&pool->lock);
         pool->id = -1;
         pool->cpu = -1;
         pool->node = NUMA_NO_NODE;
@@ -3491,7 +3492,6 @@ static void rcu_free_wq(struct rcu_head *rcu)
         else
                 free_workqueue_attrs(wq->unbound_attrs);
 
-        kfree(wq->rescuer);
         kfree(wq);
 }
 
@@ -3504,6 +3504,18 @@ static void rcu_free_pool(struct rcu_head *rcu)
         kfree(pool);
 }
 
+/* This returns with the lock held on success (pool manager is inactive). */
+static bool wq_manager_inactive(struct worker_pool *pool)
+{
+        raw_spin_lock_irq(&pool->lock);
+
+        if (pool->flags & POOL_MANAGER_ACTIVE) {
+                raw_spin_unlock_irq(&pool->lock);
+                return false;
+        }
+        return true;
+}
+
 /**
  * put_unbound_pool - put a worker_pool
  * @pool: worker_pool to put
@@ -3539,16 +3551,17 @@ static void put_unbound_pool(struct worker_pool *pool)
          * Become the manager and destroy all workers.  This prevents
          * @pool's workers from blocking on attach_mutex.  We're the last
          * manager and @pool gets freed with the flag set.
+         * Because of how wq_manager_inactive() works, we will hold the
+         * spinlock after a successful wait.
          */
-        spin_lock_irq(&pool->lock);
-        wait_event_lock_irq(wq_manager_wait,
-                            !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+        rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool),
+                           TASK_UNINTERRUPTIBLE);
         pool->flags |= POOL_MANAGER_ACTIVE;
 
         while ((worker = first_idle_worker(pool)))
                 destroy_worker(worker);
         WARN_ON(pool->nr_workers || pool->nr_idle);
-        spin_unlock_irq(&pool->lock);
+        raw_spin_unlock_irq(&pool->lock);
 
         mutex_lock(&wq_pool_attach_mutex);
         if (!list_empty(&pool->workers))
@@ -3704,7 +3717,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
                 return;
 
         /* this function can be called during early boot w/ irq disabled */
-        spin_lock_irqsave(&pwq->pool->lock, flags);
+        raw_spin_lock_irqsave(&pwq->pool->lock, flags);
 
         /*
          * During [un]freezing, the caller is responsible for ensuring that
@@ -3727,7 +3740,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
                 pwq->max_active = 0;
         }
 
-        spin_unlock_irqrestore(&pwq->pool->lock, flags);
+        raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
 }
 
 /* initialize newly alloced @pwq which is associated with @wq and @pool */
@@ -4129,9 +4142,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
 
 use_dfl_pwq:
         mutex_lock(&wq->mutex);
-        spin_lock_irq(&wq->dfl_pwq->pool->lock);
+        raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
         get_pwq(wq->dfl_pwq);
-        spin_unlock_irq(&wq->dfl_pwq->pool->lock);
+        raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
         old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
 out_unlock:
         mutex_unlock(&wq->mutex);
@@ -4208,8 +4221,8 @@ static int init_rescuer(struct workqueue_struct *wq)
 
         rescuer->rescue_wq = wq;
         rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
-        ret = PTR_ERR_OR_ZERO(rescuer->task);
-        if (ret) {
+        if (IS_ERR(rescuer->task)) {
+                ret = PTR_ERR(rescuer->task);
                 kfree(rescuer);
                 return ret;
         }
@@ -4360,9 +4373,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
                 struct worker *rescuer = wq->rescuer;
 
                 /* this prevents new queueing */
-                spin_lock_irq(&wq_mayday_lock);
+                raw_spin_lock_irq(&wq_mayday_lock);
                 wq->rescuer = NULL;
-                spin_unlock_irq(&wq_mayday_lock);
+                raw_spin_unlock_irq(&wq_mayday_lock);
 
                 /* rescuer will empty maydays list before exiting */
                 kthread_stop(rescuer->task);
@@ -4376,27 +4389,25 @@ void destroy_workqueue(struct workqueue_struct *wq)
         mutex_lock(&wq_pool_mutex);
         mutex_lock(&wq->mutex);
         for_each_pwq(pwq, wq) {
-                spin_lock_irq(&pwq->pool->lock);
+                raw_spin_lock_irq(&pwq->pool->lock);
                 if (WARN_ON(pwq_busy(pwq))) {
                         pr_warn("%s: %s has the following busy pwq\n",
                                 __func__, wq->name);
                         show_pwq(pwq);
-                        spin_unlock_irq(&pwq->pool->lock);
+                        raw_spin_unlock_irq(&pwq->pool->lock);
                         mutex_unlock(&wq->mutex);
                         mutex_unlock(&wq_pool_mutex);
                         show_workqueue_state();
                         return;
                 }
-                spin_unlock_irq(&pwq->pool->lock);
+                raw_spin_unlock_irq(&pwq->pool->lock);
         }
         mutex_unlock(&wq->mutex);
-        mutex_unlock(&wq_pool_mutex);
 
         /*
          * wq list is used to freeze wq, remove from list after
          * flushing is complete in case freeze races us.
          */
-        mutex_lock(&wq_pool_mutex);
         list_del_rcu(&wq->list);
         mutex_unlock(&wq_pool_mutex);
 
@@ -4558,10 +4569,10 @@ unsigned int work_busy(struct work_struct *work)
         rcu_read_lock();
         pool = get_work_pool(work);
         if (pool) {
-                spin_lock_irqsave(&pool->lock, flags);
+                raw_spin_lock_irqsave(&pool->lock, flags);
                 if (find_worker_executing_work(pool, work))
                         ret |= WORK_BUSY_RUNNING;
-                spin_unlock_irqrestore(&pool->lock, flags);
+                raw_spin_unlock_irqrestore(&pool->lock, flags);
         }
         rcu_read_unlock();
 
@@ -4768,10 +4779,10 @@ void show_workqueue_state(void)
                 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
 
                 for_each_pwq(pwq, wq) {
-                        spin_lock_irqsave(&pwq->pool->lock, flags);
+                        raw_spin_lock_irqsave(&pwq->pool->lock, flags);
                         if (pwq->nr_active || !list_empty(&pwq->delayed_works))
                                 show_pwq(pwq);
-                        spin_unlock_irqrestore(&pwq->pool->lock, flags);
+                        raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
                         /*
                          * We could be printing a lot from atomic context, e.g.
                          * sysrq-t -> show_workqueue_state(). Avoid triggering
@@ -4785,7 +4796,7 @@ void show_workqueue_state(void)
                 struct worker *worker;
                 bool first = true;
 
-                spin_lock_irqsave(&pool->lock, flags);
+                raw_spin_lock_irqsave(&pool->lock, flags);
                 if (pool->nr_workers == pool->nr_idle)
                         goto next_pool;
 
@@ -4804,7 +4815,7 @@ void show_workqueue_state(void)
                 }
                 pr_cont("\n");
         next_pool:
-                spin_unlock_irqrestore(&pool->lock, flags);
+                raw_spin_unlock_irqrestore(&pool->lock, flags);
                 /*
                  * We could be printing a lot from atomic context, e.g.
                  * sysrq-t -> show_workqueue_state(). Avoid triggering
@@ -4834,7 +4845,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
                 struct worker_pool *pool = worker->pool;
 
                 if (pool) {
-                        spin_lock_irq(&pool->lock);
+                        raw_spin_lock_irq(&pool->lock);
                         /*
                          * ->desc tracks information (wq name or
                          * set_worker_desc()) for the latest execution.  If
@@ -4848,7 +4859,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
                                 scnprintf(buf + off, size - off, "-%s",
                                           worker->desc);
                         }
-                        spin_unlock_irq(&pool->lock);
+                        raw_spin_unlock_irq(&pool->lock);
                 }
         }
 
@@ -4879,7 +4890,7 @@ static void unbind_workers(int cpu)
 
         for_each_cpu_worker_pool(pool, cpu) {
                 mutex_lock(&wq_pool_attach_mutex);
-                spin_lock_irq(&pool->lock);
+                raw_spin_lock_irq(&pool->lock);
 
                 /*
                  * We've blocked all attach/detach operations. Make all workers
@@ -4893,7 +4904,7 @@ static void unbind_workers(int cpu)
 
                 pool->flags |= POOL_DISASSOCIATED;
 
-                spin_unlock_irq(&pool->lock);
+                raw_spin_unlock_irq(&pool->lock);
                 mutex_unlock(&wq_pool_attach_mutex);
 
                 /*
@@ -4919,9 +4930,9 @@ static void unbind_workers(int cpu)
                  * worker blocking could lead to lengthy stalls.  Kick off
                  * unbound chain execution of currently pending work items.
                  */
-                spin_lock_irq(&pool->lock);
+                raw_spin_lock_irq(&pool->lock);
                 wake_up_worker(pool);
-                spin_unlock_irq(&pool->lock);
+                raw_spin_unlock_irq(&pool->lock);
         }
 }
 
@@ -4948,7 +4959,7 @@ static void rebind_workers(struct worker_pool *pool)
                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
                                                   pool->attrs->cpumask) < 0);
 
-        spin_lock_irq(&pool->lock);
+        raw_spin_lock_irq(&pool->lock);
 
         pool->flags &= ~POOL_DISASSOCIATED;
 
@@ -4987,7 +4998,7 @@ static void rebind_workers(struct worker_pool *pool)
                 WRITE_ONCE(worker->flags, worker_flags);
         }
 
-        spin_unlock_irq(&pool->lock);
+        raw_spin_unlock_irq(&pool->lock);
 }
 
 /**
@@ -5906,7 +5917,7 @@ void __init workqueue_init_early(void)
         int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
         int i, cpu;
 
-        WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
+        BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
         BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
         cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));