softirq: Eliminate unused cond_resched_softirq() macro
The cond_resched_softirq() macro is not used anywhere in mainline, so this commit simplifies the kernel by eliminating it.

Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
commit c3442697c2
parent cee4393989
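For context, the removed helper briefly re-enabled bottom halves around a reschedule. Below is a minimal sketch of how a hypothetical call site could get the same effect with the remaining primitives; more_work(), do_one_item() and example_drain_queue() are placeholders for illustration only, not kernel APIs and not part of this commit:

	#include <linux/bottom_half.h>
	#include <linux/sched.h>

	static bool more_work(void);	/* placeholder, not a kernel API */
	static void do_one_item(void);	/* placeholder, not a kernel API */

	/* Hypothetical example: a softirq-disabled work loop that yields
	 * the CPU the way cond_resched_softirq() did, by open-coding the
	 * enable/resched/disable sequence. */
	static void example_drain_queue(void)
	{
		local_bh_disable();
		while (more_work()) {
			do_one_item();

			/* Same effect as the removed macro: drop softirq
			 * protection, reschedule if needed, restore it. */
			local_bh_enable();
			cond_resched();
			local_bh_disable();
		}
		local_bh_enable();
	}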
include/linux/sched.h
@@ -1613,7 +1613,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
  * explicit rescheduling in places that are safe. The return
  * value indicates whether a reschedule was done in fact.
  * cond_resched_lock() will drop the spinlock before scheduling,
- * cond_resched_softirq() will enable bhs before scheduling.
  */
 #ifndef CONFIG_PREEMPT
 extern int _cond_resched(void);
@@ -1633,13 +1632,6 @@ extern int __cond_resched_lock(spinlock_t *lock);
 	__cond_resched_lock(lock);				\
 })
 
-extern int __cond_resched_softirq(void);
-
-#define cond_resched_softirq() ({					\
-	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
-	__cond_resched_softirq();					\
-})
-
 static inline void cond_resched_rcu(void)
 {
 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
kernel/sched/core.c
@@ -5012,20 +5012,6 @@ int __cond_resched_lock(spinlock_t *lock)
 }
 EXPORT_SYMBOL(__cond_resched_lock);
 
-int __sched __cond_resched_softirq(void)
-{
-	BUG_ON(!in_softirq());
-
-	if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
-		local_bh_enable();
-		preempt_schedule_common();
-		local_bh_disable();
-		return 1;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(__cond_resched_softirq);
-
 /**
  * yield - yield the current processor to other threads.
  *
kernel/softirq.c
@@ -145,8 +145,7 @@ static void __local_bh_enable(unsigned int cnt)
 }
 
 /*
- * Special-case - softirqs can safely be enabled in
- * cond_resched_softirq(), or by __do_softirq(),
+ * Special-case - softirqs can safely be enabled by __do_softirq(),
  * without processing still-pending softirqs:
  */
 void _local_bh_enable(void)
Loading…
Reference in New Issue
Block a user