coredump: ensure all coredumping tasks have SIGNAL_GROUP_COREDUMP
task_will_free_mem() is wrong in many ways, and in particular the SIGNAL_GROUP_COREDUMP check is not reliable: a task can participate in the coredumping without the SIGNAL_GROUP_COREDUMP bit set. Change the zap_threads() paths to always set SIGNAL_GROUP_COREDUMP even if other CLONE_VM processes can't react to SIGKILL. Fortunately, at least the oom-kill case is fine; it kills all tasks sharing the same mm, so it should also kill the process which actually dumps the core. The change in prepare_signal() is not strictly necessary, it just ensures that the patch does not bring another subtle behavioural change. But it reminds us that this SIGNAL_GROUP_EXIT/COREDUMP case needs more changes. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Cc: David Rientjes <rientjes@google.com> Cc: Kyle Walker <kwalker@redhat.com> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Stanislav Kozina <skozina@redhat.com> Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
9317bb9696
commit
5fa534c987
@ -280,11 +280,13 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
|
|||||||
return ispipe;
|
return ispipe;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int zap_process(struct task_struct *start, int exit_code)
|
static int zap_process(struct task_struct *start, int exit_code, int flags)
|
||||||
{
|
{
|
||||||
struct task_struct *t;
|
struct task_struct *t;
|
||||||
int nr = 0;
|
int nr = 0;
|
||||||
|
|
||||||
|
/* ignore all signals except SIGKILL, see prepare_signal() */
|
||||||
|
start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
|
||||||
start->signal->group_exit_code = exit_code;
|
start->signal->group_exit_code = exit_code;
|
||||||
start->signal->group_stop_count = 0;
|
start->signal->group_stop_count = 0;
|
||||||
|
|
||||||
@ -311,10 +313,8 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
|
|||||||
spin_lock_irq(&tsk->sighand->siglock);
|
spin_lock_irq(&tsk->sighand->siglock);
|
||||||
if (!signal_group_exit(tsk->signal)) {
|
if (!signal_group_exit(tsk->signal)) {
|
||||||
mm->core_state = core_state;
|
mm->core_state = core_state;
|
||||||
nr = zap_process(tsk, exit_code);
|
|
||||||
tsk->signal->group_exit_task = tsk;
|
tsk->signal->group_exit_task = tsk;
|
||||||
/* ignore all signals except SIGKILL, see prepare_signal() */
|
nr = zap_process(tsk, exit_code, 0);
|
||||||
tsk->signal->flags = SIGNAL_GROUP_COREDUMP;
|
|
||||||
clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
|
clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
|
||||||
}
|
}
|
||||||
spin_unlock_irq(&tsk->sighand->siglock);
|
spin_unlock_irq(&tsk->sighand->siglock);
|
||||||
@ -365,8 +365,8 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
|
|||||||
if (p->mm) {
|
if (p->mm) {
|
||||||
if (unlikely(p->mm == mm)) {
|
if (unlikely(p->mm == mm)) {
|
||||||
lock_task_sighand(p, &flags);
|
lock_task_sighand(p, &flags);
|
||||||
nr += zap_process(p, exit_code);
|
nr += zap_process(p, exit_code,
|
||||||
p->signal->flags = SIGNAL_GROUP_EXIT;
|
SIGNAL_GROUP_EXIT);
|
||||||
unlock_task_sighand(p, &flags);
|
unlock_task_sighand(p, &flags);
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
@ -788,7 +788,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
|
|||||||
sigset_t flush;
|
sigset_t flush;
|
||||||
|
|
||||||
if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
|
if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
|
||||||
if (signal->flags & SIGNAL_GROUP_COREDUMP)
|
if (!(signal->flags & SIGNAL_GROUP_EXIT))
|
||||||
return sig == SIGKILL;
|
return sig == SIGKILL;
|
||||||
/*
|
/*
|
||||||
* The process is in the middle of dying, nothing to do.
|
* The process is in the middle of dying, nothing to do.
|
||||||
|
Loading…
Reference in New Issue
Block a user