Merge 5.10.162 into android12-5.10-lts
Changes in 5.10.162
    kernel: provide create_io_thread() helper
    iov_iter: add helper to save iov_iter state
    saner calling conventions for unlazy_child()
    fs: add support for LOOKUP_CACHED
    fix handling of nd->depth on LOOKUP_CACHED failures in try_to_unlazy*
    Make sure nd->path.mnt and nd->path.dentry are always valid pointers
    fs: expose LOOKUP_CACHED through openat2() RESOLVE_CACHED
    tools headers UAPI: Sync openat2.h with the kernel sources
    net: provide __sys_shutdown_sock() that takes a socket
    net: add accept helper not installing fd
    signal: Add task_sigpending() helper
    fs: make do_renameat2() take struct filename
    file: Rename __close_fd_get_file close_fd_get_file
    fs: provide locked helper variant of close_fd_get_file()
    entry: Add support for TIF_NOTIFY_SIGNAL
    task_work: Use TIF_NOTIFY_SIGNAL if available
    x86: Wire up TIF_NOTIFY_SIGNAL
    arc: add support for TIF_NOTIFY_SIGNAL
    arm64: add support for TIF_NOTIFY_SIGNAL
    m68k: add support for TIF_NOTIFY_SIGNAL
    nios32: add support for TIF_NOTIFY_SIGNAL
    parisc: add support for TIF_NOTIFY_SIGNAL
    powerpc: add support for TIF_NOTIFY_SIGNAL
    mips: add support for TIF_NOTIFY_SIGNAL
    s390: add support for TIF_NOTIFY_SIGNAL
    um: add support for TIF_NOTIFY_SIGNAL
    sh: add support for TIF_NOTIFY_SIGNAL
    openrisc: add support for TIF_NOTIFY_SIGNAL
    csky: add support for TIF_NOTIFY_SIGNAL
    hexagon: add support for TIF_NOTIFY_SIGNAL
    microblaze: add support for TIF_NOTIFY_SIGNAL
    arm: add support for TIF_NOTIFY_SIGNAL
    xtensa: add support for TIF_NOTIFY_SIGNAL
    alpha: add support for TIF_NOTIFY_SIGNAL
    c6x: add support for TIF_NOTIFY_SIGNAL
    h8300: add support for TIF_NOTIFY_SIGNAL
    ia64: add support for TIF_NOTIFY_SIGNAL
    nds32: add support for TIF_NOTIFY_SIGNAL
    riscv: add support for TIF_NOTIFY_SIGNAL
    sparc: add support for TIF_NOTIFY_SIGNAL
    ia64: don't call handle_signal() unless there's actually a signal queued
    ARC: unbork 5.11 bootup: fix snafu in _TIF_NOTIFY_SIGNAL handling
    alpha: fix TIF_NOTIFY_SIGNAL handling
    task_work: remove legacy TWA_SIGNAL path
    kernel: remove checking for TIF_NOTIFY_SIGNAL
    coredump: Limit what can interrupt coredumps
    kernel: allow fork with TIF_NOTIFY_SIGNAL pending
    entry/kvm: Exit to user mode when TIF_NOTIFY_SIGNAL is set
    arch: setup PF_IO_WORKER threads like PF_KTHREAD
    arch: ensure parisc/powerpc handle PF_IO_WORKER in copy_thread()
    x86/process: setup io_threads more like normal user space threads
    kernel: stop masking signals in create_io_thread()
    kernel: don't call do_exit() for PF_IO_WORKER threads
    task_work: add helper for more targeted task_work canceling
    io_uring: import 5.15-stable io_uring
    signal: kill JOBCTL_TASK_WORK
    task_work: unconditionally run task_work from get_signal()
    net: remove cmsg restriction from io_uring based send/recvmsg calls
    Revert "proc: don't allow async path resolution of /proc/thread-self components"
    Revert "proc: don't allow async path resolution of /proc/self components"
    eventpoll: add EPOLL_URING_WAKE poll wakeup flag
    eventfd: provide a eventfd_signal_mask() helper
    io_uring: pass in EPOLL_URING_WAKE for eventfd signaling and wakeups
    Linux 5.10.162

Change-Id: I50a7b8bc8d38fac612113281b218cf5323b0af5e
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
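The per-architecture hunks in this merge follow two recurring shapes: return-to-user paths that used to test only TIF_SIGPENDING now also test TIF_NOTIFY_SIGNAL (so task_work queued with TWA_SIGNAL gets run), and copy_thread() paths that special-cased PF_KTHREAD now also cover PF_IO_WORKER. A condensed sketch of that pattern as it appears in each arch's signal and process code below (an illustrative fragment distilled from the hunks, not a literal file in the tree):

	/* Return-to-user work loop: TIF_NOTIFY_SIGNAL now also drives the
	 * signal-delivery path, which processes queued task_work. */
	if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
		do_signal(regs);

	/* io_uring workers are user-space threads, but copy_thread() sets
	 * them up along the same path as kernel threads. */
	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
		/* kernel-thread style frame setup */
	}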
commit 8596b99884

Makefile (4 lines changed):
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 161
+SUBLEVEL = 162
 EXTRAVERSION =
 NAME = Dare mighty things

@@ -1229,7 +1229,7 @@ endif
 	$(Q)$(MAKE) $(hdr-inst)=$(hdr-prefix)arch/$(SRCARCH)/include/uapi

 ifeq ($(KBUILD_EXTMOD),)
-core-y		+= kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y		+= kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ io_uring/

 vmlinux-dirs	:= $(patsubst %/,%,$(filter %/, \
 		     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -62,6 +62,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SYSCALL_AUDIT	4	/* syscall audit active */
+#define TIF_NOTIFY_SIGNAL	5	/* signal notifications exist */
 #define TIF_DIE_IF_KERNEL	9	/* dik recursion lock */
 #define TIF_MEMDIE		13	/* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG	14	/* idle is polling for TIF_NEED_RESCHED */
@@ -71,11 +72,12 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_NOTIFY_SIGNAL	(1<<TIF_NOTIFY_SIGNAL)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)

 /* Work to do on interrupt/exception return. */
 #define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
-				 _TIF_NOTIFY_RESUME)
+				 _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL)

 /* Work to do on any return to userspace. */
 #define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK \
@@ -544,7 +544,7 @@ $ret_success:
 	.align	4
 	.type	work_pending, @function
 work_pending:
-	and	$17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2
+	and	$17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL, $2
 	bne	$2, $work_notifysig

$work_resched:
@@ -249,7 +249,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	childti->pcb.ksp = (unsigned long) childstack;
 	childti->pcb.flags = 1;	/* set FEN, clear everything else */

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		/* kernel thread */
 		memset(childstack, 0,
 			sizeof(struct switch_stack) + sizeof(struct pt_regs));
@@ -527,7 +527,7 @@ do_work_pending(struct pt_regs *regs, unsigned long thread_flags,
 			schedule();
 		} else {
 			local_irq_enable();
-			if (thread_flags & _TIF_SIGPENDING) {
+			if (thread_flags & (_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)) {
 				do_signal(regs, r0, r19);
 				r0 = 0;
 			} else {
@@ -79,6 +79,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SYSCALL_AUDIT	4	/* syscall auditing active */
+#define TIF_NOTIFY_SIGNAL	5	/* signal notifications exist */
 #define TIF_SYSCALL_TRACE	15	/* syscall trace active */

 /* true if poll_idle() is polling TIF_NEED_RESCHED */
@@ -89,11 +90,12 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_NOTIFY_SIGNAL	(1<<TIF_NOTIFY_SIGNAL)
 #define _TIF_MEMDIE		(1<<TIF_MEMDIE)

 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-				 _TIF_NOTIFY_RESUME)
+				 _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL)

 /*
  * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
@@ -308,7 +308,8 @@ resume_user_mode_begin:
 	mov r0, sp	; pt_regs for arg to do_signal()/do_notify_resume()

 	GET_CURR_THR_INFO_FLAGS   r9
-	bbit0  r9, TIF_SIGPENDING, .Lchk_notify_resume
+	and.f  0, r9, _TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL
+	bz .Lchk_notify_resume

 	; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
 	; in pt_reg since the "C" ABI (kernel code) will automatically
@@ -191,7 +191,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	childksp[0] = 0;			/* fp */
 	childksp[1] = (unsigned long)ret_from_fork; /* blink */

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		memset(c_regs, 0, sizeof(struct pt_regs));

 		c_callee->r13 = kthread_arg;
@@ -405,7 +405,7 @@ void do_signal(struct pt_regs *regs)

 	restart_scall = in_syscall(regs) && syscall_restartable(regs);

-	if (get_signal(&ksig)) {
+	if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) {
 		if (restart_scall) {
 			arc_restart_syscall(&ksig.ka, regs);
 			syscall_wont_restart(regs);	/* No more restarts */
@@ -126,6 +126,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
  * thread information flags:
  *  TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
  *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
+ *
+ * Any bit in the range of 0..15 will cause do_work_pending() to be invoked.
  */
 #define TIF_SIGPENDING		0	/* signal pending */
 #define TIF_NEED_RESCHED	1	/* rescheduling necessary */
@@ -135,6 +137,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
 #define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
 #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
 #define TIF_SECCOMP		7	/* seccomp syscall filtering active */
+#define TIF_NOTIFY_SIGNAL	8	/* signal notifications exist */

 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
@@ -148,6 +151,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)

 /* Checks for any syscall work in entry-common.S */
@@ -158,7 +162,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
  * Change these and you break ASM code in entry-common.S
  */
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+				 _TIF_NOTIFY_SIGNAL)

 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_THREAD_INFO_H */
@@ -53,7 +53,7 @@ __ret_fast_syscall:
 	cmp	r2, #TASK_SIZE
 	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
-	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	movs	r1, r1, lsl #16
 	bne	fast_work_pending


@@ -90,7 +90,7 @@ __ret_fast_syscall:
 	cmp	r2, #TASK_SIZE
 	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
-	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	movs	r1, r1, lsl #16
 	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)
@@ -131,7 +131,7 @@ ENTRY(ret_to_user_from_irq)
 	cmp	r2, #TASK_SIZE
 	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]
-	tst	r1, #_TIF_WORK_MASK
+	movs	r1, r1, lsl #16
 	bne	slow_work_pending
no_work_pending:
 	asm_trace_hardirqs_on save = 0
@@ -59,7 +59,7 @@ __irq_entry:

 	get_thread_info tsk
 	ldr	r2, [tsk, #TI_FLAGS]
-	tst	r2, #_TIF_WORK_MASK
+	movs	r2, r2, lsl #16
 	beq	2f			@ no work pending
 	mov	r0, #V7M_SCB_ICSR_PENDSVSET
 	str	r0, [r1, V7M_SCB_ICSR]	@ raise PendSV
@@ -243,7 +243,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	thread->cpu_domain = get_domain();
 #endif

-	if (likely(!(p->flags & PF_KTHREAD))) {
+	if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
 		*childregs = *current_pt_regs();
 		childregs->ARM_r0 = 0;
 		if (stack_start)
@@ -655,7 +655,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 		if (unlikely(!user_mode(regs)))
 			return 0;
 		local_irq_enable();
-		if (thread_flags & _TIF_SIGPENDING) {
+		if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
 			int restart = do_signal(regs, syscall);
 			if (unlikely(restart)) {
 				/*
@@ -68,6 +68,7 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define TIF_UPROBE		4	/* uprobe breakpoint or singlestep */
 #define TIF_FSCHECK		5	/* Check FS is USER_DS on return */
 #define TIF_MTE_ASYNC_FAULT	6	/* MTE Asynchronous Tag Check Fault */
+#define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing */
 #define TIF_SYSCALL_TRACEPOINT	10	/* syscall tracepoint for ftrace */
@@ -98,10 +99,12 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define _TIF_32BIT		(1 << TIF_32BIT)
 #define _TIF_SVE		(1 << TIF_SVE)
 #define _TIF_MTE_ASYNC_FAULT	(1 << TIF_MTE_ASYNC_FAULT)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)

 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
-				 _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT)
+				 _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT | \
+				 _TIF_NOTIFY_SIGNAL)

 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
@@ -394,7 +394,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,

 	ptrauth_thread_init_kernel(p);

-	if (likely(!(p->flags & PF_KTHREAD))) {
+	if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
 		*childregs = *current_pt_regs();
 		childregs->regs[0] = 0;

@@ -938,7 +938,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 					       (void __user *)NULL, current);
 		}

-		if (thread_flags & _TIF_SIGPENDING)
+		if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 			do_signal(regs);

 		if (thread_flags & _TIF_NOTIFY_RESUME) {
@@ -82,6 +82,7 @@ struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_RESTORE_SIGMASK	4	/* restore signal mask in do_signal() */
+#define TIF_NOTIFY_SIGNAL	5	/* signal notifications exist */

 #define TIF_MEMDIE		17	/* OOM killer killed process */

@@ -116,6 +116,7 @@ void foo(void)
 	DEFINE(_TIF_NOTIFY_RESUME, (1<<TIF_NOTIFY_RESUME));
 	DEFINE(_TIF_SIGPENDING, (1<<TIF_SIGPENDING));
 	DEFINE(_TIF_NEED_RESCHED, (1<<TIF_NEED_RESCHED));
+	DEFINE(_TIF_NOTIFY_SIGNAL, (1<<TIF_NOTIFY_SIGNAL));

 	DEFINE(_TIF_ALLWORK_MASK, TIF_ALLWORK_MASK);
 	DEFINE(_TIF_WORK_MASK, TIF_WORK_MASK);
@@ -13,6 +13,7 @@
 #include <linux/syscalls.h>
 #include <linux/tracehook.h>

+#include <asm/asm-offsets.h>
 #include <asm/ucontext.h>
 #include <asm/cacheflush.h>

@@ -313,7 +314,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags,
 				 int syscall)
 {
 	/* deal with pending signal delivery */
-	if (thread_info_flags & (1 << TIF_SIGPENDING))
+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs, syscall);

 	if (thread_info_flags & (1 << TIF_NOTIFY_RESUME))
@@ -64,6 +64,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_TRACE	4	/* syscall trace active */
 #define TIF_SYSCALL_TRACEPOINT	5	/* syscall tracepoint instrumentation */
 #define TIF_SYSCALL_AUDIT	6	/* syscall auditing */
+#define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
 #define TIF_POLLING_NRFLAG	16	/* poll_idle() is TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20	/* restore signal mask in do_signal() */
@@ -75,6 +76,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
@@ -82,7 +84,8 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)

 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+				 _TIF_NOTIFY_SIGNAL)

 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
@@ -49,7 +49,7 @@ int copy_thread(unsigned long clone_flags,
 	/* setup thread.sp for switch_to !!! */
 	p->thread.sp = (unsigned long)childstack;

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childstack->r15 = (unsigned long) ret_from_kernel_thread;
 		childstack->r10 = kthread_arg;
@@ -261,7 +261,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 		uprobe_notify_resume(regs);

 	/* Handle pending signal delivery */
-	if (thread_info_flags & _TIF_SIGPENDING)
+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs);

 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
@@ -73,6 +73,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing */
 #define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
 #define TIF_POLLING_NRFLAG	9	/* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_NOTIFY_SIGNAL	10	/* signal notifications exist */

 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
@@ -83,6 +84,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)

 /* work to do in syscall trace */
 #define _TIF_WORK_SYSCALL_MASK	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
@@ -92,7 +94,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_ALLWORK_MASK	(_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \
 				 _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \
-				 _TIF_SYSCALL_TRACEPOINT)
+				 _TIF_SYSCALL_TRACEPOINT | _TIF_NOTIFY_SIGNAL)

 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK		(_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
@@ -112,7 +112,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,

 	childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->retpc = (unsigned long) ret_from_kernel_thread;
 		childregs->er4 = topstk; /* arg */
@@ -279,7 +279,7 @@ static void do_signal(struct pt_regs *regs)

 asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
 {
-	if (thread_info_flags & _TIF_SIGPENDING)
+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs);

 	if (thread_info_flags & _TIF_NOTIFY_RESUME)
@@ -95,6 +95,7 @@ register struct thread_info *__current_thread_info asm(QUOTED_THREADINFO_REG);
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* restore ss @ return to usr mode */
 #define TIF_RESTORE_SIGMASK	6	/* restore sig mask in do_signal() */
+#define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* OOM killer killed process */

@@ -103,6 +104,7 @@ register struct thread_info *__current_thread_info asm(QUOTED_THREADINFO_REG);
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)

 /* work to do on interrupt/exception return - All but TIF_SYSCALL_TRACE */
 #define _TIF_WORK_MASK		(0x0000FFFF & ~_TIF_SYSCALL_TRACE)
@@ -73,7 +73,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
 						    sizeof(*ss));
 	ss->lr = (unsigned long)ret_from_fork;
 	p->thread.switch_sp = ss;
-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		/* r24 <- fn, r25 <- arg */
 		ss->r24 = usp;
@@ -174,7 +174,7 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
 		return 1;
 	}

-	if (thread_info_flags & _TIF_SIGPENDING) {
+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
 		do_signal(regs);
 		return 1;
 	}
@@ -103,6 +103,7 @@ struct thread_info {
 #define TIF_SYSCALL_TRACE	2	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	3	/* syscall auditing active */
 #define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
+#define TIF_NOTIFY_SIGNAL	5	/* signal notifications exist */
 #define TIF_NOTIFY_RESUME	6	/* resumption notification requested */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
 #define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
@@ -115,6 +116,7 @@ struct thread_info {
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
@@ -124,7 +126,7 @@ struct thread_info {

 /* "work to do on user-return" bits */
 #define TIF_ALLWORK_MASK	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\
-				 _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE)
+				 _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_NOTIFY_SIGNAL)
 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
 #define TIF_WORK_MASK		(TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))

@@ -171,7 +171,8 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
 	}

 	/* deal with pending signal delivery */
-	if (test_thread_flag(TIF_SIGPENDING)) {
+	if (test_thread_flag(TIF_SIGPENDING) ||
+	    test_thread_flag(TIF_NOTIFY_SIGNAL)) {
 		local_irq_enable();	/* force interrupt enable */
 		ia64_do_signal(scr, in_syscall);
 	}
@@ -337,7 +338,7 @@ copy_thread(unsigned long clone_flags, unsigned long user_stack_base,

 	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		if (unlikely(!user_stack_base)) {
 			/* fork_idle() called us */
 			return 0;
@@ -341,7 +341,8 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
 	 * need to push through a forced SIGSEGV.
 	 */
 	while (1) {
-		get_signal(&ksig);
+		if (!get_signal(&ksig))
+			break;

 		/*
 		 * get_signal() may have run a debugger (via notify_parent())
@@ -60,6 +60,7 @@ static inline struct thread_info *current_thread_info(void)
  * bits 0-7 are tested at every exception exit
  * bits 8-15 are also tested at syscall exit
  */
+#define TIF_NOTIFY_SIGNAL	4
 #define TIF_NOTIFY_RESUME	5	/* callback before returning to user */
 #define TIF_SIGPENDING		6	/* signal pending */
 #define TIF_NEED_RESCHED	7	/* rescheduling necessary */
@@ -157,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
 	 */
 	p->thread.fs = get_fs().seg;

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		/* kernel thread */
 		memset(frame, 0, sizeof(struct fork_frame));
 		frame->regs.sr = PS_S;
@@ -1129,7 +1129,8 @@ static void do_signal(struct pt_regs *regs)

 void do_notify_resume(struct pt_regs *regs)
 {
-	if (test_thread_flag(TIF_SIGPENDING))
+	if (test_thread_flag(TIF_NOTIFY_SIGNAL) ||
+	    test_thread_flag(TIF_SIGPENDING))
 		do_signal(regs);

 	if (test_thread_flag(TIF_NOTIFY_RESUME))
@@ -107,6 +107,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 /* restore singlestep on return to user mode */
 #define TIF_SINGLESTEP		4
+#define TIF_NOTIFY_SIGNAL	5	/* signal notifications exist */
 #define TIF_MEMDIE		6	/* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
 #define TIF_SECCOMP		10	/* secure computing */
@@ -119,6 +120,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
@@ -59,7 +59,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
 	struct pt_regs *childregs = task_pt_regs(p);
 	struct thread_info *ti = task_thread_info(p);

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		/* if we're creating a new kernel thread then just zeroing all
 		 * the registers. That's OK for a brand new thread.*/
 		memset(childregs, 0, sizeof(struct pt_regs));
@@ -313,7 +313,8 @@ static void do_signal(struct pt_regs *regs, int in_syscall)

 asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall)
 {
-	if (test_thread_flag(TIF_SIGPENDING))
+	if (test_thread_flag(TIF_SIGPENDING) ||
+	    test_thread_flag(TIF_NOTIFY_SIGNAL))
 		do_signal(regs, in_syscall);

 	if (test_thread_flag(TIF_NOTIFY_RESUME))
@@ -115,6 +115,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SECCOMP		4	/* secure computing */
 #define TIF_NOTIFY_RESUME	5	/* callback before returning to user */
 #define TIF_UPROBE		6	/* breakpointed or singlestepping */
+#define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
@@ -139,6 +140,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_UPROBE		(1<<TIF_UPROBE)
+#define _TIF_NOTIFY_SIGNAL	(1<<TIF_NOTIFY_SIGNAL)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_NOHZ		(1<<TIF_NOHZ)
 #define _TIF_FIXADE		(1<<TIF_FIXADE)
@@ -164,7 +166,7 @@ static inline struct thread_info *current_thread_info(void)
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK		\
 	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME | \
-	 _TIF_UPROBE)
+	 _TIF_UPROBE | _TIF_NOTIFY_SIGNAL)
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK	(_TIF_NOHZ | _TIF_WORK_MASK | \
 				 _TIF_WORK_SYSCALL_EXIT | \
@@ -135,7 +135,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	/*  Put the stack after the struct pt_regs.  */
 	childksp = (unsigned long) childregs;
 	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		/* kernel thread */
 		unsigned long status = p->thread.cp0_status;
 		memset(childregs, 0, sizeof(struct pt_regs));
@@ -903,7 +903,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
 		uprobe_notify_resume(regs);

 	/* deal with pending signal delivery */
-	if (thread_info_flags & _TIF_SIGPENDING)
+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs);

 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
@@ -48,6 +48,7 @@ struct thread_info {
 #define TIF_NEED_RESCHED	2
 #define TIF_SINGLESTEP		3
 #define TIF_NOTIFY_RESUME	4	/* callback before returning to user */
+#define TIF_NOTIFY_SIGNAL	5	/* signal notifications exist */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_POLLING_NRFLAG	17
 #define TIF_MEMDIE		18
@@ -57,6 +58,7 @@ struct thread_info {
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
@@ -120,7 +120,7 @@ work_pending:
 	andi	$p1, $r1, #_TIF_NEED_RESCHED
 	bnez	$p1, work_resched

-	andi	$p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME
+	andi	$p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME|#_TIF_NOTIFY_SIGNAL
 	beqz	$p1, no_work_pending

 	move	$r0, $sp			! 'regs'
@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,

 	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		/* kernel thread fn */
 		p->thread.cpu_context.r6 = stack_start;
@@ -376,7 +376,7 @@ static void do_signal(struct pt_regs *regs)
 asmlinkage void
 do_notify_resume(struct pt_regs *regs, unsigned int thread_flags)
 {
-	if (thread_flags & _TIF_SIGPENDING)
+	if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs);

 	if (thread_flags & _TIF_NOTIFY_RESUME)
@@ -86,6 +86,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		4	/* is terminating due to OOM killer */
 #define TIF_SECCOMP		5	/* secure computing */
 #define TIF_SYSCALL_AUDIT	6	/* syscall auditing active */
+#define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */

 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling
@@ -97,6 +98,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)

@@ -109,7 +109,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
 	struct switch_stack *childstack =
 		((struct switch_stack *)childregs) - 1;

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		memset(childstack, 0,
 			sizeof(struct switch_stack) + sizeof(struct pt_regs));

@@ -309,7 +309,8 @@ asmlinkage int do_notify_resume(struct pt_regs *regs)
 	if (!user_mode(regs))
 		return 0;

-	if (test_thread_flag(TIF_SIGPENDING)) {
+	if (test_thread_flag(TIF_SIGPENDING) ||
+	    test_thread_flag(TIF_NOTIFY_SIGNAL)) {
 		int restart = do_signal(regs);

 		if (unlikely(restart)) {
@@ -98,6 +98,7 @@ register struct thread_info *current_thread_info_reg asm("r10");
 #define TIF_SINGLESTEP		4	/* restore singlestep on return to user
 					 * mode
 					 */
+#define TIF_NOTIFY_SIGNAL	5	/* signal notifications exist */
 #define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
 #define TIF_RESTORE_SIGMASK	9
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling * TIF_NEED_RESCHED
@@ -109,6 +110,7 @@ register struct thread_info *current_thread_info_reg asm("r10");
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
+#define _TIF_NOTIFY_SIGNAL	(1<<TIF_NOTIFY_SIGNAL)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)


@@ -167,7 +167,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
 	sp -= sizeof(struct pt_regs);
 	kregs = (struct pt_regs *)sp;

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		memset(kregs, 0, sizeof(struct pt_regs));
 		kregs->gpr[20] = usp; /* fn, kernel thread */
 		kregs->gpr[22] = arg;
@@ -299,7 +299,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 		if (unlikely(!user_mode(regs)))
 			return 0;
 		local_irq_enable();
-		if (thread_flags & _TIF_SIGPENDING) {
+		if (thread_flags & (_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)) {
 			int restart = do_signal(regs, syscall);
 			if (unlikely(restart)) {
 				/*
@@ -52,6 +52,7 @@ struct thread_info {
 #define TIF_POLLING_NRFLAG	3	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_32BIT		4	/* 32 bit binary */
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
+#define TIF_NOTIFY_SIGNAL	6	/* signal notifications exist */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
 #define TIF_SINGLESTEP		9	/* single stepping? */
@@ -61,6 +62,7 @@ struct thread_info {

 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_32BIT		(1 << TIF_32BIT)
@@ -72,7 +74,7 @@ struct thread_info {
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)

 #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
-				 _TIF_NEED_RESCHED)
+				 _TIF_NEED_RESCHED | _TIF_NOTIFY_SIGNAL)
 #define _TIF_SYSCALL_TRACE_MASK	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
 				 _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
@@ -200,7 +200,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 	extern void * const ret_from_kernel_thread;
 	extern void * const child_return;

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		/* kernel thread */
 		memset(cregs, 0, sizeof(struct pt_regs));
 		if (!usp) /* idle thread */
@@ -609,7 +609,8 @@ do_signal(struct pt_regs *regs, long in_syscall)

 void do_notify_resume(struct pt_regs *regs, long in_syscall)
 {
-	if (test_thread_flag(TIF_SIGPENDING))
+	if (test_thread_flag(TIF_SIGPENDING) ||
+	    test_thread_flag(TIF_NOTIFY_SIGNAL))
 		do_signal(regs, in_syscall);

 	if (test_thread_flag(TIF_NOTIFY_RESUME))
@@ -96,6 +96,7 @@ void arch_setup_new_exec(void);
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_SIGPENDING		1	/* signal pending */
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
+#define TIF_NOTIFY_SIGNAL	3	/* signal notifications exist */
 #define TIF_SYSCALL_EMU		4	/* syscall emulation active */
 #define TIF_RESTORE_TM		5	/* need to restore TM FP/VEC/VSX */
 #define TIF_PATCH_PENDING	6	/* pending live patching update */
@@ -121,6 +122,7 @@ void arch_setup_new_exec(void);
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_SIGNAL	(1<<TIF_NOTIFY_SIGNAL)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_32BIT		(1<<TIF_32BIT)
 #define _TIF_RESTORE_TM		(1<<TIF_RESTORE_TM)
@@ -142,7 +144,8 @@ void arch_setup_new_exec(void);

 #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-				 _TIF_RESTORE_TM | _TIF_PATCH_PENDING)
+				 _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
+				 _TIF_NOTIFY_SIGNAL)
 #define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)

 /* Bits in local_flags */
@@ -1684,7 +1684,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	/* Copy registers */
 	sp -= sizeof(struct pt_regs);
 	childregs = (struct pt_regs *) sp;
-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		/* kernel thread */
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
@@ -318,7 +318,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 	if (thread_info_flags & _TIF_PATCH_PENDING)
 		klp_update_patch_state(current);

-	if (thread_info_flags & _TIF_SIGPENDING) {
+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
 		BUG_ON(regs != current->thread.regs);
 		do_signal(current);
 	}
@@ -80,6 +80,7 @@ struct thread_info {
 #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing */
 #define TIF_SECCOMP		8	/* syscall secure computing */
+#define TIF_NOTIFY_SIGNAL	9	/* signal notifications exist */

 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -88,9 +89,11 @@ struct thread_info {
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)

 #define _TIF_WORK_MASK \
-	(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
+	(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+	 _TIF_NOTIFY_SIGNAL)

 #define _TIF_SYSCALL_WORK \
 	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \
@@ -114,7 +114,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
 	memset(&p->thread.s, 0, sizeof(p->thread.s));

 	/* p->thread holds context to be restored by __switch_to() */
-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		/* Kernel thread */
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->gp = gp_in_global;
@@ -312,7 +312,7 @@ asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
 					   unsigned long thread_info_flags)
 {
 	/* Handle pending signal delivery */
-	if (thread_info_flags & _TIF_SIGPENDING)
+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs);

 	if (thread_info_flags & _TIF_NOTIFY_RESUME)
@@ -65,6 +65,7 @@ void arch_setup_new_exec(void);
 #define TIF_GUARDED_STORAGE	4	/* load guarded storage control block */
 #define TIF_PATCH_PENDING	5	/* pending live patching update */
 #define TIF_PGSTE		6	/* New mm's will use 4K page tables */
+#define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
 #define TIF_ISOLATE_BP		8	/* Run process with isolated BP */
 #define TIF_ISOLATE_BP_GUEST	9	/* Run KVM guests with isolated BP */

@@ -82,6 +83,7 @@ void arch_setup_new_exec(void);
 #define TIF_SYSCALL_TRACEPOINT	27	/* syscall tracepoint instrumentation */

 #define _TIF_NOTIFY_RESUME	BIT(TIF_NOTIFY_RESUME)
+#define _TIF_NOTIFY_SIGNAL	BIT(TIF_NOTIFY_SIGNAL)
 #define _TIF_SIGPENDING		BIT(TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	BIT(TIF_NEED_RESCHED)
 #define _TIF_UPROBE		BIT(TIF_UPROBE)
@@ -52,7 +52,8 @@ STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
+		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING | \
+		   _TIF_NOTIFY_SIGNAL)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU)
@@ -481,8 +482,8 @@ ENTRY(system_call)
#endif
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
-	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
-	jo	.Lsysc_sigpending
+	TSTMSK	__TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)
+	jnz	.Lsysc_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
@@ -863,8 +864,8 @@ ENTRY(io_int_handler)
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lio_patch_pending
#endif
-	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
-	jo	.Lio_sigpending
+	TSTMSK	__TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)
+	jnz	.Lio_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
@@ -127,7 +127,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	frame->sf.gprs[9] = (unsigned long) frame;

 	/* Store access registers to kernel stack of new process. */
-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		/* kernel thread */
 		memset(&frame->childregs, 0, sizeof(struct pt_regs));
 		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
@@ -472,7 +472,7 @@ void do_signal(struct pt_regs *regs)
 	current->thread.system_call =
 		test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;

-	if (get_signal(&ksig)) {
+	if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) {
 		/* Whee! Actually deliver the signal. */
 		if (current->thread.system_call) {
 			regs->int_code = current->thread.system_call;
@@ -105,6 +105,7 @@ extern void init_thread_xstate(void);
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_SIGPENDING		1	/* signal pending */
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
+#define TIF_NOTIFY_SIGNAL	3	/* signal notifications exist */
 #define TIF_SINGLESTEP		4	/* singlestepping active */
 #define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
 #define TIF_SECCOMP		6	/* secure computing */
@@ -116,6 +117,7 @@ extern void init_thread_xstate(void);
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
@@ -132,7 +134,7 @@ extern void init_thread_xstate(void);
 #define _TIF_ALLWORK_MASK	(_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \
 				 _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \
-				 _TIF_SYSCALL_TRACEPOINT)
+				 _TIF_SYSCALL_TRACEPOINT | _TIF_NOTIFY_SIGNAL)

 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK		(_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
@@ -114,7 +114,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,

 	childregs = task_pt_regs(p);
 	p->thread.sp = (unsigned long) childregs;
-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		p->thread.pc = (unsigned long) ret_from_kernel_thread;
 		childregs->regs[4] = arg;
@@ -499,7 +499,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
 				 unsigned long thread_info_flags)
 {
 	/* deal with pending signal delivery */
-	if (thread_info_flags & _TIF_SIGPENDING)
+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs, save_r0);

 	if (thread_info_flags & _TIF_NOTIFY_RESUME)
@@ -104,6 +104,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_RESTORE_SIGMASK	4	/* restore signal mask in do_signal() */
+#define TIF_NOTIFY_SIGNAL	5	/* signal notifications exist */
 #define TIF_USEDFPU		8	/* FPU was used by this task
 					 * this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	9	/* true if poll_idle() is polling
@@ -115,11 +116,12 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_SIGNAL	(1<<TIF_NOTIFY_SIGNAL)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)

 #define _TIF_DO_NOTIFY_RESUME_MASK	(_TIF_NOTIFY_RESUME | \
-					 _TIF_SIGPENDING)
+					 _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)

 #define is_32bit_task()	(1)

@@ -180,7 +180,7 @@ extern struct thread_info *current_thread_info(void);
 #define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
-/* flag bit 4 is available */
+#define TIF_NOTIFY_SIGNAL	4	/* signal notifications exist */
 #define TIF_UNALIGNED		5	/* allowed to do unaligned accesses */
 #define TIF_UPROBE		6	/* breakpointed or singlestepped */
 #define TIF_32BIT		7	/* 32-bit binary */
@@ -200,6 +200,7 @@ extern struct thread_info *current_thread_info(void);
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_SIGNAL	(1<<TIF_NOTIFY_SIGNAL)
 #define _TIF_UNALIGNED		(1<<TIF_UNALIGNED)
 #define _TIF_UPROBE		(1<<TIF_UPROBE)
 #define _TIF_32BIT		(1<<TIF_32BIT)
@@ -213,7 +214,8 @@ extern struct thread_info *current_thread_info(void);
 				 _TIF_DO_NOTIFY_RESUME_MASK | \
 				 _TIF_NEED_RESCHED)
 #define _TIF_DO_NOTIFY_RESUME_MASK	(_TIF_NOTIFY_RESUME | \
-					 _TIF_SIGPENDING | _TIF_UPROBE)
+					 _TIF_SIGPENDING | _TIF_UPROBE | \
+					 _TIF_NOTIFY_SIGNAL)

 #define is_32bit_task()	(test_thread_flag(TIF_32BIT))

@@ -309,7 +309,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 	ti->ksp = (unsigned long) new_stack;
 	p->thread.kregs = childregs;

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		extern int nwindows;
 		unsigned long psr;
 		memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
@@ -597,7 +597,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 				       sizeof(struct sparc_stackf));
 	t->fpsaved[0] = 0;

-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		memset(child_trap_frame, 0, child_stack_sz);
 		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
 			(current_pt_regs()->tstate + 1) & TSTATE_CWP;
@@ -521,7 +521,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
 		      unsigned long thread_info_flags)
 {
-	if (thread_info_flags & _TIF_SIGPENDING)
+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs, orig_i0);
 	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
@@ -549,7 +549,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
 	user_exit();
 	if (thread_info_flags & _TIF_UPROBE)
 		uprobe_notify_resume(regs);
-	if (thread_info_flags & _TIF_SIGPENDING)
+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs, orig_i0);
 	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
@@ -57,6 +57,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_SIGPENDING		1	/* signal pending */
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
+#define TIF_NOTIFY_SIGNAL	3	/* signal notifications exist */
 #define TIF_RESTART_BLOCK	4
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT	6
@@ -68,6 +69,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
@@ -99,7 +99,8 @@ void interrupt_end(void)

 	if (need_resched())
 		schedule();
-	if (test_thread_flag(TIF_SIGPENDING))
+	if (test_thread_flag(TIF_SIGPENDING) ||
+	    test_thread_flag(TIF_NOTIFY_SIGNAL))
 		do_signal(regs);
 	if (test_thread_flag(TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(regs);
@@ -156,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 		unsigned long arg, struct task_struct * p, unsigned long tls)
 {
 	void (*handler)(void);
-	int kthread = current->flags & PF_KTHREAD;
+	int kthread = current->flags & (PF_KTHREAD | PF_IO_WORKER);
 	int ret = 0;

 	p->thread = (struct thread_struct) INIT_THREAD;
@@ -93,6 +93,7 @@ struct thread_info {
 #define TIF_NOTSC		16	/* TSC is not accessible in userland */
 #define TIF_IA32		17	/* IA32 compatibility process */
 #define TIF_SLD			18	/* Restore split lock detection on context switch */
+#define TIF_NOTIFY_SIGNAL	19	/* signal notifications exist */
 #define TIF_MEMDIE		20	/* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG	21	/* idle is polling for TIF_NEED_RESCHED */
 #define TIF_IO_BITMAP		22	/* uses I/O bitmap */
@@ -121,6 +122,7 @@ struct thread_info {
 #define _TIF_NOCPUID		(1 << TIF_NOCPUID)
 #define _TIF_NOTSC		(1 << TIF_NOTSC)
 #define _TIF_IA32		(1 << TIF_IA32)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_SLD		(1 << TIF_SLD)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
@@ -178,6 +178,23 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 	task_user_gs(p) = get_user_gs(current_pt_regs());
 #endif

+	if (unlikely(p->flags & PF_IO_WORKER)) {
+		/*
+		 * An IO thread is a user space thread, but it doesn't
+		 * return to ret_after_fork().
+		 *
+		 * In order to indicate that to tools like gdb,
+		 * we reset the stack and instruction pointers.
+		 *
+		 * It does the same kernel frame setup to return to a kernel
+		 * function that a kernel thread does.
+		 */
+		childregs->sp = 0;
+		childregs->ip = 0;
+		kthread_frame_init(frame, sp, arg);
+		return 0;
+	}
+
 	/* Set a new TLS for the child thread? */
 	if (clone_flags & CLONE_SETTLS)
 		ret = set_new_tls(p, tls);
@@ -798,11 +798,11 @@ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
-void arch_do_signal(struct pt_regs *regs)
+void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
 {
 	struct ksignal ksig;

-	if (get_signal(&ksig)) {
+	if (has_signal && get_signal(&ksig)) {
 		/* Whee! Actually deliver the signal. */
 		handle_signal(&ksig, regs);
 		return;
@ -111,18 +111,21 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
|
||||
#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
|
||||
#define TIF_SYSCALL_TRACEPOINT 4 /* syscall tracepoint instrumentation */
|
||||
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
|
||||
#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */
|
||||
#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
|
||||
#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
|
||||
#define TIF_DB_DISABLED 8 /* debug trap disabled for syscall */
|
||||
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
|
||||
#define TIF_SECCOMP 10 /* secure computing */
|
||||
#define TIF_MEMDIE 11 /* is terminating due to OOM killer */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
|
||||
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
|
||||
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
|
||||
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
|
||||
#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
|
||||
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
|
||||
|
||||
|
@@ -500,8 +500,8 @@ common_exception_return:
 	 */
 
 	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
-	_bbsi.l	a4, TIF_NOTIFY_RESUME, 2f
-	_bbci.l	a4, TIF_SIGPENDING, 5f
+	movi	a2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
+	bnone	a4, a2, 5f
 
 2:	l32i	a4, a1, PT_DEPC
 	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
@@ -217,7 +217,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
 
 	p->thread.sp = (unsigned long)childregs;
 
-	if (!(p->flags & PF_KTHREAD)) {
+	if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		struct pt_regs *regs = current_pt_regs();
 		unsigned long usp = usp_thread_fn ?
 			usp_thread_fn : regs->areg[1];
@@ -498,7 +498,8 @@ static void do_signal(struct pt_regs *regs)
 
 void do_notify_resume(struct pt_regs *regs)
 {
-	if (test_thread_flag(TIF_SIGPENDING))
+	if (test_thread_flag(TIF_SIGPENDING) ||
+	    test_thread_flag(TIF_NOTIFY_SIGNAL))
 		do_signal(regs);
 
 	if (test_thread_flag(TIF_NOTIFY_RESUME))
@@ -2021,7 +2021,7 @@ static void binder_deferred_fd_close(int fd)
 	if (!twcb)
 		return;
 	init_task_work(&twcb->twork, binder_do_fd_close);
-	__close_fd_get_file(fd, &twcb->file);
+	close_fd_get_file(fd, &twcb->file);
 	if (twcb->file) {
 		filp_close(twcb->file, current->files);
 		task_work_add(current, &twcb->twork, TWA_RESUME);
@@ -34,8 +34,6 @@ obj-$(CONFIG_TIMERFD)		+= timerfd.o
 obj-$(CONFIG_EVENTFD)		+= eventfd.o
 obj-$(CONFIG_USERFAULTFD)	+= userfaultfd.o
 obj-$(CONFIG_AIO)		+= aio.o
-obj-$(CONFIG_IO_URING)		+= io_uring.o
-obj-$(CONFIG_IO_WQ)		+= io-wq.o
 obj-$(CONFIG_FS_DAX)		+= dax.o
 obj-$(CONFIG_FS_ENCRYPTION)	+= crypto/
 obj-$(CONFIG_FS_VERITY)		+= verity/
@@ -520,7 +520,7 @@ static bool dump_interrupted(void)
 	 * but then we need to teach dump_write() to restart and clear
 	 * TIF_SIGPENDING.
 	 */
-	return signal_pending(current);
+	return fatal_signal_pending(current) || freezing(current);
 }
 
 static void wait_for_dump_helpers(struct file *file)
fs/eventfd.c (37 lines changed)
@@ -45,21 +45,7 @@ struct eventfd_ctx {
 	int id;
 };
 
-/**
- * eventfd_signal - Adds @n to the eventfd counter.
- * @ctx: [in] Pointer to the eventfd context.
- * @n: [in] Value of the counter to be added to the eventfd internal counter.
- *          The value cannot be negative.
- *
- * This function is supposed to be called by the kernel in paths that do not
- * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
- * value, and we signal this as overflow condition by returning a EPOLLERR
- * to poll(2).
- *
- * Returns the amount by which the counter was incremented. This will be less
- * than @n if the counter has overflowed.
- */
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask)
 {
 	unsigned long flags;
 
@@ -80,12 +66,31 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
-		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
 	this_cpu_dec(eventfd_wake_count);
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
 }
 
+/**
+ * eventfd_signal - Adds @n to the eventfd counter.
+ * @ctx: [in] Pointer to the eventfd context.
+ * @n: [in] Value of the counter to be added to the eventfd internal counter.
+ *          The value cannot be negative.
+ *
+ * This function is supposed to be called by the kernel in paths that do not
+ * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
+ * value, and we signal this as overflow condition by returning a EPOLLERR
+ * to poll(2).
+ *
+ * Returns the amount by which the counter was incremented. This will be less
+ * than @n if the counter has overflowed.
+ */
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+{
+	return eventfd_signal_mask(ctx, n, 0);
+}
 EXPORT_SYMBOL_GPL(eventfd_signal);
 
 static void eventfd_free_ctx(struct eventfd_ctx *ctx)
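Note: the new mask argument lets a caller tag the wakeups it generates.
io_uring uses it so that an epoll instance watching the ring's eventfd does
not feed the wakeup straight back into the ring. A hedged sketch of such a
call site; the function and variable names are illustrative:

	static void signal_cq_eventfd(struct eventfd_ctx *cq_ev_fd)
	{
		/* EPOLL_URING_WAKE tells the epoll side not to recurse into io_uring */
		eventfd_signal_mask(cq_ev_fd, 1, EPOLL_URING_WAKE);
	}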
@@ -551,7 +551,8 @@ static int ep_call_nested(struct nested_calls *ncalls,
  */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
-static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
+static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
+			     unsigned pollflags)
 {
 	struct eventpoll *ep_src;
 	unsigned long flags;
@@ -582,16 +583,17 @@ static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
 	}
 	spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
 	ep->nests = nests + 1;
-	wake_up_locked_poll(&ep->poll_wait, EPOLLIN);
+	wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);
 	ep->nests = 0;
 	spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
 }
 
 #else
 
-static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
+static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
+			     unsigned pollflags)
 {
-	wake_up_poll(&ep->poll_wait, EPOLLIN);
+	wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);
 }
 
 #endif
@@ -818,7 +820,7 @@ static void ep_free(struct eventpoll *ep)
 
 	/* We need to release all tasks waiting for these file */
 	if (waitqueue_active(&ep->poll_wait))
-		ep_poll_safewake(ep, NULL);
+		ep_poll_safewake(ep, NULL, 0);
 
 	/*
 	 * We need to lock this because we could be hit by
@@ -1287,7 +1289,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 
 	/* We have to call this outside the lock */
 	if (pwake)
-		ep_poll_safewake(ep, epi);
+		ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);
 
 	if (!(epi->event.events & EPOLLEXCLUSIVE))
 		ewake = 1;
@@ -1597,7 +1599,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 
 	/* We have to call this outside the lock */
 	if (pwake)
-		ep_poll_safewake(ep, NULL);
+		ep_poll_safewake(ep, NULL, 0);
 
 	return 0;
 
@@ -1700,7 +1702,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
 
 	/* We have to call this outside the lock */
 	if (pwake)
-		ep_poll_safewake(ep, NULL);
+		ep_poll_safewake(ep, NULL, 0);
 
 	return 0;
 }
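Note: the flag travels in the wakeup key, so a wait-queue callback can
detect io_uring-originated wakeups and break the recursion. A hedged sketch
of such a callback; this illustrates the mechanism and is not a specific
in-tree function:

	static int my_poll_wake(wait_queue_entry_t *wait, unsigned mode,
				int sync, void *key)
	{
		if (key_to_poll(key) & EPOLL_URING_WAKE)
			return 0;	/* wakeup came from io_uring itself: don't loop */
		return autoremove_wake_function(wait, mode, sync, key);
	}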
fs/file.c (34 lines changed)
@@ -22,6 +22,8 @@
 #include <linux/close_range.h>
 #include <net/sock.h>
 
+#include "internal.h"
+
 unsigned int sysctl_nr_open __read_mostly = 1024*1024;
 unsigned int sysctl_nr_open_min = BITS_PER_LONG;
 /* our min() is unusable in constant expressions ;-/ */
@@ -780,9 +782,8 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
 }
 
 /*
- * variant of __close_fd that gets a ref on the file for later fput.
- * The caller must ensure that filp_close() called on the file, and then
- * an fput().
+ * See close_fd_get_file() below, this variant assumes current->files->file_lock
+ * is held.
  */
 int __close_fd_get_file(unsigned int fd, struct file **res)
 {
@@ -790,26 +791,39 @@ int __close_fd_get_file(unsigned int fd, struct file **res)
 	struct file *file;
 	struct fdtable *fdt;
 
-	spin_lock(&files->file_lock);
 	fdt = files_fdtable(files);
 	if (fd >= fdt->max_fds)
-		goto out_unlock;
+		goto out_err;
 	file = fdt->fd[fd];
 	if (!file)
-		goto out_unlock;
+		goto out_err;
 	rcu_assign_pointer(fdt->fd[fd], NULL);
 	__put_unused_fd(files, fd);
-	spin_unlock(&files->file_lock);
 	get_file(file);
 	*res = file;
 	return 0;
 
-out_unlock:
-	spin_unlock(&files->file_lock);
+out_err:
 	*res = NULL;
 	return -ENOENT;
 }
 
+/*
+ * variant of close_fd that gets a ref on the file for later fput.
+ * The caller must ensure that filp_close() called on the file, and then
+ * an fput().
+ */
+int close_fd_get_file(unsigned int fd, struct file **res)
+{
+	struct files_struct *files = current->files;
+	int ret;
+
+	spin_lock(&files->file_lock);
+	ret = __close_fd_get_file(fd, res);
+	spin_unlock(&files->file_lock);
+
+	return ret;
+}
+
 void do_close_on_exec(struct files_struct *files)
 {
 	unsigned i;
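Note: the split gives callers that already hold file_lock (io_uring's fixed
file table update) the __-prefixed variant, while everyone else takes the
locking wrapper. The calling pattern, as in the binder hunk above (sketch):

	struct file *file = NULL;

	close_fd_get_file(fd, &file);	/* unhooks the fd, takes a reference */
	if (file) {
		filp_close(file, current->files);	/* the "close" half */
		fput(file);				/* drop the extra reference */
	}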
@@ -77,6 +77,8 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
 long do_rmdir(int dfd, struct filename *name);
 long do_unlinkat(int dfd, struct filename *name);
 int may_linkat(struct path *link);
+int do_renameat2(int olddfd, struct filename *oldname, int newdfd,
+		 struct filename *newname, unsigned int flags);
 
 /*
  * namespace.c
@@ -132,6 +134,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
 			   const char *, const struct open_flags *);
 extern struct open_how build_open_how(int flags, umode_t mode);
 extern int build_open_flags(const struct open_how *how, struct open_flags *op);
+extern int __close_fd_get_file(unsigned int fd, struct file **res);
 
 long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
 int chmod_common(const struct path *path, umode_t mode);
fs/io-wq.c (1242 lines changed; file diff suppressed because it is too large)
fs/namei.c (84 lines changed)
@@ -532,6 +532,8 @@ static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
 	p->stack = p->internal;
 	p->dfd = dfd;
 	p->name = name;
+	p->path.mnt = NULL;
+	p->path.dentry = NULL;
 	p->total_link_count = old ? old->total_link_count : 0;
 	p->saved = old;
 	current->nameidata = p;
@@ -605,6 +607,8 @@ static void terminate_walk(struct nameidata *nd)
 		rcu_read_unlock();
 	}
 	nd->depth = 0;
+	nd->path.mnt = NULL;
+	nd->path.dentry = NULL;
 }
 
 /* path_put is needed afterwards regardless of success or failure */
@@ -633,6 +637,11 @@ static inline bool legitimize_path(struct nameidata *nd,
 static bool legitimize_links(struct nameidata *nd)
 {
 	int i;
+	if (unlikely(nd->flags & LOOKUP_CACHED)) {
+		drop_links(nd);
+		nd->depth = 0;
+		return false;
+	}
 	for (i = 0; i < nd->depth; i++) {
 		struct saved *last = nd->stack + i;
 		if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
@@ -708,19 +717,19 @@ static bool try_to_unlazy(struct nameidata *nd)
 }
 
 /**
- * unlazy_child - try to switch to ref-walk mode.
+ * try_to_unlazy_next - try to switch to ref-walk mode.
  * @nd: nameidata pathwalk data
- * @dentry: child of nd->path.dentry
- * @seq: seq number to check dentry against
- * Returns: 0 on success, -ECHILD on failure
+ * @dentry: next dentry to step into
+ * @seq: seq number to check @dentry against
+ * Returns: true on success, false on failure
  *
- * unlazy_child attempts to legitimize the current nd->path, nd->root and dentry
- * for ref-walk mode. @dentry must be a path found by a do_lookup call on
- * @nd. Must be called from rcu-walk context.
- * Nothing should touch nameidata between unlazy_child() failure and
+ * Similar to to try_to_unlazy(), but here we have the next dentry already
+ * picked by rcu-walk and want to legitimize that in addition to the current
+ * nd->path and nd->root for ref-walk mode. Must be called from rcu-walk context.
+ * Nothing should touch nameidata between try_to_unlazy_next() failure and
  * terminate_walk().
  */
-static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq)
+static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry, unsigned seq)
 {
 	BUG_ON(!(nd->flags & LOOKUP_RCU));
 
@@ -750,7 +759,7 @@ static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq)
 	if (unlikely(!legitimize_root(nd)))
 		goto out_dput;
 	rcu_read_unlock();
-	return 0;
+	return true;
 
 out2:
 	nd->path.mnt = NULL;
@@ -758,11 +767,11 @@ static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq)
 	nd->path.dentry = NULL;
 out:
 	rcu_read_unlock();
-	return -ECHILD;
+	return false;
 out_dput:
 	rcu_read_unlock();
 	dput(dentry);
-	return -ECHILD;
+	return false;
 }
 
 static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
@@ -870,6 +879,7 @@ static int complete_walk(struct nameidata *nd)
 		 */
 		if (!(nd->flags & (LOOKUP_ROOT | LOOKUP_IS_SCOPED)))
 			nd->root.mnt = NULL;
+		nd->flags &= ~LOOKUP_CACHED;
 		if (!try_to_unlazy(nd))
 			return -ECHILD;
 	}
@@ -1458,7 +1468,7 @@ static inline int handle_mounts(struct nameidata *nd, struct dentry *dentry,
 			return -ENOENT;
 		if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
 			return 0;
-		if (unlazy_child(nd, dentry, seq))
+		if (!try_to_unlazy_next(nd, dentry, seq))
 			return -ECHILD;
 		// *path might've been clobbered by __follow_mount_rcu()
 		path->mnt = nd->path.mnt;
@@ -1579,7 +1589,7 @@ static struct dentry *lookup_fast(struct nameidata *nd,
 		status = d_revalidate(dentry, nd->flags);
 		if (likely(status > 0))
 			return dentry;
-		if (unlazy_child(nd, dentry, seq))
+		if (!try_to_unlazy_next(nd, dentry, seq))
 			return ERR_PTR(-ECHILD);
 		if (unlikely(status == -ECHILD))
 			/* we'd been told to redo it in non-rcu mode */
@@ -2288,6 +2298,10 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 	int error;
 	const char *s = nd->name->name;
 
+	/* LOOKUP_CACHED requires RCU, ask caller to retry */
+	if ((flags & (LOOKUP_RCU | LOOKUP_CACHED)) == LOOKUP_CACHED)
+		return ERR_PTR(-EAGAIN);
+
 	if (!*s)
 		flags &= ~LOOKUP_RCU;
 	if (flags & LOOKUP_RCU)
@@ -2317,8 +2331,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 	}
 
 	nd->root.mnt = NULL;
-	nd->path.mnt = NULL;
-	nd->path.dentry = NULL;
 
 	/* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
 	if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) {
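Note: because LOOKUP_CACHED is only honoured under LOOKUP_RCU, the usual
rcu-then-ref retry surfaces -EAGAIN to the caller instead of blocking.
Condensed sketch of the retry ladder in do_filp_open() (simplified):

	filp = path_openat(&nd, op, flags | LOOKUP_RCU);
	if (unlikely(filp == ERR_PTR(-ECHILD)))
		/* with LOOKUP_CACHED set, path_init() fails this pass with -EAGAIN */
		filp = path_openat(&nd, op, flags);
	if (unlikely(filp == ERR_PTR(-ESTALE)))
		filp = path_openat(&nd, op, flags | LOOKUP_REVAL);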
@@ -4425,8 +4437,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 }
 EXPORT_SYMBOL_NS(vfs_rename, ANDROID_GKI_VFS_EXPORT_ONLY);
 
-static int do_renameat2(int olddfd, const char __user *oldname, int newdfd,
-			const char __user *newname, unsigned int flags)
+int do_renameat2(int olddfd, struct filename *from, int newdfd,
+		 struct filename *to, unsigned int flags)
 {
 	struct dentry *old_dentry, *new_dentry;
 	struct dentry *trap;
@@ -4434,32 +4446,30 @@ static int do_renameat2(int olddfd, const char __user *oldname, int newdfd,
 	struct qstr old_last, new_last;
 	int old_type, new_type;
 	struct inode *delegated_inode = NULL;
-	struct filename *from;
-	struct filename *to;
 	unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET;
 	bool should_retry = false;
-	int error;
+	int error = -EINVAL;
 
 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
-		return -EINVAL;
+		goto put_both;
 
 	if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) &&
 	    (flags & RENAME_EXCHANGE))
-		return -EINVAL;
+		goto put_both;
 
 	if (flags & RENAME_EXCHANGE)
 		target_flags = 0;
 
retry:
-	from = filename_parentat(olddfd, getname(oldname), lookup_flags,
-				 &old_path, &old_last, &old_type);
+	from = filename_parentat(olddfd, from, lookup_flags, &old_path,
+				 &old_last, &old_type);
 	if (IS_ERR(from)) {
 		error = PTR_ERR(from);
-		goto exit;
+		goto put_new;
 	}
 
-	to = filename_parentat(newdfd, getname(newname), lookup_flags,
-			       &new_path, &new_last, &new_type);
+	to = filename_parentat(newdfd, to, lookup_flags, &new_path, &new_last,
+			       &new_type);
 	if (IS_ERR(to)) {
 		error = PTR_ERR(to);
 		goto exit1;
@@ -4552,34 +4562,40 @@ static int do_renameat2(int olddfd, const char __user *oldname, int newdfd,
 	if (retry_estale(error, lookup_flags))
 		should_retry = true;
 	path_put(&new_path);
-	putname(to);
 exit1:
 	path_put(&old_path);
-	putname(from);
 	if (should_retry) {
 		should_retry = false;
 		lookup_flags |= LOOKUP_REVAL;
 		goto retry;
 	}
-exit:
+put_both:
+	if (!IS_ERR(from))
+		putname(from);
+put_new:
+	if (!IS_ERR(to))
+		putname(to);
 	return error;
 }
 
 SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
 		int, newdfd, const char __user *, newname, unsigned int, flags)
 {
-	return do_renameat2(olddfd, oldname, newdfd, newname, flags);
+	return do_renameat2(olddfd, getname(oldname), newdfd, getname(newname),
+				flags);
 }
 
 SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
 		int, newdfd, const char __user *, newname)
 {
-	return do_renameat2(olddfd, oldname, newdfd, newname, 0);
+	return do_renameat2(olddfd, getname(oldname), newdfd, getname(newname),
+				0);
 }
 
 SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
 {
-	return do_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
+	return do_renameat2(AT_FDCWD, getname(oldname), AT_FDCWD,
+				getname(newname), 0);
 }
 
 int readlink_copy(char __user *buffer, int buflen, const char *link)
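Note: taking struct filename lets a caller resolve the user pointers early
and issue the rename later; do_renameat2() now consumes both names on every
path (the put_both/put_new labels), so the caller never calls putname().
Hedged sketch of an io_uring-style caller; the field names are illustrative:

	/* prep phase: in the submitting task, user memory still reachable */
	ren->oldpath = getname(u_oldpath);
	ren->newpath = getname(u_newpath);

	/* execution phase: may run later, e.g. from an io-wq worker */
	ret = do_renameat2(ren->old_dfd, ren->oldpath,
			   ren->new_dfd, ren->newpath, ren->flags);
	/* no putname() here: do_renameat2() owns both filenames now */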
@@ -1110,6 +1110,12 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op)
 		lookup_flags |= LOOKUP_BENEATH;
 	if (how->resolve & RESOLVE_IN_ROOT)
 		lookup_flags |= LOOKUP_IN_ROOT;
+	if (how->resolve & RESOLVE_CACHED) {
+		/* Don't bother even trying for create/truncate/tmpfile open */
+		if (flags & (O_TRUNC | O_CREAT | O_TMPFILE))
+			return -EAGAIN;
+		lookup_flags |= LOOKUP_CACHED;
+	}
 
 	op->lookup_flags = lookup_flags;
 	return 0;
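Note: from user space this is RESOLVE_CACHED on openat2(2): succeed only
from the dcache, otherwise fail with -EAGAIN so the caller (io_uring,
typically) can retry where blocking is acceptable. A small sketch, assuming
a kernel and libc headers with openat2 support:

	#include <fcntl.h>
	#include <errno.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/openat2.h>

	static int open_cached_first(const char *path)
	{
		struct open_how how = { .flags = O_RDONLY, .resolve = RESOLVE_CACHED };
		int fd = syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));

		if (fd < 0 && errno == EAGAIN) {	/* name not fully cached */
			how.resolve &= ~RESOLVE_CACHED;	/* retry, allowed to block */
			fd = syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));
		}
		return fd;
	}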
@@ -16,13 +16,6 @@ static const char *proc_self_get_link(struct dentry *dentry,
 	pid_t tgid = task_tgid_nr_ns(current, ns);
 	char *name;
 
-	/*
-	 * Not currently supported. Once we can inherit all of struct pid,
-	 * we can allow this.
-	 */
-	if (current->flags & PF_IO_WORKER)
-		return ERR_PTR(-EOPNOTSUPP);
-
 	if (!tgid)
 		return ERR_PTR(-ENOENT);
 	/* max length of unsigned int in decimal + NULL term */
@@ -17,13 +17,6 @@ static const char *proc_thread_self_get_link(struct dentry *dentry,
 	pid_t pid = task_pid_nr_ns(current, ns);
 	char *name;
 
-	/*
-	 * Not currently supported. Once we can inherit all of struct pid,
-	 * we can allow this.
-	 */
-	if (current->flags & PF_IO_WORKER)
-		return ERR_PTR(-EOPNOTSUPP);
-
 	if (!pid)
 		return ERR_PTR(-ENOENT);
 	name = kmalloc(10 + 6 + 10 + 1, dentry ? GFP_KERNEL : GFP_ATOMIC);
@@ -69,7 +69,7 @@
 
 #define EXIT_TO_USER_MODE_WORK						\
 	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
-	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING |			\
+	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |	\
	 ARCH_EXIT_TO_USER_MODE_WORK)
 
 /**
@@ -259,12 +259,13 @@ static __always_inline void arch_exit_to_user_mode(void) { }
 #endif
 
 /**
- * arch_do_signal - Architecture specific signal delivery function
+ * arch_do_signal_or_restart - Architecture specific signal delivery function
  * @regs: Pointer to currents pt_regs
+ * @has_signal: actual signal to handle
  *
  * Invoked from exit_to_user_mode_loop().
  */
-void arch_do_signal(struct pt_regs *regs);
+void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal);
 
 /**
  * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit()
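Note: on the return-to-user path TIF_NOTIFY_SIGNAL is consumed by running
the queued task_work; roughly what the tracehook helper introduced by this
series does (condensed):

	static inline void tracehook_notify_signal(void)
	{
		clear_thread_flag(TIF_NOTIFY_SIGNAL);
		smp_mb__after_atomic();	/* clear must be visible before scanning the list */
		if (current->task_works)
			task_work_run();
	}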
@@ -11,8 +11,8 @@
 # define ARCH_XFER_TO_GUEST_MODE_WORK	(0)
 #endif
 
-#define XFER_TO_GUEST_MODE_WORK						\
-	(_TIF_NEED_RESCHED | _TIF_SIGPENDING |				\
+#define XFER_TO_GUEST_MODE_WORK						\
+	(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL |	\
 	 _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
 
 struct kvm_vcpu;
@@ -39,6 +39,7 @@ struct file *eventfd_fget(int fd);
 struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
+__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 
@@ -66,6 +67,12 @@ static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
 	return -ENOSYS;
 }
 
+static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
+				      unsigned mask)
+{
+	return -ENOSYS;
+}
+
 static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
 {
@@ -19,7 +19,7 @@
 /* List of all valid flags for the how->resolve argument: */
 #define VALID_RESOLVE_FLAGS \
 	(RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \
-	 RESOLVE_BENEATH | RESOLVE_IN_ROOT)
+	 RESOLVE_BENEATH | RESOLVE_IN_ROOT | RESOLVE_CACHED)
 
 /* List of all open_how "versions". */
 #define OPEN_HOW_SIZE_VER0	24 /* sizeof first published struct */
@@ -123,7 +123,7 @@ extern void __fd_install(struct files_struct *files,
 extern int __close_fd(struct files_struct *files,
 		      unsigned int fd);
 extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
-extern int __close_fd_get_file(unsigned int fd, struct file **res);
+extern int close_fd_get_file(unsigned int fd, struct file **res);
 extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
 		      struct files_struct **new_fdp);
@@ -5,50 +5,20 @@
 #include <linux/sched.h>
 #include <linux/xarray.h>
 
-struct io_identity {
-	struct files_struct		*files;
-	struct mm_struct		*mm;
-#ifdef CONFIG_BLK_CGROUP
-	struct cgroup_subsys_state	*blkcg_css;
-#endif
-	const struct cred		*creds;
-	struct nsproxy			*nsproxy;
-	struct fs_struct		*fs;
-	unsigned long			fsize;
-#ifdef CONFIG_AUDIT
-	kuid_t				loginuid;
-	unsigned int			sessionid;
-#endif
-	refcount_t			count;
-};
-
-struct io_uring_task {
-	/* submission side */
-	struct xarray		xa;
-	struct wait_queue_head	wait;
-	struct file		*last;
-	struct percpu_counter	inflight;
-	struct io_identity	__identity;
-	struct io_identity	*identity;
-	atomic_t		in_idle;
-	bool			sqpoll;
-};
-
 #if defined(CONFIG_IO_URING)
 struct sock *io_uring_get_socket(struct file *file);
-void __io_uring_task_cancel(void);
-void __io_uring_files_cancel(struct files_struct *files);
+void __io_uring_cancel(bool cancel_all);
 void __io_uring_free(struct task_struct *tsk);
 
-static inline void io_uring_task_cancel(void)
-{
-	if (current->io_uring && !xa_empty(&current->io_uring->xa))
-		__io_uring_task_cancel();
-}
-static inline void io_uring_files_cancel(struct files_struct *files)
-{
-	if (current->io_uring && !xa_empty(&current->io_uring->xa))
-		__io_uring_files_cancel(files);
-}
+static inline void io_uring_files_cancel(void)
+{
+	if (current->io_uring)
+		__io_uring_cancel(false);
+}
+static inline void io_uring_task_cancel(void)
+{
+	if (current->io_uring)
+		__io_uring_cancel(true);
+}
 static inline void io_uring_free(struct task_struct *tsk)
 {
@@ -63,7 +33,7 @@ static inline struct sock *io_uring_get_socket(struct file *file)
 static inline void io_uring_task_cancel(void)
 {
 }
-static inline void io_uring_files_cancel(struct files_struct *files)
+static inline void io_uring_files_cancel(void)
 {
 }
 static inline void io_uring_free(struct task_struct *tsk)
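Note: after the import the cancelation API is task-scoped rather than
files_struct-scoped. A condensed sketch of where the two hooks end up being
called (surrounding code elided; placement per this series):

	/* in do_exit(), before the task's files are torn down */
	io_uring_files_cancel();	/* __io_uring_cancel(false) */

	/* in __put_task_struct(), once the task can no longer run */
	io_uring_free(tsk);		/* frees tsk->io_uring state */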
@@ -46,6 +46,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
 #define LOOKUP_NO_XDEV		0x040000 /* No mountpoint crossing. */
 #define LOOKUP_BENEATH		0x080000 /* No escaping from starting point. */
 #define LOOKUP_IN_ROOT		0x100000 /* Treat dirfd as fs root. */
+#define LOOKUP_CACHED		0x200000 /* Only do cached lookup */
 /* LOOKUP_* flags which do scope-related checks based on the dirfd. */
 #define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
Some files were not shown because too many files have changed in this diff.