Linux 5.13
-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmDY+dceHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGx1YH/idbXKwfQkBpBCud
BvUo2RaRetACEXo38ydiHNxSyAkde79AVMNDXWCBgnWFwpUG51TtFIAn2VhVIv7d
WlBJPWhbmHwddQB+HACYXsRcBRCc2md7RufOqR/yulx+T8QxQy7yHEd7wOlSdYWC
/BUb/94qREK60FwdWjATSdqO5ditOd5XxvBnfGh04iUmiMRwubOtYPfaomo9MIK6
Qs/Yt6SkIROi9cMQf2NakE2UFeVnQ+/TrDTRsTqokUtLSzpjxDjX39JoRNjLVgS1
XOhrOlUQ+sJ1O1Hq4vSfy8maWivzF9XCCsApd6+Ks1yMB6yw15kU7cQ2yF++UPvC
ktrHsiM=
=I8Bu
-----END PGP SIGNATURE-----

Merge tag 'v5.13' into android-mainline

Linux 5.13

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Iead72999b7d11801452cc18508415ddf07291fa8
commit 7c4aaf7f0d

diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
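The only change here is clearing EXTRAVERSION, which is what turns the release string from 5.13.0-rc7 into 5.13.0. As an illustration only (not kernel build code), a minimal C sketch of roughly how the Makefile composes those four fields into the version string:

#include <stdio.h>

int main(void)
{
        const int version = 5, patchlevel = 13, sublevel = 0;
        const char *extraversion = "";   /* was "-rc7" before this merge */

        /* Roughly mirrors how the top-level Makefile builds KERNELVERSION. */
        printf("%d.%d.%d%s\n", version, patchlevel, sublevel, extraversion);
        return 0;
}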
diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1002,7 +1002,6 @@ struct task_struct {
 	/* Signal handlers: */
 	struct signal_struct		*signal;
 	struct sighand_struct __rcu	*sighand;
-	struct sigqueue			*sigqueue_cache;
 	sigset_t			blocked;
 	sigset_t			real_blocked;
 	/* Restored if set_restore_sigmask() was used: */
diff --git a/include/linux/signal.h b/include/linux/signal.h
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -267,7 +267,6 @@ static inline void init_sigpending(struct sigpending *sig)
 }
 
 extern void flush_sigqueue(struct sigpending *queue);
-extern void exit_task_sigqueue_cache(struct task_struct *tsk);
 
 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
 static inline int valid_signal(unsigned long sig)
diff --git a/kernel/exit.c b/kernel/exit.c
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -162,7 +162,6 @@ static void __exit_signal(struct task_struct *tsk)
 		flush_sigqueue(&sig->shared_pending);
 		tty_kref_put(tty);
 	}
-	exit_task_sigqueue_cache(tsk);
 }
 
 static void delayed_put_task_struct(struct rcu_head *rhp)
diff --git a/kernel/fork.c b/kernel/fork.c
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2013,7 +2013,6 @@ static __latent_entropy struct task_struct *copy_process(
 	spin_lock_init(&p->alloc_lock);
 
 	init_sigpending(&p->pending);
-	p->sigqueue_cache = NULL;
 
 	p->utime = p->stime = p->gtime = 0;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
diff --git a/kernel/signal.c b/kernel/signal.c
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -431,22 +431,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 	rcu_read_unlock();
 
 	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
-		/*
-		 * Preallocation does not hold sighand::siglock so it can't
-		 * use the cache. The lockless caching requires that only
-		 * one consumer and only one producer run at a time.
-		 *
-		 * For the regular allocation case it is sufficient to
-		 * check @q for NULL because this code can only be called
-		 * if the target task @t has not been reaped yet; which
-		 * means this code can never observe the error pointer which is
-		 * written to @t->sigqueue_cache in exit_task_sigqueue_cache().
-		 */
-		q = READ_ONCE(t->sigqueue_cache);
-		if (!q || sigqueue_flags)
-			q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
-		else
-			WRITE_ONCE(t->sigqueue_cache, NULL);
+		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
 	} else {
 		print_dropped_signal(sig);
 	}
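The block removed above was the consumer side of a lockless, single-slot sigqueue cache: the allocation path takes the cached entry with READ_ONCE() and clears the slot with WRITE_ONCE(), which is only safe because at most one producer and one consumer run at a time. A minimal userspace sketch of that pattern, with hypothetical names; malloc/free stand in for kmem_cache_alloc/kmem_cache_free, and READ_ONCE/WRITE_ONCE are simplified volatile-access stand-ins for the kernel macros:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's READ_ONCE/WRITE_ONCE macros. */
#define READ_ONCE(x)      (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

struct sigqueue { int sig; };

struct task {
        struct sigqueue *sigqueue_cache;   /* one cached entry, or NULL */
};

/* Producer (free path): park one object in the slot instead of freeing. */
static void cache_or_free(struct task *t, struct sigqueue *q)
{
        if (!READ_ONCE(t->sigqueue_cache))
                WRITE_ONCE(t->sigqueue_cache, q);
        else
                free(q);
}

/* Consumer (alloc path): reuse the cached object when one is present. */
static struct sigqueue *alloc_sigqueue(struct task *t)
{
        struct sigqueue *q = READ_ONCE(t->sigqueue_cache);

        if (!q)
                return malloc(sizeof(*q));
        WRITE_ONCE(t->sigqueue_cache, NULL);
        return q;
}

int main(void)
{
        struct task t = { .sigqueue_cache = NULL };
        struct sigqueue *first = alloc_sigqueue(&t);  /* fresh allocation */

        cache_or_free(&t, first);                     /* parked in the slot */
        struct sigqueue *again = alloc_sigqueue(&t);  /* taken from the slot */
        printf("reused: %s\n", again == first ? "yes" : "no");  /* "yes" */
        free(again);
        return 0;
}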
@@ -463,53 +448,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 	return q;
 }
 
-void exit_task_sigqueue_cache(struct task_struct *tsk)
-{
-	/* Race free because @tsk is mopped up */
-	struct sigqueue *q = tsk->sigqueue_cache;
-
-	if (q) {
-		/*
-		 * Hand it back to the cache as the task might
-		 * be self reaping which would leak the object.
-		 */
-		kmem_cache_free(sigqueue_cachep, q);
-	}
-
-	/*
-	 * Set an error pointer to ensure that @tsk will not cache a
-	 * sigqueue when it is reaping it's child tasks
-	 */
-	tsk->sigqueue_cache = ERR_PTR(-1);
-}
-
-static void sigqueue_cache_or_free(struct sigqueue *q)
-{
-	/*
-	 * Cache one sigqueue per task. This pairs with the consumer side
-	 * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the
-	 * compiler from store tearing and to tell KCSAN that the data race
-	 * is intentional when run without holding current->sighand->siglock,
-	 * which is fine as current obviously cannot run __sigqueue_free()
-	 * concurrently.
-	 *
-	 * The NULL check is safe even if current has been reaped already,
-	 * in which case exit_task_sigqueue_cache() wrote an error pointer
-	 * into current->sigqueue_cache.
-	 */
-	if (!READ_ONCE(current->sigqueue_cache))
-		WRITE_ONCE(current->sigqueue_cache, q);
-	else
-		kmem_cache_free(sigqueue_cachep, q);
-}
-
 static void __sigqueue_free(struct sigqueue *q)
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
 		return;
 	if (atomic_dec_and_test(&q->user->sigpending))
 		free_uid(q->user);
-	sigqueue_cache_or_free(q);
+	kmem_cache_free(sigqueue_cachep, q);
 }
 
 void flush_sigqueue(struct sigpending *queue)
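The other removed pieces were the producer side (sigqueue_cache_or_free(), called from __sigqueue_free()) and the teardown path (exit_task_sigqueue_cache()), which poisons the slot with ERR_PTR(-1) so that a task reaping its children after its own teardown can never cache, and thereby leak, another sigqueue. A sketch of that poisoning idea, again with hypothetical userspace names and simplified stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
#define ERR_PTR(err)  ((void *)(long)(err))
#define IS_ERR(p)     ((unsigned long)(p) >= (unsigned long)-4095)

struct obj { int payload; };

static struct obj *cache_slot;   /* stands in for tsk->sigqueue_cache */

/* Producer: cache into an empty slot, otherwise free. A poisoned slot
 * is non-NULL, so it routes everything to free() as well. */
static void cache_or_free(struct obj *o)
{
        if (!cache_slot)
                cache_slot = o;
        else
                free(o);
}

/* Teardown: drop whatever is cached, then poison the slot so nothing
 * can be cached (and leaked) afterwards. */
static void exit_cache(void)
{
        if (cache_slot && !IS_ERR(cache_slot))
                free(cache_slot);
        cache_slot = ERR_PTR(-1);
}

int main(void)
{
        cache_or_free(malloc(sizeof(struct obj)));   /* cached */
        exit_cache();                                /* freed and poisoned */
        cache_or_free(malloc(sizeof(struct obj)));   /* freed, not cached */
        printf("slot poisoned: %s\n", IS_ERR(cache_slot) ? "yes" : "no");
        return 0;
}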