Merge branch 'android12-5.10' into branch 'android12-5.10-lts'

Do a backmerge to catch the android12-5.10-lts branch up with recent
changes done in android12-5.10.  Included in here are the following
commits:

* c761121f9a Merge tag 'android12-5.10.218_r00' into android12-5.10
* e0ab5345d6 UPSTREAM: f2fs: avoid false alarm of circular locking
* 758dd4cd50 UPSTREAM: f2fs: fix deadlock in i_xattr_sem and inode page lock
* 6f61666ab1 ANDROID: userfaultfd: Fix use-after-free in userfaultfd_using_sigbus()
* 441ca240dd ANDROID: 16K: Don't set padding vm_flags on 32-bit archs
* 3889296829 FROMLIST: binder_alloc: Replace kcalloc with kvcalloc to mitigate OOM issues
* 6d9feaf249 ANDROID: fix kernelci build breaks due to hid/uhid cyclic dependency
* b07354bd32 Merge tag 'android12-5.10.214_r00' into android12-5.10
* 0a36a75b28 UPSTREAM: af_unix: Fix garbage collector racing against connect()
* 5fd2d91390 ANDROID: uid_sys_stats: Use llist for deferred work
* dbfd6a5812 ANDROID: uid_sys_stats: Use a single work for deferred updates
* 98440be320 ANDROID: GKI: Add new ABI symbol list
* 93bad8a473 ANDROID: 16K: Only check basename of linker context
* f91f368b2e UPSTREAM: af_unix: Do not use atomic ops for unix_sk(sk)->inflight.
* 732004ab69 ANDROID: GKI: Update symbols to symbol list
* 9d06d47cd2 ANDROID: ABI fixup for abi break in struct dst_ops
* bff4c6bace BACKPORT: net: fix __dst_negative_advice() race

Change-Id: Ibe1bb644ae24c59bf17c9b8fec0cabe8f8288733
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit b7647fb740
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2024-07-22 09:44:16 +00:00

18 changed files with 10540 additions and 7972 deletions

File diff suppressed because it is too large.

android/abi_gki_aarch64_arg (new file, 2545 lines)

File diff suppressed because it is too large.


@ -1,5 +1,7 @@
[abi_symbol_list]
__traceiter_android_rvh_dma_buf_stats_teardown
__traceiter_android_vh_tune_fault_around_bytes
__traceiter_android_vh_do_swap_page_spf
__tracepoint_android_rvh_dma_buf_stats_teardown
__tracepoint_android_vh_tune_fault_around_bytes
__tracepoint_android_vh_do_swap_page_spf


@ -11,6 +11,7 @@ TIDY_ABI=1
KMI_SYMBOL_LIST=android/abi_gki_aarch64
ADDITIONAL_KMI_SYMBOL_LISTS="
android/abi_gki_aarch64_type_visibility
android/abi_gki_aarch64_arg
android/abi_gki_aarch64_core
android/abi_gki_aarch64_db845c
android/abi_gki_aarch64_exynos


@ -766,9 +766,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
alloc->buffer = (void __user *)vma->vm_start;
alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
if (alloc->pages == NULL) {
ret = -ENOMEM;
failure_string = "alloc page array";
@ -793,7 +793,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
return 0;
err_alloc_buf_struct_failed:
kfree(alloc->pages);
kvfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
alloc->buffer = NULL;
@ -864,7 +864,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
__free_page(alloc->pages[i].page_ptr);
page_count++;
}
kfree(alloc->pages);
kvfree(alloc->pages);
}
mutex_unlock(&alloc->mutex);
if (alloc->vma_vm_mm)
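The three hunks above replace kcalloc()/kfree() with kvcalloc()/kvfree() for the per-mapping page array, whose size scales with the mmap'ed buffer and can fail under memory fragmentation. A minimal sketch of the same pattern, under the assumption of a large array that does not need physically contiguous memory; the struct and function names are illustrative, not binder code:

#include <linux/mm.h>	/* kvcalloc(), kvfree() */
#include <linux/slab.h>

struct page_array {
	struct page **pages;
	size_t nr;
};

/* kvcalloc() tries kmalloc() first and transparently falls back to
 * vmalloc(), so a fragmented allocator no longer means -ENOMEM here. */
static int page_array_init(struct page_array *pa, size_t nr)
{
	pa->pages = kvcalloc(nr, sizeof(*pa->pages), GFP_KERNEL);
	if (!pa->pages)
		return -ENOMEM;
	pa->nr = nr;
	return 0;
}

/* Memory from the kvmalloc() family must be released with kvfree(),
 * which handles both the kmalloc and the vmalloc case. */
static void page_array_free(struct page_array *pa)
{
	kvfree(pa->pages);
	pa->pages = NULL;
}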


@ -290,7 +290,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;
if (IS_ENABLED(CONFIG_UHID) && parser->device->ll_driver == &uhid_hid_driver)
if (IS_BUILTIN(CONFIG_UHID) && parser->device->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;
/* Total size check: Allow for possible report index byte */
@ -1773,7 +1773,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
rsize = hid_compute_report_size(report);
if (IS_ENABLED(CONFIG_UHID) && hid->ll_driver == &uhid_hid_driver)
if (IS_BUILTIN(CONFIG_UHID) && hid->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;
if (report_enum->numbered && rsize >= max_buffer_size)
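The two hunks above are the kernelci build fix: IS_ENABLED(CONFIG_UHID) is true for both =y and =m, so hid-core referenced the uhid_hid_driver symbol even for modular UHID builds, creating the hid/uhid cyclic dependency named in the commit list; IS_BUILTIN() is true only for =y, so the reference disappears when UHID is a module. A small sketch of the macro semantics, using a hypothetical CONFIG_FOO:

#include <linux/kconfig.h>
#include <linux/types.h>

/* For a hypothetical CONFIG_FOO:
 *   IS_BUILTIN(CONFIG_FOO) -> 1 only for CONFIG_FOO=y
 *   IS_MODULE(CONFIG_FOO)  -> 1 only for CONFIG_FOO=m
 *   IS_ENABLED(CONFIG_FOO) -> 1 for either =y or =m
 * Guarding with IS_BUILTIN() keeps this code from referencing a symbol
 * that may live in a separate module. */
static bool foo_cap_applies(void)
{
	return IS_BUILTIN(CONFIG_FOO);
}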


@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
@ -629,7 +630,6 @@ static const struct proc_ops uid_procstat_fops = {
};
struct update_stats_work {
struct work_struct work;
uid_t uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
struct task_struct *task;
@ -637,38 +637,46 @@ struct update_stats_work {
struct task_io_accounting ioac;
u64 utime;
u64 stime;
struct llist_node node;
};
static LLIST_HEAD(work_usw);
static void update_stats_workfn(struct work_struct *work)
{
struct update_stats_work *usw =
container_of(work, struct update_stats_work, work);
struct update_stats_work *usw, *t;
struct uid_entry *uid_entry;
struct task_entry *task_entry __maybe_unused;
struct llist_node *node;
rt_mutex_lock(&uid_lock);
uid_entry = find_uid_entry(usw->uid);
if (!uid_entry)
goto exit;
uid_entry->utime += usw->utime;
uid_entry->stime += usw->stime;
node = llist_del_all(&work_usw);
llist_for_each_entry_safe(usw, t, node, node) {
uid_entry = find_uid_entry(usw->uid);
if (!uid_entry)
goto next;
uid_entry->utime += usw->utime;
uid_entry->stime += usw->stime;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
task_entry = find_task_entry(uid_entry, usw->task);
if (!task_entry)
goto exit;
add_uid_tasks_io_stats(task_entry, &usw->ioac,
UID_STATE_DEAD_TASKS);
task_entry = find_task_entry(uid_entry, usw->task);
if (!task_entry)
goto next;
add_uid_tasks_io_stats(task_entry, &usw->ioac,
UID_STATE_DEAD_TASKS);
#endif
__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
exit:
__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
next:
#ifdef CONFIG_UID_SYS_STATS_DEBUG
put_task_struct(usw->task);
#endif
kfree(usw);
}
rt_mutex_unlock(&uid_lock);
#ifdef CONFIG_UID_SYS_STATS_DEBUG
put_task_struct(usw->task);
#endif
kfree(usw);
}
static DECLARE_WORK(update_stats_work, update_stats_workfn);
static int process_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
@ -687,7 +695,6 @@ static int process_notifier(struct notifier_block *self,
usw = kmalloc(sizeof(struct update_stats_work), GFP_KERNEL);
if (usw) {
INIT_WORK(&usw->work, update_stats_workfn);
usw->uid = uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
usw->task = get_task_struct(task);
@ -698,7 +705,8 @@ static int process_notifier(struct notifier_block *self,
*/
usw->ioac = task->ioac;
task_cputime_adjusted(task, &usw->utime, &usw->stime);
schedule_work(&usw->work);
llist_add(&usw->node, &work_usw);
schedule_work(&update_stats_work);
}
return NOTIFY_OK;
}
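Taken together, the two uid_sys_stats patches above replace one work item per exiting task with a lock-free llist that a single static work item drains under uid_lock. A minimal sketch of that producer/consumer shape (the record type and names are illustrative, not the driver's exact code):

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct exit_record {
	u64 value;
	struct llist_node node;
};

static LLIST_HEAD(pending_records);

/* Consumer: one work item drains everything queued so far in one go. */
static void drain_records(struct work_struct *work)
{
	struct exit_record *rec, *tmp;
	struct llist_node *list = llist_del_all(&pending_records);

	llist_for_each_entry_safe(rec, tmp, list, node) {
		/* fold rec->value into the shared statistics here */
		kfree(rec);
	}
}
static DECLARE_WORK(drain_work, drain_records);

/* Producer: llist_add() is lock-free; scheduling an already queued
 * work item is a no-op, so duplicate wakeups are harmless. */
static void queue_record(u64 value)
{
	struct exit_record *rec = kmalloc(sizeof(*rec), GFP_KERNEL);

	if (!rec)
		return;
	rec->value = value;
	llist_add(&rec->node, &pending_records);
	schedule_work(&drain_work);
}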


@ -808,8 +808,15 @@ int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
{
int err = -EAGAIN;
if (f2fs_has_inline_dentry(dir))
if (f2fs_has_inline_dentry(dir)) {
/*
* Should get i_xattr_sem to keep the lock order:
* i_xattr_sem -> inode_page lock used by f2fs_setxattr.
*/
f2fs_down_read(&F2FS_I(dir)->i_xattr_sem);
err = f2fs_add_inline_entry(dir, fname, inode, ino, mode);
f2fs_up_read(&F2FS_I(dir)->i_xattr_sem);
}
if (err == -EAGAIN)
err = f2fs_add_regular_entry(dir, fname, inode, ino, mode);
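The hunk above takes i_xattr_sem before calling f2fs_add_inline_entry() (which ends up locking the inode page), so every path acquires the two locks in the same order as f2fs_setxattr() and the reported ABBA deadlock cannot form. A generic sketch of the rule, with hypothetical locks standing in for i_xattr_sem and the inode page lock:

#include <linux/mutex.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(lock_a);	/* outer lock, e.g. i_xattr_sem     */
static DEFINE_MUTEX(lock_b);	/* inner lock, e.g. inode page lock */

/* Correct: every path takes A before B. */
static void path_one(void)
{
	down_read(&lock_a);
	mutex_lock(&lock_b);
	/* ... work ... */
	mutex_unlock(&lock_b);
	up_read(&lock_a);
}

/* A second path that locked B first and then waited for A could
 * deadlock against path_one(); the fix removes that reversed order. */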


@ -2147,15 +2147,6 @@ static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
return down_read_trylock(&sem->internal_rwsem);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
{
down_read_nested(&sem->internal_rwsem, subclass);
}
#else
#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
#endif
static inline void f2fs_up_read(struct f2fs_rwsem *sem)
{
up_read(&sem->internal_rwsem);
@ -2166,6 +2157,21 @@ static inline void f2fs_down_write(struct f2fs_rwsem *sem)
down_write(&sem->internal_rwsem);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
{
down_read_nested(&sem->internal_rwsem, subclass);
}
static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass)
{
down_write_nested(&sem->internal_rwsem, subclass);
}
#else
#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
#define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem)
#endif
static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
{
return down_write_trylock(&sem->internal_rwsem);
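The header change above relocates f2fs_down_read_nested() and adds f2fs_down_write_nested(), thin wrappers around the lockdep *_nested() primitives; without CONFIG_DEBUG_LOCK_ALLOC they collapse to the plain lock calls. The subclass tells lockdep that taking a second lock of the same class (for example two different inodes' i_sem) is intentional, which is what silences the false circular-locking alarm named in the commit list. A hedged sketch with hypothetical objects:

#include <linux/lockdep.h>	/* SINGLE_DEPTH_NESTING */
#include <linux/rwsem.h>

struct node {
	struct rw_semaphore sem;	/* all nodes share one lock class */
};

static void lock_parent_then_child(struct node *parent, struct node *child)
{
	down_write(&parent->sem);
	/* Same lock class taken twice: annotate the inner acquisition so
	 * lockdep treats it as bounded nesting, not a potential deadlock. */
	down_write_nested(&child->sem, SINGLE_DEPTH_NESTING);

	/* ... modify both nodes ... */

	up_write(&child->sem);
	up_write(&parent->sem);
}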


@ -658,7 +658,8 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
}
if (inode) {
f2fs_down_write(&F2FS_I(inode)->i_sem);
f2fs_down_write_nested(&F2FS_I(inode)->i_sem,
SINGLE_DEPTH_NESTING);
page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
if (IS_ERR(page)) {
err = PTR_ERR(page);


@ -529,10 +529,12 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
if (len > F2FS_NAME_LEN)
return -ERANGE;
f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
if (!ipage)
f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
error = lookup_all_xattrs(inode, ipage, index, len, name,
&entry, &base_addr, &base_size, &is_inline);
f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
if (!ipage)
f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
if (error)
return error;
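The f2fs_getxattr() change above only takes i_xattr_sem when no ipage is passed in, because a caller that supplies the inode page (the inline-dentry path patched earlier in this merge) now already holds the semaphore. A small sketch of that "lock unless the caller already holds it" convention, with hypothetical names:

#include <linux/rwsem.h>

/* A caller that passes its own context object is expected to already
 * hold @sem; otherwise take and drop it locally. */
static int lookup_entry(struct rw_semaphore *sem, void *callers_ctx)
{
	int err;

	if (!callers_ctx)
		down_read(sem);
	err = 0;	/* ... the real lookup would run here ... */
	if (!callers_ctx)
		up_read(sem);
	return err;
}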


@ -358,18 +358,26 @@ static inline long userfaultfd_get_blocking_state(unsigned int flags)
}
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
bool userfaultfd_using_sigbus(struct vm_area_struct *vma)
bool userfaultfd_using_sigbus(struct vm_fault *vmf)
{
struct userfaultfd_ctx *ctx;
bool ret;
bool ret = false;
/*
* Do it inside RCU section to ensure that the ctx doesn't
* disappear under us.
*/
rcu_read_lock();
ctx = rcu_dereference(vma->vm_userfaultfd_ctx.ctx);
ret = ctx && (ctx->features & UFFD_FEATURE_SIGBUS);
/*
* Ensure that we are not looking at dangling pointer to
* userfaultfd_ctx, which could happen if userfaultfd_release() is
* called and vma is unlinked.
*/
if (!vma_has_changed(vmf)) {
struct userfaultfd_ctx *ctx;
ctx = rcu_dereference(vmf->vma->vm_userfaultfd_ctx.ctx);
ret = ctx && (ctx->features & UFFD_FEATURE_SIGBUS);
}
rcu_read_unlock();
return ret;
}
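The use-after-free fix above no longer dereferences vm_userfaultfd_ctx unconditionally: inside the RCU read section it first checks vma_has_changed(vmf), so a ctx already released by userfaultfd_release() after the VMA was unlinked is never touched. The general shape of that check-before-dereference pattern, with hypothetical types standing in for the vma/uffd structures:

#include <linux/rcupdate.h>
#include <linux/types.h>

/* Hypothetical stand-ins for the real structures. */
struct uf_ctx {
	unsigned int features;
};
#define UF_FEATURE_SIGBUS	(1U << 0)

struct tracked_vma {
	struct uf_ctx __rcu *ctx;
	unsigned int seq;
};

/* 'snapshot' was captured before the lockless walk began. */
static bool tracked_vma_changed(struct tracked_vma *v, unsigned int snapshot)
{
	return READ_ONCE(v->seq) != snapshot;
}

static bool tracked_vma_uses_sigbus(struct tracked_vma *v, unsigned int snapshot)
{
	bool ret = false;

	rcu_read_lock();
	/* Revalidate inside the RCU section: if the object was torn down
	 * concurrently, do not follow the possibly dangling ctx pointer. */
	if (!tracked_vma_changed(v, snapshot)) {
		struct uf_ctx *ctx = rcu_dereference(v->ctx);

		ret = ctx && (ctx->features & UF_FEATURE_SIGBUS);
	}
	rcu_read_unlock();
	return ret;
}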


@ -1776,6 +1776,20 @@ static inline void vm_write_end(struct vm_area_struct *vma)
{
raw_write_seqcount_end(&vma->vm_sequence);
}
static inline bool vma_has_changed(struct vm_fault *vmf)
{
int ret = RB_EMPTY_NODE(&vmf->vma->vm_rb);
unsigned int seq = READ_ONCE(vmf->vma->vm_sequence.sequence);
/*
* Matches both the wmb in write_seqlock_{begin,end}() and
* the wmb in vma_rb_erase().
*/
smp_rmb();
return ret || seq != vmf->sequence;
}
#else
static inline void vm_write_begin(struct vm_area_struct *vma)
{
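vma_has_changed() above is the read side of the vm_sequence seqcount whose write side, vm_write_begin()/vm_write_end(), appears just above it in this header: writers bump the sequence around every VMA modification, speculative readers capture it first and compare later, and the smp_rmb() pairs with the writers' barriers. A generic seqcount sketch of that capture/recheck pattern (plain seqcount_t, not the SPF code itself; writer serialization, e.g. a spinlock, is assumed):

#include <linux/seqlock.h>

struct sampled_state {
	seqcount_t seq;
	unsigned long value;
};

/* Writer: bump the sequence around every modification. */
static void state_update(struct sampled_state *s, unsigned long v)
{
	write_seqcount_begin(&s->seq);
	s->value = v;
	write_seqcount_end(&s->seq);
}

/* Reader: snapshot the sequence, read without locks, then ask whether a
 * writer ran in between -- the analogue of vma_has_changed(). */
static bool state_read(struct sampled_state *s, unsigned long *out)
{
	unsigned int snap = read_seqcount_begin(&s->seq);

	*out = READ_ONCE(s->value);
	return !read_seqcount_retry(&s->seq, snap);
}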


@ -62,6 +62,14 @@ extern void show_map_pad_vma(struct vm_area_struct *vma,
extern void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
unsigned long addr, int new_below);
extern unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
unsigned long newflags);
extern bool is_mergable_pad_vma(struct vm_area_struct *vma,
unsigned long vm_flags);
extern unsigned long vma_data_pages(struct vm_area_struct *vma);
#else /* PAGE_SIZE != SZ_4K || !defined(CONFIG_64BIT) */
static inline void vma_set_pad_pages(struct vm_area_struct *vma,
unsigned long nr_pages)
@ -98,36 +106,22 @@ static inline void split_pad_vma(struct vm_area_struct *vma, struct vm_area_stru
unsigned long addr, int new_below)
{
}
#endif /* PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT) */
static inline unsigned long vma_data_pages(struct vm_area_struct *vma)
{
return vma_pages(vma) - vma_pad_pages(vma);
}
/*
* Sets the correct padding bits / flags for a VMA split.
*/
static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
unsigned long newflags)
{
if (newflags & VM_PAD_MASK)
return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK);
else
return newflags;
return newflags;
}
/*
* Merging of padding VMAs is uncommon, as padding is only allowed
* from the linker context.
*
* To simplify the semantics, adjacent VMAs with padding are not
* allowed to merge.
*/
static inline bool is_mergable_pad_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
/* Padding VMAs cannot be merged with other padding or real VMAs */
return !((vma->vm_flags | vm_flags) & VM_PAD_MASK);
return true;
}
static inline unsigned long vma_data_pages(struct vm_area_struct *vma)
{
return vma_pages(vma);
}
#endif /* PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT) */
#endif /* _LINUX_PAGE_SIZE_MIGRATION_H */


@ -40,7 +40,7 @@ extern int sysctl_unprivileged_userfaultfd;
extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
extern bool userfaultfd_using_sigbus(struct vm_area_struct *vma);
extern bool userfaultfd_using_sigbus(struct vm_fault *vmf);
#endif
/*


@ -40,20 +40,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf);
extern struct vm_area_struct *get_vma(struct mm_struct *mm,
unsigned long addr);
extern void put_vma(struct vm_area_struct *vma);
static inline bool vma_has_changed(struct vm_fault *vmf)
{
int ret = RB_EMPTY_NODE(&vmf->vma->vm_rb);
unsigned int seq = READ_ONCE(vmf->vma->vm_sequence.sequence);
/*
* Matches both the wmb in write_seqlock_{begin,end}() and
* the wmb in vma_rb_erase().
*/
smp_rmb();
return ret || seq != vmf->sequence;
}
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,


@ -5058,6 +5058,7 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
vmf.vma_flags = READ_ONCE(vmf.vma->vm_flags);
vmf.vma_page_prot = READ_ONCE(vmf.vma->vm_page_prot);
vmf.sequence = seq;
#ifdef CONFIG_USERFAULTFD
/*
@ -5067,7 +5068,7 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
if (unlikely(vmf.vma_flags & __VM_UFFD_FLAGS)) {
uffd_missing_sigbus = vma_is_anonymous(vmf.vma) &&
(vmf.vma_flags & VM_UFFD_MISSING) &&
userfaultfd_using_sigbus(vmf.vma);
userfaultfd_using_sigbus(&vmf);
if (!uffd_missing_sigbus) {
trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
return VM_FAULT_RETRY;
@ -5193,7 +5194,6 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
vmf.pte = NULL;
}
vmf.sequence = seq;
vmf.flags = flags;
local_irq_enable();


@ -18,6 +18,7 @@
#include <linux/kstrtox.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysfs.h>
typedef void (*show_pad_maps_fn) (struct seq_file *m, struct vm_area_struct *vma);
@ -182,7 +183,15 @@ static inline bool linker_ctx(void)
memset(buf, 0, bufsize);
path = d_path(&file->f_path, buf, bufsize);
if (!strcmp(path, "/system/bin/linker64"))
/*
* Depending on interpreter requested, valid paths could be any of:
* 1. /system/bin/bootstrap/linker64
* 2. /system/bin/linker64
* 3. /apex/com.android.runtime/bin/linker64
*
* Check the base name (linker64).
*/
if (!strcmp(kbasename(path), "linker64"))
return true;
}
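The linker_ctx() change above compares only the base name because, depending on the interpreter requested, the linker may be mapped from /system/bin/linker64, /system/bin/bootstrap/linker64, or /apex/com.android.runtime/bin/linker64. kbasename() from <linux/string.h> returns the component after the last '/', so one comparison covers all three paths; a tiny sketch:

#include <linux/string.h>	/* kbasename(), strcmp() */
#include <linux/types.h>

static bool is_linker64(const char *path)
{
	/* kbasename("/system/bin/bootstrap/linker64") -> "linker64" */
	return !strcmp(kbasename(path), "linker64");
}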
@ -395,5 +404,37 @@ void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
vma_set_pad_pages(second, nr_vma2_pages);
}
}
/*
* Sets the correct padding bits / flags for a VMA split.
*/
unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
unsigned long newflags)
{
if (newflags & VM_PAD_MASK)
return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK);
else
return newflags;
}
/*
* Merging of padding VMAs is uncommon, as padding is only allowed
* from the linker context.
*
* To simplify the semantics, adjacent VMAs with padding are not
* allowed to merge.
*/
bool is_mergable_pad_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
/* Padding VMAs cannot be merged with other padding or real VMAs */
return !((vma->vm_flags | vm_flags) & VM_PAD_MASK);
}
unsigned long vma_data_pages(struct vm_area_struct *vma)
{
return vma_pages(vma) - vma_pad_pages(vma);
}
#endif /* PAGE_SIZE == SZ_4K */
#endif /* CONFIG_64BIT */