ANDROID: fix ABI breakage caused by mm_struct->mmu_notifier_lock addition

To prevent ABI breakage, move mm->mmu_notifier_lock into
mm->notifier_subscriptions and allocate mm->notifier_subscriptions
during mm creation in mmu_notifier_subscriptions_init(). This costs
an additional 176 bytes per mm but keeps the ABI stable. A new
mmu_notifier_subscriptions_hdr structure is introduced at the
beginning of mmu_notifier_subscriptions so that
mmu_notifier_subscriptions itself stays hidden and its type CRC does
not change when it is referenced from other structures.

Bug: 161210518
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I6f435708d642b70b22e0243c8b33108c208ce5bb
Author: Suren Baghdasaryan <surenb@google.com>
Date:   2021-11-24 07:56:04 -08:00
Parent: a4d26b9a4b
Commit: 5d8520b557

3 changed files with 94 additions and 40 deletions
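
Before the diffs, a note on the technique itself. The patch relies on a
standard C guarantee: a pointer to a structure may be converted to a pointer
to its first member. That lets a small, stable header be exposed at offset
zero while the full structure stays opaque to other translation units. A
minimal userspace sketch of the pattern, with illustrative names that are not
from the kernel:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Public header: the only layout consumers of the opaque struct see. */
struct subs_hdr {
	int valid;
};

/* Private full structure. Its layout may change freely without touching
 * any consumer-visible type, as long as hdr remains the first member. */
struct subs {
	struct subs_hdr hdr;	/* must stay first: it is the cast target */
	char private_state[128];
};

/* Consumers hold only an opaque pointer and cast it to the header. */
static struct subs_hdr *get_hdr(void *opaque)
{
	return (struct subs_hdr *)opaque;
}

int main(void)
{
	struct subs *s = calloc(1, sizeof(*s));

	if (!s)
		return 1;

	/* The cast above is only legal because hdr sits at offset zero. */
	static_assert(offsetof(struct subs, hdr) == 0,
		      "hdr must be the first member of struct subs");

	get_hdr(s)->valid = 1;
	printf("valid=%d\n", s->hdr.valid);
	free(s);
	return 0;
}

The kernel-side version of this is get_notifier_subscriptions_hdr() in the
second file below: mm_struct only ever stores an opaque pointer, so its type
CRC is computed without any knowledge of the private layout.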

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h

@@ -404,7 +404,6 @@ struct core_state {
 };
 
 struct kioctx_table;
-struct percpu_rw_semaphore_atomic;
 struct mm_struct {
 	struct {
 		struct vm_area_struct *mmap;		/* list of VMAs */
@@ -562,9 +561,6 @@ struct mm_struct {
 		struct file __rcu *exe_file;
 #ifdef CONFIG_MMU_NOTIFIER
 		struct mmu_notifier_subscriptions *notifier_subscriptions;
-#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
-		struct percpu_rw_semaphore_atomic *mmu_notifier_lock;
-#endif
 #endif
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
 		pgtable_t pmd_huge_pte; /* protected by page_table_lock */

diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h

@@ -17,6 +17,13 @@ struct mmu_notifier;
 struct mmu_notifier_range;
 struct mmu_interval_notifier;
 
+struct mmu_notifier_subscriptions_hdr {
+	bool valid;
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	struct percpu_rw_semaphore_atomic *mmu_notifier_lock;
+#endif
+};
+
 /**
  * enum mmu_notifier_event - reason for the mmu notifier callback
  * @MMU_NOTIFY_UNMAP: either munmap() that unmap the range or a mremap() that
@@ -283,9 +290,30 @@ struct mmu_notifier_range {
 	void *migrate_pgmap_owner;
 };
 
+static inline
+struct mmu_notifier_subscriptions_hdr *get_notifier_subscriptions_hdr(
+	struct mm_struct *mm)
+{
+	/*
+	 * container_of() can't be used here because mmu_notifier_subscriptions
+	 * struct should be kept invisible to mm_struct, otherwise it
+	 * introduces KMI CRC breakage. Therefore the callers don't know what
+	 * members struct mmu_notifier_subscriptions contains and can't call
+	 * container_of(), which requires a member name.
+	 *
+	 * WARNING: For this typecasting to work, mmu_notifier_subscriptions_hdr
+	 * should be the first member of struct mmu_notifier_subscriptions.
+	 */
+	return (struct mmu_notifier_subscriptions_hdr *)mm->notifier_subscriptions;
+}
+
 static inline int mm_has_notifiers(struct mm_struct *mm)
 {
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	return unlikely(get_notifier_subscriptions_hdr(mm)->valid);
+#else
 	return unlikely(mm->notifier_subscriptions);
+#endif
 }
 
 struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
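
Nothing in this header enforces the WARNING above at compile time. A hedged
suggestion, not part of this patch: since mm/mmu_notifier.c sees the full
structure definition, a one-line compile-time guard there would catch any
future reordering of the members:

/* Hypothetical guard (not in this patch): placed at the top of
 * mmu_notifier_subscriptions_init() in mm/mmu_notifier.c, where the full
 * struct is visible, it fails the build if hdr ever leaves offset zero. */
BUILD_BUG_ON(offsetof(struct mmu_notifier_subscriptions, hdr) != 0);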
@@ -506,41 +534,19 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
 
-static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
-{
-	mm->mmu_notifier_lock = kzalloc(
-		sizeof(struct percpu_rw_semaphore_atomic), GFP_KERNEL);
-	if (!mm->mmu_notifier_lock)
-		return false;
-
-	percpu_init_rwsem(&mm->mmu_notifier_lock->rw_sem);
-	mm->notifier_subscriptions = NULL;
-	return true;
-}
-
-static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
-{
-	if (mm_has_notifiers(mm))
-		__mmu_notifier_subscriptions_destroy(mm);
-
-	if (in_atomic()) {
-		percpu_rwsem_async_destroy(mm->mmu_notifier_lock);
-	} else {
-		percpu_free_rwsem(&mm->mmu_notifier_lock->rw_sem);
-		kfree(mm->mmu_notifier_lock);
-	}
-	mm->mmu_notifier_lock = NULL;
-}
+extern bool mmu_notifier_subscriptions_init(struct mm_struct *mm);
+extern void mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
 
 static inline bool mmu_notifier_trylock(struct mm_struct *mm)
 {
-	return percpu_down_read_trylock(&mm->mmu_notifier_lock->rw_sem);
+	return percpu_down_read_trylock(
+		&get_notifier_subscriptions_hdr(mm)->mmu_notifier_lock->rw_sem);
 }
 
 static inline void mmu_notifier_unlock(struct mm_struct *mm)
 {
-	percpu_up_read(&mm->mmu_notifier_lock->rw_sem);
+	percpu_up_read(
+		&get_notifier_subscriptions_hdr(mm)->mmu_notifier_lock->rw_sem);
 }
 
 #else /* CONFIG_SPECULATIVE_PAGE_FAULT */

diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c

@@ -35,6 +35,12 @@ struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
  * in mmdrop().
  */
 struct mmu_notifier_subscriptions {
+	/*
+	 * WARNING: hdr should be the first member of this structure
+	 * so that it can be typecasted into mmu_notifier_subscriptions_hdr.
+	 * This is required to avoid KMI CRC breakage.
+	 */
+	struct mmu_notifier_subscriptions_hdr hdr;
 	/* all mmu notifiers registered in this mm are queued in this list */
 	struct hlist_head list;
 	bool has_itree;
@@ -625,12 +631,14 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 
 static inline void mmu_notifier_write_lock(struct mm_struct *mm)
 {
-	percpu_down_write(&mm->mmu_notifier_lock->rw_sem);
+	percpu_down_write(
+		&mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
 }
 
 static inline void mmu_notifier_write_unlock(struct mm_struct *mm)
 {
-	percpu_up_write(&mm->mmu_notifier_lock->rw_sem);
+	percpu_up_write(
+		&mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
 }
 
 #else /* CONFIG_SPECULATIVE_PAGE_FAULT */
@@ -640,6 +648,16 @@ static inline void mmu_notifier_write_unlock(struct mm_struct *mm) {}
 
 #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
 
+static void init_subscriptions(struct mmu_notifier_subscriptions *subscriptions)
+{
+	INIT_HLIST_HEAD(&subscriptions->list);
+	spin_lock_init(&subscriptions->lock);
+	subscriptions->invalidate_seq = 2;
+	subscriptions->itree = RB_ROOT_CACHED;
+	init_waitqueue_head(&subscriptions->wq);
+	INIT_HLIST_HEAD(&subscriptions->deferred_list);
+}
+
 /*
  * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
  * write mode. A NULL mn signals the notifier is being registered for itree
@@ -672,12 +690,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
 		if (!subscriptions)
 			return -ENOMEM;
 
-		INIT_HLIST_HEAD(&subscriptions->list);
-		spin_lock_init(&subscriptions->lock);
-		subscriptions->invalidate_seq = 2;
-		subscriptions->itree = RB_ROOT_CACHED;
-		init_waitqueue_head(&subscriptions->wq);
-		INIT_HLIST_HEAD(&subscriptions->deferred_list);
+		init_subscriptions(subscriptions);
 	}
 
 	mmu_notifier_write_lock(mm);
@@ -706,6 +719,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
 	 */
 	if (subscriptions)
 		smp_store_release(&mm->notifier_subscriptions, subscriptions);
+	mm->notifier_subscriptions->hdr.valid = true;
 
 	if (subscription) {
 		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
@@ -1149,3 +1163,41 @@ mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
 	return range->vma->vm_flags & VM_READ;
 }
 EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);
+
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+
+bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
+{
+	struct mmu_notifier_subscriptions *subscriptions;
+	struct percpu_rw_semaphore_atomic *sem;
+
+	subscriptions = kzalloc(
+		sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
+	if (!subscriptions)
+		return false;
+
+	sem = kzalloc(sizeof(struct percpu_rw_semaphore_atomic), GFP_KERNEL);
+	if (!sem) {
+		kfree(subscriptions);
+		return false;
+	}
+	percpu_init_rwsem(&sem->rw_sem);
+
+	init_subscriptions(subscriptions);
+	subscriptions->has_itree = true;
+	subscriptions->hdr.valid = false;
+	subscriptions->hdr.mmu_notifier_lock = sem;
+
+	mm->notifier_subscriptions = subscriptions;
+	return true;
+}
+
+void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
+{
+	percpu_rwsem_async_destroy(
+		mm->notifier_subscriptions->hdr.mmu_notifier_lock);
+	kfree(mm->notifier_subscriptions);
+	mm->notifier_subscriptions = NULL;
+}
+
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
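
Two points worth noting about the new out-of-line helpers. First,
mmu_notifier_subscriptions_destroy() now always defers semaphore teardown
through percpu_rwsem_async_destroy() instead of branching on in_atomic() as
the old inline version did, which is the safe choice when the caller may be
in atomic context. Second, because the allocation now happens at mm creation,
the bool return of mmu_notifier_subscriptions_init() must be checked by the
caller. A rough, hypothetical sketch of the caller-side pattern (the
kernel/fork.c hunk is not among the files shown in this commit view):

/* Hypothetical caller-side pattern mirroring what kernel/fork.c's
 * mm_init() would need once subscriptions are allocated up front.
 * The actual fork.c change is not part of this commit view. */
static inline int mm_alloc_notifier_state(struct mm_struct *mm)
{
	if (!mmu_notifier_subscriptions_init(mm))
		return -ENOMEM;	/* fail mm creation; nothing to unwind */
	return 0;
}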