ANDROID: fix ABI breakage caused by percpu_rw_semaphore changes

percpu_rw_semaphore changes that allow calling percpu_free_rwsem in
atomic context caused an ABI breakage. Introduce a struct
percpu_rw_semaphore_atomic wrapper and change percpu_rwsem_async_destroy
to use it in order to keep struct percpu_rw_semaphore intact and fix the
ABI breakage.

Bug: 161210518
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I198a6381fb48059f2aaa2ec38b8c1e5e5e936bb0
Author: Suren Baghdasaryan <surenb@google.com>
Date:   2021-11-24 07:43:12 -08:00
Commit: a4d26b9a4b (parent 6971350406)

5 changed files with 20 additions and 21 deletions
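For context before the hunks: the sketch below is not part of the commit (the
helper name example_mmu_lock_alloc is hypothetical) and only illustrates the
pattern the message describes. struct percpu_rw_semaphore keeps its original
layout, and the list linkage needed for deferred destruction lives solely in
the embedding struct percpu_rw_semaphore_atomic, so code built against the old
layout keeps working.

/*
 * Minimal sketch of the ABI-preserving wrapper, mirroring the hunks below.
 * example_mmu_lock_alloc() is a hypothetical caller, not code from the commit.
 */
#include <linux/percpu-rwsem.h>
#include <linux/slab.h>

static struct percpu_rw_semaphore_atomic *example_mmu_lock_alloc(void)
{
	struct percpu_rw_semaphore_atomic *sem;

	/* The wrapper embeds the unchanged struct percpu_rw_semaphore. */
	sem = kzalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
		return NULL;

	/* All normal lock operations go through the embedded semaphore. */
	if (percpu_init_rwsem(&sem->rw_sem)) {
		kfree(sem);
		return NULL;
	}
	return sem;
}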

--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h

@@ -404,7 +404,7 @@ struct core_state {
 };
 
 struct kioctx_table;
-struct percpu_rw_semaphore;
+struct percpu_rw_semaphore_atomic;
 struct mm_struct {
 	struct {
 		struct vm_area_struct *mmap;		/* list of VMAs */
@@ -563,7 +563,7 @@ struct mm_struct {
 #ifdef CONFIG_MMU_NOTIFIER
 	struct mmu_notifier_subscriptions *notifier_subscriptions;
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
-	struct percpu_rw_semaphore *mmu_notifier_lock;
+	struct percpu_rw_semaphore_atomic *mmu_notifier_lock;
 #endif
 #endif
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS

--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h

@@ -508,11 +508,12 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
-	mm->mmu_notifier_lock = kzalloc(sizeof(struct percpu_rw_semaphore), GFP_KERNEL);
+	mm->mmu_notifier_lock = kzalloc(
+		sizeof(struct percpu_rw_semaphore_atomic), GFP_KERNEL);
 	if (!mm->mmu_notifier_lock)
 		return false;
 
-	percpu_init_rwsem(mm->mmu_notifier_lock);
+	percpu_init_rwsem(&mm->mmu_notifier_lock->rw_sem);
 	mm->notifier_subscriptions = NULL;
 
 	return true;
@@ -526,7 +527,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 	if (in_atomic()) {
 		percpu_rwsem_async_destroy(mm->mmu_notifier_lock);
 	} else {
-		percpu_free_rwsem(mm->mmu_notifier_lock);
+		percpu_free_rwsem(&mm->mmu_notifier_lock->rw_sem);
 		kfree(mm->mmu_notifier_lock);
 	}
 	mm->mmu_notifier_lock = NULL;
@@ -534,12 +535,12 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 static inline bool mmu_notifier_trylock(struct mm_struct *mm)
 {
-	return percpu_down_read_trylock(mm->mmu_notifier_lock);
+	return percpu_down_read_trylock(&mm->mmu_notifier_lock->rw_sem);
 }
 
 static inline void mmu_notifier_unlock(struct mm_struct *mm)
 {
-	percpu_up_read(mm->mmu_notifier_lock);
+	percpu_up_read(&mm->mmu_notifier_lock->rw_sem);
 }
 
 #else /* CONFIG_SPECULATIVE_PAGE_FAULT */

--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h

@@ -13,20 +13,18 @@ struct percpu_rw_semaphore {
 	struct rcu_sync		rss;
 	unsigned int __percpu	*read_count;
 	struct rcuwait		writer;
-	/*
-	 * destroy_list_entry is used during object destruction when waiters
-	 * can't be used, therefore reusing the same space.
-	 */
-	union {
-		wait_queue_head_t	waiters;
-		struct list_head	destroy_list_entry;
-	};
+	wait_queue_head_t	waiters;
 	atomic_t		block;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 #endif
 };
 
+struct percpu_rw_semaphore_atomic {
+	struct percpu_rw_semaphore rw_sem;
+	struct list_head destroy_list_entry;
+};
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
 #else
@@ -138,7 +136,7 @@ extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
 extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
 
 /* Invokes percpu_free_rwsem and frees the semaphore from a worker thread. */
-extern void percpu_rwsem_async_destroy(struct percpu_rw_semaphore *sem);
+extern void percpu_rwsem_async_destroy(struct percpu_rw_semaphore_atomic *sem);
 
 #define percpu_init_rwsem(sem)					\
 ({								\

--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c

@@ -275,7 +275,7 @@ static DEFINE_SPINLOCK(destroy_list_lock);
 
 static void destroy_list_workfn(struct work_struct *work)
 {
-	struct percpu_rw_semaphore *sem, *sem2;
+	struct percpu_rw_semaphore_atomic *sem, *sem2;
 	LIST_HEAD(to_destroy);
 
 	spin_lock(&destroy_list_lock);
@@ -286,14 +286,14 @@ static void destroy_list_workfn(struct work_struct *work)
 		return;
 
 	list_for_each_entry_safe(sem, sem2, &to_destroy, destroy_list_entry) {
-		percpu_free_rwsem(sem);
+		percpu_free_rwsem(&sem->rw_sem);
 		kfree(sem);
 	}
 }
 
 static DECLARE_WORK(destroy_list_work, destroy_list_workfn);
 
-void percpu_rwsem_async_destroy(struct percpu_rw_semaphore *sem)
+void percpu_rwsem_async_destroy(struct percpu_rw_semaphore_atomic *sem)
 {
 	spin_lock(&destroy_list_lock);
 	list_add_tail(&sem->destroy_list_entry, &destroy_list);
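For illustration, a hedged usage sketch of the deferred-destroy path declared
above (example_teardown is hypothetical, not code from the commit): a caller
that may be in atomic context hands the wrapper to percpu_rwsem_async_destroy(),
which queues it on destroy_list so the worker can later call percpu_free_rwsem()
on the embedded semaphore and kfree() the wrapper, exactly as
mmu_notifier_subscriptions_destroy() does in the mmu_notifier.h hunk above.

/*
 * Hypothetical caller of the async-destroy path; not code from the commit.
 * It mirrors the in_atomic() branch of mmu_notifier_subscriptions_destroy().
 */
#include <linux/percpu-rwsem.h>
#include <linux/preempt.h>
#include <linux/slab.h>

static void example_teardown(struct percpu_rw_semaphore_atomic *sem)
{
	if (in_atomic()) {
		/* Cannot block here: queue the wrapper; the worker later calls
		 * percpu_free_rwsem(&sem->rw_sem) and kfree(sem).
		 */
		percpu_rwsem_async_destroy(sem);
	} else {
		percpu_free_rwsem(&sem->rw_sem);
		kfree(sem);
	}
}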

--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c

@@ -625,12 +625,12 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 
 static inline void mmu_notifier_write_lock(struct mm_struct *mm)
 {
-	percpu_down_write(mm->mmu_notifier_lock);
+	percpu_down_write(&mm->mmu_notifier_lock->rw_sem);
 }
 
 static inline void mmu_notifier_write_unlock(struct mm_struct *mm)
 {
-	percpu_up_write(mm->mmu_notifier_lock);
+	percpu_up_write(&mm->mmu_notifier_lock->rw_sem);
 }
 
 #else /* CONFIG_SPECULATIVE_PAGE_FAULT */