ANDROID: vendor_hook: add hooks to protect locking-tsk in cpu scheduler

Provide vendor hooks to record the time at which a task starts holding a lock,
so that a task holding an rwsem/mutex can be protected from being preempted
for a short time in some cases. The new hooks are listed below; a registration
sketch follows the list.

- android_vh_record_mutex_lock_starttime
- android_vh_record_rtmutex_lock_starttime
- android_vh_record_rwsem_lock_starttime
- android_vh_record_percpu_rwsem_lock_starttime
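
For illustration, a vendor module consumes these hooks by registering a probe
through the register_trace_android_vh_*() helpers generated by DECLARE_HOOK().
The sketch below records the mutex hook's timestamp per task; storing it in
task_struct::android_vendor_data1 and the slot index chosen are assumptions
made for this example, not something defined by this patch.

/* Illustrative vendor-module probe; not part of this patch. */
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/hooks/dtask.h>

#define LOCK_START_SLOT 0	/* hypothetical slot in android_vendor_data1 */

static void probe_mutex_lock_starttime(void *unused, struct task_struct *tsk,
				       unsigned long settime_jiffies)
{
	/* settime_jiffies is jiffies at acquire time, 0 at unlock. */
	tsk->android_vendor_data1[LOCK_START_SLOT] = settime_jiffies;
}

static int __init lock_prot_init(void)
{
	return register_trace_android_vh_record_mutex_lock_starttime(
			probe_mutex_lock_starttime, NULL);
}
module_init(lock_prot_init);

static void __exit lock_prot_exit(void)
{
	unregister_trace_android_vh_record_mutex_lock_starttime(
			probe_mutex_lock_starttime, NULL);
}
module_exit(lock_prot_exit);

MODULE_LICENSE("GPL");

A scheduler-side vendor hook (outside the scope of this patch) can then compare
the recorded jiffies with the current time and briefly defer preempting the
holder while the delta is small; the 0 written at unlock clears the record.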

Bug: 241191475

Signed-off-by: Peifeng Li <lipeifeng@oppo.com>
Change-Id: I0e967a1e8b77c32a1ad588acd54028fae2f90c4e
Author: Peifeng Li, 2022-08-03 19:42:27 +08:00 (committed by Matthias Männich)
parent fb39cdb9ea
commit eed2741ae6
7 changed files with 76 additions and 7 deletions

drivers/android/vendor_hooks.c

@ -259,6 +259,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_commit_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_override_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_revert_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_mutex_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rtmutex_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rwsem_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_percpu_rwsem_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_x);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_nx);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_ro);

include/linux/percpu-rwsem.h

@ -9,6 +9,9 @@
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
void _trace_android_vh_record_percpu_rwsem_lock_starttime(
struct task_struct *tsk, unsigned long settime);
struct percpu_rw_semaphore {
struct rcu_sync rss;
unsigned int __percpu *read_count;
@ -73,6 +76,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
* bleeding the critical section out.
*/
preempt_enable();
_trace_android_vh_record_percpu_rwsem_lock_starttime(current, jiffies);
}
static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
@ -93,14 +97,17 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
* bleeding the critical section out.
*/
if (ret)
if (ret) {
_trace_android_vh_record_percpu_rwsem_lock_starttime(current, jiffies);
rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
}
return ret;
}
static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
_trace_android_vh_record_percpu_rwsem_lock_starttime(current, 0);
rwsem_release(&sem->dep_map, _RET_IP_);
preempt_disable();

include/trace/hooks/dtask.h

@ -71,7 +71,18 @@ DECLARE_HOOK(android_vh_mutex_unlock_slowpath_end,
DECLARE_HOOK(android_vh_mutex_start_check_new_owner,
TP_PROTO(struct mutex *lock),
TP_ARGS(lock));
DECLARE_HOOK(android_vh_record_mutex_lock_starttime,
TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies),
TP_ARGS(tsk, settime_jiffies));
DECLARE_HOOK(android_vh_record_rtmutex_lock_starttime,
TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies),
TP_ARGS(tsk, settime_jiffies));
DECLARE_HOOK(android_vh_record_rwsem_lock_starttime,
TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies),
TP_ARGS(tsk, settime_jiffies));
DECLARE_HOOK(android_vh_record_percpu_rwsem_lock_starttime,
TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies),
TP_ARGS(tsk, settime_jiffies));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_DTASK_H */

kernel/locking/mutex.c

@ -170,8 +170,10 @@ static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
unsigned long curr = (unsigned long)current;
unsigned long zero = 0UL;
if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) {
trace_android_vh_record_mutex_lock_starttime(current, jiffies);
return true;
}
return false;
}
@ -748,6 +750,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
*/
void __sched mutex_unlock(struct mutex *lock)
{
trace_android_vh_record_mutex_lock_starttime(current, 0);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
if (__mutex_unlock_fast(lock))
return;
@ -978,6 +981,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
lock_acquired(&lock->dep_map, ip);
if (ww_ctx)
ww_mutex_set_context_fastpath(ww, ww_ctx);
trace_android_vh_record_mutex_lock_starttime(current, jiffies);
preempt_enable();
return 0;
}
@ -1097,6 +1101,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
spin_unlock(&lock->wait_lock);
preempt_enable();
trace_android_vh_record_mutex_lock_starttime(current, jiffies);
return 0;
err:
@ -1433,8 +1438,10 @@ int __sched mutex_trylock(struct mutex *lock)
#endif
locked = __mutex_trylock(lock);
if (locked)
if (locked) {
trace_android_vh_record_mutex_lock_starttime(current, jiffies);
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
}
return locked;
}

kernel/locking/percpu-rwsem.c

@ -10,6 +10,21 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <trace/hooks/dtask.h>
/*
* trace_android_vh_record_percpu_rwsem_lock_starttime cannot be called
* directly from include/linux/percpu-rwsem.h, because including
* trace/hooks/dtask.h there would break the build. Provide the wrapper
* _trace_android_vh_record_percpu_rwsem_lock_starttime() for
* percpu-rwsem.h to call instead.
*/
void _trace_android_vh_record_percpu_rwsem_lock_starttime(struct task_struct *tsk,
unsigned long settime)
{
trace_android_vh_record_percpu_rwsem_lock_starttime(tsk, settime);
}
EXPORT_SYMBOL_GPL(_trace_android_vh_record_percpu_rwsem_lock_starttime);
int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
const char *name, struct lock_class_key *key)
{
@ -237,11 +252,13 @@ void percpu_down_write(struct percpu_rw_semaphore *sem)
/* Wait for all active readers to complete. */
rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
trace_android_vh_record_percpu_rwsem_lock_starttime(current, jiffies);
}
EXPORT_SYMBOL_GPL(percpu_down_write);
void percpu_up_write(struct percpu_rw_semaphore *sem)
{
trace_android_vh_record_percpu_rwsem_lock_starttime(current, 0);
rwsem_release(&sem->dep_map, _RET_IP_);
/*

kernel/locking/rtmutex.c

@ -1471,6 +1471,7 @@ static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@ -1519,6 +1520,8 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
if (ret)
mutex_release(&lock->dep_map, _RET_IP_);
else
trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
return ret;
}
@ -1563,6 +1566,8 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
rt_mutex_slowlock);
if (ret)
mutex_release(&lock->dep_map, _RET_IP_);
else
trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
return ret;
}
@ -1589,6 +1594,8 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
else
trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
return ret;
}
@ -1603,6 +1610,7 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, _RET_IP_);
rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
trace_android_vh_record_rtmutex_lock_starttime(current, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

kernel/locking/rwsem.c

@ -279,6 +279,10 @@ static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
if (WARN_ON_ONCE(cnt < 0))
rwsem_set_nonspinnable(sem);
if ((cnt & RWSEM_READ_FAILED_MASK) == 0)
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return !(cnt & RWSEM_READ_FAILED_MASK);
}
@ -1021,9 +1025,11 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
raw_spin_unlock_irq(&sem->wait_lock);
wake_up_q(&wake_q);
}
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return sem;
} else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
/* rwsem_reader_phase_trylock() implies ACQUIRE on success */
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return sem;
}
@ -1104,6 +1110,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
__set_current_state(TASK_RUNNING);
trace_android_vh_rwsem_read_wait_finish(sem);
lockevent_inc(rwsem_rlock);
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return sem;
out_nolock:
@ -1150,6 +1157,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
rwsem_optimistic_spin(sem, true)) {
/* rwsem_optimistic_spin() implies ACQUIRE on success */
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return sem;
}
@ -1280,7 +1288,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
rwsem_disable_reader_optspin(sem, disable_rspin);
raw_spin_unlock_irq(&sem->wait_lock);
lockevent_inc(rwsem_wlock);
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return ret;
out_nolock:
@ -1396,6 +1404,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
tmp + RWSEM_READER_BIAS)) {
rwsem_set_reader_owned(sem);
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return 1;
}
} while (!(tmp & RWSEM_READ_FAILED_MASK));
@ -1410,10 +1419,12 @@ static inline void __down_write(struct rw_semaphore *sem)
long tmp = RWSEM_UNLOCKED_VALUE;
if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
RWSEM_WRITER_LOCKED)))
RWSEM_WRITER_LOCKED))) {
rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
else
} else {
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
rwsem_set_owner(sem);
}
}
static inline int __down_write_killable(struct rw_semaphore *sem)
@ -1425,6 +1436,7 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
return -EINTR;
} else {
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
rwsem_set_owner(sem);
}
return 0;
@ -1440,6 +1452,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
RWSEM_WRITER_LOCKED)) {
rwsem_set_owner(sem);
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return true;
}
return false;
@ -1455,6 +1468,7 @@ static inline void __up_read(struct rw_semaphore *sem)
DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
trace_android_vh_record_rwsem_lock_starttime(current, 0);
rwsem_clear_reader_owned(sem);
tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
@ -1481,6 +1495,7 @@ static inline void __up_write(struct rw_semaphore *sem)
DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
!rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
trace_android_vh_record_rwsem_lock_starttime(current, 0);
rwsem_clear_owner(sem);
tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
if (unlikely(tmp & RWSEM_FLAG_WAITERS))