ANDROID: vendor_hooks: Allow multiple attachments to restricted hooks

Allow up to two attachments to restricted vendor hooks to enable OEM
and vendor coexistence.

Priorities are not exposed to callers on purpose. Life's too short to
re-order the callback array with concurrent readers.

Bug: 183720636
Signed-off-by: Quentin Perret <qperret@google.com>
Change-Id: I5c7aca7f69e581b4197388478d47e0da6d2893e6

diff --git a/include/trace/hooks/vendor_hooks.h b/include/trace/hooks/vendor_hooks.h

@@ -13,6 +13,8 @@
 #define DECLARE_HOOK DECLARE_TRACE
 
+int android_rvh_probe_register(struct tracepoint *tp, void *probe, void *data);
+
 #ifdef TRACE_HEADER_MULTI_READ
 #define DEFINE_HOOK_FN(_name, _reg, _unreg, proto, args)	\
@@ -38,9 +40,11 @@
 								\
 		it_func_ptr = (&__tracepoint_##_name)->funcs;	\
 		it_func = (it_func_ptr)->func;			\
-		__data = (it_func_ptr)->data;			\
-		((void(*)(void *, proto))(it_func))(__data, args); \
-		WARN_ON(((++it_func_ptr)->func));		\
+		do {						\
+			__data = (it_func_ptr)->data;		\
+			((void(*)(void *, proto))(it_func))(__data, args); \
+			it_func = READ_ONCE((++it_func_ptr)->func); \
+		} while (it_func);				\
 		return 0;					\
 	}							\
 	DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
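
Where the old expansion invoked exactly one probe and WARN()ed if a second was ever present, the loop now walks the funcs array until it reaches the NULL terminator, pairing each probe with its own data. Below is a minimal user-space sketch of the pattern the macro expands to, not kernel code; the simplified struct, the int argument and the probe_a/probe_b names are made up for illustration:

/* Standalone illustration; compiles with any C compiler. */
#include <stdio.h>

struct tracepoint_func {
	void *func;
	void *data;
};

static void probe_a(void *data, int arg)
{
	printf("probe_a(%s, %d)\n", (const char *)data, arg);
}

static void probe_b(void *data, int arg)
{
	printf("probe_b(%s, %d)\n", (const char *)data, arg);
}

int main(void)
{
	/* Two probes plus the NULL terminator, as rvh_zalloc_funcs() sizes it. */
	struct tracepoint_func funcs[] = {
		{ .func = (void *)probe_a, .data = "oem"    },
		{ .func = (void *)probe_b, .data = "vendor" },
		{ .func = NULL },
	};
	struct tracepoint_func *it_func_ptr = funcs;
	void *it_func = it_func_ptr->func; /* the real macro checks funcs first */
	void *__data;

	do {
		__data = it_func_ptr->data;
		((void (*)(void *, int))it_func)(__data, 42);
		it_func = (++it_func_ptr)->func;
	} while (it_func);

	return 0;
}

In the real macro the next .func is fetched with READ_ONCE(), so a writer publishing a second probe with WRITE_ONCE() in rvh_func_add() can race with in-flight readers without tearing.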
@@ -95,11 +99,8 @@
 	static inline int					\
 	register_trace_##name(void (*probe)(data_proto), void *data) \
 	{							\
-		/* only allow a single attachment */		\
-		if (trace_##name##_enabled())			\
-			return -EBUSY;				\
-		return tracepoint_probe_register(&__tracepoint_##name, \
-						 (void *)probe, data); \
+		return android_rvh_probe_register(&__tracepoint_##name, \
+						   (void *)probe, data); \
 	}							\
 	/* vendor hooks cannot be unregistered */		\

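Since register_trace_##name() now routes into android_rvh_probe_register(), the caller-visible contract becomes: up to ANDROID_RVH_NR_PROBES_MAX attachments succeed, further ones fail with -EBUSY, and nothing can ever be unregistered. A hedged sketch of that contract from a module's point of view, assuming a restricted hook declared elsewhere as DECLARE_RESTRICTED_HOOK(android_rvh_example, TP_PROTO(int x), TP_ARGS(x), 1); the hook, the probes and the module are hypothetical:

#include <linux/module.h>
#include <linux/errno.h>
#include <trace/hooks/example.h>	/* hypothetical header declaring the hook */

static void oem_probe(void *data, int x)    { /* OEM-side handling of x */ }
static void vendor_probe(void *data, int x) { /* vendor-side handling of x */ }
static void third_probe(void *data, int x)  { }

static int __init example_init(void)
{
	int ret;

	/* The first two attachments succeed... */
	ret = register_trace_android_rvh_example(oem_probe, NULL);
	if (ret)
		return ret;
	ret = register_trace_android_rvh_example(vendor_probe, NULL);
	if (ret)
		return ret;

	/* ...a third exceeds ANDROID_RVH_NR_PROBES_MAX and is refused. */
	ret = register_trace_android_rvh_example(third_probe, NULL);
	WARN_ON(ret != -EBUSY);

	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");
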
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c

@@ -644,3 +644,82 @@ void syscall_unregfunc(void)
 	}
 }
 #endif
+
+#ifdef CONFIG_ANDROID_VENDOR_HOOKS
+
+static void *rvh_zalloc_funcs(int count)
+{
+	return kzalloc(sizeof(struct tracepoint_func) * count, GFP_KERNEL);
+}
+
+#define ANDROID_RVH_NR_PROBES_MAX	2
+
+static int rvh_func_add(struct tracepoint *tp, struct tracepoint_func *func)
+{
+	int i;
+
+	if (!static_key_enabled(&tp->key)) {
+		/* '+ 1' for the last NULL element */
+		tp->funcs = rvh_zalloc_funcs(ANDROID_RVH_NR_PROBES_MAX + 1);
+		if (!tp->funcs)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < ANDROID_RVH_NR_PROBES_MAX; i++) {
+		if (!tp->funcs[i].func) {
+			if (!static_key_enabled(&tp->key))
+				tp->funcs[i].data = func->data;
+			WRITE_ONCE(tp->funcs[i].func, func->func);
+			return 0;
+		}
+	}
+
+	return -EBUSY;
+}
+
+static int android_rvh_add_func(struct tracepoint *tp, struct tracepoint_func *func)
+{
+	int ret;
+
+	if (tp->regfunc && !static_key_enabled(&tp->key)) {
+		ret = tp->regfunc();
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = rvh_func_add(tp, func);
+	if (ret)
+		return ret;
+	tracepoint_update_call(tp, tp->funcs, false);
+	static_key_enable(&tp->key);
+
+	return 0;
+}
+
+int android_rvh_probe_register(struct tracepoint *tp, void *probe, void *data)
+{
+	struct tracepoint_func tp_func;
+	int ret;
+
+	/*
+	 * Once the static key has been flipped, the array may be read
+	 * concurrently. Although __traceiter_*() always checks .func first,
+	 * it doesn't enforce read->read dependencies, and we can't strongly
+	 * guarantee it will see the correct .data for the second element
+	 * without adding smp_load_acquire() in the fast path. But this is a
+	 * corner case which is unlikely to be needed by anybody in practice,
+	 * so let's just forbid it and keep the fast path clean.
+	 */
+	if (WARN_ON(static_key_enabled(&tp->key) && data))
+		return -EINVAL;
+
+	mutex_lock(&tracepoints_mutex);
+	tp_func.func = probe;
+	tp_func.data = data;
+	ret = android_rvh_add_func(tp, &tp_func);
+	mutex_unlock(&tracepoints_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(android_rvh_probe_register);
+#endif
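
One consequence of the WARN_ON() above is worth spelling out: probe data is only accepted while the hook's static key is still off, i.e. for the attachment that first enables the hook. A self-contained sketch of that corner case, reusing the hypothetical android_rvh_example hook from the earlier example:

#include <linux/module.h>
#include <linux/errno.h>
#include <trace/hooks/example.h>	/* hypothetical, as above */

static long cookie = 42;

static void first_probe(void *data, int x)  { /* may use data == &cookie */ }
static void second_probe(void *data, int x) { /* must cope with data == NULL */ }

static int __init late_attach_init(void)
{
	int ret;

	/* Private data is accepted while the static key is still off. */
	ret = register_trace_android_rvh_example(first_probe, &cookie);
	if (ret)
		return ret;

	/* The key is on now, so a second probe cannot carry data... */
	ret = register_trace_android_rvh_example(second_probe, &cookie);
	if (ret != -EINVAL)
		pr_warn("expected -EINVAL, got %d\n", ret);

	/* ...but a data-less attachment still fits in the second slot. */
	return register_trace_android_rvh_example(second_probe, NULL);
}
module_init(late_attach_init);
MODULE_LICENSE("GPL");

Writers remain serialized by tracepoints_mutex; the data restriction exists only because readers never take that lock.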