Merge branch 'android12-5.10' into android12-5.10-lts

Merge the changes in the non-lts branch into the lts branch to keep it
up to date.  Changes in here include:

* 9b6fc2bc30 UPSTREAM: dma-buf: heaps: Fix off-by-one in CMA heap fault handler
* b9d4c135c7 Merge tag 'android12-5.10.226_r00' into android12-5.10
* 1ef7612897 BACKPORT: firmware: arm_scmi: Queue in scmi layer for mailbox implementation
* 53a61c62da BACKPORT: gso: fix udp gso fraglist segmentation after pull from frag_list
* f22586e750 ANDROID: usb: Optimization the transfer rate of accessory mode in USB3.2 mode
* 51b9e859a4 UPSTREAM: unicode: Don't special case ignorable code points
* 66aebe42e6 ANDROID: 16K: Fixup padding vm_flags bits on VMA splits
* 0e3d191eaf ANDROID: 16K: Introduce pgsize_migration_inline.h
* 4c7b897f39 BACKPORT: netem: fix return value if duplicate enqueue fails
* 7b9f6a0ecd ANDROID: GKI: Update the ABI symbol list
* 6b967696c1 ANDROID: GKI: Update symbol list for vivo
* ac8da910db ANDROID: GKI: export sys_exit tracepoint
* f3eef39b58 ANDROID: GKI: Update symbol list for vivo
* 2205d34fb2 ANDROID: GKI: Update symbol list for vivo
* 8884166229 ANDROID: GKI: add percpu_rwsem vendor hooks
* 2521fb1dd7 ANDROID: vendor_hooks: add hooks in rwsem
* 5b9bc4b198 ANDROID: delete tool added by mistake
* 0c025265d8 ANDROID: GKI: Add initialization for rwsem's oem_data and vendor_data.
* 2c00661c3f ANDROID: GKI: Add initialization for mutex oem_data.
* 8e78d8ae8a ANDROID: fix ENOMEM check of binder_proc_ext
* 587d04a070 ANDROID: binder: fix KMI issues due to frozen notification
* 69d87eed07 BACKPORT: FROMGIT: binder: frozen notification binder_features flag
* 0e10c6560f BACKPORT: FROMGIT: binder: frozen notification
* 014a9ca18f UPSTREAM: selftests/binderfs: add test for feature files
* fe8ef2d5db UPSTREAM: docs: binderfs: add section about feature files
* 433a83ab08 UPSTREAM: binderfs: add support for feature files
* d7881f1c8f ANDROID: GKI: Add symbol to symbol list for vivo.
* 284a6a930d ANDROID: vendor_hooks: add hooks to modify pageflags
* a5d073d697 ANDROID: GKI: Add pageflags for OEM
* c7d7f8476d ANDROID: GKI: Update symbol list for vivo
* 89d09e01fa ANDROID: vendor_hooks: add vendor hooks for fuse request
* 9ac177ec5c UPSTREAM: net: sched: sch_multiq: fix possible OOB write in multiq_tune()

Change-Id: Id1fceaefc8261e4c59d90b24a039ee3e3ff21fa5
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit e477d44e5f by Greg Kroah-Hartman <gregkh@google.com>, 2024-11-07 16:05:59 +00:00
35 changed files with 6287 additions and 5533 deletions


@ -72,3 +72,16 @@ that the `rm() <rm_>`_ tool can be used to delete them. Note that the
``binder-control`` device cannot be deleted since this would make the binderfs
instance unusable. The ``binder-control`` device will be deleted when the
binderfs instance is unmounted and all references to it have been dropped.
Binder features
---------------
Assuming an instance of binderfs has been mounted at ``/dev/binderfs``, the
features supported by the binder driver can be located under
``/dev/binderfs/features/``. The presence of individual files can be tested
to determine whether a particular feature is supported by the driver.
Example::
cat /dev/binderfs/features/oneway_spam_detection
1
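
A minimal userspace probe of such a feature file could look like the sketch
below (mount point and feature name follow the example above; error handling
elided):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Returns true if the mounted binderfs instance advertises the feature. */
static bool binder_has_feature(const char *name)
{
	char path[128];

	snprintf(path, sizeof(path), "/dev/binderfs/features/%s", name);
	return access(path, F_OK) == 0; /* presence alone signals support */
}

With this series applied, binder_has_feature("freeze_notification") would
report true alongside oneway_spam_detection.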

File diff suppressed because it is too large.


@ -293,8 +293,6 @@
debugfs_create_x32
debugfs_create_x64
debugfs_create_x8
debugfs_file_get
debugfs_file_put
debugfs_lookup
debugfs_remove
debugfs_rename
@ -341,6 +339,7 @@
device_for_each_child
device_get_child_node_count
device_get_match_data
device_get_named_child_node
device_get_next_child_node
device_initialize
device_init_wakeup
@ -349,11 +348,13 @@
device_match_name
device_property_present
device_property_read_string
device_property_read_string_array
device_property_read_u16_array
device_property_read_u32_array
device_register
device_remove_file
device_rename
device_set_wakeup_capable
device_show_int
device_store_int
device_unregister
@ -398,7 +399,9 @@
devm_kmemdup
devm_krealloc
devm_kstrdup
devm_led_classdev_multicolor_register_ext
devm_led_classdev_register_ext
devm_mfd_add_devices
devm_nvmem_register
devm_of_clk_add_hw_provider
devm_of_icc_get
@ -602,12 +605,14 @@
drm_atomic_state_default_release
__drm_atomic_state_free
drm_atomic_state_init
drm_bridge_add
drm_bridge_attach
drm_bridge_chain_disable
drm_bridge_chain_enable
drm_bridge_chain_mode_set
drm_bridge_chain_post_disable
drm_bridge_chain_pre_enable
drm_bridge_remove
drm_client_init
drm_client_modeset_commit_locked
drm_client_register
@ -810,6 +815,8 @@
freq_scale
fsync_bdev
fwnode_find_reference
fwnode_get_name
fwnode_get_named_child_node
fwnode_get_next_child_node
fwnode_handle_get
fwnode_handle_put
@ -874,6 +881,7 @@
gpiochip_remove
gpiochip_unlock_as_irq
gpiod_cansleep
gpiod_count
gpiod_direction_input
gpiod_direction_output
gpiod_direction_output_raw
@ -882,6 +890,8 @@
gpiod_get_raw_value_cansleep
gpiod_get_value
gpiod_get_value_cansleep
gpiod_is_active_low
gpiod_set_array_value_cansleep
gpiod_set_consumer_name
gpiod_set_debounce
gpiod_set_raw_value
@ -924,12 +934,14 @@
__hwspin_unlock
i2c_adapter_type
i2c_add_adapter
i2c_add_numbered_adapter
i2c_bus_type
i2c_del_adapter
i2c_del_driver
i2c_for_each_dev
i2c_get_adapter
i2c_get_dma_safe_msg_buf
i2c_new_ancillary_device
i2c_new_client_device
i2c_put_adapter
i2c_put_dma_safe_msg_buf
@ -937,7 +949,9 @@
i2c_smbus_read_byte_data
i2c_smbus_read_i2c_block_data
i2c_smbus_write_byte_data
__i2c_smbus_xfer
i2c_smbus_xfer
__i2c_transfer
i2c_transfer
i2c_transfer_buffer_flags
i2c_unregister_device
@ -1203,6 +1217,7 @@
kstrtobool
kstrtoint
kstrtoll
kstrtos16
kstrtos8
kstrtou16
kstrtou8
@ -1243,6 +1258,7 @@
kvmalloc_node
led_classdev_register_ext
led_classdev_unregister
led_mc_calc_color_components
led_trigger_event
led_trigger_register_simple
led_trigger_unregister_simple
@ -1424,6 +1440,7 @@
of_drm_find_bridge
of_find_compatible_node
of_find_device_by_node
of_find_i2c_adapter_by_node
of_find_i2c_device_by_node
of_find_matching_node_and_match
of_find_node_by_name
@ -1438,6 +1455,7 @@
of_get_address
of_get_child_by_name
of_get_cpu_node
of_get_i2c_adapter_by_node
of_get_named_gpio_flags
of_get_next_available_child
of_get_next_child
@ -1487,6 +1505,7 @@
of_property_read_variable_u8_array
of_prop_next_string
of_prop_next_u32
of_regulator_match
of_reserved_mem_device_init_by_idx
of_reserved_mem_device_release
of_reserved_mem_lookup
@ -1733,6 +1752,7 @@
__rcu_read_lock
__rcu_read_unlock
rdev_get_drvdata
rdev_get_regmap
reboot_mode
refcount_dec_and_lock
refcount_dec_not_one
@ -1771,6 +1791,7 @@
__regmap_init
regmap_irq_get_virq
regmap_mmio_detach_clk
regmap_multi_reg_write
regmap_read
regmap_update_bits_base
regmap_write
@ -1874,6 +1895,9 @@
rtc_time64_to_tm
rtc_tm_to_time64
rtc_update_irq
rt_mutex_lock
rt_mutex_trylock
rt_mutex_unlock
rtnl_is_locked
rtnl_lock
rtnl_trylock
@ -2164,6 +2188,10 @@
thermal_cooling_device_unregister
thermal_of_cooling_device_register
thermal_pressure
thermal_zone_device_disable
thermal_zone_device_enable
thermal_zone_device_register
thermal_zone_device_unregister
thermal_zone_device_update
thermal_zone_get_slope
thermal_zone_get_temp
@ -2173,6 +2201,8 @@
topology_set_thermal_pressure
topology_update_done
total_swapcache_pages
touchscreen_parse_properties
touchscreen_report_pos
trace_array_put
__trace_bprintk
trace_event_buffer_commit
@ -2543,3 +2573,7 @@
__xa_insert
xa_load
xa_store
# preserved by --additions-only
debugfs_file_get
debugfs_file_put


@ -1687,6 +1687,7 @@
__traceiter_android_rvh_flush_task
__traceiter_android_rvh_migrate_queued_task
__traceiter_android_rvh_new_task_stats
__traceiter_android_rvh_percpu_rwsem_wait_complete
__traceiter_android_rvh_refrigerator
__traceiter_android_rvh_replace_next_task_fair
__traceiter_android_rvh_resume_cpus
@ -1725,9 +1726,14 @@
__traceiter_android_vh_binder_trans
__traceiter_android_vh_binder_wakeup_ilocked
__traceiter_android_vh_blk_alloc_rqs
__traceiter_android_vh_clear_rwsem_reader_owned
__traceiter_android_vh_clear_rwsem_writer_owned
__traceiter_android_vh_blk_rq_ctx_init
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_do_anonymous_page
__traceiter_android_vh_do_swap_page
__traceiter_android_vh_do_wp_page
__traceiter_android_vh_dup_task_struct
__traceiter_android_vh_filemap_fault_cache_page
__traceiter_android_vh_filemap_fault_get_page
@ -1736,6 +1742,7 @@
__traceiter_android_vh_ftrace_oops_enter
__traceiter_android_vh_ftrace_oops_exit
__traceiter_android_vh_ftrace_size_check
__traceiter_android_vh_fuse_request_end
__traceiter_android_vh_iommu_setup_dma_ops
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_irqtime_account_process_tick
@ -1746,10 +1753,15 @@
__traceiter_android_vh_mmc_blk_reset
__traceiter_android_vh_mmc_gpio_cd_irqt
__traceiter_android_vh_mm_dirty_limits
__traceiter_android_vh_percpu_rwsem_down_read
__traceiter_android_vh_percpu_rwsem_up_write
__traceiter_android_vh_printk_hotplug
__traceiter_android_vh_queue_request_and_unlock
__traceiter_android_vh_record_rwsem_reader_owned
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_sdhci_get_cd
__traceiter_android_vh_sd_update_bus_speed_mode
__traceiter_android_vh_shmem_swapin_page
__traceiter_android_vh_show_max_freq
__traceiter_android_vh_show_resume_epoch_val
__traceiter_android_vh_show_suspend_epoch_val
@ -1762,6 +1774,7 @@
__traceiter_android_vh_ufs_compl_command
__traceiter_android_vh_ufs_send_command
__traceiter_android_vh_ufs_update_sdev
__traceiter_android_vh_uprobes_replace_page
__traceiter_android_vh_vmpressure
__traceiter_binder_transaction_received
__traceiter_block_bio_complete
@ -1784,6 +1797,7 @@
__traceiter_rwmmio_read
__traceiter_rwmmio_write
__traceiter_suspend_resume
__traceiter_sys_exit
__traceiter_usb_gadget_connect
__traceiter_usb_gadget_disconnect
__tracepoint_android_rvh_account_irq
@ -1800,6 +1814,7 @@
__tracepoint_android_rvh_flush_task
__tracepoint_android_rvh_migrate_queued_task
__tracepoint_android_rvh_new_task_stats
__tracepoint_android_rvh_percpu_rwsem_wait_complete
__tracepoint_android_rvh_refrigerator
__tracepoint_android_rvh_replace_next_task_fair
__tracepoint_android_rvh_resume_cpus
@ -1839,8 +1854,13 @@
__tracepoint_android_vh_binder_wakeup_ilocked
__tracepoint_android_vh_blk_alloc_rqs
__tracepoint_android_vh_blk_rq_ctx_init
__tracepoint_android_vh_clear_rwsem_reader_owned
__tracepoint_android_vh_clear_rwsem_writer_owned
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_do_anonymous_page
__tracepoint_android_vh_do_swap_page
__tracepoint_android_vh_do_wp_page
__tracepoint_android_vh_dup_task_struct
__tracepoint_android_vh_filemap_fault_cache_page
__tracepoint_android_vh_filemap_fault_get_page
@ -1849,6 +1869,7 @@
__tracepoint_android_vh_ftrace_oops_enter
__tracepoint_android_vh_ftrace_oops_exit
__tracepoint_android_vh_ftrace_size_check
__tracepoint_android_vh_fuse_request_end
__tracepoint_android_vh_iommu_setup_dma_ops
__tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_irqtime_account_process_tick
@ -1859,10 +1880,15 @@
__tracepoint_android_vh_mmc_blk_reset
__tracepoint_android_vh_mmc_gpio_cd_irqt
__tracepoint_android_vh_mm_dirty_limits
__tracepoint_android_vh_percpu_rwsem_down_read
__tracepoint_android_vh_percpu_rwsem_up_write
__tracepoint_android_vh_printk_hotplug
__tracepoint_android_vh_queue_request_and_unlock
__tracepoint_android_vh_record_rwsem_reader_owned
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_sdhci_get_cd
__tracepoint_android_vh_sd_update_bus_speed_mode
__tracepoint_android_vh_shmem_swapin_page
__tracepoint_android_vh_show_max_freq
__tracepoint_android_vh_show_resume_epoch_val
__tracepoint_android_vh_show_suspend_epoch_val
@ -1875,6 +1901,7 @@
__tracepoint_android_vh_ufs_compl_command
__tracepoint_android_vh_ufs_send_command
__tracepoint_android_vh_ufs_update_sdev
__tracepoint_android_vh_uprobes_replace_page
__tracepoint_android_vh_vmpressure
__tracepoint_binder_transaction_received
__tracepoint_block_bio_complete
@ -1900,6 +1927,7 @@
__tracepoint_rwmmio_read
__tracepoint_rwmmio_write
__tracepoint_suspend_resume
__tracepoint_sys_exit
__tracepoint_usb_gadget_connect
__tracepoint_usb_gadget_disconnect
trace_print_array_seq


@ -44,6 +44,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(sys_exit);
struct pt_regs_offset {
const char *name;
int offset;


@ -1412,6 +1412,7 @@ static void binder_free_ref(struct binder_ref *ref)
if (ref->node)
binder_free_node(ref->node);
kfree(ref->death);
kfree(ref->freeze);
kfree(ref);
}
@ -3824,6 +3825,154 @@ static void binder_transaction(struct binder_proc *proc,
}
}
static int
binder_request_freeze_notification(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_handle_cookie *handle_cookie)
{
struct binder_ref_freeze *freeze;
struct binder_ref *ref;
bool is_frozen;
freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
if (!freeze)
return -ENOMEM;
binder_proc_lock(proc);
ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
if (!ref) {
binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
proc->pid, thread->pid, handle_cookie->handle);
binder_proc_unlock(proc);
kfree(freeze);
return -EINVAL;
}
binder_node_lock(ref->node);
if (ref->freeze || !ref->node->proc) {
binder_user_error("%d:%d invalid BC_REQUEST_FREEZE_NOTIFICATION %s\n",
proc->pid, thread->pid,
ref->freeze ? "already set" : "dead node");
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
kfree(freeze);
return -EINVAL;
}
binder_inner_proc_lock(ref->node->proc);
is_frozen = ref->node->proc->is_frozen;
binder_inner_proc_unlock(ref->node->proc);
INIT_LIST_HEAD(&freeze->work.entry);
freeze->cookie = handle_cookie->cookie;
freeze->work.type = BINDER_WORK_FROZEN_BINDER;
freeze->is_frozen = is_frozen;
ref->freeze = freeze;
binder_inner_proc_lock(proc);
binder_enqueue_work_ilocked(&ref->freeze->work, &proc->todo);
binder_wakeup_proc_ilocked(proc);
binder_inner_proc_unlock(proc);
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
return 0;
}
static int
binder_clear_freeze_notification(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_handle_cookie *handle_cookie)
{
struct binder_ref_freeze *freeze;
struct binder_ref *ref;
binder_proc_lock(proc);
ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
if (!ref) {
binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
proc->pid, thread->pid, handle_cookie->handle);
binder_proc_unlock(proc);
return -EINVAL;
}
binder_node_lock(ref->node);
if (!ref->freeze) {
binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
proc->pid, thread->pid);
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
return -EINVAL;
}
freeze = ref->freeze;
binder_inner_proc_lock(proc);
if (freeze->cookie != handle_cookie->cookie) {
binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid, (u64)freeze->cookie,
(u64)handle_cookie->cookie);
binder_inner_proc_unlock(proc);
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
return -EINVAL;
}
ref->freeze = NULL;
/*
* Take the existing freeze object and overwrite its work type. There are three cases here:
* 1. No pending notification. In this case just add the work to the queue.
* 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
* should resend with the new work type.
* 3. A notification is pending to be sent. Since the work is already in the queue, nothing
* needs to be done here.
*/
freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
if (list_empty(&freeze->work.entry)) {
binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
binder_wakeup_proc_ilocked(proc);
} else if (freeze->sent) {
freeze->resend = true;
}
binder_inner_proc_unlock(proc);
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
return 0;
}
static int
binder_freeze_notification_done(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t cookie)
{
struct binder_ref_freeze *freeze = NULL;
struct binder_work *w;
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc_wrapper(proc)->delivered_freeze, entry) {
struct binder_ref_freeze *tmp_freeze =
container_of(w, struct binder_ref_freeze, work);
if (tmp_freeze->cookie == cookie) {
freeze = tmp_freeze;
break;
}
}
if (!freeze) {
binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
binder_inner_proc_unlock(proc);
return -EINVAL;
}
binder_dequeue_work_ilocked(&freeze->work);
freeze->sent = false;
if (freeze->resend) {
freeze->resend = false;
binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
binder_wakeup_proc_ilocked(proc);
}
binder_inner_proc_unlock(proc);
return 0;
}
/**
* binder_free_buf() - free the specified buffer
* @proc: binder proc that owns buffer
@ -4309,6 +4458,44 @@ static int binder_thread_write(struct binder_proc *proc,
binder_inner_proc_unlock(proc);
} break;
case BC_REQUEST_FREEZE_NOTIFICATION: {
struct binder_handle_cookie handle_cookie;
int error;
if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
return -EFAULT;
ptr += sizeof(handle_cookie);
error = binder_request_freeze_notification(proc, thread,
&handle_cookie);
if (error)
return error;
} break;
case BC_CLEAR_FREEZE_NOTIFICATION: {
struct binder_handle_cookie handle_cookie;
int error;
if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
return -EFAULT;
ptr += sizeof(handle_cookie);
error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
if (error)
return error;
} break;
case BC_FREEZE_NOTIFICATION_DONE: {
binder_uintptr_t cookie;
int error;
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(cookie);
error = binder_freeze_notification_done(proc, thread, cookie);
if (error)
return error;
} break;
default:
pr_err("%d:%d unknown command %d\n",
proc->pid, thread->pid, cmd);
@ -4711,6 +4898,45 @@ static int binder_thread_read(struct binder_proc *proc,
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
case BINDER_WORK_FROZEN_BINDER: {
struct binder_ref_freeze *freeze;
struct binder_frozen_state_info info;
memset(&info, 0, sizeof(info));
freeze = container_of(w, struct binder_ref_freeze, work);
info.is_frozen = freeze->is_frozen;
info.cookie = freeze->cookie;
freeze->sent = true;
binder_enqueue_work_ilocked(w, &proc_wrapper(proc)->delivered_freeze);
binder_inner_proc_unlock(proc);
if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &info, sizeof(info)))
return -EFAULT;
ptr += sizeof(info);
binder_stat_br(proc, thread, BR_FROZEN_BINDER);
goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
} break;
case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
struct binder_ref_freeze *freeze =
container_of(w, struct binder_ref_freeze, work);
binder_uintptr_t cookie = freeze->cookie;
binder_inner_proc_unlock(proc);
kfree(freeze);
if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (put_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
} break;
default:
binder_inner_proc_unlock(proc);
pr_err("%d:%d: bad work type %d\n",
@ -5330,6 +5556,48 @@ static bool binder_txns_pending_ilocked(struct binder_proc *proc)
return false;
}
static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
{
struct rb_node *n;
struct binder_ref *ref;
binder_inner_proc_lock(proc);
for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
struct binder_node *node;
node = rb_entry(n, struct binder_node, rb_node);
binder_inner_proc_unlock(proc);
binder_node_lock(node);
hlist_for_each_entry(ref, &node->refs, node_entry) {
/*
* Need the node lock to synchronize
* with new notification requests and the
* inner lock to synchronize with queued
* freeze notifications.
*/
binder_inner_proc_lock(ref->proc);
if (!ref->freeze) {
binder_inner_proc_unlock(ref->proc);
continue;
}
ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
if (list_empty(&ref->freeze->work.entry)) {
ref->freeze->is_frozen = is_frozen;
binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
binder_wakeup_proc_ilocked(ref->proc);
} else {
if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
ref->freeze->resend = true;
ref->freeze->is_frozen = is_frozen;
}
binder_inner_proc_unlock(ref->proc);
}
binder_node_unlock(node);
binder_inner_proc_lock(proc);
}
binder_inner_proc_unlock(proc);
}
static int binder_ioctl_freeze(struct binder_freeze_info *info,
struct binder_proc *target_proc)
{
@ -5341,6 +5609,7 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
target_proc->async_recv = false;
target_proc->is_frozen = false;
binder_inner_proc_unlock(target_proc);
binder_add_freeze_work(target_proc, false);
return 0;
}
@ -5373,6 +5642,8 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
binder_inner_proc_lock(target_proc);
target_proc->is_frozen = false;
binder_inner_proc_unlock(target_proc);
} else {
binder_add_freeze_work(target_proc, true);
}
return ret;
@ -5706,9 +5977,9 @@ static int binder_open(struct inode *nodp, struct file *filp)
current->group_leader->pid, current->pid);
eproc = kzalloc(sizeof(*eproc), GFP_KERNEL);
proc = &eproc->proc;
if (proc == NULL)
if (eproc == NULL)
return -ENOMEM;
proc = &eproc->proc;
spin_lock_init(&proc->inner_lock);
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
@ -5740,6 +6011,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
binder_stats_created(BINDER_STAT_PROC);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
INIT_LIST_HEAD(&proc_wrapper(proc)->delivered_freeze);
INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
@ -6325,7 +6597,7 @@ static const char * const binder_objstat_strings[] = {
"ref",
"death",
"transaction",
"transaction_complete"
"transaction_complete",
};
static void print_binder_stats(struct seq_file *m, const char *prefix,


@ -158,6 +158,10 @@ struct binder_work {
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
#ifndef __GENKSYMS__
BINDER_WORK_FROZEN_BINDER,
BINDER_WORK_CLEAR_FREEZE_NOTIFICATION,
#endif
} type;
};
@ -279,6 +283,14 @@ struct binder_ref_death {
binder_uintptr_t cookie;
};
struct binder_ref_freeze {
struct binder_work work;
binder_uintptr_t cookie;
bool is_frozen:1;
bool sent:1;
bool resend:1;
};
/**
* struct binder_ref_data - binder_ref counts and id
* @debug_id: unique ID for the ref
@ -311,6 +323,8 @@ struct binder_ref_data {
* @node indicates the node must be freed
* @death: pointer to death notification (ref_death) if requested
* (protected by @node->lock)
* @freeze: pointer to freeze notification (ref_freeze) if requested
* (protected by @node->lock)
*
* Structure to track references from procA to target node (on procB). This
* structure is unsafe to access without holding @proc->outer_lock.
@ -327,6 +341,7 @@ struct binder_ref {
struct binder_proc *proc;
struct binder_node *node;
struct binder_ref_death *death;
struct binder_ref_freeze *freeze;
};
/**
@ -457,6 +472,8 @@ struct binder_proc {
* @cred struct cred associated with the `struct file`
* in binder_open()
* (invariant after initialized)
* @delivered_freeze: list of delivered freeze notification
* (protected by @inner_lock)
*
* Extended binder_proc -- needed to add the "cred" field without
* changing the KMI for binder_proc.
@ -464,6 +481,7 @@ struct binder_proc {
struct binder_proc_ext {
struct binder_proc proc;
const struct cred *cred;
struct list_head delivered_freeze;
};
static inline const struct cred *binder_get_cred(struct binder_proc *proc)
@ -474,6 +492,12 @@ static inline const struct cred *binder_get_cred(struct binder_proc *proc)
return eproc->cred;
}
static inline
struct binder_proc_ext *proc_wrapper(struct binder_proc *proc)
{
return container_of(proc, struct binder_proc_ext, proc);
}
/**
* struct binder_thread - binder thread bookkeeping
* @proc: binder process for this thread


@ -58,6 +58,11 @@ enum binderfs_stats_mode {
binderfs_stats_mode_global,
};
struct binder_features {
bool oneway_spam_detection;
bool freeze_notification;
};
static const struct constant_table binderfs_param_stats[] = {
{ "global", binderfs_stats_mode_global },
{}
@ -69,6 +74,11 @@ static const struct fs_parameter_spec binderfs_fs_parameters[] = {
{}
};
static struct binder_features binder_features = {
.oneway_spam_detection = true,
.freeze_notification = true,
};
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
{
return sb->s_fs_info;
@ -581,6 +591,39 @@ static struct dentry *binderfs_create_dir(struct dentry *parent,
return dentry;
}
static int binder_features_show(struct seq_file *m, void *unused)
{
bool *feature = m->private;
seq_printf(m, "%d\n", *feature);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(binder_features);
static int init_binder_features(struct super_block *sb)
{
struct dentry *dentry, *dir;
dir = binderfs_create_dir(sb->s_root, "features");
if (IS_ERR(dir))
return PTR_ERR(dir);
dentry = binderfs_create_file(dir, "oneway_spam_detection",
&binder_features_fops,
&binder_features.oneway_spam_detection);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
dentry = binderfs_create_file(dir, "freeze_notification",
&binder_features_fops,
&binder_features.freeze_notification);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
return 0;
}
static int init_binder_logs(struct super_block *sb)
{
struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
@ -694,6 +737,10 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
name++;
}
ret = init_binder_features(sb);
if (ret)
return ret;
if (info->mount_opts.stats_mode == binderfs_stats_mode_global)
return init_binder_logs(sb);


@ -78,6 +78,7 @@
#include <trace/hooks/pci.h>
#include <trace/hooks/dmabuf.h>
#include <trace/hooks/wakeupbypass.h>
#include <trace/hooks/fuse.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
@ -493,6 +494,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_pages);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_shmem_page_flag);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmput);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_pelt_multiplier);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_queue_request_and_unlock);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_fuse_request_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_reclaim_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_failure_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_page_look_around_ref);
@ -540,6 +543,17 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_all_tag_iter);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_queue_tag_busy_iter);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_free_tags);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_sched_insert_request);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shmem_swapin_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_wp_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_swap_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_anonymous_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_uprobes_replace_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rwsem_reader_owned);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_clear_rwsem_reader_owned);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_clear_rwsem_writer_owned);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_percpu_rwsem_down_read);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_percpu_rwsem_up_write);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_percpu_rwsem_wait_complete);
/*
* For type visibility
*/


@ -167,7 +167,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct cma_heap_buffer *buffer = vma->vm_private_data;
if (vmf->pgoff > buffer->pagecount)
if (vmf->pgoff >= buffer->pagecount)
return VM_FAULT_SIGBUS;
vmf->page = buffer->pages[vmf->pgoff];
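
The one-line fix closes an off-by-one: buffer->pages[] holds pagecount
entries, so the valid fault offsets are 0 through pagecount - 1. A minimal
sketch of the bounds rule the new check enforces:

#include <stdbool.h>

/* pages[] has pagecount entries; index pagecount is one past the end. */
static bool fault_offset_valid(unsigned long pgoff, unsigned long pagecount)
{
	return pgoff < pagecount; /* the old '>' test let pgoff == pagecount through */
}

With pagecount == 4, a fault at pgoff == 4 previously passed the check and
read pages[4], one element past the array.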


@ -22,12 +22,14 @@
* @chan: Transmit/Receive mailbox channel
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
* @chan_lock: Lock that prevents multiple xfers from being queued
*/
struct scmi_mailbox {
struct mbox_client cl;
struct mbox_chan *chan;
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
struct mutex chan_lock;
};
#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
@ -138,6 +140,7 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
cinfo->transport_info = smbox;
smbox->cinfo = cinfo;
mutex_init(&smbox->chan_lock);
return 0;
}
@ -165,26 +168,33 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo,
struct scmi_mailbox *smbox = cinfo->transport_info;
int ret;
/*
* The mailbox layer has its own queue. However the mailbox queue confuses
* the per message SCMI timeouts since the clock starts when the message is
* submitted into the mailbox queue. So when multiple messages are queued up
* the clock starts on all messages instead of only the one inflight.
*/
mutex_lock(&smbox->chan_lock);
ret = mbox_send_message(smbox->chan, xfer);
/* mbox_send_message returns non-negative value on success, so reset */
if (ret > 0)
ret = 0;
if (ret < 0) {
mutex_unlock(&smbox->chan_lock);
return ret;
}
return ret;
return 0;
}
static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
/*
* NOTE: we might prefer not to need the mailbox ticker to manage the
* transfer queueing since the protocol layer queues things by itself.
* Unfortunately, we have to kick the mailbox framework after we have
* received our message.
*/
mbox_client_txdone(smbox->chan, ret);
/* Release channel */
mutex_unlock(&smbox->chan_lock);
}
static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
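
The locking shape is worth spelling out, since the mutex taken in the send
path is deliberately released only by the completion path. A sketch, not
driver code (do_submit() is a hypothetical stand-in for mbox_send_message()):

#include <linux/errno.h>
#include <linux/mutex.h>

struct serialized_chan {
	struct mutex lock; /* held from send until txdone */
};

static int do_submit(struct serialized_chan *c); /* hypothetical */

static int chan_send(struct serialized_chan *c)
{
	mutex_lock(&c->lock);           /* queue here, not in the mailbox */
	if (do_submit(c) < 0) {
		mutex_unlock(&c->lock); /* error path: nothing inflight */
		return -EIO;
	}
	return 0;                       /* success: lock stays held */
}

static void chan_txdone(struct serialized_chan *c)
{
	mutex_unlock(&c->lock);         /* completion releases the channel */
}

At most one message is therefore ever inside the mailbox queue, so the SCMI
per-message timeout clock only runs for the transfer that is actually
inflight.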


@ -171,7 +171,7 @@ static struct usb_ss_ep_comp_descriptor acc_superspeedplus_comp_desc = {
/* the following 2 values can be tweaked if necessary */
.bMaxBurst = 6,
/* .bmAttributes = 0, */
.bmAttributes = 16,
};
static struct usb_endpoint_descriptor acc_superspeed_in_desc = {
@ -196,7 +196,7 @@ static struct usb_ss_ep_comp_descriptor acc_superspeed_comp_desc = {
/* the following 2 values can be tweaked if necessary */
.bMaxBurst = 6,
/* .bmAttributes = 0, */
.bmAttributes = 16,
};
static struct usb_endpoint_descriptor acc_highspeed_in_desc = {


@ -23,6 +23,8 @@
#include <linux/splice.h>
#include <linux/sched.h>
#include <trace/hooks/fuse.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
@ -234,6 +236,7 @@ __releases(fiq->lock)
fuse_len_args(req->args->in_numargs,
(struct fuse_arg *) req->args->in_args);
list_add_tail(&req->list, &fiq->pending);
trace_android_vh_queue_request_and_unlock(&fiq->waitq, sync);
fiq->ops->wake_pending_and_unlock(fiq, sync);
}
@ -330,6 +333,7 @@ void fuse_request_end(struct fuse_req *req)
} else {
/* Wake up waiter sleeping in request_wait_answer() */
wake_up(&req->waitq);
trace_android_vh_fuse_request_end(current);
}
if (test_bit(FR_ASYNC, &req->flags))


@ -2230,75 +2230,6 @@ static void nfdicf_init(void)
file_fail(fold_name);
}
static void ignore_init(void)
{
FILE *file;
unsigned int unichar;
unsigned int first;
unsigned int last;
unsigned int *um;
int count;
int ret;
if (verbose > 0)
printf("Parsing %s\n", prop_name);
file = fopen(prop_name, "r");
if (!file)
open_fail(prop_name, errno);
assert(file);
count = 0;
while (fgets(line, LINESIZE, file)) {
ret = sscanf(line, "%X..%X ; %s # ", &first, &last, buf0);
if (ret == 3) {
if (strcmp(buf0, "Default_Ignorable_Code_Point"))
continue;
if (!utf32valid(first) || !utf32valid(last))
line_fail(prop_name, line);
for (unichar = first; unichar <= last; unichar++) {
free(unicode_data[unichar].utf32nfdi);
um = malloc(sizeof(unsigned int));
*um = 0;
unicode_data[unichar].utf32nfdi = um;
free(unicode_data[unichar].utf32nfdicf);
um = malloc(sizeof(unsigned int));
*um = 0;
unicode_data[unichar].utf32nfdicf = um;
count++;
}
if (verbose > 1)
printf(" %X..%X Default_Ignorable_Code_Point\n",
first, last);
continue;
}
ret = sscanf(line, "%X ; %s # ", &unichar, buf0);
if (ret == 2) {
if (strcmp(buf0, "Default_Ignorable_Code_Point"))
continue;
if (!utf32valid(unichar))
line_fail(prop_name, line);
free(unicode_data[unichar].utf32nfdi);
um = malloc(sizeof(unsigned int));
*um = 0;
unicode_data[unichar].utf32nfdi = um;
free(unicode_data[unichar].utf32nfdicf);
um = malloc(sizeof(unsigned int));
*um = 0;
unicode_data[unichar].utf32nfdicf = um;
if (verbose > 1)
printf(" %X Default_Ignorable_Code_Point\n",
unichar);
count++;
continue;
}
}
fclose(file);
if (verbose > 0)
printf("Found %d entries\n", count);
if (count == 0)
file_fail(prop_name);
}
static void corrections_init(void)
{
FILE *file;
@ -3396,7 +3327,6 @@ int main(int argc, char *argv[])
ccc_init();
nfdi_init();
nfdicf_init();
ignore_init();
corrections_init();
hangul_decompose();
nfdi_decompose();

File diff suppressed because it is too large.


@ -28,6 +28,7 @@
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/pgsize_migration.h>
int sysctl_unprivileged_userfaultfd __read_mostly;
@ -1511,7 +1512,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
* the current one has not been updated yet.
*/
vm_write_begin(vma);
WRITE_ONCE(vma->vm_flags, new_flags);
WRITE_ONCE(vma->vm_flags, vma_pad_fixup_flags(vma, new_flags));
rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, ctx);
vm_write_end(vma);
@ -1694,7 +1695,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
* the current one has not been updated yet.
*/
vm_write_begin(vma);
WRITE_ONCE(vma->vm_flags, new_flags);
WRITE_ONCE(vma->vm_flags, vma_pad_fixup_flags(vma, new_flags));
rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL);
vm_write_end(vma);


@ -141,6 +141,12 @@ enum pageflags {
#endif
#ifdef CONFIG_KASAN_HW_TAGS
PG_skip_kasan_poison,
#endif
#if defined(CONFIG_64BIT) && !defined(CONFIG_NUMA_BALANCING)
PG_oem_reserved_1,
PG_oem_reserved_2,
PG_oem_reserved_3,
PG_oem_reserved_4,
#endif
__NR_PAGEFLAGS,


@ -13,35 +13,9 @@
* page size in Android.
*/
#include <linux/mm.h>
#include <linux/pgsize_migration_inline.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>
/*
* vm_flags representation of VMA padding pages.
*
* This allows the kernel to identify the portion of an ELF LOAD segment VMA
* that is padding.
*
* 4 high bits of vm_flags [63,60] are used to represent ELF segment padding
* up to 60kB, which is sufficient for ELFs of both 16kB and 64kB segment
* alignment (p_align).
*
* The representation is illustrated below.
*
* 63 62 61 60
* _________ _________ _________ _________
* | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
* | of 4kB | of 4kB | of 4kB | of 4kB |
* | chunks | chunks | chunks | chunks |
* |_________|_________|_________|_________|
*/
#define VM_PAD_WIDTH 4
#define VM_PAD_SHIFT (BITS_PER_LONG - VM_PAD_WIDTH)
#define VM_TOTAL_PAD_PAGES ((1ULL << VM_PAD_WIDTH) - 1)
#define VM_PAD_MASK (VM_TOTAL_PAD_PAGES << VM_PAD_SHIFT)
#define VMA_PAD_START(vma) (vma->vm_end - (vma_pad_pages(vma) << PAGE_SHIFT))
#include <linux/mm.h>
#if PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT)
extern void vma_set_pad_pages(struct vm_area_struct *vma,
@ -63,9 +37,6 @@ extern void show_map_pad_vma(struct vm_area_struct *vma,
extern void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
unsigned long addr, int new_below);
extern unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
unsigned long newflags);
extern bool is_mergable_pad_vma(struct vm_area_struct *vma,
unsigned long vm_flags);
@ -107,12 +78,6 @@ static inline void split_pad_vma(struct vm_area_struct *vma, struct vm_area_stru
{
}
static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
unsigned long newflags)
{
return newflags;
}
static inline bool is_mergable_pad_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{


@ -0,0 +1,67 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_SIZE_MIGRATION_INLINE_H
#define _LINUX_PAGE_SIZE_MIGRATION_INLINE_H
/*
* Page Size Migration
*
* Copyright (c) 2024, Google LLC.
* Author: Kalesh Singh <kaleshsingh@google.com>
*
* This file contains inline APIs for mitigations to ensure
* app compatibility during the transition from 4kB to 16kB
* page size in Android.
*/
#include <linux/mm_types.h>
#include <linux/sizes.h>
#include <asm/page.h>
/*
* vm_flags representation of VMA padding pages.
*
* This allows the kernel to identify the portion of an ELF LOAD segment VMA
* that is padding.
*
* 4 high bits of vm_flags [63,60] are used to represent ELF segment padding
* up to 60kB, which is sufficient for ELFs of both 16kB and 64kB segment
* alignment (p_align).
*
* The representation is illustrated below.
*
* 63 62 61 60
* _________ _________ _________ _________
* | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
* | of 4kB | of 4kB | of 4kB | of 4kB |
* | chunks | chunks | chunks | chunks |
* |_________|_________|_________|_________|
*/
#define VM_PAD_WIDTH 4
#define VM_PAD_SHIFT (BITS_PER_LONG - VM_PAD_WIDTH)
#define VM_TOTAL_PAD_PAGES ((1ULL << VM_PAD_WIDTH) - 1)
#define VM_PAD_MASK (VM_TOTAL_PAD_PAGES << VM_PAD_SHIFT)
#define VMA_PAD_START(vma) (vma->vm_end - (vma_pad_pages(vma) << PAGE_SHIFT))
#if PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT)
/*
* Sets the correct padding bits / flags for a VMA split.
*/
static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
unsigned long newflags)
{
if (newflags & VM_PAD_MASK)
return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK);
else
return newflags;
}
#else /* PAGE_SIZE != SZ_4K || !defined(CONFIG_64BIT) */
static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
unsigned long newflags)
{
return newflags;
}
#endif /* PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT) */
#endif /* _LINUX_PAGE_SIZE_MIGRATION_INLINE_H */
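
A worked example of the encoding, assuming 4kB pages on 64-bit: 12kB of ELF
segment padding is three 4kB chunks, stored in the top four bits of vm_flags
(names reuse the macros above):

/* Sketch: encode three 4kB pad pages (12kB of padding) into vm_flags. */
static unsigned long encode_pad_example(unsigned long flags)
{
	return (flags & ~VM_PAD_MASK) | (3UL << VM_PAD_SHIFT);
}

(flags & VM_PAD_MASK) >> VM_PAD_SHIFT then reads back 3, and on a VMA split
vma_pad_fixup_flags() carries those four bits from the old VMA onto whatever
new flag word the split computed, which is exactly the fixup the userfaultfd
and madvise hunks apply.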


@ -83,8 +83,17 @@
#ifdef CONFIG_64BIT
#define IF_HAVE_PG_ARCH_2(flag,string) ,{1UL << flag, string}
/* With CONFIG_NUMA_BALANCING LAST_CPUPID_WIDTH consumes OEM-used page flags */
#ifdef CONFIG_NUMA_BALANCING
#define IF_HAVE_PG_OEM_RESERVED(flag,string)
#else
#define IF_HAVE_PG_OEM_RESERVED(flag,string) ,{1UL << flag, string}
#endif
#else
#define IF_HAVE_PG_ARCH_2(flag,string)
#define IF_HAVE_PG_OEM_RESERVED(flag,string)
#endif
#ifdef CONFIG_KASAN_HW_TAGS
@ -121,6 +130,10 @@ IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
IF_HAVE_PG_IDLE(PG_young, "young" ) \
IF_HAVE_PG_IDLE(PG_idle, "idle" ) \
IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) \
IF_HAVE_PG_OEM_RESERVED(PG_oem_reserved_1,"oem_reserved_1") \
IF_HAVE_PG_OEM_RESERVED(PG_oem_reserved_2,"oem_reserved_2") \
IF_HAVE_PG_OEM_RESERVED(PG_oem_reserved_3,"oem_reserved_3") \
IF_HAVE_PG_OEM_RESERVED(PG_oem_reserved_4,"oem_reserved_4") \
IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison")
#define show_page_flags(flags) \


@ -103,6 +103,15 @@ struct percpu_rw_semaphore;
DECLARE_HOOK(android_vh_percpu_rwsem_wq_add,
TP_PROTO(struct percpu_rw_semaphore *sem, bool reader),
TP_ARGS(sem, reader));
DECLARE_HOOK(android_vh_percpu_rwsem_down_read,
TP_PROTO(struct percpu_rw_semaphore *sem, bool try, bool *ret),
TP_ARGS(sem, try, ret));
DECLARE_HOOK(android_vh_percpu_rwsem_up_write,
TP_PROTO(struct percpu_rw_semaphore *sem),
TP_ARGS(sem));
DECLARE_RESTRICTED_HOOK(android_rvh_percpu_rwsem_wait_complete,
TP_PROTO(struct percpu_rw_semaphore *sem, long state, bool *complete),
TP_ARGS(sem, state, complete), 1);
DECLARE_HOOK(android_vh_exit_check,
TP_PROTO(struct task_struct *tsk, long code, int group_dead),


@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM fuse
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_FUSE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_FUSE_H
#include <trace/hooks/vendor_hooks.h>
/*
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
struct wait_queue_head;
DECLARE_HOOK(android_vh_queue_request_and_unlock,
TP_PROTO(struct wait_queue_head *wq_head, bool sync),
TP_ARGS(wq_head, sync));
DECLARE_HOOK(android_vh_fuse_request_end,
TP_PROTO(struct task_struct *self),
TP_ARGS(self));
#endif /* _TRACE_HOOK_FUSE_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
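
As with the other vendor hooks, a module attaches to these with the
register_trace_* helper that DECLARE_HOOK generates; a sketch, with a
placeholder handler body and a hypothetical module init:

static void probe_fuse_request_end(void *data, struct task_struct *self)
{
	/* e.g. account for, or boost, the task that completed a request */
}

static int __init my_vendor_init(void)
{
	return register_trace_android_vh_fuse_request_end(
			probe_fuse_request_end, NULL);
}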


@ -370,6 +370,22 @@ DECLARE_HOOK(android_vh_do_swap_page_spf,
DECLARE_HOOK(android_vh_tune_fault_around_bytes,
TP_PROTO(unsigned long *fault_around_bytes),
TP_ARGS(fault_around_bytes));
DECLARE_HOOK(android_vh_do_anonymous_page,
TP_PROTO(struct vm_area_struct *vma, struct page *page),
TP_ARGS(vma, page));
DECLARE_HOOK(android_vh_do_swap_page,
TP_PROTO(struct page *page, pte_t *pte, struct vm_fault *vmf,
swp_entry_t entry),
TP_ARGS(page, pte, vmf, entry));
DECLARE_HOOK(android_vh_do_wp_page,
TP_PROTO(struct page *page),
TP_ARGS(page));
DECLARE_HOOK(android_vh_uprobes_replace_page,
TP_PROTO(struct page *new_page, struct page *old_page),
TP_ARGS(new_page, old_page));
DECLARE_HOOK(android_vh_shmem_swapin_page,
TP_PROTO(struct page *page),
TP_ARGS(page));
#endif /* _TRACE_HOOK_MM_H */
/* This part must be outside protection */


@ -49,6 +49,16 @@ DECLARE_HOOK(android_vh_rwsem_up_read_end,
DECLARE_HOOK(android_vh_rwsem_mark_wake_readers,
TP_PROTO(struct rw_semaphore *sem, struct rwsem_waiter *waiter),
TP_ARGS(sem, waiter));
DECLARE_HOOK(android_vh_record_rwsem_reader_owned,
TP_PROTO(struct rw_semaphore *sem,
struct list_head *wlist),
TP_ARGS(sem, wlist));
DECLARE_HOOK(android_vh_clear_rwsem_reader_owned,
TP_PROTO(struct rw_semaphore *sem),
TP_ARGS(sem));
DECLARE_HOOK(android_vh_clear_rwsem_writer_owned,
TP_PROTO(struct rw_semaphore *sem),
TP_ARGS(sem));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_RWSEM_H */


@ -284,6 +284,12 @@ struct binder_frozen_status_info {
__u32 async_recv;
};
struct binder_frozen_state_info {
binder_uintptr_t cookie;
__u32 is_frozen;
__u32 reserved;
};
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@ -492,6 +498,17 @@ enum binder_driver_return_protocol {
* asynchronous transaction makes the allocated async buffer size exceed
* detection threshold. No parameters.
*/
BR_FROZEN_BINDER = _IOR('r', 21, struct binder_frozen_state_info),
/*
* The cookie and a boolean (is_frozen) that indicates whether the process
* transitioned into a frozen or an unfrozen state.
*/
BR_CLEAR_FREEZE_NOTIFICATION_DONE = _IOR('r', 22, binder_uintptr_t),
/*
* void *: cookie
*/
};
enum binder_driver_command_protocol {
@ -575,6 +592,25 @@ enum binder_driver_command_protocol {
/*
* binder_transaction_data_sg: the sent command.
*/
BC_REQUEST_FREEZE_NOTIFICATION =
_IOW('c', 19, struct binder_handle_cookie),
/*
* int: handle
* void *: cookie
*/
BC_CLEAR_FREEZE_NOTIFICATION = _IOW('c', 20,
struct binder_handle_cookie),
/*
* int: handle
* void *: cookie
*/
BC_FREEZE_NOTIFICATION_DONE = _IOW('c', 21, binder_uintptr_t),
/*
* void *: cookie
*/
};
#endif /* _UAPI_LINUX_BINDER_H */
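
For illustration, a freeze notification could be requested from userspace as
in the sketch below (binder_fd, handle, and my_cookie are assumed to exist;
error handling elided):

struct {
	uint32_t cmd;
	struct binder_handle_cookie payload;
} __attribute__((packed)) req = {
	.cmd = BC_REQUEST_FREEZE_NOTIFICATION,
	.payload = { .handle = handle, .cookie = my_cookie },
};
struct binder_write_read bwr = {
	.write_size = sizeof(req),
	.write_buffer = (binder_uintptr_t)&req,
};

ioctl(binder_fd, BINDER_WRITE_READ, &bwr);

The driver then delivers BR_FROZEN_BINDER with a struct
binder_frozen_state_info whenever the target process freezes or unfreezes,
and each delivery must be acknowledged with BC_FREEZE_NOTIFICATION_DONE
carrying the same cookie.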


@ -30,6 +30,9 @@
#include <linux/uprobes.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
#define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE
@ -185,6 +188,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
get_page(new_page);
page_add_new_anon_rmap(new_page, vma, addr, false);
lru_cache_add_inactive_or_unevictable(new_page, vma);
trace_android_vh_uprobes_replace_page(new_page, old_page);
} else
/* no new page, just dec_mm_counter for old_page */
dec_mm_counter(mm, MM_ANONPAGES);


@ -47,6 +47,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
osq_lock_init(&lock->osq);
#endif
android_init_oem_data(lock, 1);
debug_mutex_init(lock, name, key);
}


@ -181,9 +181,15 @@ static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
bool ret = false;
if (__percpu_down_read_trylock(sem))
return true;
trace_android_vh_percpu_rwsem_down_read(sem, try, &ret);
if (ret)
return true;
if (try)
return false;
@ -230,6 +236,8 @@ static bool readers_active_check(struct percpu_rw_semaphore *sem)
void percpu_down_write(struct percpu_rw_semaphore *sem)
{
bool complete = false;
might_sleep();
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
@ -252,7 +260,9 @@ void percpu_down_write(struct percpu_rw_semaphore *sem)
*/
/* Wait for all active readers to complete. */
rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
trace_android_rvh_percpu_rwsem_wait_complete(sem, TASK_UNINTERRUPTIBLE, &complete);
if (!complete)
rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies);
}
EXPORT_SYMBOL_GPL(percpu_down_write);
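
The restricted hook lets a vendor module take over the reader wait: if the
handler sets *complete to true, percpu_down_write() skips its own
rcuwait_wait_event() call. A sketch of such a handler
(vendor_wait_for_readers() is hypothetical):

static void probe_pcpu_rwsem_wait(void *data,
				  struct percpu_rw_semaphore *sem,
				  long state, bool *complete)
{
	/* wait with vendor-specific policy, then report completion */
	*complete = vendor_wait_for_readers(sem, state);
}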
@ -261,6 +271,8 @@ void percpu_up_write(struct percpu_rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, _RET_IP_);
trace_android_vh_percpu_rwsem_up_write(sem);
/*
* Signal the writer is done, no fast path yet.
*


@ -182,6 +182,7 @@ static inline void rwsem_set_owner(struct rw_semaphore *sem)
static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
atomic_long_set(&sem->owner, 0);
trace_android_vh_clear_rwsem_writer_owned(sem);
}
/*
@ -245,6 +246,7 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
unsigned long val = atomic_long_read(&sem->owner);
trace_android_vh_clear_rwsem_reader_owned(sem);
while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
if (atomic_long_try_cmpxchg(&sem->owner, &val,
val & RWSEM_OWNER_FLAGS_MASK))
@ -254,6 +256,7 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
trace_android_vh_clear_rwsem_reader_owned(sem);
}
#endif
@ -348,6 +351,8 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
osq_lock_init(&sem->osq);
#endif
android_init_vendor_data(sem, 1);
android_init_oem_data(sem, 1);
trace_android_vh_rwsem_init(sem);
}
EXPORT_SYMBOL(__init_rwsem);
@ -526,6 +531,7 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
if (adjustment)
atomic_long_add(adjustment, &sem->count);
trace_android_vh_record_rwsem_reader_owned(sem, &wlist);
/* 2nd pass */
list_for_each_entry_safe(waiter, tmp, &wlist, list) {


@ -172,7 +172,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
* vm_flags is protected by the mmap_lock held in write mode.
*/
vm_write_begin(vma);
WRITE_ONCE(vma->vm_flags, new_flags);
WRITE_ONCE(vma->vm_flags, vma_pad_fixup_flags(vma, new_flags));
vm_write_end(vma);
out_convert_errno:


@ -78,6 +78,9 @@
#include <trace/events/kmem.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
@ -3444,6 +3447,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
* Take out anonymous pages first, anonymous shared vmas are
* not dirty accountable.
*/
trace_android_vh_do_wp_page(vmf->page);
if (PageAnon(vmf->page)) {
struct page *page = vmf->page;
@ -3815,6 +3819,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
pte = mk_pte(page, vmf->vma_page_prot);
trace_android_vh_do_swap_page(page, &pte, vmf, entry);
if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
pte = maybe_mkwrite(pte_mkdirty(pte), vmf->vma_flags);
vmf->flags &= ~FAULT_FLAG_WRITE;
@ -3971,6 +3976,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
*/
__SetPageUptodate(page);
trace_android_vh_do_anonymous_page(vma, page);
entry = mk_pte(page, vmf->vma_page_prot);
entry = pte_sw_mkyoung(entry);
if (vmf->vma_flags & VM_WRITE)


@ -126,7 +126,7 @@ unsigned long vma_pad_pages(struct vm_area_struct *vma)
if (!is_pgsize_migration_enabled())
return 0;
return vma->vm_flags >> VM_PAD_SHIFT;
return (vma->vm_flags & VM_PAD_MASK) >> VM_PAD_SHIFT;
}
static __always_inline bool str_has_suffix(const char *str, const char *suffix)
@ -397,7 +397,7 @@ void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
nr_vma2_pages = vma_pages(second);
if (nr_vma2_pages >= nr_pad_pages) { /* Case 1 & 3 */
first->vm_flags &= ~VM_PAD_MASK;
vma_set_pad_pages(first, 0);
vma_set_pad_pages(second, nr_pad_pages);
} else { /* Case 2 */
vma_set_pad_pages(first, nr_pad_pages - nr_vma2_pages);
@ -405,18 +405,6 @@ void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
}
}
/*
* Sets the correct padding bits / flags for a VMA split.
*/
unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
unsigned long newflags)
{
if (newflags & VM_PAD_MASK)
return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK);
else
return newflags;
}
/*
* Merging of padding VMAs is uncommon, as padding is only allowed
* from the linker context.


@ -1742,6 +1742,7 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
/* We have to do this with page locked to prevent races */
lock_page(page);
trace_android_vh_shmem_swapin_page(page);
if (!PageSwapCache(page) || page_private(page) != swap.val ||
!shmem_confirm_swap(mapping, index, swap)) {
error = -EEXIST;


@ -7,6 +7,7 @@
*/
#include <linux/skbuff.h>
#include <net/ip6_checksum.h>
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>
@ -269,8 +270,26 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
__sum16 check;
__be16 newlen;
if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
return __udp_gso_segment_list(gso_skb, features, is_ipv6);
if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) {
/* Detect modified geometry and pass those to skb_segment. */
if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size)
return __udp_gso_segment_list(gso_skb, features, is_ipv6);
/* Setup csum, as fraglist skips this in udp4_gro_receive. */
gso_skb->csum_start = skb_transport_header(gso_skb) - gso_skb->head;
gso_skb->csum_offset = offsetof(struct udphdr, check);
gso_skb->ip_summed = CHECKSUM_PARTIAL;
uh = udp_hdr(gso_skb);
if (is_ipv6)
uh->check = ~udp_v6_check(gso_skb->len,
&ipv6_hdr(gso_skb)->saddr,
&ipv6_hdr(gso_skb)->daddr, 0);
else
uh->check = ~udp_v4_check(gso_skb->len,
ip_hdr(gso_skb)->saddr,
ip_hdr(gso_skb)->daddr, 0);
}
mss = skb_shinfo(gso_skb)->gso_size;
if (gso_skb->len <= sizeof(*uh) + mss)
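
The checksum setup above follows the usual CHECKSUM_PARTIAL contract: the
UDP check field is seeded with the complement of the pseudo-header sum
(addresses, length, protocol, zero payload term), and whoever finishes the
checksum later adds the payload sum starting at csum_start and folds it in.
A sketch equivalent in spirit to the IPv4 branch above (csum_tcpudp_magic()
comes from the arch checksum helpers):

static __sum16 pseudo_seed_v4(__be32 saddr, __be32 daddr, __u32 len)
{
	/* same value as ~udp_v4_check(len, saddr, daddr, 0) */
	return ~csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, 0);
}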


@ -62,6 +62,10 @@ static int __do_binderfs_test(struct __test_metadata *_metadata)
struct binder_version version = { 0 };
char binderfs_mntpt[] = P_tmpdir "/binderfs_XXXXXX",
device_path[sizeof(P_tmpdir "/binderfs_XXXXXX/") + BINDERFS_MAX_NAME];
static const char * const binder_features[] = {
"oneway_spam_detection",
"freeze_notification",
};
change_mountns(_metadata);
@ -150,6 +154,20 @@ static int __do_binderfs_test(struct __test_metadata *_metadata)
}
/* success: binder-control device removal failed as expected */
for (int i = 0; i < ARRAY_SIZE(binder_features); i++) {
snprintf(device_path, sizeof(device_path), "%s/features/%s",
binderfs_mntpt, binder_features[i]);
fd = open(device_path, O_CLOEXEC | O_RDONLY);
EXPECT_GE(fd, 0) {
TH_LOG("%s - Failed to open binder feature: %s",
strerror(errno), binder_features[i]);
goto umount;
}
close(fd);
}
/* success: binder feature files found */
result = 0;
umount: