Merge branch 'android12-5.10' into branch 'android12-5.10-lts'

Catch up with some commits applied to the android12-5.10 branch that add
new symbols to ensure that the ABI remains stable with LTS merges. This
merge contains the following commits:

*   383607d234 Merge "Merge tag 'android12-5.10.185_r00' into android12-5.10" into android12-5.10
|\
| * e424229872 Merge tag 'android12-5.10.185_r00' into android12-5.10
* | b4b7d22830 ANDROID: wakeupbypass: Add vendor hook for batteryswap
* | 1dc5772322 ANDROID: GKI: Update symbols to symbol list
* | 28f1c8e015 ANDROID: vendor_hook: Add hook to abort reclaim and compaction
* | adad2dab31 UPSTREAM: Revert "Fix XFRM-I support for nested ESP tunnels"
* | f48a4c3b29 ANDROID: Incremental fs: Allocate data buffer based on input request size
* | 4def2dd180 ANDROID: ABI: update symbol list for Xclipse GPU
* | bff06d6020 ANDROID: vendor_hooks: Supplement the missing hook call point.
* | de4dc1c15c ANDROID: GKI: Add symbol list for Nothing
|/
* b52f2d4395 UPSTREAM: usb: dwc3: gadget: Propagate core init errors to UDC during pullup
* 505b4a1929 UPSTREAM: usb: gadget: udc: renesas_usb3: Fix use after free bug in renesas_usb3_remove due to race condition
* 158d8bfffc UPSTREAM: media: rkvdec: fix use after free bug in rkvdec_remove
* 739f5722f4 UPSTREAM: x86/mm: Avoid using set_pgd() outside of real PGD pages
* 5db82d830f UPSTREAM: relayfs: fix out-of-bounds access in relay_file_read
* be9bc79296 UPSTREAM: io_uring: hold uring mutex around poll removal
* 6a975c2771 UPSTREAM: net/sched: flower: fix possible OOB write in fl_set_geneve_opt()
* c811ac11f7 UPSTREAM: ipvlan:Fix out-of-bounds caused by unclear skb->cb
* 540586cf5b ANDROID: GKI: Update symbols to symbol list
* 4782c8cb16 ANDROID: vendor_hook: Add hook to tune readaround size
* 69a794a283 ANDROID: vendor_hooks: Add hooks to avoid key threads stalled in memory allocations
* 69dc2c1a79 UPSTREAM: f2fs: fix to avoid use-after-free for cached IPU bio
* 39c3d16903 UPSTREAM: net/sched: cls_u32: Fix reference counter leak leading to overflow
* 4991def0fa UPSTREAM: xfs: verify buffer contents when we skip log replay
* 1c98645c8e UPSTREAM: memstick: r592: Fix UAF bug in r592_remove due to race condition
* e6d71f847b BACKPORT: btrfs: unset reloc control if transaction commit fails in prepare_to_relocate()
* b3af11bf90 ANDROID: ABI: Update oplus symbol list
* 5f17bf82b4 ANDROID: Export memcg functions to allow module to add new files
* 40aea038ea ANDROID: HID: Only utilise UHID provided exports if UHID is enabled
* 5bffeca4fb UPSTREAM: bluetooth: Perform careful capability checks in hci_sock_ioctl()
* c93516a2d0 ANDROID: HID: Over-ride default maximum buffer size when using UHID
* ab25d94e88 UPSTREAM: usb: gadget: f_fs: Add unbind event before functionfs_unbind
* 552009b284 UPSTREAM: net: cdc_ncm: Deal with too low values of dwNtbOutMaxSize
* c6ae8be4e6 ANDROID: GKI: update symbol list for exynos
* 0a52039fcb UPSTREAM: mailbox: mailbox-test: fix a locking issue in mbox_test_message_write()
* 17bbc533f8 UPSTREAM: mailbox: mailbox-test: Fix potential double-free in mbox_test_message_write()
* 708d51e22a UPSTREAM: 9p/xen : Fix use after free bug in xen_9pfs_front_remove due to race condition
* b82903573e FROMGIT: pstore: Revert pmsg_lock back to a normal mutex
* 403d5d1318 ANDROID: vendor_hook: Avoid clearing protect-flag before waking waiters
* 2246168a72 ANDROID: fix a race between speculative page walk and unmap operations
* 7cc458acf4 BACKPORT: usb: gadget: udc: Handle gadget_connect failure during bind operation
* fffb0ae9aa BACKPORT: usb: dwc3: gadget: Bail out in pullup if soft reset timeout happens
* a4420dc21f BACKPORT: f2fs: skip GC if possible when checkpoint disabling

Change-Id: I74d4bd3e7f094e845598f8c29348bea10738d57a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
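Most of the ANDROID changes in this merge follow the GKI vendor-hook pattern: a tracepoint-backed hook is declared under include/trace/hooks/, exported from drivers/android/vendor_hooks.c, invoked from a core-kernel call site, and its __traceiter_*/__tracepoint_* symbols are added to the vendor ABI symbol lists so that a vendor module can attach a probe. A minimal sketch of that pattern follows; the hook name android_vh_example and the bypass policy are illustrative only, not part of this merge:

/* include/trace/hooks/example.h: declare a tracepoint-backed vendor hook */
DECLARE_HOOK(android_vh_example,
	TP_PROTO(bool *bypass),
	TP_ARGS(bypass));

/* drivers/android/vendor_hooks.c: export it so a vendor module can attach */
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_example);

/* core-kernel call site: a probe registered by a vendor module may flip
 * the decision before the default behaviour runs */
bool bypass = false;

trace_android_vh_example(&bypass);
if (bypass)
	return;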
commit 9710ae86ef

File diff suppressed because it is too large
@@ -172,6 +172,7 @@
 __const_udelay
 consume_skb
 contig_page_data
+copy_highpage
 __cpu_active_mask
 cpu_all_bits
 cpu_bit_bitmap
@@ -2114,7 +2115,9 @@
 ttm_bo_move_ttm
 ttm_bo_put
 ttm_bo_unlock_delayed_workqueue
+ttm_bo_unmap_virtual
 ttm_bo_validate
+ttm_bo_wait
 ttm_dma_page_alloc_debugfs
 ttm_dma_populate
 ttm_dma_tt_fini
@@ -2133,6 +2136,7 @@
 ttm_tt_destroy_common
 ttm_tt_populate
 ttm_tt_set_placement_caching
+ttm_tt_unpopulate
 ttm_unmap_and_unpopulate_pages
 tty_flip_buffer_push
 tty_insert_flip_string_fixed_flag
android/abi_gki_aarch64_nothing (new file: 4 lines)
@@ -0,0 +1,4 @@
+[abi_symbol_list]
+# required by mount_state.ko
+  iterate_supers_type
+  get_fs_type
@@ -143,6 +143,7 @@
 cdev_device_del
 cdev_init
 __cfi_slowpath
+cgroup_add_dfl_cftypes
 cgroup_add_legacy_cftypes
 cgroup_path_ns
 cgroup_taskset_first
@@ -2793,6 +2794,16 @@
 __traceiter_android_vh_clear_mask_adjust
 __traceiter_android_vh_clear_reserved_fmt_fields
 __traceiter_android_vh_cma_drain_all_pages_bypass
+__traceiter_android_vh_compact_finished
+__traceiter_android_vh_alloc_pages_reclaim_bypass
+__traceiter_android_vh_free_unref_page_bypass
+__traceiter_android_vh_kvmalloc_node_use_vmalloc
+__traceiter_android_vh_should_alloc_pages_retry
+__traceiter_android_vh_unreserve_highatomic_bypass
+__traceiter_android_vh_pageset_update
+__traceiter_android_vh_rmqueue_bulk_bypass
+__traceiter_android_vh_tune_mmap_readaround
+__traceiter_android_vh_ra_tuning_max_page
 __traceiter_android_vh_cleanup_old_buffers_bypass
 __traceiter_android_vh_commit_creds
 __traceiter_android_vh_cpufreq_acct_update_power
@@ -2841,6 +2852,7 @@
 __traceiter_android_vh_logbuf
 __traceiter_android_vh_look_around
 __traceiter_android_vh_look_around_migrate_page
+__traceiter_android_vh_madvise_cold_or_pageout_abort
 __traceiter_android_vh_mark_page_accessed
 __traceiter_android_vh_mem_cgroup_alloc
 __traceiter_android_vh_mem_cgroup_css_offline
@@ -3049,6 +3061,16 @@
 __tracepoint_android_vh_clear_mask_adjust
 __tracepoint_android_vh_clear_reserved_fmt_fields
 __tracepoint_android_vh_cma_drain_all_pages_bypass
+__tracepoint_android_vh_compact_finished
+__tracepoint_android_vh_alloc_pages_reclaim_bypass
+__tracepoint_android_vh_free_unref_page_bypass
+__tracepoint_android_vh_kvmalloc_node_use_vmalloc
+__tracepoint_android_vh_should_alloc_pages_retry
+__tracepoint_android_vh_unreserve_highatomic_bypass
+__tracepoint_android_vh_pageset_update
+__tracepoint_android_vh_rmqueue_bulk_bypass
+__tracepoint_android_vh_tune_mmap_readaround
+__tracepoint_android_vh_ra_tuning_max_page
 __tracepoint_android_vh_cleanup_old_buffers_bypass
 __tracepoint_android_vh_commit_creds
 __tracepoint_android_vh_cpufreq_acct_update_power
@@ -3097,6 +3119,7 @@
 __tracepoint_android_vh_logbuf
 __tracepoint_android_vh_look_around
 __tracepoint_android_vh_look_around_migrate_page
+__tracepoint_android_vh_madvise_cold_or_pageout_abort
 __tracepoint_android_vh_mark_page_accessed
 __tracepoint_android_vh_mem_cgroup_alloc
 __tracepoint_android_vh_mem_cgroup_css_offline
@@ -23,6 +23,7 @@ android/abi_gki_aarch64_honor
 android/abi_gki_aarch64_imx
 android/abi_gki_aarch64_lenovo
 android/abi_gki_aarch64_mtk
+android/abi_gki_aarch64_nothing
 android/abi_gki_aarch64_oplus
 android/abi_gki_aarch64_qcom
 android/abi_gki_aarch64_rockchip
@@ -76,6 +76,7 @@
 #include <trace/hooks/ipv4.h>
 #include <trace/hooks/pci.h>
 #include <trace/hooks/dmabuf.h>
+#include <trace/hooks/wakeupbypass.h>
 
 /*
  * Export tracepoints that act as a bare tracehook (ie: have no trace event
@@ -332,6 +333,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_page_trylock);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_check_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_all_pages_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_drain_all_pages_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_unref_page_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kvmalloc_node_use_vmalloc);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_should_alloc_pages_retry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_unreserve_highatomic_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pageset_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue_bulk_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pcplist_add_cma_pages_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_event);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_group);
@@ -441,6 +448,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_read_done);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_tlb_conf);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node_memcgs);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ra_tuning_max_page);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_mmap_readaround);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_handle_pte_fault_end);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_pte_fault_end);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cow_user_page);
@@ -481,6 +489,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around_migrate_page);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_test_clear_look_around_ref);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dma_buf_stats_teardown);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_or_pageout_abort);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_compact_finished);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_wakeup_bypass);
 /*
  * For type visibility
  */
@@ -466,3 +466,4 @@ void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
 	else
 		ttm_pool_unpopulate(ttm);
 }
+EXPORT_SYMBOL(ttm_tt_unpopulate);
@@ -32,6 +32,7 @@
 #include <linux/hiddev.h>
 #include <linux/hid-debug.h>
 #include <linux/hidraw.h>
+#include <linux/uhid.h>
 
 #include "hid-ids.h"
 
@@ -258,6 +259,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
 {
 	struct hid_report *report;
 	struct hid_field *field;
+	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
 	unsigned int usages;
 	unsigned int offset;
 	unsigned int i;
@@ -288,8 +290,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
 	offset = report->size;
 	report->size += parser->global.report_size * parser->global.report_count;
 
+	if (IS_ENABLED(CONFIG_UHID) && parser->device->ll_driver == &uhid_hid_driver)
+		max_buffer_size = UHID_DATA_MAX;
+
 	/* Total size check: Allow for possible report index byte */
-	if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+	if (report->size > (max_buffer_size - 1) << 3) {
 		hid_err(parser->device, "report is too long\n");
 		return -1;
 	}
@@ -1752,6 +1757,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 	struct hid_report_enum *report_enum = hid->report_enum + type;
 	struct hid_report *report;
 	struct hid_driver *hdrv;
+	int max_buffer_size = HID_MAX_BUFFER_SIZE;
 	unsigned int a;
 	u32 rsize, csize = size;
 	u8 *cdata = data;
@@ -1768,10 +1774,13 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 
 	rsize = hid_compute_report_size(report);
 
-	if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
-		rsize = HID_MAX_BUFFER_SIZE - 1;
-	else if (rsize > HID_MAX_BUFFER_SIZE)
-		rsize = HID_MAX_BUFFER_SIZE;
+	if (IS_ENABLED(CONFIG_UHID) && hid->ll_driver == &uhid_hid_driver)
+		max_buffer_size = UHID_DATA_MAX;
+
+	if (report_enum->numbered && rsize >= max_buffer_size)
+		rsize = max_buffer_size - 1;
+	else if (rsize > max_buffer_size)
+		rsize = max_buffer_size;
 
 	if (csize < rsize) {
 		dbg_hid("report %d is too short, (%d < %d)\n", report->id,
@@ -28,6 +28,7 @@
 #include <linux/of_irq.h>
 #include <linux/spinlock.h>
 #include <dt-bindings/input/gpio-keys.h>
+#include <trace/hooks/wakeupbypass.h>
 
 struct gpio_button_data {
 	const struct gpio_keys_button *button;
@@ -958,11 +959,16 @@ static int __maybe_unused gpio_keys_suspend(struct device *dev)
 	struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
 	struct input_dev *input = ddata->input;
 	int error;
+	int wakeup_bypass_enabled = 0;
 
+	trace_android_vh_wakeup_bypass(&wakeup_bypass_enabled);
+
 	if (device_may_wakeup(dev)) {
+		if (!wakeup_bypass_enabled) {
 		error = gpio_keys_enable_wakeup(ddata);
 		if (error)
 			return error;
+		}
 	} else {
 		mutex_lock(&input->mutex);
 		if (input->users)
@@ -978,8 +984,12 @@ static int __maybe_unused gpio_keys_resume(struct device *dev)
 	struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
 	struct input_dev *input = ddata->input;
 	int error = 0;
+	int wakeup_bypass_enabled = 0;
 
+	trace_android_vh_wakeup_bypass(&wakeup_bypass_enabled);
+
 	if (device_may_wakeup(dev)) {
+		if (!wakeup_bypass_enabled)
 			gpio_keys_disable_wakeup(ddata);
 	} else {
 		mutex_lock(&input->mutex);
@@ -2586,7 +2586,9 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 	ret = pm_runtime_get_sync(dwc->dev);
 	if (!ret || ret < 0) {
 		pm_runtime_put(dwc->dev);
-		return 0;
+		if (ret < 0)
+			pm_runtime_set_suspended(dwc->dev);
+		return ret;
 	}
 
 	if (dwc->pullups_connected == is_on) {
@@ -2605,13 +2607,16 @@
 		 * device-initiated disconnect requires a core soft reset
 		 * (DCTL.CSftRst) before enabling the run/stop bit.
 		 */
-		dwc3_core_soft_reset(dwc);
+		ret = dwc3_core_soft_reset(dwc);
+		if (ret)
+			goto done;
 
 		dwc3_event_buffers_setup(dwc);
 		__dwc3_gadget_start(dwc);
 		ret = dwc3_gadget_run_stop(dwc, true, false);
 	}
 
+done:
 	pm_runtime_put(dwc->dev);
 
 	return ret;
@@ -1047,12 +1047,16 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);
 
 /* ------------------------------------------------------------------------- */
 
-static void usb_udc_connect_control(struct usb_udc *udc)
+static int usb_udc_connect_control(struct usb_udc *udc)
 {
+	int ret;
+
 	if (udc->vbus)
-		usb_gadget_connect(udc->gadget);
+		ret = usb_gadget_connect(udc->gadget);
 	else
-		usb_gadget_disconnect(udc->gadget);
+		ret = usb_gadget_disconnect(udc->gadget);
+
+	return ret;
 }
 
 /**
@@ -1507,15 +1511,26 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *driver)
 	if (ret)
 		goto err1;
 	ret = usb_gadget_udc_start(udc);
-	if (ret) {
-		driver->unbind(udc->gadget);
-		goto err1;
-	}
+	if (ret)
+		goto err_start;
+
 	usb_gadget_enable_async_callbacks(udc);
-	usb_udc_connect_control(udc);
+	ret = usb_udc_connect_control(udc);
+	if (ret)
+		goto err_connect_control;
 
 	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
 	return 0;
+
+err_connect_control:
+	usb_gadget_disable_async_callbacks(udc);
+	if (udc->gadget->irq)
+		synchronize_irq(udc->gadget->irq);
+	usb_gadget_udc_stop(udc);
+
+err_start:
+	driver->unbind(udc->gadget);
+
 err1:
 	if (ret != -EISNAM)
 		dev_err(&udc->dev, "failed to start %s: %d\n",
@@ -1994,6 +1994,11 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 	}
 	sbi->sb->s_flags |= SB_ACTIVE;
 
+	/* check if we need more GC first */
+	unusable = f2fs_get_unusable_blocks(sbi);
+	if (!f2fs_disable_cp_again(sbi, unusable))
+		goto skip_gc;
+
 	f2fs_update_time(sbi, DISABLE_TIME);
 
 	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
@@ -2019,6 +2024,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 		goto restore_flag;
 	}
 
+skip_gc:
 	f2fs_down_write(&sbi->gc_lock);
 	cpc.reason = CP_PAUSE;
 	set_sbi_flag(sbi, SBI_CP_DISABLED);
@@ -916,10 +916,10 @@ static long ioctl_get_read_timeouts(struct mount_info *mi, void __user *arg)
 	if (copy_from_user(&args, args_usr_ptr, sizeof(args)))
 		return -EINVAL;
 
-	if (args.timeouts_array_size_out > INCFS_DATA_FILE_BLOCK_SIZE)
+	if (args.timeouts_array_size > INCFS_DATA_FILE_BLOCK_SIZE)
 		return -EINVAL;
 
-	buffer = kzalloc(args.timeouts_array_size_out, GFP_NOFS);
+	buffer = kzalloc(args.timeouts_array_size, GFP_NOFS);
 	if (!buffer)
 		return -ENOMEM;
 
@@ -107,7 +107,6 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 
 static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 {
-	_trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
 	rwsem_release(&sem->dep_map, _RET_IP_);
 
 	preempt_disable();
@@ -130,6 +129,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 		this_cpu_dec(*sem->read_count);
 		rcuwait_wake_up(&sem->writer);
 	}
+	_trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
 	preempt_enable();
 }
 
@@ -192,6 +192,27 @@ DECLARE_HOOK(android_vh_mark_page_accessed,
 DECLARE_HOOK(android_vh_cma_drain_all_pages_bypass,
 	TP_PROTO(unsigned int migratetype, bool *bypass),
 	TP_ARGS(migratetype, bypass));
+DECLARE_HOOK(android_vh_free_unref_page_bypass,
+	TP_PROTO(struct page *page, int order, int migratetype, bool *bypass),
+	TP_ARGS(page, order, migratetype, bypass));
+DECLARE_HOOK(android_vh_kvmalloc_node_use_vmalloc,
+	TP_PROTO(size_t size, gfp_t *kmalloc_flags, bool *use_vmalloc),
+	TP_ARGS(size, kmalloc_flags, use_vmalloc));
+DECLARE_HOOK(android_vh_should_alloc_pages_retry,
+	TP_PROTO(gfp_t gfp_mask, int order, int *alloc_flags,
+	int migratetype, struct zone *preferred_zone, struct page **page, bool *should_alloc_retry),
+	TP_ARGS(gfp_mask, order, alloc_flags,
+	migratetype, preferred_zone, page, should_alloc_retry));
+DECLARE_HOOK(android_vh_unreserve_highatomic_bypass,
+	TP_PROTO(bool force, struct zone *zone, bool *skip_unreserve_highatomic),
+	TP_ARGS(force, zone, skip_unreserve_highatomic));
+DECLARE_HOOK(android_vh_pageset_update,
+	TP_PROTO(unsigned long *high, unsigned long *batch),
+	TP_ARGS(high, batch));
+DECLARE_HOOK(android_vh_rmqueue_bulk_bypass,
+	TP_PROTO(unsigned int order, struct per_cpu_pages *pcp, int migratetype,
+	struct list_head *list),
+	TP_ARGS(order, pcp, migratetype, list));
 DECLARE_HOOK(android_vh_pcplist_add_cma_pages_bypass,
 	TP_PROTO(int migratetype, bool *bypass),
 	TP_ARGS(migratetype, bypass));
@@ -201,6 +222,10 @@ DECLARE_HOOK(android_vh_subpage_dma_contig_alloc,
 DECLARE_HOOK(android_vh_ra_tuning_max_page,
 	TP_PROTO(struct readahead_control *ractl, unsigned long *max_page),
 	TP_ARGS(ractl, max_page));
+DECLARE_HOOK(android_vh_tune_mmap_readaround,
+	TP_PROTO(unsigned int ra_pages, pgoff_t pgoff,
+	pgoff_t *start, unsigned int *size, unsigned int *async_size),
+	TP_ARGS(ra_pages, pgoff, start, size, async_size));
 DECLARE_RESTRICTED_HOOK(android_rvh_handle_pte_fault_end,
 	TP_PROTO(struct vm_fault *vmf, unsigned long highest_memmap_pfn),
 	TP_ARGS(vmf, highest_memmap_pfn), 1);
@@ -313,6 +338,12 @@ DECLARE_HOOK(android_vh_look_around,
 	TP_PROTO(struct page_vma_mapped_walk *pvmw, struct page *page,
 	struct vm_area_struct *vma, int *referenced),
 	TP_ARGS(pvmw, page, vma, referenced));
+DECLARE_HOOK(android_vh_compact_finished,
+	TP_PROTO(bool *abort_compact),
+	TP_ARGS(abort_compact));
+DECLARE_HOOK(android_vh_madvise_cold_or_pageout_abort,
+	TP_PROTO(struct vm_area_struct *vma, bool *abort_madvise),
+	TP_ARGS(vma, abort_madvise));
 /* macro versions of hooks are no longer required */
 
 #endif /* _TRACE_HOOK_MM_H */
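The new readaround hooks above let a module rewrite the window that the mmap fault path computes before the readahead is issued (see the mm/filemap.c hunk further down). A hedged sketch of a probe, assuming a hypothetical out-of-tree module and an illustrative 16-page cap, neither of which is part of this merge; the leading void *data argument is supplied by the tracepoint core:

#include <linux/module.h>
#include <trace/hooks/mm.h>

/* Probe signature mirrors the TP_PROTO of android_vh_tune_mmap_readaround. */
static void demo_tune_mmap_readaround(void *data, unsigned int ra_pages,
		pgoff_t pgoff, pgoff_t *start, unsigned int *size,
		unsigned int *async_size)
{
	/* Illustrative policy: clamp mmap readaround to 16 pages. */
	if (*size > 16) {
		*size = 16;
		*async_size = 4;
	}
}

static int __init demo_ra_init(void)
{
	/* register_trace_<hook>() is generated by the DECLARE_HOOK above. */
	return register_trace_android_vh_tune_mmap_readaround(
			demo_tune_mmap_readaround, NULL);
}
module_init(demo_ra_init);
MODULE_LICENSE("GPL");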
include/trace/hooks/wakeupbypass.h (new file: 17 lines)
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wakeupbypass
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_WAKEUPBYPASS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_WAKEUPBYPASS_H
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_HOOK(android_vh_wakeup_bypass,
+	TP_PROTO(int *is_wakeup_bypassed),
+	TP_ARGS(is_wakeup_bypassed));
+
+#endif /* _TRACE_HOOK_WAKEUPBYPASS_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
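A vendor module (the batteryswap case named in the commit list) attaches to this hook through the register/unregister helpers that DECLARE_HOOK generates. A minimal sketch; the module body and its unconditional bypass decision are illustrative assumptions, not code from this merge:

#include <linux/module.h>
#include <trace/hooks/wakeupbypass.h>

static void batteryswap_wakeup_bypass(void *data, int *is_wakeup_bypassed)
{
	/* Ask the gpio-keys and alarmtimer suspend paths to skip wakeup setup. */
	*is_wakeup_bypassed = 1;
}

static int __init batteryswap_init(void)
{
	return register_trace_android_vh_wakeup_bypass(batteryswap_wakeup_bypass, NULL);
}

static void __exit batteryswap_exit(void)
{
	unregister_trace_android_vh_wakeup_bypass(batteryswap_wakeup_bypass, NULL);
}
module_init(batteryswap_init);
module_exit(batteryswap_exit);
MODULE_LICENSE("GPL");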
@@ -4233,6 +4233,7 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
 		cft->flags |= __CFTYPE_ONLY_ON_DFL;
 	return cgroup_add_cftypes(ss, cfts);
 }
+EXPORT_SYMBOL_GPL(cgroup_add_dfl_cftypes);
 
 /**
  * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
@@ -760,12 +760,12 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
  */
 void __sched mutex_unlock(struct mutex *lock)
 {
-	trace_android_vh_record_mutex_lock_starttime(current, 0);
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 	if (__mutex_unlock_fast(lock))
 		return;
 #endif
 	__mutex_unlock_slowpath(lock, _RET_IP_);
+	trace_android_vh_record_mutex_lock_starttime(current, 0);
 }
 EXPORT_SYMBOL(mutex_unlock);
 
|
@ -258,7 +258,6 @@ EXPORT_SYMBOL_GPL(percpu_down_write);
|
||||
|
||||
void percpu_up_write(struct percpu_rw_semaphore *sem)
|
||||
{
|
||||
trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
|
||||
rwsem_release(&sem->dep_map, _RET_IP_);
|
||||
|
||||
/*
|
||||
@ -284,6 +283,7 @@ void percpu_up_write(struct percpu_rw_semaphore *sem)
|
||||
* exclusive write lock because its counting.
|
||||
*/
|
||||
rcu_sync_exit(&sem->rss);
|
||||
trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(percpu_up_write);
|
||||
|
||||
|
@@ -1066,6 +1066,8 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
 			raw_spin_unlock_irq(&sem->wait_lock);
 			rwsem_set_reader_owned(sem);
 			lockevent_inc(rwsem_rlock_fast);
+			trace_android_vh_record_rwsem_lock_starttime(
+				current, jiffies);
 			return sem;
 		}
 		adjustment += RWSEM_FLAG_WAITERS;
@@ -33,6 +33,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/alarmtimer.h>
 
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/wakeupbypass.h>
 /**
  * struct alarm_base - Alarm timer bases
  * @lock:		Lock for syncrhonized access to the base
@@ -246,6 +248,7 @@ static int alarmtimer_suspend(struct device *dev)
 	struct rtc_device *rtc;
 	unsigned long flags;
 	struct rtc_time tm;
+	int wakeup_bypass_enabled = 0;
 
 	spin_lock_irqsave(&freezer_delta_lock, flags);
 	min = freezer_delta;
@@ -254,6 +257,10 @@ static int alarmtimer_suspend(struct device *dev)
 	freezer_delta = 0;
 	spin_unlock_irqrestore(&freezer_delta_lock, flags);
 
+	trace_android_vh_wakeup_bypass(&wakeup_bypass_enabled);
+	if (wakeup_bypass_enabled)
+		return 0;
+
 	rtc = alarmtimer_get_rtcdev();
 	/* If we have no rtcdev, just return */
 	if (!rtc)
@@ -45,6 +45,11 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 #define CREATE_TRACE_POINTS
 #include <trace/events/compaction.h>
 
+#undef CREATE_TRACE_POINTS
+#ifndef __GENKSYMS__
+#include <trace/hooks/mm.h>
+#endif
+
 #define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
 #define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
 #define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
@@ -1984,6 +1989,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 	unsigned int order;
 	const int migratetype = cc->migratetype;
 	int ret;
+	bool abort_compact = false;
 
 	/* Compaction run completes if the migrate and free scanner meet */
 	if (compact_scanners_met(cc)) {
@@ -2083,7 +2089,8 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 	}
 
 out:
-	if (cc->contended || fatal_signal_pending(current))
+	trace_android_vh_compact_finished(&abort_compact);
+	if (cc->contended || fatal_signal_pending(current) || abort_compact)
 		ret = COMPACT_CONTENDED;
 
 	return ret;
@@ -2661,6 +2661,8 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
 	ra->size = ra->ra_pages;
 	ra->async_size = ra->ra_pages / 4;
+	trace_android_vh_tune_mmap_readaround(ra->ra_pages, vmf->pgoff,
+				&ra->start, &ra->size, &ra->async_size);
 	ractl._index = ra->start;
 	do_page_cache_ra(&ractl, ra->size, ra->async_size);
 	return fpin;
@@ -322,8 +322,10 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 	struct page *page = NULL;
 	LIST_HEAD(page_list);
 	bool allow_shared = false;
+	bool abort_madvise = false;
 
-	if (fatal_signal_pending(current))
+	trace_android_vh_madvise_cold_or_pageout_abort(vma, &abort_madvise);
+	if (fatal_signal_pending(current) || abort_madvise)
 		return -EINTR;
 
 	trace_android_vh_madvise_cold_or_pageout(vma, &allow_shared);
mm/memory.c (34 lines changed)
@@ -246,6 +246,16 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 			   unsigned long addr)
 {
 	pgtable_t token = pmd_pgtable(*pmd);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	/*
+	 * Ensure page table destruction is blocked if __pte_map_lock managed
+	 * to take this lock. Without this barrier tlb_remove_table_rcu can
+	 * destroy ptl after __pte_map_lock locked it and during unlock would
+	 * cause a use-after-free.
+	 */
+	spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
+	spin_unlock(ptl);
+#endif
 	pmd_clear(pmd);
 	pte_free_tlb(tlb, token, addr);
 	mm_dec_nr_ptes(tlb->mm);
@@ -2627,9 +2637,7 @@ EXPORT_SYMBOL_GPL(apply_to_page_range);
 static bool pte_spinlock(struct vm_fault *vmf)
 {
 	bool ret = false;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	pmd_t pmdval;
-#endif
 
 	/* Check if vma is still valid */
 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
@@ -2644,24 +2652,28 @@ static bool pte_spinlock(struct vm_fault *vmf)
 		goto out;
 	}
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/*
 	 * We check if the pmd value is still the same to ensure that there
 	 * is not a huge collapse operation in progress in our back.
+	 * It also ensures that pmd was not cleared by pmd_clear in
+	 * free_pte_range and ptl is still valid.
 	 */
 	pmdval = READ_ONCE(*vmf->pmd);
 	if (!pmd_same(pmdval, vmf->orig_pmd)) {
 		trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address);
 		goto out;
 	}
-#endif
 
-	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval);
 	if (unlikely(!spin_trylock(vmf->ptl))) {
 		trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address);
 		goto out;
 	}
 
+	/*
+	 * The check below will fail if pte_spinlock passed its ptl barrier
+	 * before we took the ptl lock.
+	 */
 	if (vma_has_changed(vmf)) {
 		spin_unlock(vmf->ptl);
 		trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
@@ -2679,9 +2691,7 @@ static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr)
 	bool ret = false;
 	pte_t *pte;
 	spinlock_t *ptl;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	pmd_t pmdval;
-#endif
 
 	/*
 	 * The first vma_has_changed() guarantees the page-tables are still
@@ -2696,7 +2706,6 @@ static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr)
 		goto out;
 	}
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/*
 	 * We check if the pmd value is still the same to ensure that there
 	 * is not a huge collapse operation in progress in our back.
@@ -2706,7 +2715,6 @@ static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr)
 		trace_spf_pmd_changed(_RET_IP_, vmf->vma, addr);
 		goto out;
 	}
-#endif
 
 	/*
 	 * Same as pte_offset_map_lock() except that we call
@@ -2715,14 +2723,18 @@ static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr)
 	 * to invalidate TLB but this CPU has irq disabled.
 	 * Since we are in a speculative patch, accept it could fail
 	 */
-	ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
-	pte = pte_offset_map(vmf->pmd, addr);
+	ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval);
+	pte = pte_offset_map(&pmdval, addr);
 	if (unlikely(!spin_trylock(ptl))) {
 		pte_unmap(pte);
 		trace_spf_pte_lock(_RET_IP_, vmf->vma, addr);
 		goto out;
 	}
 
+	/*
+	 * The check below will fail if __pte_map_lock_speculative passed its ptl
+	 * barrier before we took the ptl lock.
+	 */
 	if (vma_has_changed(vmf)) {
 		pte_unmap_unlock(pte, ptl);
 		trace_spf_vma_changed(_RET_IP_, vmf->vma, addr);
@@ -1608,11 +1608,16 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	unsigned long flags;
 	int migratetype;
 	unsigned long pfn = page_to_pfn(page);
+	bool skip_free_unref_page = false;
 
 	if (!free_pages_prepare(page, order, true, fpi_flags))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
+	trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
+	if (skip_free_unref_page)
+		return;
+
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype,
@@ -2791,6 +2796,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 	struct page *page;
 	int order;
 	bool ret;
+	bool skip_unreserve_highatomic = false;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
 								ac->nodemask) {
@@ -2802,6 +2808,11 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 		    pageblock_nr_pages)
 			continue;
 
+		trace_android_vh_unreserve_highatomic_bypass(force, zone,
+			&skip_unreserve_highatomic);
+		if (skip_unreserve_highatomic)
+			continue;
+
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < MAX_ORDER; order++) {
 			struct free_area *area = &(zone->free_area[order]);
@@ -3047,6 +3058,10 @@ static struct list_head *get_populated_pcp_list(struct zone *zone,
 	struct list_head *list = &pcp->lists[migratetype];
 
 	if (list_empty(list)) {
+		trace_android_vh_rmqueue_bulk_bypass(order, pcp, migratetype, list);
+		if (!list_empty(list))
+			return list;
+
 		pcp->count += rmqueue_bulk(zone, order,
 				pcp->batch, list,
 				migratetype, alloc_flags);
@@ -3343,10 +3358,17 @@ void free_unref_page(struct page *page)
 {
 	unsigned long flags;
 	unsigned long pfn = page_to_pfn(page);
+	int migratetype;
+	bool skip_free_unref_page = false;
 
 	if (!free_unref_page_prepare(page, pfn))
 		return;
 
+	migratetype = get_pfnblock_migratetype(page, pfn);
+	trace_android_vh_free_unref_page_bypass(page, 0, migratetype, &skip_free_unref_page);
+	if (skip_free_unref_page)
+		return;
+
 	local_irq_save(flags);
 	free_unref_page_commit(page, pfn);
 	local_irq_restore(flags);
@@ -4822,6 +4844,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned int zonelist_iter_cookie;
 	int reserve_flags;
 	unsigned long vh_record;
+	bool should_alloc_retry = false;
 
 	trace_android_vh_alloc_pages_slowpath_begin(gfp_mask, order, &vh_record);
 	/*
@@ -4962,6 +4985,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (page)
 		goto got_pg;
 
+	trace_android_vh_should_alloc_pages_retry(gfp_mask, order,
+		&alloc_flags, ac->migratetype, ac->preferred_zoneref->zone,
+		&page, &should_alloc_retry);
+	if (should_alloc_retry)
+		goto retry;
+
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
 							&did_some_progress);
@@ -6620,6 +6649,7 @@ static int zone_batchsize(struct zone *zone)
 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
 		unsigned long batch)
 {
+	trace_android_vh_pageset_update(&high, &batch);
 	/* start with a fail safe value for batch */
 	pcp->batch = 1;
 	smp_wmb();
@@ -29,6 +29,7 @@
 #include "internal.h"
 #ifndef __GENKSYMS__
 #include <trace/hooks/syscall_check.h>
+#include <trace/hooks/mm.h>
 #endif
 
 /**
@@ -587,6 +588,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 {
 	gfp_t kmalloc_flags = flags;
 	void *ret;
+	bool use_vmalloc = false;
 
 	/*
 	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
@@ -595,6 +597,10 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 	if ((flags & GFP_KERNEL) != GFP_KERNEL)
 		return kmalloc_node(size, flags, node);
 
+	trace_android_vh_kvmalloc_node_use_vmalloc(size, &kmalloc_flags, &use_vmalloc);
+	if (use_vmalloc)
+		goto use_vmalloc_node;
+
 	/*
 	 * We want to attempt a large physically contiguous block first because
 	 * it is less likely to fragment multiple larger blocks and therefore
@@ -624,6 +630,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 		return NULL;
 	}
 
+use_vmalloc_node:
 	return __vmalloc_node(size, 1, flags, node,
 			__builtin_return_address(0));
 }
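With the hook wired up as above, a module can steer individual kvmalloc_node() calls straight to vmalloc (or adjust the kmalloc flags) before the physically contiguous attempt is made. A hedged sketch of a probe; the 1 MiB threshold is an illustrative assumption, and registration works as in the earlier wakeup-bypass example:

#include <linux/sizes.h>
#include <trace/hooks/mm.h>

static void demo_kvmalloc_use_vmalloc(void *data, size_t size,
		gfp_t *kmalloc_flags, bool *use_vmalloc)
{
	/* Illustrative policy: skip the kmalloc attempt for large buffers. */
	if (size > SZ_1M)
		*use_vmalloc = true;
}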