Merge branch 'android12-5.10' into branch 'android12-5.10-lts'

Sync up with android12-5.10 for the following commits:

b389838308 ANDROID: GKI: Add symbols to abi_gki_aarch64_transsion
5b696d45bf BACKPORT: nfc: nfcmrvl: main: reorder destructive operations in nfcmrvl_nci_unregister_dev to avoid bugs
01680ae117 ANDROID: vendor_hook: Add hook in __free_pages()
e064059673 ANDROID: create and export is_swap_slot_cache_enabled
f6f18f7ffa ANDROID: vendor_hook: Add hook in swap_slots
034877c195 ANDROID: mm: export swapcache_free_entries
06c2766cbc ANDROID: mm: export symbols used in vendor hook android_vh_get_swap_page()
d4eef93a9d ANDROID: vendor_hooks: Add hooks to extend struct swap_slots_cache
4506bcbba5 ANDROID: mm: export swap_type_to_swap_info
ed2b11d639 ANDROID: vendor_hook: Add hook in si_swapinfo()
667f0d71dc ANDROID: vendor_hooks: Add hooks to extend the struct swap_info_struct
bc4c73c182 ANDROID: vendor_hook: Add hooks in unuse_pte_range() and try_to_unuse()
7222a0b29b ANDROID: vendor_hook: Add hooks in free_swap_slot()
d2fea0ba9a ANDROID: vendor_hook: Add hook to update nr_swap_pages and total_swap_pages
1aa26f0017 ANDROID: vendor_hook: Add hook in page_referenced_one()
851672a4b2 ANDROID: vendor_hooks: Add hooks to record the I/O statistics of swap:
5bc9b10c45 ANDROID: vendor_hook: Add hook in migrate_page_states()
89a247a638 ANDROID: vendor_hook: Add hook in __migration_entry_wait()
f7c932399e ANDROID: vendor_hook: Add hook in handle_pte_fault()
50148ce249 ANDROID: vendor_hook: Add hook in do_swap_page()
9d4b553252 ANDROID: vendor_hook: Add hook in wp_page_copy()
e3f469befb ANDROID: vendor_hooks: Add hooks to madvise_cold_or_pageout_pte_range()
6b7243da5e ANDROID: vendor_hook: Add hook in snapshot_refaults()
6b04959511 ANDROID: vendor_hook: Add hook in inactive_is_low()
bb9c8f5256 FROMGIT: usb: gadget: f_fs: change ep->ep safe in ffs_epfile_io()
7d2bd28eae FROMGIT: usb: gadget: f_fs: change ep->status safe in ffs_epfile_io()
abb407e9ff ANDROID: GKI: forward declare struct cgroup_taskset in vendor hooks
8d6d335851 ANDROID: Fix build error with CONFIG_UCLAMP_TASK disabled
1590a0e8e1 ANDROID: GKI: include more type definitions in vendor hooks
583c0f7c1c ANDROID: Update symbol list for mtk
5146690a6c ANDROID: dma/debug: fix warning of check_sync
564ba93050 FROMGIT: usb: common: usb-conn-gpio: Allow wakeup from system suspend
d41cf0b55b BACKPORT: FROMLIST: usb: gadget: uvc: fix list double add in uvcg_video_pump
74769685e4 BACKPORT: exfat: improve write performance when dirsync enabled
47fa973d9e FROMLIST: devcoredump : Serialize devcd_del work
b92ac32536 FROMGIT: usb: gadget: uvc: calculate the number of request depending on framesize
59d057a3f9 ANDROID: GKI: Add tracing_is_on interface into symbol list
db16bd36e8 UPSTREAM: usb: gadget: f_mass_storage: Make CD-ROM emulation work with Mac OS-X
fefdf99a96 BACKPORT: io_uring: fix race between timeout flush and removal
07b78bf6d0 BACKPORT: net/sched: cls_u32: fix netns refcount changes in u32_change()
95e278bdc8 UPSTREAM: io_uring: always use original task when preparing req identity
0f77129416 FROMLIST: remoteproc: Fix dma_mem leak after rproc_shutdown
6a15abd604 FROMLIST: dma-mapping: Add dma_release_coherent_memory to DMA API
9efe21cd8f ANDROID: Update QCOM symbol list for __reset_control_get
131b12d50f Merge tag 'android12-5.10.110_r01' into android12-5.10
8c3ac02bca ANDROID: vendor_hooks: Add hooks for mutex
a27d9caa6a BACKPORT: can: ems_usb: ems_usb_start_xmit(): fix double dev_kfree_skb() in error path
1292f51788 BACKPORT: can: usb_8dev: usb_8dev_start_xmit(): fix double dev_kfree_skb() in error path
82a3c7ee8d ANDROID: GKI: Update symbols to symbol list
59735a7d31 ANDROID: oplus: Update the ABI xml and symbol list
76c90b9959 UPSTREAM: remoteproc: Fix count check in rproc_coredump_write()
3e71aa523e BACKPORT: esp: Fix possible buffer overflow in ESP transformation
66f0c91b2f ANDROID: Fix the drain_all_pages default condition broken by a hook
393be9a064 UPSTREAM: Revert "xfrm: xfrm_state_mtu should return at least 1280 for ipv6"
73f6098941 UPSTREAM: xfrm: fix MTU regression
e27ad1d211 ANDROID: signal: Add vendor hook for memory reaping

And track more new symbols that were added to the 'android12-5.10' branch:

Leaf changes summary: 33 artifacts changed
Changed leaf types summary: 0 leaf type changed
Removed/Changed/Added functions summary: 0 Removed, 0 Changed, 31 Added functions
Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 2 Added variables

31 Added functions:

  [A] 'function int __traceiter_android_vh_killed_process(void*, task_struct*, task_struct*, bool*)'
  [A] 'function void _snd_pcm_hw_params_any(snd_pcm_hw_params*)'
  [A] 'function bool check_cache_active()'
  [A] 'function int copy_to_user_fromio(void*, const volatile void*, size_t)'
  [A] 'function void debugfs_create_file_size(const char*, umode_t, dentry*, void*, const file_operations*, loff_t)'
  [A] 'function int devm_regmap_field_bulk_alloc(device*, regmap*, regmap_field**, reg_field*, int)'
  [A] 'function mem_cgroup* get_mem_cgroup_from_mm(mm_struct*)'
  [A] 'function bool is_swap_slot_cache_enabled()'
  [A] 'function void ktime_get_coarse_ts64(timespec64*)'
  [A] 'function unsigned int linear_range_get_max_value(const linear_range*)'
  [A] 'function int linear_range_get_value(const linear_range*, unsigned int, unsigned int*)'
  [A] 'function int platform_irqchip_probe(platform_device*)'
  [A] 'function int register_tcf_proto_ops(tcf_proto_ops*)'
  [A] 'function int scan_swap_map_slots(swap_info_struct*, unsigned char, int, swp_entry_t*)'
  [A] 'function int snd_pcm_kernel_ioctl(snd_pcm_substream*, unsigned int, void*)'
  [A] 'function int snd_pcm_open_substream(snd_pcm*, int, file*, snd_pcm_substream**)'
  [A] 'function int snd_pcm_stop(snd_pcm_substream*, snd_pcm_state_t)'
  [A] 'function long int strnlen_user(const char*, long int)'
  [A] 'function int swap_alloc_cluster(swap_info_struct*, swp_entry_t*)'
  [A] 'function swap_info_struct* swap_type_to_swap_info(int)'
  [A] 'function void swapcache_free_entries(swp_entry_t*, int)'
  [A] 'function int tcf_action_exec(sk_buff*, tc_action**, int, tcf_result*)'
  [A] 'function void tcf_exts_destroy(tcf_exts*)'
  [A] 'function int tcf_exts_dump(sk_buff*, tcf_exts*)'
  [A] 'function int tcf_exts_dump_stats(sk_buff*, tcf_exts*)'
  [A] 'function int tcf_exts_validate(net*, tcf_proto*, nlattr**, nlattr*, tcf_exts*, bool, bool, netlink_ext_ack*)'
  [A] 'function bool tcf_queue_work(rcu_work*, work_func_t)'
  [A] 'function int thermal_zone_unbind_cooling_device(thermal_zone_device*, int, thermal_cooling_device*)'
  [A] 'function int tracing_is_on()'
  [A] 'function int unregister_tcf_proto_ops(tcf_proto_ops*)'
  [A] 'function usb_role usb_role_switch_get_role(usb_role_switch*)'

2 Added variables:

  [A] 'tracepoint __tracepoint_android_vh_killed_process'
  [A] 'void* high_memory'

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ia4a34d9aa1929161e2587529f700f49c31b4c2cc

@ -2130,6 +2130,7 @@
register_shrinker
register_syscore_ops
register_sysctl_table
register_tcf_proto_ops
register_virtio_device
register_virtio_driver
regmap_bulk_read
@ -2629,6 +2630,12 @@
__task_pid_nr_ns
__task_rq_lock
task_rq_lock
tcf_action_exec
tcf_exts_destroy
tcf_exts_dump
tcf_exts_dump_stats
tcf_exts_validate
tcf_queue_work
tcp_hashinfo
tcp_parse_options
thaw_bdev
@ -2812,6 +2819,7 @@
__traceiter_android_vh_prepare_update_load_avg_se
__traceiter_android_vh_printk_hotplug
__traceiter_android_vh_process_killed
__traceiter_android_vh_killed_process
__traceiter_android_vh_revert_creds
__traceiter_android_vh_rmqueue
__traceiter_android_vh_rwsem_init
@ -3019,6 +3027,7 @@
__tracepoint_android_vh_prepare_update_load_avg_se
__tracepoint_android_vh_printk_hotplug
__tracepoint_android_vh_process_killed
__tracepoint_android_vh_killed_process
__tracepoint_android_vh_revert_creds
__tracepoint_android_vh_rmqueue
__tracepoint_android_vh_rwsem_init
@ -3094,6 +3103,7 @@
trace_raw_output_prep
trace_seq_printf
trace_seq_putc
tracing_is_on
tracing_off
truncate_inode_pages_range
truncate_pagecache_range
@ -3209,6 +3219,7 @@
unregister_shrinker
unregister_syscore_ops
unregister_sysctl_table
unregister_tcf_proto_ops
unregister_virtio_device
unregister_virtio_driver
up


@ -2034,6 +2034,7 @@
resched_curr
reset_control_assert
reset_control_deassert
__reset_control_get
reset_control_put
reset_control_reset
resume_cpus


@ -0,0 +1,8 @@
[abi_symbol_list]
get_mem_cgroup_from_mm
is_swap_slot_cache_enabled
swapcache_free_entries
swap_type_to_swap_info
scan_swap_map_slots
swap_alloc_cluster
check_cache_active


@ -29,6 +29,7 @@ android/abi_gki_aarch64_virtual_device
android/abi_gki_aarch64_vivo
android/abi_gki_aarch64_xiaomi
android/abi_gki_aarch64_asus
android/abi_gki_aarch64_transsion
"
FILES="${FILES}


@ -99,6 +99,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_restore_priority);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wakeup_ilocked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_send_sig_info);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_process_killed);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_killed_process);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_write_finished);
@ -242,6 +243,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_build_sched_domains);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_mutex_list_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_unlock_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_unlock_slowpath_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_start_check_new_owner);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_undefinstr);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_ptrauth_fault);
@ -405,3 +407,26 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_tlb_conf);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node_memcgs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ra_tuning_max_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_memcg_scan_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_pte_fault_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cow_user_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swapin_add_anon_rmap);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_waiting_for_page_migration);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_migrate_page_states);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_one_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_count_pswpin);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_count_pswpout);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_count_swpout_vm_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swap_slot_cache_active);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_slots_cache_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_swap_slot_cache);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_swap_slot);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_swap_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_isolated_for_reclaim);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_inactive_is_low);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_snapshot_refaults);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_account_swap_pages);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_unuse_swap_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_init_swap_info_struct);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_swapinfo);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_si);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_pages);
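
For context, exporting these tracepoint symbols is what lets an out-of-tree vendor module attach probes to the hooks declared with DECLARE_HOOK(). A minimal sketch of such a module, assuming the standard register_trace_android_vh_<name>() helpers that DECLARE_HOOK generates (the module and probe names here are hypothetical):

#include <linux/module.h>
#include <linux/printk.h>
#include <trace/hooks/mm.h>

/* Probe signature: an opaque priv pointer followed by the hook's TP_PROTO arguments. */
static void my_free_pages_probe(void *priv, struct page *page, unsigned int order)
{
	/* Called from the __free_pages() path; must be fast and must not sleep. */
	pr_debug("vendor hook: freeing an order-%u page\n", order);
}

static int __init my_vendor_hooks_init(void)
{
	/* The second argument is private data handed back to the probe as 'priv'. */
	return register_trace_android_vh_free_pages(my_free_pages_probe, NULL);
}

static void __exit my_vendor_hooks_exit(void)
{
	unregister_trace_android_vh_free_pages(my_free_pages_probe, NULL);
}

module_init(my_vendor_hooks_init);
module_exit(my_vendor_hooks_exit);
MODULE_LICENSE("GPL");

The same pattern applies to any of the hooks exported above; the swap and reclaim hooks simply carry different TP_PROTO arguments.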


@ -29,6 +29,47 @@ struct devcd_entry {
struct device devcd_dev;
void *data;
size_t datalen;
/*
* Here, a mutex is required to serialize calls to the del_wk work between
* user and kernel space, which can happen once devcd is added with
* device_add() and a uevent is sent to user space. User space reads the
* uevent and writes to devcd_data_write(), which tries to modify a work
* item that devcoredump has not even initialized/queued yet.
*
*
*
* cpu0(X) cpu1(Y)
*
* dev_coredump() uevent sent to user space
* device_add() ======================> user space process Y reads the
* uevents writes to devcd fd
* which results into writes to
*
* devcd_data_write()
* mod_delayed_work()
* try_to_grab_pending()
* del_timer()
* debug_assert_init()
* INIT_DELAYED_WORK()
* schedule_delayed_work()
*
*
* Also, the mutex alone is not enough to avoid scheduling the
* del_wk work after it has been flushed by a call to devcd_free(),
* as shown below.
*
* disabled_store()
* devcd_free()
* mutex_lock() devcd_data_write()
* flush_delayed_work()
* mutex_unlock()
* mutex_lock()
* mod_delayed_work()
* mutex_unlock()
* So, the delete_work flag is required.
*/
struct mutex mutex;
bool delete_work;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
@ -88,7 +129,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
mod_delayed_work(system_wq, &devcd->del_wk, 0);
mutex_lock(&devcd->mutex);
if (!devcd->delete_work) {
devcd->delete_work = true;
mod_delayed_work(system_wq, &devcd->del_wk, 0);
}
mutex_unlock(&devcd->mutex);
return count;
}
@ -116,7 +162,12 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
mutex_lock(&devcd->mutex);
if (!devcd->delete_work)
devcd->delete_work = true;
flush_delayed_work(&devcd->del_wk);
mutex_unlock(&devcd->mutex);
return 0;
}
@ -126,6 +177,30 @@ static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
return sysfs_emit(buf, "%d\n", devcd_disabled);
}
/*
*
* disabled_store() worker()
* class_for_each_device(&devcd_class,
* NULL, NULL, devcd_free)
* ...
* ...
* while ((dev = class_dev_iter_next(&iter))
* devcd_del()
* device_del()
* put_device() <- last reference
* error = fn(dev, data) devcd_dev_release()
* devcd_free(dev, data) kfree(devcd)
* mutex_lock(&devcd->mutex);
*
*
* In the above diagram, it looks as if disabled_store() could race with a concurrently
* running devcd_del() and hit a memory abort while acquiring devcd->mutex, since that
* lock is taken after the devcd memory has been freed by kfree() once its last
* reference was dropped with put_device(). However, this cannot happen, because
* fn(dev, data) runs with its own reference to the device via klist_node, so that is
* not the last reference and the situation above does not occur.
*/
static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
const char *buf, size_t count)
{
@ -282,13 +357,16 @@ void dev_coredumpm(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
devcd->delete_work = false;
mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
dev_set_name(&devcd->devcd_dev, "devcd%d",
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
mutex_lock(&devcd->mutex);
if (device_add(&devcd->devcd_dev))
goto put_device;
@ -302,10 +380,11 @@ void dev_coredumpm(struct device *dev, struct module *owner,
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
mutex_unlock(&devcd->mutex);
return;
put_device:
put_device(&devcd->devcd_dev);
mutex_unlock(&devcd->mutex);
put_module:
module_put(owner);
free:


@ -461,6 +461,7 @@ static void rproc_rvdev_release(struct device *dev)
struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);
of_reserved_mem_device_release(dev);
dma_release_coherent_memory(dev);
kfree(rvdev);
}


@ -275,6 +275,7 @@ static int usb_conn_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
device_set_wakeup_capable(&pdev->dev, true);
/* Perform initial detection */
usb_conn_queue_dwork(info, 0);
@ -304,6 +305,14 @@ static int __maybe_unused usb_conn_suspend(struct device *dev)
{
struct usb_conn_info *info = dev_get_drvdata(dev);
if (device_may_wakeup(dev)) {
if (info->id_gpiod)
enable_irq_wake(info->id_irq);
if (info->vbus_gpiod)
enable_irq_wake(info->vbus_irq);
return 0;
}
if (info->id_gpiod)
disable_irq(info->id_irq);
if (info->vbus_gpiod)
@ -318,6 +327,14 @@ static int __maybe_unused usb_conn_resume(struct device *dev)
{
struct usb_conn_info *info = dev_get_drvdata(dev);
if (device_may_wakeup(dev)) {
if (info->id_gpiod)
disable_irq_wake(info->id_irq);
if (info->vbus_gpiod)
disable_irq_wake(info->vbus_irq);
return 0;
}
pinctrl_pm_select_default_state(dev);
if (info->id_gpiod)


@ -122,8 +122,6 @@ struct ffs_ep {
struct usb_endpoint_descriptor *descs[3];
u8 num;
int status; /* P: epfile->mutex */
};
struct ffs_epfile {
@ -227,6 +225,9 @@ struct ffs_io_data {
bool use_sg;
struct ffs_data *ffs;
int status;
struct completion done;
};
struct ffs_desc_helper {
@ -705,12 +706,15 @@ static const struct file_operations ffs_ep0_operations = {
static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
struct ffs_io_data *io_data = req->context;
ENTER();
if (likely(req->context)) {
struct ffs_ep *ep = _ep->driver_data;
ep->status = req->status ? req->status : req->actual;
complete(req->context);
}
if (req->status)
io_data->status = req->status;
else
io_data->status = req->actual;
complete(&io_data->done);
}
static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
@ -1048,7 +1052,6 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
WARN(1, "%s: data_len == -EINVAL\n", __func__);
ret = -EINVAL;
} else if (!io_data->aio) {
DECLARE_COMPLETION_ONSTACK(done);
bool interrupted = false;
req = ep->req;
@ -1064,7 +1067,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
io_data->buf = data;
req->context = &done;
init_completion(&io_data->done);
req->context = io_data;
req->complete = ffs_epfile_io_complete;
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
@ -1073,7 +1077,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
spin_unlock_irq(&epfile->ffs->eps_lock);
if (unlikely(wait_for_completion_interruptible(&done))) {
if (unlikely(wait_for_completion_interruptible(&io_data->done))) {
spin_lock_irq(&epfile->ffs->eps_lock);
if (epfile->ep != ep) {
ret = -ESHUTDOWN;
goto error_lock;
}
/*
* To avoid race condition with ffs_epfile_io_complete,
* dequeue the request first then check
@ -1081,17 +1090,18 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
* condition with req->complete callback.
*/
usb_ep_dequeue(ep->ep, req);
wait_for_completion(&done);
interrupted = ep->status < 0;
spin_unlock_irq(&epfile->ffs->eps_lock);
wait_for_completion(&io_data->done);
interrupted = io_data->status < 0;
}
if (interrupted)
ret = -EINTR;
else if (io_data->read && ep->status > 0)
ret = __ffs_epfile_read_data(epfile, data, ep->status,
else if (io_data->read && io_data->status > 0)
ret = __ffs_epfile_read_data(epfile, data, io_data->status,
&io_data->data);
else
ret = ep->status;
ret = io_data->status;
goto error_mutex;
} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
ret = -ENOMEM;


@ -1188,6 +1188,8 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
int msf = common->cmnd[1] & 0x02;
int start_track = common->cmnd[6];
u8 *buf = (u8 *)bh->buf;
u8 format;
int i, len;
if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
start_track > 1) {
@ -1195,18 +1197,62 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
return -EINVAL;
}
memset(buf, 0, 20);
buf[1] = (20-2); /* TOC data length */
buf[2] = 1; /* First track number */
buf[3] = 1; /* Last track number */
buf[5] = 0x16; /* Data track, copying allowed */
buf[6] = 0x01; /* Only track is number 1 */
store_cdrom_address(&buf[8], msf, 0);
format = common->cmnd[2] & 0xf;
/*
* Check if the CDB is old-style SFF-8020i,
* i.e. the format is in the 2 MSBs of byte 9.
* Mac OS X hosts send us this.
*/
if (format == 0)
format = (common->cmnd[9] >> 6) & 0x3;
buf[13] = 0x16; /* Lead-out track is data */
buf[14] = 0xAA; /* Lead-out track number */
store_cdrom_address(&buf[16], msf, curlun->num_sectors);
return 20;
switch (format) {
case 0:
/* Formatted TOC */
len = 4 + 2*8; /* 4 byte header + 2 descriptors */
memset(buf, 0, len);
buf[1] = len - 2; /* TOC Length excludes length field */
buf[2] = 1; /* First track number */
buf[3] = 1; /* Last track number */
buf[5] = 0x16; /* Data track, copying allowed */
buf[6] = 0x01; /* Only track is number 1 */
store_cdrom_address(&buf[8], msf, 0);
buf[13] = 0x16; /* Lead-out track is data */
buf[14] = 0xAA; /* Lead-out track number */
store_cdrom_address(&buf[16], msf, curlun->num_sectors);
return len;
case 2:
/* Raw TOC */
len = 4 + 3*11; /* 4 byte header + 3 descriptors */
memset(buf, 0, len); /* Header + A0, A1 & A2 descriptors */
buf[1] = len - 2; /* TOC Length excludes length field */
buf[2] = 1; /* First complete session */
buf[3] = 1; /* Last complete session */
buf += 4;
/* fill in A0, A1 and A2 points */
for (i = 0; i < 3; i++) {
buf[0] = 1; /* Session number */
buf[1] = 0x16; /* Data track, copying allowed */
/* 2 - Track number 0 -> TOC */
buf[3] = 0xA0 + i; /* A0, A1, A2 point */
/* 4, 5, 6 - Min, sec, frame is zero */
buf[8] = 1; /* Pmin: last track number */
buf += 11; /* go to next track descriptor */
}
buf -= 11; /* go back to A2 descriptor */
/* For A2, 7, 8, 9, 10 - zero, Pmin, Psec, Pframe of Lead out */
store_cdrom_address(&buf[7], msf, curlun->num_sectors);
return len;
default:
/* Multi-session, PMA, ATIP, CD-TEXT not supported/required */
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
}
static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
@ -1933,7 +1979,7 @@ static int do_scsi_command(struct fsg_common *common)
common->data_size_from_cmnd =
get_unaligned_be16(&common->cmnd[7]);
reply = check_command(common, 10, DATA_DIR_TO_HOST,
(7<<6) | (1<<1), 1,
(0xf<<6) | (3<<1), 1,
"READ TOC");
if (reply == 0)
reply = do_read_toc(common, bh);
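
As a concrete illustration of the format handling added above (the CDB bytes are hypothetical, not taken from this patch): a host issuing READ TOC with cmnd[2] = 0x00 but cmnd[9] = 0x80, the old SFF-8020i style, ends up with format = (0x80 >> 6) & 0x3 = 2 and takes the raw-TOC branch, while a host setting cmnd[2] = 0x02 selects that branch directly without the byte-9 fallback; the relaxed check_command() mask is what permits those extra CDB bits in the first place.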


@ -43,7 +43,8 @@ static int uvc_queue_setup(struct vb2_queue *vq,
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
struct uvc_video *video = container_of(queue, struct uvc_video, queue);
struct usb_composite_dev *cdev = video->uvc->func.config->cdev;
unsigned int req_size;
unsigned int nreq;
if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
*nbuffers = UVC_MAX_VIDEO_BUFFERS;
@ -52,10 +53,16 @@ static int uvc_queue_setup(struct vb2_queue *vq,
sizes[0] = video->imagesize;
if (cdev->gadget->speed < USB_SPEED_SUPER)
video->uvc_num_requests = 4;
else
video->uvc_num_requests = 64;
req_size = video->ep->maxpacket
* max_t(unsigned int, video->ep->maxburst, 1)
* (video->ep->mult);
/* We divide by two to increase the chance of ending up
* with fewer requests for smaller frame sizes.
*/
nreq = DIV_ROUND_UP(DIV_ROUND_UP(sizes[0], 2), req_size);
nreq = clamp(nreq, 4U, 64U);
video->uvc_num_requests = nreq;
return 0;
}
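
To put numbers on the request sizing above (the endpoint values are hypothetical, not taken from this patch): with maxpacket = 1024, maxburst = 1 and mult = 1, req_size is 1024 bytes. A 614400-byte 640x480 YUY2 frame then yields DIV_ROUND_UP(DIV_ROUND_UP(614400, 2), 1024) = 300, which the clamp reduces to the 64-request ceiling, while a 38400-byte frame yields DIV_ROUND_UP(19200, 1024) = 19 requests, matching the intent of using fewer requests for smaller frame sizes.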


@ -302,6 +302,9 @@ static void uvcg_video_pump(struct work_struct *work)
uvcg_queue_cancel(queue, 0);
break;
}
/* Endpoint now owns the request */
req = NULL;
}
if (!req)


@ -141,7 +141,7 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
kfree(sbi->vol_amap);
}
int exfat_set_bitmap(struct inode *inode, unsigned int clu)
int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
{
int i, b;
unsigned int ent_idx;
@ -154,7 +154,7 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
set_bit_le(b, sbi->vol_amap[i]->b_data);
exfat_update_bh(sbi->vol_amap[i], IS_DIRSYNC(inode));
exfat_update_bh(sbi->vol_amap[i], sync);
return 0;
}


@ -317,7 +317,7 @@ int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu)
exfat_chain_set(clu, EXFAT_EOF_CLUSTER, 0, ALLOC_NO_FAT_CHAIN);
ret = exfat_alloc_cluster(inode, 1, clu);
ret = exfat_alloc_cluster(inode, 1, clu, IS_DIRSYNC(inode));
if (ret)
return ret;


@ -388,7 +388,7 @@ int exfat_clear_volume_dirty(struct super_block *sb);
#define exfat_get_next_cluster(sb, pclu) exfat_ent_get(sb, *(pclu), pclu)
int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
struct exfat_chain *p_chain);
struct exfat_chain *p_chain, bool sync_bmap);
int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain);
int exfat_ent_get(struct super_block *sb, unsigned int loc,
unsigned int *content);
@ -407,7 +407,7 @@ int exfat_count_num_clusters(struct super_block *sb,
/* balloc.c */
int exfat_load_bitmap(struct super_block *sb);
void exfat_free_bitmap(struct exfat_sb_info *sbi);
int exfat_set_bitmap(struct inode *inode, unsigned int clu);
int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync);
void exfat_clear_bitmap(struct inode *inode, unsigned int clu);
unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu);
int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count);


@ -277,7 +277,7 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
}
int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
struct exfat_chain *p_chain)
struct exfat_chain *p_chain, bool sync_bmap)
{
int ret = -ENOSPC;
unsigned int num_clusters = 0, total_cnt;
@ -339,7 +339,7 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
}
/* update allocation bitmap */
if (exfat_set_bitmap(inode, new_clu)) {
if (exfat_set_bitmap(inode, new_clu, sync_bmap)) {
ret = -EIO;
goto free_cluster;
}


@ -178,7 +178,8 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
return -EIO;
}
ret = exfat_alloc_cluster(inode, num_to_be_allocated, &new_clu);
ret = exfat_alloc_cluster(inode, num_to_be_allocated, &new_clu,
inode_needs_sync(inode));
if (ret)
return ret;


@ -340,7 +340,7 @@ static int exfat_find_empty_entry(struct inode *inode,
exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
/* allocate a cluster */
ret = exfat_alloc_cluster(inode, 1, &clu);
ret = exfat_alloc_cluster(inode, 1, &clu, IS_DIRSYNC(inode));
if (ret)
return ret;


@ -171,6 +171,7 @@ static inline void dma_pernuma_cma_reserve(void) { }
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
@ -189,6 +190,8 @@ static inline int dma_declare_coherent_memory(struct device *dev,
{
return -ENOSYS;
}
#define dma_release_coherent_memory(dev) (0)
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)


@ -11,13 +11,26 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
struct binder_transaction;
struct task_struct;
#ifdef __GENKSYMS__
struct binder_alloc;
struct binder_proc;
struct binder_thread;
struct binder_transaction_data;
struct binder_transaction;
struct task_struct;
struct seq_file;
struct binder_transaction_data;
#else
/* struct binder_alloc */
#include <../drivers/android/binder_alloc.h>
/* struct binder_proc, struct binder_thread, struct binder_transaction */
#include <../drivers/android/binder_internal.h>
/* struct task_struct */
#include <linux/sched.h>
/* struct seq_file */
#include <linux/seq_file.h>
/* struct binder_transaction_data */
#include <uapi/linux/android/binder.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_binder_transaction_init,
TP_PROTO(struct binder_transaction *t),
TP_ARGS(t));
@ -30,8 +43,6 @@ DECLARE_HOOK(android_vh_binder_set_priority,
DECLARE_HOOK(android_vh_binder_restore_priority,
TP_PROTO(struct binder_transaction *t, struct task_struct *task),
TP_ARGS(t, task));
struct binder_proc;
struct binder_thread;
DECLARE_HOOK(android_vh_binder_wakeup_ilocked,
TP_PROTO(struct task_struct *task, bool sync, struct binder_proc *proc),
TP_ARGS(task, sync, proc));


@ -10,9 +10,18 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
struct blk_mq_tag_set;
#ifdef __GENKSYMS__
struct blk_mq_tags;
struct blk_mq_alloc_data;
struct blk_mq_tag_set;
#else
/* struct blk_mq_tags */
#include <../block/blk-mq-tag.h>
/* struct blk_mq_alloc_data */
#include <../block/blk-mq.h>
/* struct blk_mq_tag_set */
#include <linux/blk-mq.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_blk_alloc_rqs,
TP_PROTO(size_t *rq_size, struct blk_mq_tag_set *set,


@ -8,7 +8,16 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
struct cgroup_taskset;
#ifdef __GENKSYMS__
struct cgroup_subsys;
struct task_struct;
#else
/* struct cgroup_subsys */
#include <linux/cgroup-defs.h>
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_cgroup_set_task,
TP_PROTO(int ret, struct task_struct *task),
TP_ARGS(ret, task));
@ -22,8 +31,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_refrigerator,
TP_PROTO(bool f),
TP_ARGS(f), 1);
struct cgroup_subsys;
struct cgroup_taskset;
DECLARE_HOOK(android_vh_cgroup_attach,
TP_PROTO(struct cgroup_subsys *ss, struct cgroup_taskset *tset),
TP_ARGS(ss, tset))


@ -10,7 +10,12 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct cpuidle_device;
#else
/* struct cpuidle_device */
#include <linux/cpuidle.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_cpu_idle_enter,
TP_PROTO(int *state, struct cpuidle_device *dev),


@ -11,7 +11,12 @@
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct cpuidle_device;
#else
/* struct cpuidle_device */
#include <linux/cpuidle.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_cpuidle_psci_enter,
TP_PROTO(struct cpuidle_device *dev, bool s2idle),
TP_ARGS(dev, s2idle));


@ -11,8 +11,15 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct cred;
struct task_struct;
#else
/* struct cred */
#include <linux/cred.h>
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_commit_creds,
TP_PROTO(const struct task_struct *task, const struct cred *new),
TP_ARGS(task, new));


@ -11,7 +11,12 @@
#include <trace/hooks/vendor_hooks.h>
#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
#ifdef __GENKSYMS__
struct pt_regs;
#else
/* struct pt_regs */
#include <asm/ptrace.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_ipi_stop,
TP_PROTO(struct pt_regs *regs),


@ -11,7 +11,21 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct mutex;
struct rt_mutex;
struct rw_semaphore;
struct task_struct;
#else
/* struct mutex */
#include <linux/mutex.h>
/* struct rt_mutex */
#include <linux/rtmutex.h>
/* struct rw_semaphore */
#include <linux/rwsem.h>
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_mutex_wait_start,
TP_PROTO(struct mutex *lock),
TP_ARGS(lock));
@ -19,7 +33,6 @@ DECLARE_HOOK(android_vh_mutex_wait_finish,
TP_PROTO(struct mutex *lock),
TP_ARGS(lock));
struct rt_mutex;
DECLARE_HOOK(android_vh_rtmutex_wait_start,
TP_PROTO(struct rt_mutex *lock),
TP_ARGS(lock));
@ -27,7 +40,6 @@ DECLARE_HOOK(android_vh_rtmutex_wait_finish,
TP_PROTO(struct rt_mutex *lock),
TP_ARGS(lock));
struct rw_semaphore;
DECLARE_HOOK(android_vh_rwsem_read_wait_start,
TP_PROTO(struct rw_semaphore *sem),
TP_ARGS(sem));
@ -41,7 +53,6 @@ DECLARE_HOOK(android_vh_rwsem_write_wait_finish,
TP_PROTO(struct rw_semaphore *sem),
TP_ARGS(sem));
struct task_struct;
DECLARE_HOOK(android_vh_sched_show_task,
TP_PROTO(struct task_struct *task),
TP_ARGS(task));
@ -57,6 +68,9 @@ DECLARE_HOOK(android_vh_mutex_unlock_slowpath,
DECLARE_HOOK(android_vh_mutex_unlock_slowpath_end,
TP_PROTO(struct mutex *lock, struct task_struct *next),
TP_ARGS(lock, next));
DECLARE_HOOK(android_vh_mutex_start_check_new_owner,
TP_PROTO(struct mutex *lock),
TP_ARGS(lock));
/* macro versions of hooks are no longer required */


@ -11,7 +11,12 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct pt_regs;
#else
/* struct pt_regs */
#include <asm/ptrace.h>
#endif /* __GENKSYMS__ */
DECLARE_RESTRICTED_HOOK(android_rvh_die_kernel_fault,
TP_PROTO(struct pt_regs *regs, unsigned int esr, unsigned long addr, const char *msg),
TP_ARGS(regs, esr, addr, msg), 1);


@ -8,7 +8,12 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct crypto_aes_ctx;
#else
/* struct crypto_aes_ctx */
#include <crypto/aes.h>
#endif /* __GENKSYMS__ */
/*
* These hooks exist only for the benefit of the FIPS140 crypto module, which


@ -10,7 +10,12 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct task_struct;
#else
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_is_fpsimd_save,
TP_PROTO(struct task_struct *prev, struct task_struct *next),


@ -10,8 +10,15 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
struct irq_data;
#ifdef __GENKSYMS__
struct cpumask;
struct irq_data;
#else
/* struct cpumask */
#include <linux/cpumask.h>
/* struct irq_data */
#include <linux/irq.h>
#endif /* __GENKSYMS__ */
DECLARE_RESTRICTED_HOOK(android_rvh_gic_v3_set_affinity,
TP_PROTO(struct irq_data *d, const struct cpumask *mask_val,
u64 *affinity, bool force, void __iomem *base),


@ -12,6 +12,12 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct iova_domain;
#else
/* struct iova_domain */
#include <linux/iova.h>
#endif /* __GENKSYMS__ */
DECLARE_RESTRICTED_HOOK(android_rvh_iommu_setup_dma_ops,
TP_PROTO(struct device *dev, u64 dma_base, u64 size),
TP_ARGS(dev, dma_base, size), 1);
@ -24,7 +30,6 @@ DECLARE_HOOK(android_vh_iommu_alloc_iova,
TP_PROTO(struct device *dev, dma_addr_t iova, size_t size),
TP_ARGS(dev, iova, size));
struct iova_domain;
DECLARE_HOOK(android_vh_iommu_iovad_alloc_iova,
TP_PROTO(struct device *dev, struct iova_domain *iovad, dma_addr_t iova, size_t size),


@ -10,8 +10,13 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
struct printk_ringbuffer;
#ifdef __GENKSYMS__
struct printk_record;
struct printk_ringbuffer;
#else
/* struct printk_record, struct printk_ringbuffer */
#include <../kernel/printk/printk_ringbuffer.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_logbuf,
TP_PROTO(struct printk_ringbuffer *rb, struct printk_record *r),


@ -14,7 +14,26 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct slabinfo;
struct cgroup_subsys_state;
struct device;
struct mem_cgroup;
struct readahead_control;
#else
/* struct slabinfo */
#include <../mm/slab.h>
/* struct cgroup_subsys_state */
#include <linux/cgroup-defs.h>
/* struct device */
#include <linux/device.h>
/* struct mem_cgroup */
#include <linux/memcontrol.h>
/* struct readahead_control */
#include <linux/pagemap.h>
#endif /* __GENKSYMS__ */
struct cma;
struct swap_slots_cache;
DECLARE_RESTRICTED_HOOK(android_rvh_set_skip_swapcache_flags,
TP_PROTO(gfp_t *flags),
@ -68,7 +87,6 @@ DECLARE_HOOK(android_vh_include_reserved_zone,
DECLARE_HOOK(android_vh_show_mem,
TP_PROTO(unsigned int filter, nodemask_t *nodemask),
TP_ARGS(filter, nodemask));
struct slabinfo;
struct dirty_throttle_control;
DECLARE_HOOK(android_vh_mm_dirty_limits,
TP_PROTO(struct dirty_throttle_control *const gdtc, bool strictlimit,
@ -88,7 +106,6 @@ DECLARE_HOOK(android_vh_show_stack_hash,
DECLARE_HOOK(android_vh_save_track_hash,
TP_PROTO(bool alloc, unsigned long p),
TP_ARGS(alloc, p));
struct mem_cgroup;
DECLARE_HOOK(android_vh_vmpressure,
TP_PROTO(struct mem_cgroup *memcg, bool *bypass),
TP_ARGS(memcg, bypass));
@ -101,7 +118,6 @@ DECLARE_HOOK(android_vh_mem_cgroup_free,
DECLARE_HOOK(android_vh_mem_cgroup_id_remove,
TP_PROTO(struct mem_cgroup *memcg),
TP_ARGS(memcg));
struct cgroup_subsys_state;
DECLARE_HOOK(android_vh_mem_cgroup_css_online,
TP_PROTO(struct cgroup_subsys_state *css, struct mem_cgroup *memcg),
TP_ARGS(css, memcg));
@ -128,14 +144,77 @@ DECLARE_HOOK(android_vh_cma_drain_all_pages_bypass,
DECLARE_HOOK(android_vh_pcplist_add_cma_pages_bypass,
TP_PROTO(int migratetype, bool *bypass),
TP_ARGS(migratetype, bypass));
struct device;
DECLARE_HOOK(android_vh_subpage_dma_contig_alloc,
TP_PROTO(bool *allow_subpage_alloc, struct device *dev, size_t *size),
TP_ARGS(allow_subpage_alloc, dev, size));
struct readahead_control;
DECLARE_HOOK(android_vh_ra_tuning_max_page,
TP_PROTO(struct readahead_control *ractl, unsigned long *max_page),
TP_ARGS(ractl, max_page));
DECLARE_HOOK(android_vh_handle_pte_fault_end,
TP_PROTO(struct vm_fault *vmf, unsigned long highest_memmap_pfn),
TP_ARGS(vmf, highest_memmap_pfn));
DECLARE_HOOK(android_vh_cow_user_page,
TP_PROTO(struct vm_fault *vmf, struct page *page),
TP_ARGS(vmf, page));
DECLARE_HOOK(android_vh_swapin_add_anon_rmap,
TP_PROTO(struct vm_fault *vmf, struct page *page),
TP_ARGS(vmf, page));
DECLARE_HOOK(android_vh_waiting_for_page_migration,
TP_PROTO(struct page *page),
TP_ARGS(page));
DECLARE_HOOK(android_vh_migrate_page_states,
TP_PROTO(struct page *page, struct page *newpage),
TP_ARGS(page, newpage));
DECLARE_HOOK(android_vh_page_referenced_one_end,
TP_PROTO(struct vm_area_struct *vma, struct page *page, int referenced),
TP_ARGS(vma, page, referenced));
DECLARE_HOOK(android_vh_count_pswpin,
TP_PROTO(struct swap_info_struct *sis),
TP_ARGS(sis));
DECLARE_HOOK(android_vh_count_pswpout,
TP_PROTO(struct swap_info_struct *sis),
TP_ARGS(sis));
DECLARE_HOOK(android_vh_count_swpout_vm_event,
TP_PROTO(struct swap_info_struct *sis, struct page *page, bool *skip),
TP_ARGS(sis, page, skip));
DECLARE_HOOK(android_vh_swap_slot_cache_active,
TP_PROTO(bool swap_slot_cache_active),
TP_ARGS(swap_slot_cache_active));
DECLARE_HOOK(android_vh_drain_slots_cache_cpu,
TP_PROTO(struct swap_slots_cache *cache, unsigned int type,
bool free_slots, bool *skip),
TP_ARGS(cache, type, free_slots, skip));
DECLARE_HOOK(android_vh_alloc_swap_slot_cache,
TP_PROTO(struct swap_slots_cache *cache, int *ret, bool *skip),
TP_ARGS(cache, ret, skip));
DECLARE_HOOK(android_vh_free_swap_slot,
TP_PROTO(swp_entry_t entry, struct swap_slots_cache *cache, bool *skip),
TP_ARGS(entry, cache, skip));
DECLARE_HOOK(android_vh_get_swap_page,
TP_PROTO(struct page *page, swp_entry_t *entry,
struct swap_slots_cache *cache, bool *found),
TP_ARGS(page, entry, cache, found));
DECLARE_HOOK(android_vh_page_isolated_for_reclaim,
TP_PROTO(struct mm_struct *mm, struct page *page),
TP_ARGS(mm, page));
DECLARE_HOOK(android_vh_account_swap_pages,
TP_PROTO(struct swap_info_struct *si, bool *skip),
TP_ARGS(si, skip));
DECLARE_HOOK(android_vh_unuse_swap_page,
TP_PROTO(struct swap_info_struct *si, struct page *page),
TP_ARGS(si, page));
DECLARE_HOOK(android_vh_init_swap_info_struct,
TP_PROTO(struct swap_info_struct *p, struct plist_head *swap_avail_heads),
TP_ARGS(p, swap_avail_heads));
DECLARE_HOOK(android_vh_si_swapinfo,
TP_PROTO(struct swap_info_struct *si, bool *skip),
TP_ARGS(si, skip));
DECLARE_HOOK(android_vh_alloc_si,
TP_PROTO(struct swap_info_struct **p, bool *skip),
TP_ARGS(p, skip));
DECLARE_HOOK(android_vh_free_pages,
TP_PROTO(struct page *page, unsigned int order),
TP_ARGS(page, order));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_MM_H */


@ -10,9 +10,18 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
struct mmc_host;
struct mmc_card;
#ifdef __GENKSYMS__
struct sdhci_host;
struct mmc_card;
struct mmc_host;
#else
/* struct sdhci_host */
#include <../drivers/mmc/host/sdhci.h>
/* struct mmc_card */
#include <linux/mmc/card.h>
/* struct mmc_host */
#include <linux/mmc/host.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_mmc_blk_reset,
TP_PROTO(struct mmc_host *host, int err, bool *allow),


@ -11,7 +11,12 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct module;
#else
/* struct module */
#include <linux/module.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_set_module_permit_before_init,
TP_PROTO(const struct module *mod),
TP_ARGS(mod));


@ -9,9 +9,18 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct packet_type;
struct list_head;
struct sk_buff;
struct list_head;
#else
/* struct packet_type */
#include <linux/netdevice.h>
/* struct sk_buff */
#include <linux/skbuff.h>
/* struct list_head */
#include <linux/types.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_ptype_head,
TP_PROTO(const struct packet_type *pt, struct list_head *vendor_pt),
TP_ARGS(pt, vendor_pt));


@ -11,7 +11,12 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct generic_pm_domain;
#else
/* struct generic_pm_domain */
#include <linux/pm_domain.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_allow_domain_state,
TP_PROTO(struct generic_pm_domain *genpd, uint32_t idx, bool *allow),
TP_ARGS(genpd, idx, allow))


@ -11,7 +11,17 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
enum freq_qos_req_type;
struct freq_constraints;
struct freq_qos_request;
struct task_struct;
#else
/* enum freq_qos_req_type, struct freq_constraints, struct freq_qos_request */
#include <linux/pm_qos.h>
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_try_to_freeze_todo,
TP_PROTO(unsigned int todo, unsigned int elapsed_msecs, bool wq_busy),
TP_ARGS(todo, elapsed_msecs, wq_busy));
@ -20,9 +30,6 @@ DECLARE_HOOK(android_vh_try_to_freeze_todo_unfrozen,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
enum freq_qos_req_type;
struct freq_qos_request;
struct freq_constraints;
DECLARE_HOOK(android_vh_freq_qos_add_request,
TP_PROTO(struct freq_constraints *qos, struct freq_qos_request *req,


@ -12,8 +12,13 @@
#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
struct psi_trigger;
#ifdef __GENKSYMS__
struct psi_group;
struct psi_trigger;
#else
/* struct psi_group, struct psi_trigger */
#include <linux/psi_types.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_psi_event,
TP_PROTO(struct psi_trigger *t),
TP_ARGS(t));


@ -10,7 +10,12 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct rproc;
#else
/* struct rproc */
#include <linux/remoteproc.h>
#endif /* __GENKSYMS__ */
/* When recovery succeeds */
DECLARE_HOOK(android_vh_rproc_recovery,


@ -10,8 +10,13 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct rw_semaphore;
struct rwsem_waiter;
#else
/* struct rw_semaphore, struct rwsem_waiter */
#include <linux/rwsem.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_rwsem_init,
TP_PROTO(struct rw_semaphore *sem),
TP_ARGS(sem));


@ -10,7 +10,25 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
struct cgroup_taskset;
#ifdef __GENKSYMS__
struct cgroup_subsys_state;
struct cpufreq_policy;
struct em_perf_domain;
enum uclamp_id;
struct sched_entity;
struct task_struct;
struct uclamp_se;
#else
/* struct cgroup_subsys_state */
#include <linux/cgroup-defs.h>
/* struct cpufreq_policy */
#include <linux/cpufreq.h>
/* struct em_perf_domain */
#include <linux/energy_model.h>
/* enum uclamp_id, struct sched_entity, struct task_struct, struct uclamp_se */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_RESTRICTED_HOOK(android_rvh_select_task_rq_fair,
TP_PROTO(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags, int *new_cpu),
TP_ARGS(p, prev_cpu, sd_flag, wake_flags, new_cpu), 1);
@ -178,7 +196,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_account_irq,
TP_PROTO(struct task_struct *curr, int cpu, s64 delta),
TP_ARGS(curr, cpu, delta), 1);
struct sched_entity;
DECLARE_RESTRICTED_HOOK(android_rvh_place_entity,
TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial, u64 vruntime),
TP_ARGS(cfs_rq, se, initial, vruntime), 1);
@ -195,7 +212,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_update_misfit_status,
TP_PROTO(struct task_struct *p, struct rq *rq, bool *need_update),
TP_ARGS(p, rq, need_update), 1);
struct cgroup_taskset;
DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_attach,
TP_PROTO(struct cgroup_taskset *tset),
TP_ARGS(tset), 1);
@ -204,7 +220,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_can_attach,
TP_PROTO(struct cgroup_taskset *tset, int *retval),
TP_ARGS(tset, retval), 1);
struct cgroup_subsys_state;
DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_online,
TP_PROTO(struct cgroup_subsys_state *css),
TP_ARGS(css), 1);
@ -225,14 +240,12 @@ DECLARE_RESTRICTED_HOOK(android_rvh_sched_exec,
TP_PROTO(bool *cond),
TP_ARGS(cond), 1);
struct cpufreq_policy;
DECLARE_HOOK(android_vh_map_util_freq,
TP_PROTO(unsigned long util, unsigned long freq,
unsigned long cap, unsigned long *next_freq, struct cpufreq_policy *policy,
bool *need_freq_update),
TP_ARGS(util, freq, cap, next_freq, policy, need_freq_update));
struct em_perf_domain;
DECLARE_HOOK(android_vh_em_cpu_energy,
TP_PROTO(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util,
@ -268,7 +281,7 @@ DECLARE_HOOK(android_vh_set_wake_flags,
TP_PROTO(int *wake_flags, unsigned int *mode),
TP_ARGS(wake_flags, mode));
enum uclamp_id;
/* Conditionally defined upon CONFIG_UCLAMP_TASK */
struct uclamp_se;
DECLARE_RESTRICTED_HOOK(android_rvh_uclamp_eff_get,
TP_PROTO(struct task_struct *p, enum uclamp_id clamp_id,
@ -329,7 +342,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_after_dequeue_task,
TP_ARGS(rq, p), 1);
struct cfs_rq;
struct sched_entity;
struct rq_flags;
DECLARE_RESTRICTED_HOOK(android_rvh_enqueue_entity,
TP_PROTO(struct cfs_rq *cfs, struct sched_entity *se),


@ -8,7 +8,12 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct page;
#else
/* struct page */
#include <linux/mm_types.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_shmem_alloc_page,
TP_PROTO(struct page **page),
TP_ARGS(page));


@ -8,13 +8,21 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct task_struct;
#else
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_do_send_sig_info,
TP_PROTO(int sig, struct task_struct *killer, struct task_struct *dst),
TP_ARGS(sig, killer, dst));
DECLARE_HOOK(android_vh_process_killed,
TP_PROTO(struct task_struct *task, bool *reap),
TP_ARGS(task, reap));
DECLARE_HOOK(android_vh_killed_process,
TP_PROTO(struct task_struct *killer, struct task_struct *dst, bool *reap),
TP_ARGS(killer, dst, reap));
#endif /* _TRACE_HOOK_SIGNAL_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -11,7 +11,12 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct pt_regs;
#else
/* struct pt_regs */
#include <asm/ptrace.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_watchdog_timer_softlockup,
TP_PROTO(int duration, struct pt_regs *regs, bool is_panic),
TP_ARGS(duration, regs, is_panic));


@ -8,7 +8,12 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct task_struct;
#else
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_syscall_prctl_finished,
TP_PROTO(int option, struct task_struct *task),
TP_ARGS(option, task));


@ -11,8 +11,15 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct file;
union bpf_attr;
#else
/* struct file */
#include <linux/fs.h>
/* union bpf_attr */
#include <uapi/linux/bpf.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_check_mmap_file,
TP_PROTO(const struct file *file, unsigned long prot,
unsigned long flag, unsigned long ret),


@ -10,11 +10,16 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct thermal_zone_device;
#else
/* struct thermal_zone_device */
#include <linux/thermal.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_enable_thermal_genl_check,
TP_PROTO(int event, int tz_id, int *enable_thermal_genl),
TP_ARGS(event, tz_id, enable_thermal_genl));
struct thermal_zone_device;
DECLARE_HOOK(android_vh_thermal_pm_notify_suspend,
TP_PROTO(struct thermal_zone_device *tz, int *irq_wakeable),
TP_ARGS(tz, irq_wakeable));


@ -11,7 +11,12 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct pt_regs;
#else
/* struct pt_regs */
#include <asm/ptrace.h>
#endif /* __GENKSYMS__ */
DECLARE_RESTRICTED_HOOK(android_rvh_do_undefinstr,
TP_PROTO(struct pt_regs *regs, bool user),
TP_ARGS(regs, user),


@ -11,8 +11,13 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
struct tcpci;
#ifdef __GENKSYMS__
struct tcpci_data;
#else
/* struct tcpci_data */
#include <../drivers/usb/typec/tcpm/tcpci.h>
#endif /* __GENKSYMS__ */
struct tcpci;
struct tcpm_port;
#ifndef TYPEC_TIMER


@ -10,9 +10,20 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct ufs_hba;
struct request;
struct ufshcd_lrb;
struct uic_command;
struct request;
struct scsi_device;
#else
/* struct ufs_hba, struct ufshcd_lrb, struct uic_command */
#include <../drivers/scsi/ufs/ufshcd.h>
/* struct request */
#include <linux/blkdev.h>
/* struct scsi_device */
#include <scsi/scsi_device.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_ufs_fill_prdt,
TP_PROTO(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
@ -44,7 +55,6 @@ DECLARE_HOOK(android_vh_ufs_compl_command,
TP_PROTO(struct ufs_hba *hba, struct ufshcd_lrb *lrbp),
TP_ARGS(hba, lrbp));
struct uic_command;
DECLARE_HOOK(android_vh_ufs_send_uic_command,
TP_PROTO(struct ufs_hba *hba, struct uic_command *ucmd,
const char *str),
@ -58,7 +68,6 @@ DECLARE_HOOK(android_vh_ufs_check_int_errors,
TP_PROTO(struct ufs_hba *hba, bool queue_eh_work),
TP_ARGS(hba, queue_eh_work));
struct scsi_device;
DECLARE_HOOK(android_vh_ufs_update_sdev,
TP_PROTO(struct scsi_device *sdev),
TP_ARGS(sdev));


@ -10,12 +10,26 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct v4l2_subdev;
struct v4l2_subdev_pad_config;
struct v4l2_subdev_format;
struct v4l2_subdev_frame_interval;
struct v4l2_subdev_selection;
struct v4l2_fmtdesc;
struct v4l2_format;
#else
/* struct v4l2_subdev, struct v4l2_subdev_pad_config */
#include <media/v4l2-subdev.h>
/* struct v4l2_subdev_format, struct v4l2_subdev_frame_interval, struct v4l2_subdev_selection */
#include <uapi/linux/v4l2-subdev.h>
/* struct v4l2_fmtdesc, struct v4l2_format */
#include <uapi/linux/videodev2.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_clear_reserved_fmt_fields,
TP_PROTO(struct v4l2_format *fmt, int *ret),
TP_ARGS(fmt, ret));
struct v4l2_fmtdesc;
DECLARE_HOOK(android_vh_fill_ext_fmtdesc,
TP_PROTO(struct v4l2_fmtdesc *fmtd, const char **descr),
TP_ARGS(fmtd, descr));
@ -24,21 +38,16 @@ DECLARE_HOOK(android_vh_clear_mask_adjust,
TP_PROTO(unsigned int ctrl, int *n),
TP_ARGS(ctrl, n));
struct v4l2_subdev;
struct v4l2_subdev_pad_config;
struct v4l2_subdev_selection;
DECLARE_HOOK(android_vh_v4l2subdev_set_selection,
TP_PROTO(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *pad,
struct v4l2_subdev_selection *sel, int *ret),
TP_ARGS(sd, pad, sel, ret));
struct v4l2_subdev_format;
DECLARE_HOOK(android_vh_v4l2subdev_set_fmt,
TP_PROTO(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *pad,
struct v4l2_subdev_format *format, int *ret),
TP_ARGS(sd, pad, format, ret));
struct v4l2_subdev_frame_interval;
DECLARE_HOOK(android_vh_v4l2subdev_set_frame_interval,
TP_PROTO(struct v4l2_subdev *sd, struct v4l2_subdev_frame_interval *fi,
int *ret),


@ -10,8 +10,15 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct media_link;
struct media_link_desc;
#else
/* struct media_link */
#include <media/media-entity.h>
/* struct media_link_desc */
#include <uapi/linux/media.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_media_device_setup_link,
TP_PROTO(struct media_link *link, struct media_link_desc *linkd, int *ret),
TP_ARGS(link, linkd, ret));


@ -34,6 +34,13 @@ DECLARE_HOOK(android_vh_shrink_node_memcgs,
DECLARE_HOOK(android_vh_tune_memcg_scan_type,
TP_PROTO(struct mem_cgroup *memcg, char *scan_type),
TP_ARGS(memcg, scan_type));
DECLARE_HOOK(android_vh_inactive_is_low,
TP_PROTO(unsigned long gb, unsigned long *inactive_ratio,
enum lru_list inactive_lru, bool *skip),
TP_ARGS(gb, inactive_ratio, inactive_lru, skip));
DECLARE_HOOK(android_vh_snapshot_refaults,
TP_PROTO(struct lruvec *target_lruvec),
TP_ARGS(target_lruvec));
#endif /* _TRACE_HOOK_VMSCAN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -84,7 +84,7 @@ static int dma_init_coherent_memory(phys_addr_t phys_addr,
return ret;
}
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
static void _dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
if (!mem)
return;
@ -136,10 +136,16 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
ret = dma_assign_coherent_memory(dev, mem);
if (ret)
dma_release_coherent_memory(mem);
_dma_release_coherent_memory(mem);
return ret;
}
void dma_release_coherent_memory(struct device *dev)
{
if (dev)
_dma_release_coherent_memory(dev->dma_mem);
}
static void *__dma_alloc_from_coherent(struct device *dev,
struct dma_coherent_mem *mem,
ssize_t size, dma_addr_t *dma_handle)


@ -1147,10 +1147,11 @@ static void check_sync(struct device *dev,
dir2name[entry->direction],
dir2name[ref->direction]);
/* sg list count can be less than map count when partial cache sync */
if (ref->sg_call_ents && ref->type == dma_debug_sg &&
ref->sg_call_ents != entry->sg_call_ents) {
ref->sg_call_ents > entry->sg_call_ents) {
err_printk(ref->dev, entry, "device driver syncs "
"DMA sg list with different entry count "
"DMA sg list count larger than map count "
"[map count=%d] [sync count=%d]\n",
entry->sg_call_ents, ref->sg_call_ents);
}


@ -1049,6 +1049,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
goto err;
}
trace_android_vh_mutex_start_check_new_owner(lock);
spin_unlock(&lock->wait_lock);
schedule_preempt_disabled();


@ -1420,6 +1420,7 @@ int group_send_sig_info(int sig, struct kernel_siginfo *info,
bool reap = false;
trace_android_vh_process_killed(current, &reap);
trace_android_vh_killed_process(current, p, &reap);
if (reap)
add_to_oom_reaper(p);
}


@ -29,6 +29,7 @@
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>
#include <trace/hooks/mm.h>
#include <asm/tlb.h>
@ -462,8 +463,10 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
if (!isolate_lru_page(page)) {
if (PageUnevictable(page))
putback_lru_page(page);
else
else {
list_add(&page->lru, &page_list);
trace_android_vh_page_isolated_for_reclaim(mm, page);
}
}
} else
deactivate_page(page);


@ -86,6 +86,7 @@
#include "pgalloc-track.h"
#include "internal.h"
#include <trace/hooks/mm.h>
#define CREATE_TRACE_POINTS
#include <trace/events/pagefault.h>
@ -3154,6 +3155,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
put_page(old_page);
return 0;
}
trace_android_vh_cow_user_page(vmf, new_page);
}
if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
@@ -3795,6 +3797,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
}
trace_android_vh_swapin_add_anon_rmap(vmf, page);
swap_free(entry);
if (mem_cgroup_swap_full(page) ||
(vmf->vma_flags & VM_LOCKED) || PageMlocked(page))
@@ -4773,6 +4776,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
if (vmf->flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
}
trace_android_vh_handle_pte_fault_end(vmf, highest_memmap_pfn);
unlock:
pte_unmap_unlock(vmf->pte, vmf->ptl);
return ret;

@@ -54,6 +54,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
#include "internal.h"
@@ -311,6 +313,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
if (!get_page_unless_zero(page))
goto out;
pte_unmap_unlock(ptep, ptl);
trace_android_vh_waiting_for_page_migration(page);
put_and_wait_on_page_locked(page);
return;
out:
@@ -584,6 +587,8 @@ void migrate_page_states(struct page *newpage, struct page *page)
{
int cpupid;
trace_android_vh_migrate_page_states(page, newpage);
if (PageError(page))
SetPageError(newpage);
if (PageReferenced(page))

@@ -5180,6 +5180,7 @@ static inline void free_the_page(struct page *page, unsigned int order)
void __free_pages(struct page *page, unsigned int order)
{
trace_android_vh_free_pages(page, order);
if (put_page_testzero(page))
free_the_page(page, order);
else if (!PageHead(page))
@@ -8779,7 +8780,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
trace_android_vh_cma_drain_all_pages_bypass(migratetype,
&skip_drain_all_pages);
if (skip_drain_all_pages)
if (!skip_drain_all_pages)
drain_all_pages(cc.zone);
/*

@@ -25,6 +25,7 @@
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <trace/hooks/mm.h>
static struct bio *get_swap_bio(gfp_t gfp_flags,
struct page *page, bio_end_io_t end_io)
@@ -256,6 +257,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
struct bio *bio;
int ret;
struct swap_info_struct *sis = page_swap_info(page);
bool skip = false;
VM_BUG_ON_PAGE(!PageSwapCache(page), page);
if (data_race(sis->flags & SWP_FS_OPS)) {
@@ -277,6 +279,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
unlock_page(page);
ret = mapping->a_ops->direct_IO(&kiocb, &from);
if (ret == PAGE_SIZE) {
trace_android_vh_count_pswpout(sis);
count_vm_event(PSWPOUT);
ret = 0;
} else {
@@ -301,7 +304,9 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
if (!ret) {
count_swpout_vm_event(page);
trace_android_vh_count_swpout_vm_event(sis, page, &skip);
if (!skip)
count_swpout_vm_event(page);
return 0;
}
@@ -313,7 +318,9 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
}
bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
bio_associate_blkg_from_page(bio, page);
count_swpout_vm_event(page);
trace_android_vh_count_swpout_vm_event(sis, page, &skip);
if (!skip)
count_swpout_vm_event(page);
set_page_writeback(page);
unlock_page(page);
submit_bio(bio);
@@ -352,14 +359,17 @@ int swap_readpage(struct page *page, bool synchronous)
struct address_space *mapping = swap_file->f_mapping;
ret = mapping->a_ops->readpage(swap_file, page);
if (!ret)
if (!ret) {
trace_android_vh_count_pswpin(sis);
count_vm_event(PSWPIN);
}
goto out;
}
if (sis->flags & SWP_SYNCHRONOUS_IO) {
ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
if (!ret) {
trace_android_vh_count_pswpin(sis);
count_vm_event(PSWPIN);
goto out;
}
@@ -383,6 +393,7 @@ int swap_readpage(struct page *page, bool synchronous)
get_task_struct(current);
bio->bi_private = current;
}
trace_android_vh_count_pswpin(sis);
count_vm_event(PSWPIN);
bio_get(bio);
qc = submit_bio(bio);
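
The hooks added above give vendor code visibility into swap I/O on a per-swap-device basis (trace_android_vh_count_pswpin/pswpout) and allow the generic swap-out event accounting to be suppressed via *skip. A rough sketch of a counting probe pair, with the prototypes inferred from the call sites shown; the global counters are purely illustrative.

#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <trace/hooks/mm.h>

static atomic64_t vendor_pswpin, vendor_pswpout;

static void vendor_count_pswpin(void *data, struct swap_info_struct *sis)
{
	atomic64_inc(&vendor_pswpin);
}

static void vendor_count_pswpout(void *data, struct swap_info_struct *sis)
{
	atomic64_inc(&vendor_pswpout);
}

static int __init vendor_swap_stats_init(void)
{
	int ret;

	ret = register_trace_android_vh_count_pswpin(vendor_count_pswpin, NULL);
	if (ret)
		return ret;
	return register_trace_android_vh_count_pswpout(vendor_count_pswpout, NULL);
}
module_init(vendor_swap_stats_init);
MODULE_LICENSE("GPL");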

@@ -819,6 +819,7 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
pra->vm_flags |= vma->vm_flags;
}
trace_android_vh_page_referenced_one_end(vma, page, referenced);
if (!pra->mapcount)
return false; /* To break the loop */

@@ -33,6 +33,7 @@
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <trace/hooks/mm.h>
static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool swap_slot_cache_active;
@@ -54,6 +55,7 @@ static void deactivate_swap_slots_cache(void)
{
mutex_lock(&swap_slots_cache_mutex);
swap_slot_cache_active = false;
trace_android_vh_swap_slot_cache_active(false);
__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
mutex_unlock(&swap_slots_cache_mutex);
}
@@ -62,6 +64,7 @@ static void reactivate_swap_slots_cache(void)
{
mutex_lock(&swap_slots_cache_mutex);
swap_slot_cache_active = true;
trace_android_vh_swap_slot_cache_active(true);
mutex_unlock(&swap_slots_cache_mutex);
}
@@ -89,7 +92,13 @@ void reenable_swap_slots_cache_unlock(void)
mutex_unlock(&swap_slots_cache_enable_mutex);
}
static bool check_cache_active(void)
bool is_swap_slot_cache_enabled(void)
{
return swap_slot_cache_enabled;
}
EXPORT_SYMBOL_GPL(is_swap_slot_cache_enabled);
bool check_cache_active(void)
{
long pages;
@@ -110,17 +119,24 @@ static bool check_cache_active(void)
out:
return swap_slot_cache_active;
}
EXPORT_SYMBOL_GPL(check_cache_active);
static int alloc_swap_slot_cache(unsigned int cpu)
{
struct swap_slots_cache *cache;
swp_entry_t *slots, *slots_ret;
bool skip = false;
int ret = 0;
/*
* Do allocation outside swap_slots_cache_mutex
* as kvzalloc could trigger reclaim and get_swap_page,
* which can lock swap_slots_cache_mutex.
*/
trace_android_vh_alloc_swap_slot_cache(&per_cpu(swp_slots, cpu),
&ret, &skip);
if (skip)
return ret;
slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
GFP_KERNEL);
if (!slots)
@@ -171,8 +187,13 @@ static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
{
struct swap_slots_cache *cache;
swp_entry_t *slots = NULL;
bool skip = false;
cache = &per_cpu(swp_slots, cpu);
trace_android_vh_drain_slots_cache_cpu(cache, type,
free_slots, &skip);
if (skip)
return;
if ((type & SLOTS_CACHE) && cache->slots) {
mutex_lock(&cache->alloc_lock);
swapcache_free_entries(cache->slots + cache->cur, cache->nr);
@@ -274,8 +295,12 @@ static int refill_swap_slots_cache(struct swap_slots_cache *cache)
int free_swap_slot(swp_entry_t entry)
{
struct swap_slots_cache *cache;
bool skip = false;
cache = raw_cpu_ptr(&swp_slots);
trace_android_vh_free_swap_slot(entry, cache, &skip);
if (skip)
return 0;
if (likely(use_swap_slot_cache && cache->slots_ret)) {
spin_lock_irq(&cache->free_lock);
/* Swap slots cache may be deactivated before acquiring lock */
@@ -307,9 +332,13 @@ swp_entry_t get_swap_page(struct page *page)
{
swp_entry_t entry;
struct swap_slots_cache *cache;
bool found = false;
entry.val = 0;
trace_android_vh_get_swap_page(page, &entry, raw_cpu_ptr(&swp_slots), &found);
if (found)
goto out;
if (PageTransHuge(page)) {
if (IS_ENABLED(CONFIG_THP_SWAP))
get_swap_pages(1, &entry, HPAGE_PMD_NR);

@@ -43,6 +43,7 @@
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>
#include <trace/hooks/mm.h>
static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
unsigned char);
@@ -98,7 +99,7 @@ static atomic_t proc_poll_event = ATOMIC_INIT(0);
atomic_t nr_rotate_swap = ATOMIC_INIT(0);
static struct swap_info_struct *swap_type_to_swap_info(int type)
struct swap_info_struct *swap_type_to_swap_info(int type)
{
if (type >= READ_ONCE(nr_swapfiles))
return NULL;
@@ -106,6 +107,7 @@ static struct swap_info_struct *swap_type_to_swap_info(int type)
smp_rmb(); /* Pairs with smp_wmb in alloc_swap_info. */
return READ_ONCE(swap_info[type]);
}
EXPORT_SYMBOL_GPL(swap_type_to_swap_info);
static inline unsigned char swap_count(unsigned char ent)
{
@@ -712,6 +714,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
unsigned long begin = offset;
unsigned long end = offset + nr_entries - 1;
void (*swap_slot_free_notify)(struct block_device *, unsigned long);
bool skip = false;
if (offset < si->lowest_bit)
si->lowest_bit = offset;
@@ -722,7 +725,9 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
if (was_full && (si->flags & SWP_WRITEOK))
add_to_avail_list(si);
}
atomic_long_add(nr_entries, &nr_swap_pages);
trace_android_vh_account_swap_pages(si, &skip);
if (!skip)
atomic_long_add(nr_entries, &nr_swap_pages);
si->inuse_pages -= nr_entries;
if (si->flags & SWP_BLKDEV)
swap_slot_free_notify =
@@ -767,7 +772,7 @@ static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
this_cpu_write(*si->cluster_next_cpu, next);
}
static int scan_swap_map_slots(struct swap_info_struct *si,
int scan_swap_map_slots(struct swap_info_struct *si,
unsigned char usage, int nr,
swp_entry_t slots[])
{
@@ -983,8 +988,9 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
si->flags -= SWP_SCANNING;
return n_ret;
}
EXPORT_SYMBOL_GPL(scan_swap_map_slots);
static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
{
unsigned long idx;
struct swap_cluster_info *ci;
@@ -1018,6 +1024,7 @@ static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
return 1;
}
EXPORT_SYMBOL_GPL(swap_alloc_cluster);
static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
{
@@ -1137,6 +1144,7 @@ swp_entry_t get_swap_page_of_type(int type)
{
struct swap_info_struct *si = swap_type_to_swap_info(type);
pgoff_t offset;
bool skip = false;
if (!si)
goto fail;
@@ -1146,7 +1154,9 @@ swp_entry_t get_swap_page_of_type(int type)
/* This is called for allocating swap entry, not cache */
offset = scan_swap_map(si, 1);
if (offset) {
atomic_long_dec(&nr_swap_pages);
trace_android_vh_account_swap_pages(si, &skip);
if (!skip)
atomic_long_dec(&nr_swap_pages);
spin_unlock(&si->lock);
return swp_entry(type, offset);
}
@@ -1478,6 +1488,7 @@ void swapcache_free_entries(swp_entry_t *entries, int n)
if (p)
spin_unlock(&p->lock);
}
EXPORT_SYMBOL_GPL(swapcache_free_entries);
/*
* How many references to page are currently swapped out?
@@ -2007,6 +2018,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
}
try_to_free_swap(page);
trace_android_vh_unuse_swap_page(si, page);
unlock_page(page);
put_page(page);
@@ -2245,6 +2257,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
lock_page(page);
wait_on_page_writeback(page);
try_to_free_swap(page);
trace_android_vh_unuse_swap_page(si, page);
unlock_page(page);
put_page(page);
@@ -2499,10 +2512,14 @@ static void setup_swap_info(struct swap_info_struct *p, int prio,
static void _enable_swap_info(struct swap_info_struct *p)
{
p->flags |= SWP_WRITEOK | SWP_VALID;
atomic_long_add(p->pages, &nr_swap_pages);
total_swap_pages += p->pages;
bool skip = false;
p->flags |= SWP_WRITEOK | SWP_VALID;
trace_android_vh_account_swap_pages(p, &skip);
if (!skip) {
atomic_long_add(p->pages, &nr_swap_pages);
total_swap_pages += p->pages;
}
assert_spin_locked(&swap_lock);
/*
* both lists are plists, and thus priority ordered.
@@ -2574,6 +2591,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
struct filename *pathname;
int err, found = 0;
unsigned int old_block_size;
bool skip = false;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -2628,8 +2646,11 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
least_priority++;
}
plist_del(&p->list, &swap_active_head);
atomic_long_sub(p->pages, &nr_swap_pages);
total_swap_pages -= p->pages;
trace_android_vh_account_swap_pages(p, &skip);
if (!skip) {
atomic_long_sub(p->pages, &nr_swap_pages);
total_swap_pages -= p->pages;
}
p->flags &= ~SWP_WRITEOK;
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
@@ -2881,12 +2902,15 @@ late_initcall(max_swapfiles_check);
static struct swap_info_struct *alloc_swap_info(void)
{
struct swap_info_struct *p;
struct swap_info_struct *p = NULL;
struct swap_info_struct *defer = NULL;
unsigned int type;
int i;
bool skip = false;
p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
trace_android_vh_alloc_si(&p, &skip);
if (!skip)
p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -3369,6 +3393,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
(swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
trace_android_vh_init_swap_info_struct(p, swap_avail_heads);
pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
@@ -3433,8 +3458,10 @@ void si_swapinfo(struct sysinfo *val)
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
struct swap_info_struct *si = swap_info[type];
bool skip = false;
if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
trace_android_vh_si_swapinfo(si, &skip);
if (!skip && (si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
nr_to_be_unused += si->inuse_pages;
}
val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
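
Several of the swapfile hooks above share one pattern: the handler receives a bool *skip and can take the operation over, so the stock accounting is bypassed when a vendor module manages a swap device itself (android_vh_account_swap_pages around nr_swap_pages/total_swap_pages, android_vh_si_swapinfo in si_swapinfo()). A hedged sketch of such a probe, with the prototype inferred from the call sites; the predicate used to recognize a vendor-managed device is a placeholder.

#include <linux/module.h>
#include <linux/swap.h>
#include <trace/hooks/mm.h>

static void vendor_account_swap_pages(void *data, struct swap_info_struct *si,
				      bool *skip)
{
	/* Placeholder check: hide this device from the global
	 * nr_swap_pages / total_swap_pages accounting. */
	if (si->flags & SWP_SYNCHRONOUS_IO)
		*skip = true;
}

static int __init vendor_swap_account_hook_init(void)
{
	return register_trace_android_vh_account_swap_pages(
			vendor_account_swap_pages, NULL);
}
module_init(vendor_swap_account_hook_init);
MODULE_LICENSE("GPL");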

@@ -2270,11 +2270,16 @@ static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
unsigned long inactive, active;
unsigned long inactive_ratio;
unsigned long gb;
bool skip = false;
inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
gb = (inactive + active) >> (30 - PAGE_SHIFT);
trace_android_vh_inactive_is_low(gb, &inactive_ratio, inactive_lru, &skip);
if (skip)
goto out;
if (gb)
inactive_ratio = int_sqrt(10 * gb);
else
@@ -2282,6 +2287,7 @@ static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
trace_android_vh_tune_inactive_ratio(&inactive_ratio, is_file_lru(inactive_lru));
out:
return inactive * inactive_ratio < active;
}
@@ -3079,6 +3085,7 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
target_lruvec->refaults[0] = refaults;
refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
target_lruvec->refaults[1] = refaults;
trace_android_vh_snapshot_refaults(target_lruvec);
}
/*