Merge branch 'android12-5.10' into branch 'android12-5.10-lts'

Catch the -lts branch up with the changes in the normal branch.

This includes the following commits:

* 13fdad03e6 ANDROID: GKI: fix ABI breakage in struct ipv6_devconf
* be6ad5e439 Reapply "net: release reference to inet6_dev pointer"
* a4508eb552 Reapply "net: change accept_ra_min_rtr_lft to affect all RA lifetimes"
* d4c4255fb4 Reapply "net: add sysctl accept_ra_min_rtr_lft"
* 08538f14e5 ANDROID: GKI: explicit include of stringify.h
* c9ca12bfd2 ANDROID: introduce a vendor hook to allow speculative swap pagefaults
* d47a714fa7 ANDROID: mm: allow limited speculative page faulting in do_swap_page()
* febebd3d31 UPSTREAM: net: tls, update curr on splice as well
* db57c320fa ANDROID: GKI: Update oplus symbol list
* 0a9731879a ANDROID: mm: Add vendor hooks for recording when kswapd finishing the reclaim job
* 8cbe4885a9 ANDROID: GKI: Update oplus symbol list
* 46469e117f ANDROID: vendor_hooks: Add hooks for adjusting alloc_flags
* 5fe6b5a194 Reapply "perf: Fix perf_event_validate_size()"
* 067856b87f UPSTREAM: ida: Fix crash in ida_free when the bitmap is empty
* fd764687e8 UPSTREAM: netfilter: nf_tables: Reject tables of unsupported family
* 102b3d7a64 UPSTREAM: net/rose: Fix Use-After-Free in rose_ioctl
*   f6de684297 Merge "Merge tag 'android12-5.10.205_r00' into branch 'android12-5.10'" into android12-5.10
|\
| * 3afa5a4331 Merge tag 'android12-5.10.205_r00' into branch 'android12-5.10'
* | 40c0aa9ee1 Reapply "perf: Disallow mis-matched inherited group reads"
* | 789c68e980 ANDROID: scsi: ufs: UFS HPB feature not working
* | 75c057fe65 ANDROID: GKI: Update symbol list for mtk
|/
* cbfcf517dc FROMGIT: usb: dwc: ep0: Update request status in dwc3_ep0_stall_restart
* 050c668fef FROMGIT: BACKPORT: mm: update mark_victim tracepoints fields
* 7efc668a49 ANDROID: ABI: update allowed list for galaxy
* 3e4106c3c9 BACKPORT: exfat: reduce block requests when zeroing a cluster
* a5ea932a75 UPSTREAM: netfilter: nf_tables: skip set commit for deleted/destroyed sets
* 99512f1216 FROMGIT: PM / devfreq: Synchronize devfreq_monitor_[start/stop]
* d5552f63b8 UPSTREAM: dm verity: don't perform FEC for failed readahead IO
* 2274751099 UPSTREAM: netfilter: nft_set_pipapo: skip inactive elements during set walk
* a72131befe UPSTREAM: ipv4: igmp: fix refcnt uaf issue when receiving igmp query packet
* 718225bcc1 UPSTREAM: x86/sev: Check for user-space IOIO pointing to kernel space
* 5b118b5b1d UPSTREAM: x86/sev: Check IOBM for IOIO exceptions from user-space
* 0edf68775a UPSTREAM: nvmet-tcp: Fix a possible UAF in queue intialization setup
* f22c548c91 FROMLIST: binder: fix memory leaks of spam and pending work
* 5144c1c2e3 ANDROID: Snapshot Mainline's version of checkpatch.pl
* b39b9bde93 ANDROID: scsi: ufs: vendor check response and recovery addition
* 4246d242fa ANDROID: scsi: ufs: add perf heuristic design
* 83578817d0 ANDROID: ABI: Update symbol list for Mediatek
* 85d8fb9163 ANDROID: Add vendor hook for ufs perf heuristic and error recovery
* 7b8b02fa37 UPSTREAM: io_uring/fdinfo: lock SQ thread while retrieving thread cpu/pid
* 469a933a87 UPSTREAM: ufs: core: wlun send SSU timeout recovery
* 1b6cfcd1ed ANDROID: GKI: db845c: Update symbols list and ABI on rpmsg_register_device_override
* ced76f823f ANDROID: fix up rpmsg_device ABI break
* 72b5689f02 ANDROID: fix up platform_device ABI break
* 4172086503 UPSTREAM: rpmsg: Fix possible refcount leak in rpmsg_register_device_override()
* 0f4467ff35 UPSTREAM: rpmsg: glink: Release driver_override
* fa6692abc2 BACKPORT: rpmsg: Fix calling device_lock() on non-initialized device
* 20f1dfc0f7 BACKPORT: rpmsg: Fix kfree() of static memory on setting driver_override
* 68482b6bd9 UPSTREAM: rpmsg: Constify local variable in field store macro
* 66b73fb5b9 UPSTREAM: driver: platform: Add helper for safer setting of driver_override
* 525ecbbfda FROMGIT: Input: uinput - allow injecting event times
* e6278ff4b7 ANDROID: abi_gki_aarch64_qcom: Add android_gki_sysctl_vals
* 4194727e2c UPSTREAM: kheaders: Have cpio unconditionally replace files
* efe30da3a3 ANDROID: ABI: Update oplus symbol list
* 464ef3ee59 ANDROID: vendor_hooks: Add hooks for binder
* ba88bbf1bb BACKPORT: firmware_loader: Abort all upcoming firmware load request once reboot triggered
* 10120b0270 UPSTREAM: firmware_loader: Refactor kill_pending_fw_fallback_reqs()
*   f40707abde Merge "Merge tag 'android12-5.10.198_r00' into android12-5.10" into android12-5.10
|\
| * 1289e04b48 Merge tag 'android12-5.10.198_r00' into android12-5.10
* 422887ef07 ANDROID: GKI: Update symbols to symbol list
* 91d2427218 ANDROID: Add Interrupt Hook for madvise Compression
* 4e38f783da UPSTREAM: netfilter: ipset: add the missing IP_SET_HASH_WITH_NET0 macro for ip_set_hash_netportnet.c

Change-Id: I9ace27ef5bb0f8ae42d808292ed4f5b9778ac8a6
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

@ -1902,6 +1902,14 @@ accept_ra_min_hop_limit - INTEGER
Default: 1
accept_ra_min_lft - INTEGER
Minimum acceptable lifetime value in Router Advertisement.
RA sections with a lifetime less than this value shall be
ignored. Zero lifetimes stay unaffected.
Default: 0
accept_ra_pinfo - BOOLEAN
Learn Prefix Information in Router Advertisement.

[File diff suppressed because it is too large]


@ -3316,6 +3316,7 @@
snd_card_disconnect
snd_card_free
snd_card_new
snd_card_ref
snd_card_register
snd_card_rw_proc_new
snd_component_add


@ -2227,7 +2227,14 @@
__traceiter_android_vh_snd_compr_use_pause_in_drain
__traceiter_android_vh_sound_usb_support_cpu_suspend
__traceiter_android_vh_syscall_prctl_finished
__traceiter_android_vh_ufs_abort_success_ctrl
__traceiter_android_vh_ufs_compl_rsp_check_done
__traceiter_android_vh_ufs_err_check_ctrl
__traceiter_android_vh_ufs_err_handler
__traceiter_android_vh_ufs_err_print_ctrl
__traceiter_android_vh_ufs_perf_huristic_ctrl
__traceiter_android_vh_ufs_send_command
__traceiter_android_vh_ufs_send_command_post_change
__traceiter_android_vh_ufs_send_tm_command
__traceiter_cpu_frequency
__traceiter_gpu_mem_total
@ -2312,7 +2319,14 @@
__tracepoint_android_vh_snd_compr_use_pause_in_drain
__tracepoint_android_vh_sound_usb_support_cpu_suspend
__tracepoint_android_vh_syscall_prctl_finished
__tracepoint_android_vh_ufs_abort_success_ctrl
__tracepoint_android_vh_ufs_compl_rsp_check_done
__tracepoint_android_vh_ufs_err_check_ctrl
__tracepoint_android_vh_ufs_err_handler
__tracepoint_android_vh_ufs_err_print_ctrl
__tracepoint_android_vh_ufs_perf_huristic_ctrl
__tracepoint_android_vh_ufs_send_command
__tracepoint_android_vh_ufs_send_command_post_change
__tracepoint_android_vh_ufs_send_tm_command
__tracepoint_cpu_frequency
__tracepoint_gpu_mem_total
@ -2367,10 +2381,13 @@
uart_update_timeout
uclamp_eff_value
__udelay
ufshcd_complete_requests
ufshcd_delay_us
ufshcd_dme_get_attr
ufshcd_dme_set_attr
ufshcd_dump_regs
ufshcd_err_handling_prepare
ufshcd_err_handling_unprepare
ufshcd_fixup_dev_quirks
ufshcd_get_pwr_dev_param
ufshcd_hba_enable


@ -2565,6 +2565,7 @@
__stack_chk_guard
stack_depot_fetch
stack_trace_save
static_key_enable
static_key_disable
static_key_slow_dec
static_key_slow_inc
@ -2761,6 +2762,7 @@
__traceiter_android_rvh_wake_up_new_task
__traceiter_android_vh_account_task_time
__traceiter_android_vh_add_page_to_lrulist
__traceiter_android_vh_adjust_alloc_flags
__traceiter_android_vh_alloc_pages_slowpath_begin
__traceiter_android_vh_alloc_pages_slowpath_end
__traceiter_android_vh_allow_domain_state
@ -2937,6 +2939,7 @@
__traceiter_android_vh_ufs_send_tm_command
__traceiter_android_vh_update_page_mapcount
__traceiter_android_vh_update_topology_flags_workfn
__traceiter_android_vh_vmscan_kswapd_done
__traceiter_binder_transaction_received
__traceiter_cpu_frequency
__traceiter_cpu_frequency_limits
@ -2945,6 +2948,7 @@
__traceiter_ipi_entry
__traceiter_ipi_raise
__traceiter_irq_handler_entry
__traceiter_mm_vmscan_kswapd_wake
__traceiter_net_dev_queue
__traceiter_net_dev_xmit
__traceiter_netif_receive_skb
@ -2966,6 +2970,12 @@
__traceiter_task_newtask
__traceiter_task_rename
__traceiter_xhci_urb_giveback
__traceiter_android_vh_binder_proc_transaction_finish
__traceiter_android_vh_alloc_oem_binder_struct
__traceiter_android_vh_binder_transaction_received
__traceiter_android_vh_free_oem_binder_struct
__traceiter_android_vh_binder_special_task
__traceiter_android_vh_binder_free_buf
__tracepoint_android_rvh_account_irq
__tracepoint_android_rvh_after_enqueue_task
__tracepoint_android_rvh_build_perf_domains
@ -3029,6 +3039,7 @@
__tracepoint_android_rvh_wake_up_new_task
__tracepoint_android_vh_account_task_time
__tracepoint_android_vh_add_page_to_lrulist
__tracepoint_android_vh_adjust_alloc_flags
__tracepoint_android_vh_alloc_pages_slowpath_begin
__tracepoint_android_vh_alloc_pages_slowpath_end
__tracepoint_android_vh_allow_domain_state
@ -3205,6 +3216,7 @@
__tracepoint_android_vh_ufs_send_tm_command
__tracepoint_android_vh_update_page_mapcount
__tracepoint_android_vh_update_topology_flags_workfn
__tracepoint_android_vh_vmscan_kswapd_done
__tracepoint_binder_transaction_received
__tracepoint_cpu_frequency
__tracepoint_cpu_frequency_limits
@ -3213,6 +3225,7 @@
__tracepoint_ipi_entry
__tracepoint_ipi_raise
__tracepoint_irq_handler_entry
__tracepoint_mm_vmscan_kswapd_wake
__tracepoint_net_dev_queue
__tracepoint_net_dev_xmit
__tracepoint_netif_receive_skb
@ -3238,6 +3251,12 @@
__tracepoint_task_newtask
__tracepoint_task_rename
__tracepoint_xhci_urb_giveback
__tracepoint_android_vh_binder_proc_transaction_finish
__tracepoint_android_vh_alloc_oem_binder_struct
__tracepoint_android_vh_binder_transaction_received
__tracepoint_android_vh_free_oem_binder_struct
__tracepoint_android_vh_binder_special_task
__tracepoint_android_vh_binder_free_buf
trace_print_array_seq
trace_print_flags_seq
trace_print_hex_seq


@ -34,6 +34,7 @@
android_debug_for_each_module
android_debug_per_cpu_symbol
android_debug_symbol
android_gki_sysctl_vals
android_rvh_probe_register
anon_inode_getfile
arc4_crypt


@ -45,6 +45,7 @@
__traceiter_android_vh_swap_slot_cache_active
__traceiter_android_vh_unuse_swap_page
__traceiter_android_vh_waiting_for_page_migration
__traceiter_android_vh_should_end_madvise
__tracepoint_android_rvh_alloc_si
__tracepoint_android_rvh_alloc_swap_slot_cache
__tracepoint_android_rvh_drain_slots_cache_cpu
@ -80,4 +81,5 @@
__tracepoint_android_vh_swap_slot_cache_active
__tracepoint_android_vh_unuse_swap_page
__tracepoint_android_vh_waiting_for_page_migration
__tracepoint_android_vh_should_end_madvise
zero_pfn


@ -1673,6 +1673,7 @@ static void binder_free_transaction(struct binder_transaction *t)
{
struct binder_proc *target_proc = t->to_proc;
trace_android_vh_free_oem_binder_struct(t);
if (target_proc) {
binder_inner_proc_lock(target_proc);
target_proc->outstanding_txns--;
@ -2855,6 +2856,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
bool oneway = !!(t->flags & TF_ONE_WAY);
bool pending_async = false;
struct binder_transaction *t_outdated = NULL;
bool enqueue_task = true;
BUG_ON(!node);
binder_node_lock(node);
@ -2894,7 +2896,10 @@ static int binder_proc_transaction(struct binder_transaction *t,
node->inherit_rt);
binder_enqueue_thread_work_ilocked(thread, &t->work);
} else if (!pending_async) {
binder_enqueue_work_ilocked(&t->work, &proc->todo);
trace_android_vh_binder_special_task(t, proc, thread,
&t->work, &proc->todo, !oneway, &enqueue_task);
if (enqueue_task)
binder_enqueue_work_ilocked(&t->work, &proc->todo);
} else {
if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) {
t_outdated = binder_find_outdated_transaction_ilocked(t,
@ -2907,11 +2912,16 @@ static int binder_proc_transaction(struct binder_transaction *t,
proc->outstanding_txns--;
}
}
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
trace_android_vh_binder_special_task(t, proc, thread,
&t->work, &node->async_todo, !oneway, &enqueue_task);
if (enqueue_task)
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
}
trace_android_vh_binder_proc_transaction_end(current, proc->tsk,
thread ? thread->task : NULL, t->code, pending_async, !oneway);
trace_android_vh_binder_proc_transaction_finish(proc, t,
thread ? thread->task : NULL, pending_async, !oneway);
if (!pending_async)
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
@ -3349,6 +3359,7 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer->target_node = target_node;
t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
trace_binder_transaction_alloc_buf(t->buffer);
trace_android_vh_alloc_oem_binder_struct(tr, t, target_proc);
if (binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
@ -3818,6 +3829,9 @@ binder_free_buf(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_buffer *buffer, bool is_failure)
{
bool enqueue_task = true;
trace_android_vh_binder_free_buf(proc, thread, buffer);
binder_inner_proc_lock(proc);
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
@ -3837,8 +3851,10 @@ binder_free_buf(struct binder_proc *proc,
if (!w) {
buf_node->has_async_transaction = false;
} else {
binder_enqueue_work_ilocked(
w, &proc->todo);
trace_android_vh_binder_special_task(NULL, proc, thread, w,
&proc->todo, false, &enqueue_task);
if (enqueue_task)
binder_enqueue_work_ilocked(w, &proc->todo);
binder_wakeup_proc_ilocked(proc);
}
binder_node_inner_unlock(buf_node);
@ -4785,6 +4801,7 @@ static int binder_thread_read(struct binder_proc *proc,
ptr += trsize;
trace_binder_transaction_received(t);
trace_android_vh_binder_transaction_received(t, proc, thread, cmd);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
@ -4867,6 +4884,7 @@ static void binder_release_work(struct binder_proc *proc,
"undelivered TRANSACTION_ERROR: %u\n",
e->cmd);
} break;
case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");


@ -290,6 +290,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_rw);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_module_permit_before_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_module_permit_after_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_util_est_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_adjust_alloc_flags);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_meminfo_proc_show);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_mm);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_from_fragment_pool);
@ -474,6 +475,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_del_from_avail_list);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh___cgroup_throttle_swaprate);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_or_pageout);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_isolated_for_reclaim);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_should_end_madvise);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_inactive_is_low);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_snapshot_refaults);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_account_swap_pages);
@ -498,6 +500,21 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_or_pageout_abort);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_compact_finished);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_wakeup_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_skip_swapcache);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_oem_binder_struct);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_received);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_oem_binder_struct);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_special_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_free_buf);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_perf_huristic_ctrl);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command_post_change);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_abort_success_ctrl);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_compl_rsp_check_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_err_handler);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_err_check_ctrl);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_err_print_ctrl);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_vmscan_kswapd_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_swap_page_spf);
/*
* For type visibility
*/


@ -106,7 +106,7 @@ static void fw_load_abort(struct fw_sysfs *fw_sysfs)
static LIST_HEAD(pending_fw_head);
void kill_pending_fw_fallback_reqs(bool only_kill_custom)
void kill_pending_fw_fallback_reqs(bool kill_all)
{
struct fw_priv *fw_priv;
struct fw_priv *next;
@ -114,9 +114,13 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom)
mutex_lock(&fw_lock);
list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
pending_list) {
if (!fw_priv->need_uevent || !only_kill_custom)
if (kill_all || !fw_priv->need_uevent)
__fw_load_abort(fw_priv);
}
if (kill_all)
fw_load_abort_all = true;
mutex_unlock(&fw_lock);
}
@ -511,7 +515,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
}
mutex_lock(&fw_lock);
if (fw_state_is_aborted(fw_priv)) {
if (fw_load_abort_all || fw_state_is_aborted(fw_priv)) {
mutex_unlock(&fw_lock);
retval = -EINTR;
goto out;


@ -35,7 +35,7 @@ int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
u32 opt_flags,
int ret);
void kill_pending_fw_fallback_reqs(bool only_kill_custom);
void kill_pending_fw_fallback_reqs(bool kill_all);
void fw_fallback_set_cache_timeout(void);
void fw_fallback_set_default_timeout(void);
@ -52,7 +52,7 @@ static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
return ret;
}
static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
static inline void kill_pending_fw_fallback_reqs(bool kill_all) { }
static inline void fw_fallback_set_cache_timeout(void) { }
static inline void fw_fallback_set_default_timeout(void) { }


@ -87,6 +87,7 @@ struct fw_priv {
};
extern struct mutex fw_lock;
extern bool fw_load_abort_all;
static inline bool __fw_state_check(struct fw_priv *fw_priv,
enum fw_status status)


@ -91,6 +91,7 @@ static inline struct fw_priv *to_fw_priv(struct kref *ref)
DEFINE_MUTEX(fw_lock);
static struct firmware_cache fw_cache;
bool fw_load_abort_all;
/* Builtin firmware support */
@ -1442,10 +1443,10 @@ static int fw_pm_notify(struct notifier_block *notify_block,
case PM_SUSPEND_PREPARE:
case PM_RESTORE_PREPARE:
/*
* kill pending fallback requests with a custom fallback
* to avoid stalling suspend.
* Here, killing pending fallback requests will only kill
* non-uevent firmware requests, to avoid stalling suspend.
*/
kill_pending_fw_fallback_reqs(true);
kill_pending_fw_fallback_reqs(false);
device_cache_fw_images();
break;
@ -1530,7 +1531,7 @@ static int fw_shutdown_notify(struct notifier_block *unused1,
* Kill all pending fallback requests to avoid both stalling shutdown,
* and avoid a deadlock with the usermode_lock.
*/
kill_pending_fw_fallback_reqs(false);
kill_pending_fw_fallback_reqs(true);
return NOTIFY_DONE;
}


@ -438,10 +438,14 @@ static void devfreq_monitor(struct work_struct *work)
if (err)
dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
if (devfreq->stop_polling)
goto out;
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
mutex_unlock(&devfreq->lock);
out:
mutex_unlock(&devfreq->lock);
trace_devfreq_monitor(devfreq);
}
@ -459,6 +463,10 @@ void devfreq_monitor_start(struct devfreq *devfreq)
if (devfreq->governor->interrupt_driven)
return;
mutex_lock(&devfreq->lock);
if (delayed_work_pending(&devfreq->work))
goto out;
switch (devfreq->profile->timer) {
case DEVFREQ_TIMER_DEFERRABLE:
INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
@ -467,12 +475,16 @@ void devfreq_monitor_start(struct devfreq *devfreq)
INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
break;
default:
return;
goto out;
}
if (devfreq->profile->polling_ms)
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
out:
devfreq->stop_polling = false;
mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_start);
@ -489,6 +501,14 @@ void devfreq_monitor_stop(struct devfreq *devfreq)
if (devfreq->governor->interrupt_driven)
return;
mutex_lock(&devfreq->lock);
if (devfreq->stop_polling) {
mutex_unlock(&devfreq->lock);
return;
}
devfreq->stop_polling = true;
mutex_unlock(&devfreq->lock);
cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);


@ -33,6 +33,7 @@
#define UINPUT_NAME "uinput"
#define UINPUT_BUFFER_SIZE 16
#define UINPUT_NUM_REQUESTS 16
#define UINPUT_TIMESTAMP_ALLOWED_OFFSET_SECS 10
enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED };
@ -569,11 +570,40 @@ static int uinput_setup_device_legacy(struct uinput_device *udev,
return retval;
}
/*
* Returns true if the given timestamp is valid (i.e., if all the following
* conditions are satisfied), false otherwise.
* 1) given timestamp is positive
* 2) it's within the allowed offset before the current time
* 3) it's not in the future
*/
static bool is_valid_timestamp(const ktime_t timestamp)
{
ktime_t zero_time;
ktime_t current_time;
ktime_t min_time;
ktime_t offset;
zero_time = ktime_set(0, 0);
if (ktime_compare(zero_time, timestamp) >= 0)
return false;
current_time = ktime_get();
offset = ktime_set(UINPUT_TIMESTAMP_ALLOWED_OFFSET_SECS, 0);
min_time = ktime_sub(current_time, offset);
if (ktime_after(min_time, timestamp) || ktime_after(timestamp, current_time))
return false;
return true;
}
static ssize_t uinput_inject_events(struct uinput_device *udev,
const char __user *buffer, size_t count)
{
struct input_event ev;
size_t bytes = 0;
ktime_t timestamp;
if (count != 0 && count < input_event_size())
return -EINVAL;
@ -588,6 +618,10 @@ static ssize_t uinput_inject_events(struct uinput_device *udev,
if (input_event_from_user(buffer + bytes, &ev))
return -EFAULT;
timestamp = ktime_set(ev.input_event_sec, ev.input_event_usec * NSEC_PER_USEC);
if (is_valid_timestamp(timestamp))
input_set_timestamp(udev->dev, timestamp);
input_event(udev->dev, ev.type, ev.code, ev.value);
bytes += input_event_size();
cond_resched();
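
For context, a hedged userspace sketch of what this change enables (not part
of the patch; inject_key() and its parameters are hypothetical): an event
written to a uinput device may now carry its own timestamp, which the kernel
honors when it falls inside the 10-second window enforced by
is_valid_timestamp() above.

#include <linux/uinput.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>

/* Write one EV_KEY event with an explicit timestamp to an already-created
 * uinput device; the timestamp is honored if within ~10s of "now". */
static void inject_key(int fd, unsigned short code, int value,
		       const struct timeval *tv)
{
	struct input_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.input_event_sec  = tv->tv_sec;	/* checked by is_valid_timestamp() */
	ev.input_event_usec = tv->tv_usec;
	ev.type  = EV_KEY;
	ev.code  = code;
	ev.value = value;
	write(fd, &ev, sizeof(ev));		/* error handling elided */
}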


@ -623,6 +623,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
order_size = 1U << order;
if (order_mask > order_size)
alloc_flags |= __GFP_NORETRY;
trace_android_vh_adjust_alloc_flags(order, &alloc_flags);
page = alloc_pages_node(nid, alloc_flags, order);
if (!page)
continue;


@ -145,10 +145,7 @@ enum {
UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
};
/* UFSHCD error handling flags */
enum {
UFSHCD_EH_IN_PROGRESS = (1 << 0),
};
/* UFSHCD UIC layer error flags */
enum {
@ -161,12 +158,6 @@ enum {
UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};
#define ufshcd_set_eh_in_progress(h) \
((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
@ -2105,6 +2096,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
}
/* Make sure that doorbell is committed immediately */
wmb();
trace_android_vh_ufs_send_command_post_change(hba, lrbp);
}
/**
@ -2770,7 +2762,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
}
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
trace_android_vh_ufs_perf_huristic_ctrl(hba, lrbp, &err);
if (err)
goto out;
ufshcd_send_command(hba, tag);
out:
up_read(&hba->clk_scaling_lock);
@ -5204,7 +5198,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
}
/* Release the resources allocated for processing a SCSI command. */
static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
struct scsi_cmnd *cmd = lrbp->cmd;
@ -5215,6 +5209,7 @@ static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
ufshcd_release(hba);
ufshcd_clk_scaling_update_busy(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_release_scsi_cmd);
/**
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
@ -5235,9 +5230,13 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
if (cmd) {
bool done = false;
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_update_monitor(hba, lrbp);
trace_android_vh_ufs_compl_command(hba, lrbp);
trace_android_vh_ufs_compl_rsp_check_done(hba, lrbp, &done);
if (done)
return;
ufshcd_add_command_trace(hba, index, "complete");
cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
ufshcd_release_scsi_cmd(hba, lrbp);
@ -5804,11 +5803,13 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
void ufshcd_complete_requests(struct ufs_hba *hba)
{
ufshcd_trc_handler(hba, false);
ufshcd_tmc_handler(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_complete_requests);
/**
* ufshcd_quirk_dl_nac_errors - This function checks if error handling is
@ -5922,7 +5923,7 @@ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
}
}
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
pm_runtime_get_sync(hba->dev);
if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) {
@ -5957,8 +5958,9 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
up_write(&hba->clk_scaling_lock);
cancel_work_sync(&hba->eeh_work);
}
EXPORT_SYMBOL_GPL(ufshcd_err_handling_prepare);
static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
ufshcd_scsi_unblock_requests(hba);
ufshcd_release(hba);
@ -5966,6 +5968,7 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
ufshcd_clk_scaling_suspend(hba, false);
pm_runtime_put(hba->dev);
}
EXPORT_SYMBOL_GPL(ufshcd_err_handling_unprepare);
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
@ -6038,10 +6041,16 @@ static void ufshcd_err_handler(struct work_struct *work)
bool err_tm = false;
int err = 0, pmc_err;
int tag;
bool err_handled = false;
bool needs_reset = false, needs_restore = false;
hba = container_of(work, struct ufs_hba, eh_work);
trace_android_vh_ufs_err_handler(hba, &err_handled);
if (err_handled)
return;
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
@ -6347,14 +6356,16 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
* update the transfer error masks to sticky bits, let's do this
* irrespective of current ufshcd_state.
*/
bool skip = false;
hba->saved_err |= hba->errors;
hba->saved_uic_err |= hba->uic_error;
trace_android_vh_ufs_err_print_ctrl(hba, &skip);
/* dump controller state before resetting */
if ((hba->saved_err &
if (!skip && ((hba->saved_err &
(INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
(hba->saved_uic_err &&
(hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
(hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR)))) {
dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
__func__, hba->saved_err,
hba->saved_uic_err);
@ -6419,6 +6430,7 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;
bool err_check = false;
if (intr_status & UFSHCD_UIC_MASK)
retval |= ufshcd_uic_cmd_compl(hba, intr_status);
@ -6429,9 +6441,14 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
if (intr_status & UTP_TASK_REQ_COMPL)
retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
if (intr_status & UTP_TRANSFER_REQ_COMPL) {
retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
trace_android_vh_ufs_err_check_ctrl(hba, &err_check);
if (err_check)
ufshcd_check_errors(hba, hba->errors);
}
return retval;
}
@ -7049,8 +7066,10 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
spin_unlock_irqrestore(host->host_lock, flags);
if (outstanding)
if (outstanding) {
ufshcd_release_scsi_cmd(hba, lrbp);
trace_android_vh_ufs_abort_success_ctrl(hba, lrbp);
}
err = SUCCESS;
@ -7164,6 +7183,20 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
hba = shost_priv(cmd->device->host);
/*
* If runtime PM sent an SSU command that timed out, scsi_error_handler
* gets stuck in this function waiting for flush_work(&hba->eh_work),
* while ufshcd_err_handler (eh_work) is stuck waiting for runtime PM
* to become active. Doing ufshcd_link_recovery here, instead of
* scheduling eh_work, prevents that deadlock.
*/
if (hba->pm_op_in_progress) {
if (ufshcd_link_recovery(hba))
err = FAILED;
return err;
}
spin_lock_irqsave(hba->host->host_lock, flags);
hba->force_reset = true;
ufshcd_schedule_eh_work(hba);


@ -82,6 +82,19 @@ enum ufs_event_type {
UFS_EVT_CNT,
};
/* UFSHCD error handling flags */
enum {
UFSHCD_EH_IN_PROGRESS = (1 << 0),
};
#define ufshcd_set_eh_in_progress(h) \
((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
/**
* struct uic_command - UIC command structure
* @command: UIC command
@ -1048,6 +1061,12 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_complete_requests(struct ufs_hba *hba);
void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp);
void ufshcd_err_handling_prepare(struct ufs_hba *hba);
void ufshcd_err_handling_unprepare(struct ufs_hba *hba);
static inline void check_upiu_size(void)
{


@ -2873,7 +2873,7 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER) & HPB_MAJOR_VERSION_MASK;
if ((version != HPB_SUPPORT_VERSION) &&
(version != HPB_SUPPORT_LEGACY_VERSION)) {
dev_err(hba->dev, "%s: HPB %x version is not supported.\n",


@ -53,6 +53,7 @@
#define HPB_SUPPORT_VERSION 0x200
#define HPB_SUPPORT_LEGACY_VERSION 0x100
#define HPB_MAJOR_VERSION_MASK 0xFF00
enum UFSHPB_MODE {
HPB_HOST_CONTROL,


@ -6,6 +6,7 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <linux/buffer_head.h>
#include <linux/blk_types.h>
#include "exfat_raw.h"
#include "exfat_fs.h"
@ -258,10 +259,10 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
{
struct super_block *sb = dir->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct buffer_head *bhs[MAX_BUF_PER_PAGE];
int nr_bhs = MAX_BUF_PER_PAGE;
struct buffer_head *bh;
struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
sector_t blknr, last_blknr;
int err, i, n;
int i;
blknr = exfat_cluster_to_sector(sbi, clu);
last_blknr = blknr + sbi->sect_per_clus;
@ -275,30 +276,22 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
}
/* Zeroing the unused blocks on this cluster */
while (blknr < last_blknr) {
for (n = 0; n < nr_bhs && blknr < last_blknr; n++, blknr++) {
bhs[n] = sb_getblk(sb, blknr);
if (!bhs[n]) {
err = -ENOMEM;
goto release_bhs;
}
memset(bhs[n]->b_data, 0, sb->s_blocksize);
}
for (i = blknr; i < last_blknr; i++) {
bh = sb_getblk(sb, i);
if (!bh)
return -ENOMEM;
err = exfat_update_bhs(bhs, n, IS_DIRSYNC(dir));
if (err)
goto release_bhs;
for (i = 0; i < n; i++)
brelse(bhs[i]);
memset(bh->b_data, 0, sb->s_blocksize);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
brelse(bh);
}
return 0;
if (IS_DIRSYNC(dir))
return filemap_write_and_wait_range(mapping,
EXFAT_BLK_TO_B(blknr, sb),
EXFAT_BLK_TO_B(last_blknr, sb) - 1);
release_bhs:
exfat_err(sb, "failed zeroed sect %llu\n", (unsigned long long)blknr);
for (i = 0; i < n; i++)
bforget(bhs[i]);
return err;
return 0;
}
int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,


@ -33,6 +33,7 @@
#define _ANDROID_KABI_H
#include <linux/compiler.h>
#include <linux/stringify.h>
/*
* Worker macros, don't use these, use the ones without a leading '_'


@ -80,7 +80,8 @@ struct ipv6_devconf {
struct ctl_table_header *sysctl_header;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_USE(1, struct { __s32 accept_ra_min_lft; u32 padding; });
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);


@ -659,6 +659,9 @@ struct perf_event {
/* The cumulative AND of all event_caps for events in this group. */
int group_caps;
#ifndef __GENKSYMS__
unsigned int group_generation;
#endif
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;


@ -72,19 +72,30 @@ TRACE_EVENT(reclaim_retry_zone,
);
TRACE_EVENT(mark_victim,
TP_PROTO(int pid),
TP_PROTO(struct task_struct *task, uid_t uid),
TP_ARGS(pid),
TP_ARGS(task, uid),
TP_STRUCT__entry(
__field(int, pid)
__field(uid_t, uid)
__string(comm, task->comm)
__field(short, oom_score_adj)
),
TP_fast_assign(
__entry->pid = pid;
__entry->pid = task->pid;
__entry->uid = uid;
__assign_str(comm, task->comm);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
TP_printk("pid=%d", __entry->pid)
TP_printk("pid=%d uid=%u comm=%s oom_score_adj=%hd",
__entry->pid,
__entry->uid,
__get_str(comm),
__entry->oom_score_adj
)
);
TRACE_EVENT(wake_reaper,


@ -17,6 +17,8 @@ struct binder_proc;
struct binder_thread;
struct binder_transaction;
struct binder_transaction_data;
struct binder_work;
struct binder_buffer;
#else
/* struct binder_alloc */
#include <../drivers/android/binder_alloc.h>
@ -114,6 +116,30 @@ DECLARE_HOOK(android_vh_binder_read_done,
DECLARE_HOOK(android_vh_binder_has_work_ilocked,
TP_PROTO(struct binder_thread *thread, bool do_proc_work, int *ret),
TP_ARGS(thread, do_proc_work, ret));
DECLARE_HOOK(android_vh_binder_proc_transaction_finish,
TP_PROTO(struct binder_proc *proc, struct binder_transaction *t,
struct task_struct *binder_th_task, bool pending_async, bool sync),
TP_ARGS(proc, t, binder_th_task, pending_async, sync));
DECLARE_HOOK(android_vh_alloc_oem_binder_struct,
TP_PROTO(struct binder_transaction_data *tr, struct binder_transaction *t,
struct binder_proc *proc),
TP_ARGS(tr, t, proc));
DECLARE_HOOK(android_vh_binder_transaction_received,
TP_PROTO(struct binder_transaction *t, struct binder_proc *proc,
struct binder_thread *thread, uint32_t cmd),
TP_ARGS(t, proc, thread, cmd));
DECLARE_HOOK(android_vh_free_oem_binder_struct,
TP_PROTO(struct binder_transaction *t),
TP_ARGS(t));
DECLARE_HOOK(android_vh_binder_special_task,
TP_PROTO(struct binder_transaction *t, struct binder_proc *proc,
struct binder_thread *thread, struct binder_work *w,
struct list_head *head, bool sync, bool *special_task),
TP_ARGS(t, proc, thread, w, head, sync, special_task));
DECLARE_HOOK(android_vh_binder_free_buf,
TP_PROTO(struct binder_proc *proc, struct binder_thread *thread,
struct binder_buffer *buffer),
TP_ARGS(proc, thread, buffer));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_BINDER_H */


@ -43,6 +43,9 @@ DECLARE_HOOK(android_vh_iommu_iovad_free_iova,
TP_PROTO(struct iova_domain *iovad, dma_addr_t iova, size_t size),
TP_ARGS(iovad, iova, size));
DECLARE_HOOK(android_vh_adjust_alloc_flags,
TP_PROTO(unsigned int order, gfp_t *alloc_flags),
TP_ARGS(order, alloc_flags));
#endif /* _TRACE_HOOK_IOMMU_H */


@ -302,6 +302,9 @@ DECLARE_HOOK(android_vh_madvise_cold_or_pageout,
DECLARE_HOOK(android_vh_page_isolated_for_reclaim,
TP_PROTO(struct mm_struct *mm, struct page *page),
TP_ARGS(mm, page));
DECLARE_HOOK(android_vh_should_end_madvise,
TP_PROTO(struct mm_struct *mm, bool *skip, bool *pageout),
TP_ARGS(mm, skip, pageout));
DECLARE_HOOK(android_vh_account_swap_pages,
TP_PROTO(struct swap_info_struct *si, bool *skip),
TP_ARGS(si, skip));
@ -359,6 +362,9 @@ DECLARE_HOOK(android_vh_madvise_cold_or_pageout_abort,
DECLARE_HOOK(android_vh_skip_swapcache,
TP_PROTO(swp_entry_t entry, bool *skip),
TP_ARGS(entry, skip));
DECLARE_HOOK(android_vh_do_swap_page_spf,
TP_PROTO(bool *allow_swap_spf),
TP_ARGS(allow_swap_spf));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_MM_H */


@ -75,6 +75,40 @@ DECLARE_HOOK(android_vh_ufs_update_sdev,
DECLARE_HOOK(android_vh_ufs_clock_scaling,
TP_PROTO(struct ufs_hba *hba, bool *force_out, bool *force_scaling, bool *scale_up),
TP_ARGS(hba, force_out, force_scaling, scale_up));
DECLARE_HOOK(android_vh_ufs_send_command_post_change,
TP_PROTO(struct ufs_hba *hba, struct ufshcd_lrb *lrbp),
TP_ARGS(hba, lrbp));
DECLARE_HOOK(android_vh_ufs_perf_huristic_ctrl,
TP_PROTO(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, int *err),
TP_ARGS(hba, lrbp, err));
DECLARE_HOOK(android_vh_ufs_abort_success_ctrl,
TP_PROTO(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp),
TP_ARGS(hba, lrbp));
DECLARE_HOOK(android_vh_ufs_err_handler,
TP_PROTO(struct ufs_hba *hba,
bool *err_handled),
TP_ARGS(hba, err_handled));
DECLARE_HOOK(android_vh_ufs_compl_rsp_check_done,
TP_PROTO(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, bool *done),
TP_ARGS(hba, lrbp, done));
DECLARE_HOOK(android_vh_ufs_err_print_ctrl,
TP_PROTO(struct ufs_hba *hba,
bool *skip),
TP_ARGS(hba, skip));
DECLARE_HOOK(android_vh_ufs_err_check_ctrl,
TP_PROTO(struct ufs_hba *hba,
bool *err_check),
TP_ARGS(hba, err_check));
#endif /* _TRACE_HOOK_UFSHCD_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -53,6 +53,10 @@ DECLARE_HOOK(android_vh_snapshot_refaults,
DECLARE_HOOK(android_vh_check_page_look_around_ref,
TP_PROTO(struct page *page, int *skip),
TP_ARGS(page, skip));
DECLARE_HOOK(android_vh_vmscan_kswapd_done,
TP_PROTO(int node_id, unsigned int highest_zoneidx, unsigned int alloc_order,
unsigned int reclaim_order),
TP_ARGS(node_id, highest_zoneidx, alloc_order, reclaim_order));
#endif /* _TRACE_HOOK_VMSCAN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -196,6 +196,13 @@ enum {
DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN,
DEVCONF_NDISC_TCLASS,
DEVCONF_RPL_SEG_ENABLED,
DEVCONF_RA_DEFRTR_METRIC,
DEVCONF_IOAM6_ENABLED,
DEVCONF_IOAM6_ID,
DEVCONF_IOAM6_ID_WIDE,
DEVCONF_NDISC_EVICT_NOCARRIER,
DEVCONF_ACCEPT_UNTRACKED_NA,
DEVCONF_ACCEPT_RA_MIN_LFT,
DEVCONF_MAX
};


@ -1845,28 +1845,31 @@ static inline void perf_event__state_init(struct perf_event *event)
PERF_EVENT_STATE_INACTIVE;
}
static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
static int __perf_event_read_size(u64 read_format, int nr_siblings)
{
int entry = sizeof(u64); /* value */
int size = 0;
int nr = 1;
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_ID)
if (read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_GROUP) {
if (read_format & PERF_FORMAT_GROUP) {
nr += nr_siblings;
size += sizeof(u64);
}
size += entry * nr;
event->read_size = size;
/*
* Since perf_event_validate_size() limits this to 16k and inhibits
* adding more siblings, this will never overflow.
*/
return size + nr * entry;
}
static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
@ -1910,8 +1913,9 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
*/
static void perf_event__header_size(struct perf_event *event)
{
__perf_event_read_size(event,
event->group_leader->nr_siblings);
event->read_size =
__perf_event_read_size(event->attr.read_format,
event->group_leader->nr_siblings);
__perf_event_header_size(event, event->attr.sample_type);
}
@ -1942,24 +1946,35 @@ static void perf_event__id_header_size(struct perf_event *event)
event->id_header_size = size;
}
/*
* Check that adding an event to the group does not result in anybody
* overflowing the 64k event limit imposed by the output buffer.
*
* Specifically, check that the read_size for the event does not exceed 16k,
* read_size being the one term that grows with group size. Since read_size
* depends on per-event read_format, also (re)check the existing events.
*
* This leaves 48k for the constant size fields and things like callchains,
* branch stacks and register sets.
*/
static bool perf_event_validate_size(struct perf_event *event)
{
/*
* The values computed here will be over-written when we actually
* attach the event.
*/
__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
perf_event__id_header_size(event);
struct perf_event *sibling, *group_leader = event->group_leader;
/*
* Sum the lot; should not exceed the 64k limit we have on records.
* Conservative limit to allow for callchains and other variable fields.
*/
if (event->read_size + event->header_size +
event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
if (__perf_event_read_size(event->attr.read_format,
group_leader->nr_siblings + 1) > 16*1024)
return false;
if (__perf_event_read_size(group_leader->attr.read_format,
group_leader->nr_siblings + 1) > 16*1024)
return false;
for_each_sibling_event(sibling, group_leader) {
if (__perf_event_read_size(sibling->attr.read_format,
group_leader->nr_siblings + 1) > 16*1024)
return false;
}
return true;
}
@ -1986,6 +2001,7 @@ static void perf_group_attach(struct perf_event *event)
list_add_tail(&event->sibling_list, &group_leader->sibling_list);
group_leader->nr_siblings++;
group_leader->group_generation++;
perf_event__header_size(group_leader);
@ -2178,6 +2194,7 @@ static void perf_group_detach(struct perf_event *event)
if (leader != event) {
list_del_init(&event->sibling_list);
event->group_leader->nr_siblings--;
event->group_leader->group_generation++;
goto out;
}
@ -5164,7 +5181,7 @@ static int __perf_read_group_add(struct perf_event *leader,
u64 read_format, u64 *values)
{
struct perf_event_context *ctx = leader->ctx;
struct perf_event *sub;
struct perf_event *sub, *parent;
unsigned long flags;
int n = 1; /* skip @nr */
int ret;
@ -5174,6 +5191,33 @@ static int __perf_read_group_add(struct perf_event *leader,
return ret;
raw_spin_lock_irqsave(&ctx->lock, flags);
/*
* Verify the grouping between the parent and child (inherited)
* events is still intact.
*
* Specifically:
* - leader->ctx->lock pins leader->sibling_list
* - parent->child_mutex pins parent->child_list
* - parent->ctx->mutex pins parent->sibling_list
*
* Because parent->ctx != leader->ctx (and child_list nests inside
* ctx->mutex), group destruction is not atomic between children, also
* see perf_event_release_kernel(). Additionally, parent can grow the
* group.
*
* Therefore it is possible to have parent and child groups in a
* different configuration and summing over such a beast makes no sense
* whatsoever.
*
* Reject this.
*/
parent = leader->parent;
if (parent &&
(parent->group_generation != leader->group_generation ||
parent->nr_siblings != leader->nr_siblings)) {
ret = -ECHILD;
goto unlock;
}
/*
* Since we co-schedule groups, {enabled,running} times of siblings
@ -5203,8 +5247,9 @@ static int __perf_read_group_add(struct perf_event *leader,
values[n++] = primary_event_id(sub);
}
unlock:
raw_spin_unlock_irqrestore(&ctx->lock, flags);
return 0;
return ret;
}
static int perf_read_group(struct perf_event *event,
@ -5223,10 +5268,6 @@ static int perf_read_group(struct perf_event *event,
values[0] = 1 + leader->nr_siblings;
/*
* By locking the child_mutex of the leader we effectively
* lock the child list of all siblings.. XXX explain how.
*/
mutex_lock(&leader->child_mutex);
ret = __perf_read_group_add(leader, read_format, values);
@ -12775,6 +12816,7 @@ static int inherit_group(struct perf_event *parent_event,
!perf_get_aux_event(child_ctr, leader))
return -EINVAL;
}
leader->group_generation = parent_event->group_generation;
return 0;
}
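
For scale, a standalone userspace sketch of the __perf_event_read_size()
arithmetic above (an illustration under stated assumptions; read_size() here
is a hypothetical mirror of the kernel helper): with PERF_FORMAT_GROUP,
PERF_FORMAT_ID and both time fields, each group member costs 16 bytes on top
of a 24-byte fixed part, so the 16k cap admits groups of roughly a thousand
events.

#include <stdio.h>
#include <stdint.h>
#include <linux/perf_event.h>	/* PERF_FORMAT_* flags */

/* Mirrors __perf_event_read_size(): fixed part plus one entry per member. */
static uint64_t read_size(uint64_t read_format, int nr_siblings)
{
	uint64_t entry = sizeof(uint64_t);	/* value */
	uint64_t size = 0, nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(uint64_t);	/* nr */
	}
	return size + nr * entry;
}

int main(void)
{
	uint64_t fmt = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
		       PERF_FORMAT_TOTAL_TIME_ENABLED |
		       PERF_FORMAT_TOTAL_TIME_RUNNING;

	/* 24 + 16 * 1022 = 16376 bytes: the largest group under the 16k cap. */
	printf("%llu\n", (unsigned long long)read_size(fmt, 1021));
	return 0;
}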


@ -76,7 +76,7 @@ fi
# of tree builds having stale headers in srctree. Just silence CPIO for now.
for f in $dir_list;
do find "$f" -name "*.h";
done | cpio --quiet -pd $cpio_dir >/dev/null 2>&1
done | cpio --quiet -pdu $cpio_dir >/dev/null 2>&1
# Remove comments except SDPX lines
find $cpio_dir -type f -print0 |


@ -323,6 +323,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
LIST_HEAD(page_list);
bool allow_shared = false;
bool abort_madvise = false;
bool skip = false;
trace_android_vh_madvise_cold_or_pageout_abort(vma, &abort_madvise);
if (fatal_signal_pending(current) || abort_madvise)
@ -419,6 +420,10 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
if (!page)
continue;
trace_android_vh_should_end_madvise(mm, &skip, &pageout);
if (skip)
break;
/*
* Creating a THP page is expensive so split it only if we
* are sure it's worth. Split it if we are only owner.


@ -3623,8 +3623,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
void *shadow = NULL;
if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
pte_unmap(vmf->pte);
return VM_FAULT_RETRY;
bool allow_swap_spf = false;
/* ksm_might_need_to_copy() needs a stable VMA, spf can't be used */
#ifndef CONFIG_KSM
trace_android_vh_do_swap_page_spf(&allow_swap_spf);
#endif
if (!allow_swap_spf) {
pte_unmap(vmf->pte);
return VM_FAULT_RETRY;
}
}
ret = pte_unmap_same(vmf);
@ -3641,6 +3649,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
entry = pte_to_swp_entry(vmf->orig_pte);
if (unlikely(non_swap_entry(entry))) {
if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
ret = VM_FAULT_RETRY;
goto out;
}
if (is_migration_entry(entry)) {
migration_entry_wait(vma->vm_mm, vmf->pmd,
vmf->address);


@ -44,6 +44,7 @@
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>
#include <linux/cred.h>
#include <asm/tlb.h>
#include "internal.h"
@ -751,6 +752,8 @@ static void __mark_oom_victim(struct task_struct *tsk)
*/
static void mark_oom_victim(struct task_struct *tsk)
{
const struct cred *cred;
WARN_ON(oom_killer_disabled);
/* OOM killer might race with memcg OOM */
if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
@ -767,7 +770,9 @@ static void mark_oom_victim(struct task_struct *tsk)
*/
__thaw_task(tsk);
atomic_inc(&oom_victims);
trace_mark_victim(tsk->pid);
cred = get_task_cred(tsk);
trace_mark_victim(tsk, cred->uid.val);
put_cred(cred);
}
/**


@ -68,6 +68,7 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(mm_vmscan_direct_reclaim_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(mm_vmscan_direct_reclaim_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(mm_vmscan_kswapd_wake);
struct scan_control {
/* How many pages shrink_list() should reclaim */
@ -4038,6 +4039,8 @@ static int kswapd(void *p)
alloc_order);
reclaim_order = balance_pgdat(pgdat, alloc_order,
highest_zoneidx);
trace_android_vh_vmscan_kswapd_done(pgdat->node_id, highest_zoneidx,
alloc_order, reclaim_order);
if (reclaim_order < alloc_order)
goto kswapd_try_sleep;
}


@ -209,6 +209,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.accept_ra_defrtr = 1,
.accept_ra_from_local = 0,
.accept_ra_min_hop_limit= 1,
.accept_ra_min_lft = 0,
.accept_ra_pinfo = 1,
#ifdef CONFIG_IPV6_ROUTER_PREF
.accept_ra_rtr_pref = 1,
@ -265,6 +266,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.accept_ra_defrtr = 1,
.accept_ra_from_local = 0,
.accept_ra_min_hop_limit= 1,
.accept_ra_min_lft = 0,
.accept_ra_pinfo = 1,
#ifdef CONFIG_IPV6_ROUTER_PREF
.accept_ra_rtr_pref = 1,
@ -2748,6 +2750,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
return;
}
if (valid_lft != 0 && valid_lft < in6_dev->cnf.accept_ra_min_lft)
goto put;
/*
* Two things going on here:
* 1) Add routes for on-link prefixes
@ -5588,6 +5593,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
array[DEVCONF_RPL_SEG_ENABLED] = cnf->rpl_seg_enabled;
array[DEVCONF_ACCEPT_RA_MIN_LFT] = cnf->accept_ra_min_lft;
}
static inline size_t inet6_ifla6_size(void)
@ -6741,6 +6747,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "accept_ra_min_lft",
.data = &ipv6_devconf.accept_ra_min_lft,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "accept_ra_pinfo",
.data = &ipv6_devconf.accept_ra_pinfo,


@ -1269,6 +1269,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
goto skip_defrtr;
}
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
if (lifetime != 0 && lifetime < in6_dev->cnf.accept_ra_min_lft) {
ND_PRINTK(2, info,
"RA: router lifetime (%ds) is too short: %s\n",
lifetime, skb->dev->name);
goto skip_defrtr;
}
/* Do not accept RA with source-addr found on local machine unless
* accept_ra_from_local is set to true.
*/
@ -1281,8 +1289,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
goto skip_defrtr;
}
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
#ifdef CONFIG_IPV6_ROUTER_PREF
pref = ra_msg->icmph.icmp6_router_pref;
/* 10b is handled as if it were 00b (medium) */
@ -1453,6 +1459,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (ri->prefix_len == 0 &&
!in6_dev->cnf.accept_ra_defrtr)
continue;
if (ri->lifetime != 0 &&
ntohl(ri->lifetime) < in6_dev->cnf.accept_ra_min_lft)
continue;
if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
continue;
if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)

[File diff suppressed because it is too large]