Merge branch android12-5.10 into android12-5.10-lts

Sync up after the -lts merge; contains the following commits:

2329f61535dd Merge branch android12-5.10 into android12-5.10-lts
851de32d27 Merge tag 'android12-5.10.177_r00' into android12-5.10
7f9a9a8fe4 UPSTREAM: KVM: x86: do not report a vCPU as preempted outside instruction boundaries
7c835be7ec ANDROID: remove CONFIG_NET_CLS_TCINDEX from gki_defconfig
21a4564a6c BACKPORT: net/sched: Retire tcindex classifier
f27e7efdc6 FROMLIST: usb: xhci: Remove unused udev from xhci_log_ctx trace event
948b2a1205 UPSTREAM: ext4: avoid a potential slab-out-of-bounds in ext4_group_desc_csum
f60101a030 ANDROID: GKI: Update symbols to symbol list
64c7044d39 ANDROID: vendor_hook: add hooks in dm_bufio.c
f03258701d ANDROID: GKI: Update symbol list for mtk
9d8c9d868e UPSTREAM: ext4: fix invalid free tracking in ext4_xattr_move_to_block()
97aa93c23f ANDROID: uid_sys_stats: defer process_notifier work if uid_lock is contended
c28be8ff1d BACKPORT: scsi: ufs: Fix device management cmd timeout flow
3641f511ee UPSTREAM: usb: dwc3: debugfs: Resume dwc3 before accessing registers
694b75e0ce UPSTREAM: kvm: initialize all of the kvm_debugregs structure before sending it to userspace
368fb8a50c BACKPORT: scsi: ufs: fix a race condition related to device management
e36eef3783 Revert "Revert "mm/rmap: Fix anon_vma->degree ambiguity leading to double-reuse""
a42b1b6119 BACKPORT: usb: dwc3: gadget: Ignore End Transfer delay on teardown
57f609a261 BACKPORT: usb: dwc3: gadget: Do not clear ep delayed stop flag during ep disable
66cba6260a FROMLIST: binder: fix UAF caused by faulty buffer cleanup
ce88d51c72 ANDROID: GKI: Increase max 8250 uarts
4d70900718 ANDROID: GKI: add missing vendor hook and other ktrace symbols
00499a5f22 ANDROID: GKI: reorder symbols within ABI files
e2ed7e5048 BACKPORT: f2fs: introduce gc_urgent_mid mode
4d2352ab07 UPSTREAM: ext4: fix use-after-free in ext4_xattr_set_entry
3acba5c435 UPSTREAM: ext4: remove duplicate definition of ext4_xattr_ibody_inline_set()
49652e1bbd UPSTREAM: Revert "ext4: fix use-after-free in ext4_xattr_set_entry"
2e61d90c44 ANDROID: fix use of plain integer as NULL pointer
306223f885 UPSTREAM: dm verity: stop using WQ_UNBOUND for verify_wq
3de420d372 BACKPORT: dm verity: enable WQ_HIGHPRI on verify_wq
004c469370 UPSTREAM: dm verity: remove WQ_CPU_INTENSIVE flag since using WQ_UNBOUND
7513f3e148 UPSTREAM: usb: typec: tcpm: Add support for altmodes
5bbc750d9e UPSTREAM: usb: typec: Add typec_port_register_altmodes()
8c9c56dbe5 FROMGIT: usb: dwc3: gadget: Add 1ms delay after end transfer command without IOC
7771fe887f BACKPORT: f2fs: give priority to select unpinned section for foreground GC
7b7cd11586 BACKPORT: f2fs: check pinfile in gc_data_segment() in advance
4078681792 ANDROID: Enable percpu high priority kthreads for erofs
76e536328f UPSTREAM: erofs: fix an error code in z_erofs_init_zip_subsystem()
6f48588062 BACKPORT: erofs: add per-cpu threads for decompression as an option
1b307b685c UPSTREAM: usb: gadget: f_uac2: Fix incorrect increment of bNumEndpoints
43390f1621 BACKPORT: hugetlb: unshare some PMDs when splitting VMAs
391c34feed UPSTREAM: KVM: arm64: Free hypervisor allocations if vector slot init fails
2f9858326d UPSTREAM: coresight: trbe: remove cpuhp instance node before remove cpuhp state
73c8565a9e UPSTREAM: block: mq-deadline: Fix dd_finish_request() for zoned devices
9a595405c4 UPSTREAM: mm/page_exit: fix kernel doc warning in page_ext_put()
8adfaec154 BACKPORT: arm64: mm: kfence: only handle translation faults
d11c3f780c UPSTREAM: mm/damon/dbgfs: check if rm_contexts input is for a real context
8eb30a41f5 UPSTREAM: mm/shmem: use page_mapping() to detect page cache for uffd continue
f74be44246 UPSTREAM: usb: dwc3: gadget: Don't delay End Transfer on delayed_status
37b3a6153f UPSTREAM: powerpc/64: Include cache.h directly in paca.h
3815eca894 UPSTREAM: firmware: tegra: Fix error application of sizeof() to pointer
1b3cfadf63 BACKPORT: drm/amd/display: Allocate structs needed by dcn_bw_calc_rq_dlg_ttu in pipe_ctx
3fafe0740e BACKPORT: drm/amd/display: Pass display_pipe_params_st as const in DML
61344663df ANDROID: clear memory trylock-bit when page_locked.
d55931c1cc UPSTREAM: ext4: fix kernel BUG in 'ext4_write_inline_data_end()'
08ccb44bff ANDROID: GKI: Update symbols to symbol list
faf3626b8e ANDROID: incremental fs: Evict inodes before freeing mount data
b7b3a636ad UPSTREAM: mm: memcontrol: set the correct memcg swappiness restriction
3ea370605a UPSTREAM: media: rc: Fix use-after-free bugs caused by ene_tx_irqsim()
d88cd5c7f0 ANDROID: Fix kernelci break: eventfd_signal_mask redefined
2a7aed7298 ANDROID: dm-default-key: update for blk_crypto_evict_key() returning void
0dad2818cb BACKPORT: FROMGIT: blk-crypto: make blk_crypto_evict_key() more robust
b3926f1a34 BACKPORT: FROMGIT: blk-crypto: make blk_crypto_evict_key() return void
e7bfca1670 BACKPORT: FROMGIT: blk-mq: release crypto keyslot before reporting I/O complete
469e02cc6d BACKPORT: of: base: Skip CPU nodes with "fail"/"fail-..." status
e0d8206f5d UPSTREAM: hid: bigben_probe(): validate report count
7fd7972fc1 UPSTREAM: HID: bigben: use spinlock to safely schedule workers
1bba06f3e8 UPSTREAM: HID: bigben_worker() remove unneeded check on report_field
aaffce1ef4 UPSTREAM: HID: bigben: use spinlock to protect concurrent accesses
d1d2d17fe9 BACKPORT: USB: gadget: Fix use-after-free during usb config switch

Change-Id: Ia2fcbb257da4641590addf2da2f6938144405043
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit b6b9dff07b by Greg Kroah-Hartman, 2023-05-30 19:12:22 +00:00
88 changed files with 6994 additions and 5687 deletions


@@ -271,11 +271,16 @@ Description: Shows current reserved blocks in system, it may be temporarily
What: /sys/fs/f2fs/<disk>/gc_urgent
Date: August 2017
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description: Do background GC agressively when set. When gc_urgent = 1,
background thread starts to do GC by given gc_urgent_sleep_time
interval. When gc_urgent = 2, F2FS will lower the bar of
checking idle in order to process outstanding discard commands
and GC a little bit aggressively. It is set to 0 by default.
Description: Do background GC aggressively when set. Set to 0 by default.
gc urgent high(1): does GC forcibly in a period of given
gc_urgent_sleep_time and ignores I/O idling check. uses greedy
GC approach and turns SSR mode on.
gc urgent low(2): lowers the bar of checking I/O idling in
order to process outstanding discard commands and GC a
little bit aggressively. uses cost benefit GC approach.
gc urgent mid(3): does GC forcibly in a period of given
gc_urgent_sleep_time and executes a mid level of I/O idling check.
uses cost benefit GC approach.
What: /sys/fs/f2fs/<disk>/gc_urgent_sleep_time
Date: August 2017
@@ -506,7 +511,7 @@ Date: July 2021
Contact: "Daeho Jeong" <daehojeong@google.com>
Description: Show how many segments have been reclaimed by GC during a specific
GC mode (0: GC normal, 1: GC idle CB, 2: GC idle greedy,
3: GC idle AT, 4: GC urgent high, 5: GC urgent low)
3: GC idle AT, 4: GC urgent high, 5: GC urgent low 6: GC urgent mid)
You can re-initialize this value to "0".
What: /sys/fs/f2fs/<disk>/gc_segment_mode
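The gc_urgent modes documented above are driven purely through sysfs writes. As a rough illustration (this sketch is not part of the commit; the helper and the disk name "sda1" are assumptions), a userspace program could select the new mid mode like this:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Mode values per the description above: 0 off, 1 high, 2 low, 3 mid. */
	static int set_gc_urgent(const char *disk, const char *mode)
	{
		char path[96];
		int fd;

		snprintf(path, sizeof(path), "/sys/fs/f2fs/%s/gc_urgent", disk);
		fd = open(path, O_WRONLY);
		if (fd < 0) {
			perror("open");
			return -1;
		}
		if (write(fd, mode, strlen(mode)) < 0) {
			perror("write");
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		return set_gc_urgent("sda1", "3") ? 1 : 0; /* "sda1" is a placeholder */
	}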

File diff suppressed because it is too large


@@ -265,8 +265,8 @@ up_read
up_write
vfree
vfs_fsync_range
vmalloc
__vmalloc
vmalloc
vsnprintf
vzalloc
__wait_on_buffer


@@ -73,9 +73,9 @@
blocking_notifier_call_chain
blocking_notifier_chain_register
blocking_notifier_chain_unregister
bpf_trace_run1
bpf_trace_run10
bpf_trace_run12
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
@@ -1044,9 +1044,9 @@
is_dma_buf_file
is_vmalloc_addr
iterate_fd
jiffies
jiffies_64_to_clock_t
jiffies64_to_msecs
jiffies
jiffies_to_msecs
jiffies_to_usecs
kasan_flag_enabled
@@ -1167,8 +1167,8 @@
memory_read_from_buffer
memparse
memremap
memset
memset64
memset
__memset_io
memstart_addr
memunmap
@@ -1262,8 +1262,8 @@
nla_find
nla_memcpy
__nla_parse
nla_put
nla_put_64bit
nla_put
nla_put_nohdr
nla_reserve
__nla_validate

File diff suppressed because it is too large


@@ -87,10 +87,10 @@
blocking_notifier_call_chain
blocking_notifier_chain_register
blocking_notifier_chain_unregister
bpf_trace_run1
bpf_trace_run10
bpf_trace_run11
bpf_trace_run12
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
@@ -993,8 +993,8 @@
irq_work_queue
irq_work_sync
is_vmalloc_addr
jiffies
jiffies64_to_msecs
jiffies
jiffies_to_msecs
jiffies_to_usecs
kasan_flag_enabled
@@ -1123,8 +1123,8 @@
memmove
memparse
memremap
memset
memset64
memset
memstart_addr
memunmap
mfd_add_devices
@@ -1198,8 +1198,8 @@
nla_find
nla_memcpy
__nla_parse
nla_put
nla_put_64bit
nla_put
nla_put_nohdr
nla_reserve
nla_strlcpy


@@ -1,10 +1,10 @@
[abi_symbol_list]
# required by raid0.ko
__tracepoint_block_bio_remap
disk_stack_limits
blk_queue_max_write_same_sectors
__traceiter_block_bio_remap
submit_bio_noacct
bio_split
blk_queue_max_write_same_sectors
disk_stack_limits
submit_bio_noacct
__traceiter_block_bio_remap
__tracepoint_block_bio_remap
# required by lenovo-fan.ko
clk_set_duty_cycle


@@ -199,8 +199,8 @@
cpufreq_disable_fast_switch
cpufreq_driver_fast_switch
cpufreq_driver_resolve_freq
cpufreq_driver_target
__cpufreq_driver_target
cpufreq_driver_target
cpufreq_enable_fast_switch
cpufreq_frequency_table_get_index
cpufreq_generic_attr
@@ -374,8 +374,8 @@
__dev_kfree_skb_any
__dev_kfree_skb_irq
devm_add_action
__devm_alloc_percpu
devm_alloc_etherdev_mqs
__devm_alloc_percpu
devm_blk_ksm_init
devm_clk_bulk_get
devm_clk_bulk_get_optional
@@ -1478,6 +1478,7 @@
pci_generic_config_write32
pci_lock_rescan_remove
pci_pio_to_address
pci_prepare_to_sleep
pci_unlock_rescan_remove
PDE_DATA
__per_cpu_offset
@@ -1499,6 +1500,7 @@
phy_drivers_register
phy_drivers_unregister
phy_ethtool_get_link_ksettings
phy_ethtool_get_wol
phy_ethtool_nway_reset
phy_ethtool_set_link_ksettings
phy_exit
@@ -1518,6 +1520,7 @@
phy_set_mode_ext
phy_start
phy_stop
phy_support_asym_pause
phy_write_paged
pid_task
pinconf_generic_parse_dt_config
@@ -3142,6 +3145,7 @@
__traceiter_android_vh_snd_soc_card_get_comp_chain
__traceiter_android_vh_sound_usb_support_cpu_suspend
__traceiter_android_vh_syscall_prctl_finished
__traceiter_android_vh_ufs_update_sdev
__traceiter_android_vh_v4l2subdev_set_fmt
__traceiter_android_vh_v4l2subdev_set_frame_interval
__traceiter_android_vh_v4l2subdev_set_selection
@@ -3159,10 +3163,10 @@
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_rvh_setscheduler
__tracepoint_android_rvh_set_user_nice
__tracepoint_android_rvh_uclamp_eff_get
__tracepoint_android_rvh_v4l2subdev_set_fmt
__tracepoint_android_rvh_v4l2subdev_set_frame_interval
__tracepoint_android_rvh_v4l2subdev_set_selection
__tracepoint_android_rvh_uclamp_eff_get
__tracepoint_android_vh_alter_futex_plist_add
__tracepoint_android_vh_alter_rwsem_list_add
__tracepoint_android_vh_arch_set_freq_scale


@@ -115,9 +115,9 @@
blocking_notifier_call_chain
blocking_notifier_chain_register
blocking_notifier_chain_unregister
bpf_trace_run1
bpf_trace_run10
bpf_trace_run12
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
@@ -685,8 +685,8 @@
dma_unmap_resource
dma_unmap_sg_attrs
do_exit
do_wait_intr_irq
do_traversal_all_lruvec
do_wait_intr_irq
down
down_interruptible
down_read
@@ -1568,8 +1568,8 @@
mempool_free
mempool_free_slab
memremap
memset
memset64
memset
__memset_io
memstart_addr
memunmap
@@ -1670,10 +1670,10 @@
nla_find
nla_memcpy
__nla_parse
nla_put
nla_put_64bit
nla_reserve
nla_put
nla_reserve_64bit
nla_reserve
__nla_validate
__nlmsg_put
no_llseek
@@ -1810,8 +1810,8 @@
page_endio
__page_file_index
__page_file_mapping
__page_mapcount
page_get_link
__page_mapcount
page_mapping
__page_pinner_migration_failed
page_symlink
@@ -2059,8 +2059,8 @@
radix_tree_lookup
radix_tree_lookup_slot
radix_tree_next_chunk
radix_tree_replace_slot
radix_tree_preload
radix_tree_replace_slot
___ratelimit
rational_best_approximation
raw_notifier_call_chain
@@ -2696,8 +2696,8 @@
__traceiter_android_rvh_after_enqueue_task
__traceiter_android_rvh_build_perf_domains
__traceiter_android_rvh_can_migrate_task
__traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_check_preempt_tick
__traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_cpu_cgroup_attach
__traceiter_android_rvh_cpu_cgroup_online
__traceiter_android_rvh_cpu_overutilized
@@ -2754,6 +2754,9 @@
__traceiter_android_rvh_v4l2subdev_set_selection
__traceiter_android_rvh_wake_up_new_task
__traceiter_android_vh_account_task_time
__traceiter_android_vh_add_page_to_lrulist
__traceiter_android_vh_alloc_pages_slowpath_begin
__traceiter_android_vh_alloc_pages_slowpath_end
__traceiter_android_vh_allow_domain_state
__traceiter_android_vh_alter_futex_plist_add
__traceiter_android_vh_alter_mutex_list_add
@@ -2784,11 +2787,13 @@
__traceiter_android_vh_check_bpf_syscall
__traceiter_android_vh_check_file_open
__traceiter_android_vh_check_mmap_file
__traceiter_android_vh_check_page_look_around_ref
__traceiter_android_vh_check_uninterruptible_tasks
__traceiter_android_vh_check_uninterruptible_tasks_dn
__traceiter_android_vh_clear_mask_adjust
__traceiter_android_vh_clear_reserved_fmt_fields
__traceiter_android_vh_cma_drain_all_pages_bypass
__traceiter_android_vh_cleanup_old_buffers_bypass
__traceiter_android_vh_commit_creds
__traceiter_android_vh_cpufreq_acct_update_power
__traceiter_android_vh_cpufreq_fast_switch
@@ -2797,9 +2802,12 @@
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_cpu_up
__traceiter_android_vh_check_page_look_around_ref
__traceiter_android_vh_del_page_from_lrulist
__traceiter_android_vh_do_futex
__traceiter_android_vh_do_page_trylock
__traceiter_android_vh_do_send_sig_info
__traceiter_android_vh_do_traversal_lruvec
__traceiter_android_vh_dm_bufio_shrink_scan_bypass
__traceiter_android_vh_drain_all_pages_bypass
__traceiter_android_vh_em_cpu_energy
__traceiter_android_vh_exclude_reserved_zone
@@ -2833,14 +2841,13 @@
__traceiter_android_vh_logbuf
__traceiter_android_vh_look_around
__traceiter_android_vh_look_around_migrate_page
__traceiter_android_vh_mark_page_accessed
__traceiter_android_vh_mem_cgroup_alloc
__traceiter_android_vh_mem_cgroup_css_offline
__traceiter_android_vh_mem_cgroup_css_online
__traceiter_android_vh_mem_cgroup_free
__traceiter_android_vh_mem_cgroup_id_remove
__traceiter_android_vh_meminfo_proc_show
__traceiter_android_vh_alloc_pages_slowpath_begin
__traceiter_android_vh_alloc_pages_slowpath_end
__traceiter_android_vh_mutex_can_spin_on_owner
__traceiter_android_vh_mutex_opt_spin_finish
__traceiter_android_vh_mutex_opt_spin_start
@@ -2851,25 +2858,19 @@
__traceiter_android_vh_override_creds
__traceiter_android_vh_page_referenced_check_bypass
__traceiter_android_vh_page_should_be_protected
__traceiter_android_vh_page_trylock_set
__traceiter_android_vh_page_trylock_clear
__traceiter_android_vh_page_trylock_get_result
__traceiter_android_vh_mark_page_accessed
__traceiter_android_vh_show_mapcount_pages
__traceiter_android_vh_do_traversal_lruvec
__traceiter_android_vh_do_page_trylock
__traceiter_android_vh_update_page_mapcount
__traceiter_android_vh_add_page_to_lrulist
__traceiter_android_vh_del_page_from_lrulist
__traceiter_android_vh_page_trylock_set
__traceiter_android_vh_pcplist_add_cma_pages_bypass
__traceiter_android_vh_prepare_update_load_avg_se
__traceiter_android_vh_printk_hotplug
__traceiter_android_vh_process_killed
__traceiter_android_vh_revert_creds
__traceiter_android_vh_record_mutex_lock_starttime
__traceiter_android_vh_record_pcpu_rwsem_starttime
__traceiter_android_vh_record_rtmutex_lock_starttime
__traceiter_android_vh_record_rwsem_lock_starttime
__traceiter_android_vh_record_pcpu_rwsem_starttime
__traceiter_android_vh_remove_vmalloc_stack
__traceiter_android_vh_revert_creds
__traceiter_android_vh_rmqueue
__traceiter_android_vh_rwsem_can_spin_on_owner
__traceiter_android_vh_rwsem_init
@@ -2885,7 +2886,6 @@
__traceiter_android_vh_rwsem_write_finished
__traceiter_android_vh_save_track_hash
__traceiter_android_vh_save_vmalloc_stack
__traceiter_android_vh_remove_vmalloc_stack
__traceiter_android_vh_sched_stat_runtime_rt
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_selinux_avc_insert
@@ -2901,6 +2901,7 @@
__traceiter_android_vh_set_module_permit_before_init
__traceiter_android_vh_setscheduler_uclamp
__traceiter_android_vh_set_wake_flags
__traceiter_android_vh_show_mapcount_pages
__traceiter_android_vh_show_max_freq
__traceiter_android_vh_show_resume_epoch_val
__traceiter_android_vh_show_stack_hash
@@ -2908,6 +2909,7 @@
__traceiter_android_vh_shrink_node_memcgs
__traceiter_android_vh_sync_txn_recvd
__traceiter_android_vh_syscall_prctl_finished
__traceiter_android_vh_test_clear_look_around_ref
__traceiter_android_vh_timer_calc_index
__traceiter_android_vh_tune_inactive_ratio
__traceiter_android_vh_tune_scan_type
@@ -2915,6 +2917,7 @@
__traceiter_android_vh_ufs_compl_command
__traceiter_android_vh_ufs_send_command
__traceiter_android_vh_ufs_send_tm_command
__traceiter_android_vh_update_page_mapcount
__traceiter_android_vh_update_topology_flags_workfn
__traceiter_binder_transaction_received
__traceiter_cpu_frequency
@@ -2924,6 +2927,10 @@
__traceiter_ipi_entry
__traceiter_ipi_raise
__traceiter_irq_handler_entry
__traceiter_net_dev_queue
__traceiter_net_dev_xmit
__traceiter_netif_receive_skb
__traceiter_netif_rx
__traceiter_pelt_se_tp
__traceiter_rwmmio_post_read
__traceiter_rwmmio_read
@@ -2940,14 +2947,13 @@
__traceiter_suspend_resume
__traceiter_task_newtask
__traceiter_task_rename
__traceiter_android_vh_test_clear_look_around_ref
__traceiter_xhci_urb_giveback
__tracepoint_android_rvh_account_irq
__tracepoint_android_rvh_after_enqueue_task
__tracepoint_android_rvh_build_perf_domains
__tracepoint_android_rvh_can_migrate_task
__tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_check_preempt_tick
__tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_cpu_cgroup_attach
__tracepoint_android_rvh_cpu_cgroup_online
__tracepoint_android_rvh_cpu_overutilized
@@ -3004,6 +3010,9 @@
__tracepoint_android_rvh_v4l2subdev_set_selection
__tracepoint_android_rvh_wake_up_new_task
__tracepoint_android_vh_account_task_time
__tracepoint_android_vh_add_page_to_lrulist
__tracepoint_android_vh_alloc_pages_slowpath_begin
__tracepoint_android_vh_alloc_pages_slowpath_end
__tracepoint_android_vh_allow_domain_state
__tracepoint_android_vh_alter_futex_plist_add
__tracepoint_android_vh_alter_mutex_list_add
@@ -3034,11 +3043,13 @@
__tracepoint_android_vh_check_bpf_syscall
__tracepoint_android_vh_check_file_open
__tracepoint_android_vh_check_mmap_file
__tracepoint_android_vh_check_page_look_around_ref
__tracepoint_android_vh_check_uninterruptible_tasks
__tracepoint_android_vh_check_uninterruptible_tasks_dn
__tracepoint_android_vh_clear_mask_adjust
__tracepoint_android_vh_clear_reserved_fmt_fields
__tracepoint_android_vh_cma_drain_all_pages_bypass
__tracepoint_android_vh_cleanup_old_buffers_bypass
__tracepoint_android_vh_commit_creds
__tracepoint_android_vh_cpufreq_acct_update_power
__tracepoint_android_vh_cpufreq_fast_switch
@@ -3047,9 +3058,12 @@
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_cpu_up
__tracepoint_android_vh_check_page_look_around_ref
__tracepoint_android_vh_del_page_from_lrulist
__tracepoint_android_vh_dm_bufio_shrink_scan_bypass
__tracepoint_android_vh_do_futex
__tracepoint_android_vh_do_page_trylock
__tracepoint_android_vh_do_send_sig_info
__tracepoint_android_vh_do_traversal_lruvec
__tracepoint_android_vh_drain_all_pages_bypass
__tracepoint_android_vh_em_cpu_energy
__tracepoint_android_vh_exclude_reserved_zone
@@ -3083,14 +3097,13 @@
__tracepoint_android_vh_logbuf
__tracepoint_android_vh_look_around
__tracepoint_android_vh_look_around_migrate_page
__tracepoint_android_vh_mark_page_accessed
__tracepoint_android_vh_mem_cgroup_alloc
__tracepoint_android_vh_mem_cgroup_css_offline
__tracepoint_android_vh_mem_cgroup_css_online
__tracepoint_android_vh_mem_cgroup_free
__tracepoint_android_vh_mem_cgroup_id_remove
__tracepoint_android_vh_meminfo_proc_show
__tracepoint_android_vh_alloc_pages_slowpath_begin
__tracepoint_android_vh_alloc_pages_slowpath_end
__tracepoint_android_vh_mutex_can_spin_on_owner
__tracepoint_android_vh_mutex_opt_spin_finish
__tracepoint_android_vh_mutex_opt_spin_start
@@ -3101,25 +3114,19 @@
__tracepoint_android_vh_override_creds
__tracepoint_android_vh_page_referenced_check_bypass
__tracepoint_android_vh_page_should_be_protected
__tracepoint_android_vh_page_trylock_set
__tracepoint_android_vh_page_trylock_clear
__tracepoint_android_vh_page_trylock_get_result
__tracepoint_android_vh_mark_page_accessed
__tracepoint_android_vh_show_mapcount_pages
__tracepoint_android_vh_do_traversal_lruvec
__tracepoint_android_vh_do_page_trylock
__tracepoint_android_vh_update_page_mapcount
__tracepoint_android_vh_add_page_to_lrulist
__tracepoint_android_vh_del_page_from_lrulist
__tracepoint_android_vh_page_trylock_set
__tracepoint_android_vh_pcplist_add_cma_pages_bypass
__tracepoint_android_vh_prepare_update_load_avg_se
__tracepoint_android_vh_printk_hotplug
__tracepoint_android_vh_process_killed
__tracepoint_android_vh_revert_creds
__tracepoint_android_vh_record_mutex_lock_starttime
__tracepoint_android_vh_record_pcpu_rwsem_starttime
__tracepoint_android_vh_record_rtmutex_lock_starttime
__tracepoint_android_vh_record_rwsem_lock_starttime
__tracepoint_android_vh_record_pcpu_rwsem_starttime
__tracepoint_android_vh_remove_vmalloc_stack
__tracepoint_android_vh_revert_creds
__tracepoint_android_vh_rmqueue
__tracepoint_android_vh_rwsem_can_spin_on_owner
__tracepoint_android_vh_rwsem_init
@@ -3135,7 +3142,6 @@
__tracepoint_android_vh_rwsem_write_finished
__tracepoint_android_vh_save_track_hash
__tracepoint_android_vh_save_vmalloc_stack
__tracepoint_android_vh_remove_vmalloc_stack
__tracepoint_android_vh_sched_stat_runtime_rt
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_selinux_avc_insert
@@ -3151,6 +3157,7 @@
__tracepoint_android_vh_set_module_permit_before_init
__tracepoint_android_vh_setscheduler_uclamp
__tracepoint_android_vh_set_wake_flags
__tracepoint_android_vh_show_mapcount_pages
__tracepoint_android_vh_show_max_freq
__tracepoint_android_vh_show_resume_epoch_val
__tracepoint_android_vh_show_stack_hash
@@ -3158,14 +3165,15 @@
__tracepoint_android_vh_shrink_node_memcgs
__tracepoint_android_vh_sync_txn_recvd
__tracepoint_android_vh_syscall_prctl_finished
__tracepoint_android_vh_test_clear_look_around_ref
__tracepoint_android_vh_timer_calc_index
__tracepoint_android_vh_tune_inactive_ratio
__tracepoint_android_vh_tune_scan_type
__tracepoint_android_vh_tune_swappiness
__tracepoint_android_vh_test_clear_look_around_ref
__tracepoint_android_vh_ufs_compl_command
__tracepoint_android_vh_ufs_send_command
__tracepoint_android_vh_ufs_send_tm_command
__tracepoint_android_vh_update_page_mapcount
__tracepoint_android_vh_update_topology_flags_workfn
__tracepoint_binder_transaction_received
__tracepoint_cpu_frequency
@@ -3262,6 +3270,9 @@
ucsi_set_drvdata
ucsi_unregister
__udelay
__udp4_lib_lookup
__udp6_lib_lookup
udp_table
ufshcd_auto_hibern8_update
ufshcd_delay_us
ufshcd_dme_get_attr


@@ -93,10 +93,10 @@
blocking_notifier_call_chain
blocking_notifier_chain_register
blocking_notifier_chain_unregister
bpf_trace_run1
bpf_trace_run10
bpf_trace_run11
bpf_trace_run12
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
@@ -1431,8 +1431,8 @@
mempool_free
mempool_free_slab
memremap
memset
memset64
memset
__memset_io
memstart_addr
memunmap
@@ -1453,8 +1453,8 @@
mmc_regulator_set_ocr
mmc_regulator_set_vqmmc
mmc_select_bus_width
mmc_select_hs
mmc_select_hs400
mmc_select_hs
mmc_select_hs_ddr
mmc_select_timing
mmc_send_tuning
@@ -1524,10 +1524,10 @@
nla_find
nla_memcpy
__nla_parse
nla_put
nla_put_64bit
nla_reserve
nla_put
nla_reserve_64bit
nla_reserve
__nla_validate
__nlmsg_put
no_llseek
@@ -2514,6 +2514,7 @@
__traceiter_android_rvh_cpu_cgroup_online
__traceiter_android_rvh_cpufreq_transition
__traceiter_android_rvh_dequeue_task
__traceiter_android_rvh_do_ptrauth_fault
__traceiter_android_rvh_do_sched_yield
__traceiter_android_rvh_enqueue_task
__traceiter_android_rvh_find_busiest_queue
@@ -2561,14 +2562,18 @@
__traceiter_android_rvh_update_misfit_status
__traceiter_android_rvh_wake_up_new_task
__traceiter_android_vh_allow_domain_state
__traceiter_android_vh_alter_rwsem_list_add
__traceiter_android_vh_binder_restore_priority
__traceiter_android_vh_binder_set_priority
__traceiter_android_vh_binder_transaction_init
__traceiter_android_vh_binder_wakeup_ilocked
__traceiter_android_vh_check_uninterruptible_tasks
__traceiter_android_vh_check_uninterruptible_tasks_dn
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_cpuidle_psci_enter
__traceiter_android_vh_cpuidle_psci_exit
__traceiter_android_vh_disable_thermal_cooling_stats
__traceiter_android_vh_dump_throttled_rt_tasks
__traceiter_android_vh_freq_table_limits
__traceiter_android_vh_ftrace_dump_buffer
@@ -2578,18 +2583,26 @@
__traceiter_android_vh_ftrace_size_check
__traceiter_android_vh_gic_resume
__traceiter_android_vh_gpio_block_read
__traceiter_android_vh_handle_tlb_conf
__traceiter_android_vh_iommu_setup_dma_ops
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_jiffies_update
__traceiter_android_vh_logbuf
__traceiter_android_vh_logbuf_pr_cont
__traceiter_android_vh_madvise_cold_or_pageout
__traceiter_android_vh_oom_check_panic
__traceiter_android_vh_printk_hotplug
__traceiter_android_vh_process_killed
__traceiter_android_vh_psi_event
__traceiter_android_vh_psi_group
__traceiter_android_vh_rproc_recovery
__traceiter_android_vh_rproc_recovery_set
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_shmem_alloc_page
__traceiter_android_vh_show_max_freq
__traceiter_android_vh_show_resume_epoch_val
__traceiter_android_vh_show_suspend_epoch_val
__traceiter_android_vh_subpage_dma_contig_alloc
__traceiter_android_vh_timer_calc_index
__traceiter_android_vh_ufs_check_int_errors
__traceiter_android_vh_ufs_clock_scaling


@@ -1,74 +1,74 @@
[abi_symbol_list]
check_cache_active
get_mem_cgroup_from_mm
is_swap_slot_cache_enabled
swapcache_free_entries
swap_type_to_swap_info
nr_swap_pages
plist_del
plist_requeue
scan_swap_map_slots
swap_alloc_cluster
check_cache_active
zero_pfn
nr_swap_pages
plist_requeue
plist_del
swapcache_free_entries
swap_type_to_swap_info
__traceiter_android_rvh_alloc_si
__traceiter_android_rvh_alloc_swap_slot_cache
__traceiter_android_rvh_drain_slots_cache_cpu
__traceiter_android_rvh_free_swap_slot
__traceiter_android_rvh_get_swap_page
__traceiter_android_rvh_handle_pte_fault_end
__traceiter_android_vh_handle_pte_fault_end
__traceiter_android_vh_cow_user_page
__traceiter_android_vh_swapin_add_anon_rmap
__traceiter_android_vh_waiting_for_page_migration
__traceiter_android_vh_migrate_page_states
__traceiter_android_vh_page_referenced_one_end
__traceiter_android_vh_account_swap_pages
__traceiter_android_vh_alloc_si
__traceiter_android_vh_alloc_swap_slot_cache
__traceiter_android_vh_count_pswpin
__traceiter_android_vh_count_pswpout
__traceiter_android_vh_count_swpout_vm_event
__traceiter_android_vh_swap_slot_cache_active
__traceiter_android_rvh_drain_slots_cache_cpu
__traceiter_android_vh_cow_user_page
__traceiter_android_vh_drain_slots_cache_cpu
__traceiter_android_rvh_alloc_swap_slot_cache
__traceiter_android_vh_alloc_swap_slot_cache
__traceiter_android_rvh_free_swap_slot
__traceiter_android_vh_free_swap_slot
__traceiter_android_rvh_get_swap_page
__traceiter_android_vh_get_swap_page
__traceiter_android_vh_page_isolated_for_reclaim
__traceiter_android_vh_inactive_is_low
__traceiter_android_vh_snapshot_refaults
__traceiter_android_vh_account_swap_pages
__traceiter_android_vh_unuse_swap_page
__traceiter_android_vh_init_swap_info_struct
__traceiter_android_vh_si_swapinfo
__traceiter_android_rvh_alloc_si
__traceiter_android_vh_alloc_si
__traceiter_android_vh_free_pages
__traceiter_android_vh_set_shmem_page_flag
__traceiter_android_vh_free_swap_slot
__traceiter_android_vh_get_swap_page
__traceiter_android_vh_handle_pte_fault_end
__traceiter_android_vh_inactive_is_low
__traceiter_android_vh_init_swap_info_struct
__traceiter_android_vh_migrate_page_states
__traceiter_android_vh_page_isolated_for_reclaim
__traceiter_android_vh_page_referenced_one_end
__traceiter_android_vh_ra_tuning_max_page
__traceiter_android_vh_set_shmem_page_flag
__traceiter_android_vh_si_swapinfo
__traceiter_android_vh_snapshot_refaults
__traceiter_android_vh_swapin_add_anon_rmap
__traceiter_android_vh_swap_slot_cache_active
__traceiter_android_vh_unuse_swap_page
__traceiter_android_vh_waiting_for_page_migration
__tracepoint_android_rvh_alloc_si
__tracepoint_android_rvh_alloc_swap_slot_cache
__tracepoint_android_rvh_drain_slots_cache_cpu
__tracepoint_android_rvh_free_swap_slot
__tracepoint_android_rvh_get_swap_page
__tracepoint_android_rvh_handle_pte_fault_end
__tracepoint_android_vh_handle_pte_fault_end
__tracepoint_android_vh_cow_user_page
__tracepoint_android_vh_swapin_add_anon_rmap
__tracepoint_android_vh_waiting_for_page_migration
__tracepoint_android_vh_migrate_page_states
__tracepoint_android_vh_page_referenced_one_end
__tracepoint_android_vh_account_swap_pages
__tracepoint_android_vh_alloc_si
__tracepoint_android_vh_alloc_swap_slot_cache
__tracepoint_android_vh_count_pswpin
__tracepoint_android_vh_count_pswpout
__tracepoint_android_vh_count_swpout_vm_event
__tracepoint_android_vh_swap_slot_cache_active
__tracepoint_android_rvh_drain_slots_cache_cpu
__tracepoint_android_vh_cow_user_page
__tracepoint_android_vh_drain_slots_cache_cpu
__tracepoint_android_rvh_alloc_swap_slot_cache
__tracepoint_android_vh_alloc_swap_slot_cache
__tracepoint_android_rvh_free_swap_slot
__tracepoint_android_vh_free_swap_slot
__tracepoint_android_rvh_get_swap_page
__tracepoint_android_vh_get_swap_page
__tracepoint_android_vh_page_isolated_for_reclaim
__tracepoint_android_vh_inactive_is_low
__tracepoint_android_vh_snapshot_refaults
__tracepoint_android_vh_account_swap_pages
__tracepoint_android_vh_unuse_swap_page
__tracepoint_android_vh_init_swap_info_struct
__tracepoint_android_vh_si_swapinfo
__tracepoint_android_rvh_alloc_si
__tracepoint_android_vh_alloc_si
__tracepoint_android_vh_free_pages
__tracepoint_android_vh_set_shmem_page_flag
__tracepoint_android_vh_free_swap_slot
__tracepoint_android_vh_get_swap_page
__tracepoint_android_vh_handle_pte_fault_end
__tracepoint_android_vh_inactive_is_low
__tracepoint_android_vh_init_swap_info_struct
__tracepoint_android_vh_migrate_page_states
__tracepoint_android_vh_page_isolated_for_reclaim
__tracepoint_android_vh_page_referenced_one_end
__tracepoint_android_vh_ra_tuning_max_page
__tracepoint_android_vh_set_shmem_page_flag
__tracepoint_android_vh_si_swapinfo
__tracepoint_android_vh_snapshot_refaults
__tracepoint_android_vh_swapin_add_anon_rmap
__tracepoint_android_vh_swap_slot_cache_active
__tracepoint_android_vh_unuse_swap_page
__tracepoint_android_vh_waiting_for_page_migration
zero_pfn


@@ -1,6 +1,6 @@
[abi_symbol_list]
# for type visibility
GKI_struct_selinux_state
GKI_struct_gic_chip_data
GKI_struct_selinux_state
GKI_struct_swap_slots_cache


@@ -254,6 +254,9 @@
__tasklet_schedule
thermal_zone_device_disable
thermal_zone_device_enable
__traceiter_rwmmio_post_read
__traceiter_rwmmio_read
__traceiter_rwmmio_write
__tracepoint_rwmmio_post_read
__tracepoint_rwmmio_read
__tracepoint_rwmmio_write


@@ -84,9 +84,9 @@
blocking_notifier_call_chain
blocking_notifier_chain_register
blocking_notifier_chain_unregister
bpf_trace_run1
bpf_trace_run10
bpf_trace_run12
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
@@ -938,8 +938,8 @@
mempool_free
mempool_free_slab
memremap
memset
memset64
memset
__memset_io
memstart_addr
memunmap
@@ -975,8 +975,8 @@
__next_zones_zonelist
nla_find
nla_put
nla_reserve
nla_reserve_64bit
nla_reserve
__nla_validate
__nlmsg_put
no_llseek
@@ -1687,6 +1687,7 @@
__traceiter_android_rvh_flush_task
__traceiter_android_rvh_migrate_queued_task
__traceiter_android_rvh_new_task_stats
__traceiter_android_rvh_refrigerator
__traceiter_android_rvh_replace_next_task_fair
__traceiter_android_rvh_resume_cpus
__traceiter_android_rvh_sched_cpu_dying
@@ -1704,22 +1705,30 @@
__traceiter_android_rvh_set_readahead_gfp_mask
__traceiter_android_rvh_set_skip_swapcache_flags
__traceiter_android_rvh_set_task_cpu
__traceiter_android_rvh_tcp_recvmsg
__traceiter_android_rvh_tcp_recvmsg_stat
__traceiter_android_rvh_tcp_sendmsg_locked
__traceiter_android_rvh_tick_entry
__traceiter_android_rvh_try_to_wake_up
__traceiter_android_rvh_try_to_wake_up_success
__traceiter_android_rvh_ttwu_cond
__traceiter_android_rvh_udp_recvmsg
__traceiter_android_rvh_udp_sendmsg
__traceiter_android_rvh_update_cpu_capacity
__traceiter_android_rvh_update_cpus_allowed
__traceiter_android_rvh_update_misfit_status
__traceiter_android_rvh_wake_up_new_task
__traceiter_android_vh_account_task_time
__traceiter_android_vh_allow_domain_state
__traceiter_android_vh_binder_restore_priority
__traceiter_android_vh_binder_set_priority
__traceiter_android_vh_binder_trans
__traceiter_android_vh_binder_wakeup_ilocked
__traceiter_android_vh_blk_alloc_rqs
__traceiter_android_vh_blk_rq_ctx_init
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_dup_task_struct
__traceiter_android_vh_filemap_fault_cache_page
__traceiter_android_vh_filemap_fault_get_page
__traceiter_android_vh_ftrace_dump_buffer
@@ -1729,6 +1738,7 @@
__traceiter_android_vh_ftrace_size_check
__traceiter_android_vh_iommu_setup_dma_ops
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_irqtime_account_process_tick
__traceiter_android_vh_jiffies_update
__traceiter_android_vh_mmap_region
__traceiter_android_vh_mmc_attach_sd
@@ -1746,10 +1756,13 @@
__traceiter_android_vh_shrink_slab_bypass
__traceiter_android_vh_timer_calc_index
__traceiter_android_vh_try_to_unmap_one
__traceiter_android_vh_tune_scan_type
__traceiter_android_vh_tune_swappiness
__traceiter_android_vh_ufs_check_int_errors
__traceiter_android_vh_ufs_compl_command
__traceiter_android_vh_ufs_send_command
__traceiter_android_vh_ufs_update_sdev
__traceiter_android_vh_vmpressure
__traceiter_binder_transaction_received
__traceiter_block_bio_complete
__traceiter_block_bio_queue


@@ -2,8 +2,8 @@
# commonly used symbols
# required by touch module
proc_mkdir_data
proc_create_seq_private
proc_mkdir_data
# required by aw8697-haptic.ko
devm_gpio_free
@@ -11,36 +11,43 @@
i2c_smbus_write_byte_data
#required by memory module
blk_execute_rq
blk_rq_map_kern
scsi_device_lookup
scsi_host_lookup
scsi_host_put
ufshcd_read_desc_param
utf16s_to_utf8s
async_schedule_node
blk_execute_rq
blk_ksm_get_slot_idx
blk_ksm_register
blk_ksm_reprogram_all_keys
blk_mq_alloc_tag_set
blk_mq_free_tag_set
blk_mq_init_queue
blk_mq_tagset_busy_iter
blk_queue_update_dma_alignment
blk_queue_update_dma_pad
blk_rq_map_kern
bsg_job_done
bsg_remove_queue
bsg_setup_queue
dev_pm_opp_remove
mempool_alloc_pages
mempool_free_pages
mempool_resize
__scsi_add_device
scsi_add_host_with_dma
scsi_block_requests
scsi_change_queue_depth
scsi_device_lookup
scsi_dma_map
scsi_dma_unmap
__scsi_execute
scsi_host_alloc
scsi_host_lookup
scsi_host_put
scsi_is_host_device
scsi_normalize_sense
scsi_print_command
scsi_remove_host
scsi_report_bus_reset
scsi_scan_host
scsi_unblock_requests
scsi_change_queue_depth
scsi_print_command
scsi_dma_map
scsi_host_alloc
scsi_normalize_sense
sg_copy_from_buffer
sg_copy_to_buffer
ufshcd_alloc_host
@@ -51,44 +58,40 @@
ufshcd_map_desc_id_to_length
ufshcd_query_attr_retry
ufshcd_query_flag_retry
ufshcd_read_desc_param
ufshcd_update_evt_hist
utf16s_to_utf8s
wait_for_completion_io_timeout
__scsi_add_device
__scsi_execute
blk_mq_free_tag_set
blk_queue_update_dma_alignment
blk_queue_update_dma_pad
blk_ksm_get_slot_idx
mempool_resize
mempool_alloc_pages
mempool_free_pages
#required by cs35l41 module
regmap_raw_write_async
snd_soc_bytes_tlv_callback
regcache_drop_region
regmap_async_complete
regmap_multi_reg_write
regmap_multi_reg_write_bypassed
regmap_raw_read
regmap_raw_write
regmap_raw_write_async
regulator_bulk_enable
snd_compr_stop_error
snd_soc_component_disable_pin
snd_soc_component_force_enable_pin
snd_ctl_boolean_mono_info
snd_pcm_format_physical_width
snd_pcm_hw_constraint_list
regmap_multi_reg_write_bypassed
snd_ctl_boolean_mono_info
snd_soc_put_volsw_range
snd_soc_bytes_tlv_callback
snd_soc_component_disable_pin
snd_soc_component_force_enable_pin
snd_soc_get_volsw_range
snd_soc_info_volsw_range
regmap_raw_write
regcache_drop_region
regmap_raw_read
regmap_multi_reg_write
regulator_bulk_enable
snd_soc_put_volsw_range
#required by mtd module
__blk_mq_end_request
balance_dirty_pages_ratelimited
bdi_alloc
bdi_put
bdi_register
blkdev_get_by_dev
blkdev_get_by_path
blkdev_put
__blk_mq_end_request
blk_mq_freeze_queue
blk_mq_init_sq_queue
blk_mq_quiesce_queue
@@ -97,9 +100,6 @@
blk_mq_unquiesce_queue
blk_queue_write_cache
blk_update_request
blkdev_get_by_dev
blkdev_get_by_path
blkdev_put
deactivate_locked_super
fixed_size_llseek
generic_shutdown_super
@@ -126,42 +126,42 @@
#required by millet.ko
freezer_cgrp_subsys
__traceiter_android_vh_do_send_sig_info
__traceiter_android_vh_binder_preset
__traceiter_android_vh_binder_wait_for_work
__traceiter_android_vh_binder_trans
__traceiter_android_vh_binder_reply
__traceiter_android_vh_binder_alloc_new_buf_locked
__tracepoint_android_vh_do_send_sig_info
__tracepoint_android_vh_binder_preset
__tracepoint_android_vh_binder_wait_for_work
__tracepoint_android_vh_binder_trans
__tracepoint_android_vh_binder_reply
__traceiter_android_vh_binder_preset
__traceiter_android_vh_binder_reply
__traceiter_android_vh_binder_trans
__traceiter_android_vh_binder_wait_for_work
__traceiter_android_vh_do_send_sig_info
__tracepoint_android_vh_binder_alloc_new_buf_locked
__tracepoint_android_vh_binder_preset
__tracepoint_android_vh_binder_reply
__tracepoint_android_vh_binder_trans
__tracepoint_android_vh_binder_wait_for_work
__tracepoint_android_vh_do_send_sig_info
#required by mi_sched.ko
find_user
free_uid
jiffies_64
__traceiter_android_vh_free_task
__tracepoint_android_vh_free_task
jiffies_64
free_uid
find_user
#required by migt.ko
__traceiter_android_rvh_after_enqueue_task
__traceiter_android_rvh_after_dequeue_task
__traceiter_android_rvh_after_enqueue_task
__traceiter_android_vh_map_util_freq
__tracepoint_android_rvh_after_enqueue_task
__tracepoint_android_rvh_after_dequeue_task
__tracepoint_android_rvh_after_enqueue_task
__tracepoint_android_vh_map_util_freq
#required by turbo.ko
cpuset_cpus_allowed
__traceiter_android_rvh_cpuset_fork
__traceiter_android_rvh_set_cpus_allowed_comm
__traceiter_android_vh_sched_setaffinity_early
__traceiter_android_rvh_cpuset_fork
__tracepoint_android_rvh_cpuset_fork
__tracepoint_android_rvh_set_cpus_allowed_comm
__tracepoint_android_vh_sched_setaffinity_early
__tracepoint_android_rvh_cpuset_fork
cpuset_cpus_allowed
#required by fas.ko
__traceiter_android_rvh_check_preempt_tick
@@ -175,15 +175,15 @@
console_printk
#required by binderinfo.ko module
__traceiter_android_vh_binder_transaction_init
__traceiter_android_vh_binder_print_transaction_info
__tracepoint_android_vh_binder_transaction_init
__traceiter_android_vh_binder_transaction_init
__tracepoint_android_vh_binder_print_transaction_info
__tracepoint_android_vh_binder_transaction_init
#required by reclaim module
__traceiter_android_vh_tune_scan_type
__tracepoint_android_vh_tune_scan_type
__traceiter_android_vh_tune_swappiness
__tracepoint_android_vh_tune_scan_type
__tracepoint_android_vh_tune_swappiness
#required by msm_drm.ko module
@@ -198,19 +198,19 @@
##required by xm_power_debug.ko module
wakeup_sources_read_lock
wakeup_sources_read_unlock
wakeup_sources_walk_start
wakeup_sources_walk_next
wakeup_sources_walk_start
#required by mi_mempool.ko module
__traceiter_android_vh_mmput
__tracepoint_android_vh_mmput
__traceiter_android_vh_alloc_pages_reclaim_bypass
__tracepoint_android_vh_alloc_pages_reclaim_bypass
__traceiter_android_vh_alloc_pages_failure_bypass
__traceiter_android_vh_alloc_pages_reclaim_bypass
__traceiter_android_vh_mmput
__tracepoint_android_vh_alloc_pages_failure_bypass
__tracepoint_android_vh_alloc_pages_reclaim_bypass
__tracepoint_android_vh_mmput
#required by us_prox.ko module
iio_trigger_alloc
__iio_trigger_register
iio_trigger_free
__iio_trigger_register
iio_trigger_unregister


@@ -372,6 +372,7 @@ CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_EXAR is not set
CONFIG_SERIAL_8250_NR_UARTS=32
CONFIG_SERIAL_8250_RUNTIME_UARTS=0
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AMBA_PL011=y
@@ -590,6 +591,8 @@ CONFIG_PSTORE_CONSOLE=y
CONFIG_PSTORE_PMSG=y
CONFIG_PSTORE_RAM=y
CONFIG_EROFS_FS=y
CONFIG_EROFS_FS_PCPU_KTHREAD=y
CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=y
CONFIG_NLS_CODEPAGE_775=y


@@ -2098,18 +2098,18 @@ int kvm_arch_init(void *opaque)
err = kvm_init_vector_slots();
if (err) {
kvm_err("Cannot initialise vector slots\n");
goto out_err;
goto out_hyp;
}
err = init_subsystems();
if (err)
goto out_hyp;
goto out_subs;
if (!in_hyp_mode) {
err = finalize_hyp_mode();
if (err) {
kvm_err("Failed to finalize Hyp protection\n");
goto out_hyp;
goto out_subs;
}
}
@@ -2123,8 +2123,9 @@ int kvm_arch_init(void *opaque)
return 0;
out_hyp:
out_subs:
hyp_cpu_pm_exit();
out_hyp:
if (!in_hyp_mode)
teardown_hyp_mode();
out_err:


@@ -361,6 +361,11 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
return false;
}
static bool is_translation_fault(unsigned long esr)
{
return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
}
static void __do_kernel_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -393,7 +398,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
} else if (addr < PAGE_SIZE) {
msg = "NULL pointer dereference";
} else {
if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
if (is_translation_fault(esr) &&
kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
return;
msg = "paging request";


@@ -12,6 +12,7 @@
#ifdef CONFIG_PPC64
#include <linux/cache.h>
#include <linux/string.h>
#include <asm/types.h>
#include <asm/lppaca.h>


@@ -342,6 +342,7 @@ CONFIG_INPUT_UINPUT=y
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=32
CONFIG_SERIAL_8250_RUNTIME_UARTS=0
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_SAMSUNG=y
@@ -522,6 +523,8 @@ CONFIG_PSTORE_CONSOLE=y
CONFIG_PSTORE_PMSG=y
CONFIG_PSTORE_RAM=y
CONFIG_EROFS_FS=y
CONFIG_EROFS_FS_PCPU_KTHREAD=y
CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=y
CONFIG_NLS_CODEPAGE_775=y


@@ -553,6 +553,7 @@ struct kvm_vcpu_arch {
u64 ia32_misc_enable_msr;
u64 smbase;
u64 smi_count;
bool at_instruction_boundary;
bool tpr_access_reporting;
bool xsaves_enabled;
u64 ia32_xss;
@@ -1061,6 +1062,8 @@ struct kvm_vcpu_stat {
u64 req_event;
u64 halt_poll_success_ns;
u64 halt_poll_fail_ns;
u64 preemption_reported;
u64 preemption_other;
};
struct x86_instruction_info;


@@ -3983,6 +3983,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
vcpu->arch.at_instruction_boundary = true;
}
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)


@@ -6510,6 +6510,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
return;
handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
vcpu->arch.at_instruction_boundary = true;
}
static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)


@@ -231,6 +231,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("l1d_flush", l1d_flush),
VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
VCPU_STAT("preemption_reported", preemption_reported),
VCPU_STAT("preemption_other", preemption_other),
VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
VM_STAT("mmu_pte_write", mmu_pte_write),
VM_STAT("mmu_pde_zapped", mmu_pde_zapped),
@@ -4052,6 +4054,19 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
struct kvm_host_map map;
struct kvm_steal_time *st;
/*
* The vCPU can be marked preempted if and only if the VM-Exit was on
* an instruction boundary and will not trigger guest emulation of any
* kind (see vcpu_run). Vendor specific code controls (conservatively)
* when this is true, for example allowing the vCPU to be marked
* preempted if and only if the VM-Exit was due to a host interrupt.
*/
if (!vcpu->arch.at_instruction_boundary) {
vcpu->stat.preemption_other++;
return;
}
vcpu->stat.preemption_reported++;
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
@@ -9357,6 +9372,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.l1tf_flush_l1d = true;
for (;;) {
/*
* If another guest vCPU requests a PV TLB flush in the middle
* of instruction emulation, the rest of the emulation could
* use a stale page translation. Assume that any code after
* this point can start executing an instruction.
*/
vcpu->arch.at_instruction_boundary = false;
if (kvm_vcpu_running(vcpu)) {
r = vcpu_enter_guest(vcpu);
} else {


@@ -1450,6 +1450,13 @@ bool blk_update_request(struct request *req, blk_status_t error,
req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif
/*
* Upper layers may call blk_crypto_evict_key() anytime after the last
* bio_endio(). Therefore, the keyslot must be released before that.
*/
if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
__blk_crypto_rq_put_keyslot(req);
if (unlikely(error && !blk_rq_is_passthrough(req) &&
!(req->rq_flags & RQF_QUIET)))
print_req_error(req, error, __func__);


@@ -60,6 +60,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
return rq->crypt_ctx;
}
static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
return rq->crypt_keyslot;
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
@@ -93,6 +98,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
return false;
}
static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
return false;
}
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
@@ -127,14 +137,21 @@ static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
return true;
}
blk_status_t __blk_crypto_init_request(struct request *rq);
static inline blk_status_t blk_crypto_init_request(struct request *rq)
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
{
if (blk_crypto_rq_is_encrypted(rq))
return __blk_crypto_init_request(rq);
return __blk_crypto_rq_get_keyslot(rq);
return BLK_STS_OK;
}
void __blk_crypto_rq_put_keyslot(struct request *rq);
static inline void blk_crypto_rq_put_keyslot(struct request *rq)
{
if (blk_crypto_rq_has_keyslot(rq))
__blk_crypto_rq_put_keyslot(rq);
}
void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
@@ -173,7 +190,7 @@ static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{
if (blk_crypto_rq_is_encrypted(rq))
return blk_crypto_init_request(rq);
return blk_crypto_rq_get_keyslot(rq);
return BLK_STS_OK;
}


@@ -13,6 +13,7 @@
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include "blk-crypto-internal.h"
@@ -217,26 +218,26 @@ static bool bio_crypt_check_alignment(struct bio *bio)
return true;
}
blk_status_t __blk_crypto_init_request(struct request *rq)
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
&rq->crypt_keyslot);
}
/**
* __blk_crypto_free_request - Uninitialize the crypto fields of a request.
*
* @rq: The request whose crypto fields to uninitialize.
*
* Completely uninitializes the crypto fields of a request. If a keyslot has
* been programmed into some inline encryption hardware, that keyslot is
* released. The rq->crypt_ctx is also freed.
*/
void __blk_crypto_free_request(struct request *rq)
void __blk_crypto_rq_put_keyslot(struct request *rq)
{
blk_ksm_put_slot(rq->crypt_keyslot);
rq->crypt_keyslot = NULL;
}
void __blk_crypto_free_request(struct request *rq)
{
/* The keyslot, if one was needed, should have been released earlier. */
if (WARN_ON_ONCE(rq->crypt_keyslot))
__blk_crypto_rq_put_keyslot(rq);
mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
blk_crypto_rq_set_defaults(rq);
rq->crypt_ctx = NULL;
}
/**
@@ -409,29 +410,39 @@ int blk_crypto_start_using_key(const struct blk_crypto_key *key,
EXPORT_SYMBOL_GPL(blk_crypto_start_using_key);
/**
* blk_crypto_evict_key() - Evict a key from any inline encryption hardware
* it may have been programmed into
* @q: The request queue who's associated inline encryption hardware this key
* might have been programmed into
* @key: The key to evict
* blk_crypto_evict_key() - Evict a blk_crypto_key from a request_queue
* @q: a request_queue on which I/O using the key may have been done
* @key: the key to evict
*
* Upper layers (filesystems) must call this function to ensure that a key is
* evicted from any hardware that it might have been programmed into. The key
* must not be in use by any in-flight IO when this function is called.
* For a given request_queue, this function removes the given blk_crypto_key
* from the keyslot management structures and evicts it from any underlying
* hardware keyslot(s) or blk-crypto-fallback keyslot it may have been
* programmed into.
*
* Return: 0 on success or if key is not present in the q's ksm, -err on error.
* Upper layers must call this before freeing the blk_crypto_key. It must be
* called for every request_queue the key may have been used on. The key must
* no longer be in use by any I/O when this function is called.
*
* Context: May sleep.
*/
int blk_crypto_evict_key(struct request_queue *q,
const struct blk_crypto_key *key)
void blk_crypto_evict_key(struct request_queue *q,
const struct blk_crypto_key *key)
{
if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
return blk_ksm_evict_key(q->ksm, key);
int err;
if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
err = blk_ksm_evict_key(q->ksm, key);
else
err = blk_crypto_fallback_evict_key(key);
/*
* If the request queue's associated inline encryption hardware didn't
* have support for the key, then the key might have been programmed
* into the fallback keyslot manager, so try to evict from there.
* An error can only occur here if the key failed to be evicted from a
* keyslot (due to a hardware or driver issue) or is allegedly still in
* use by I/O (due to a kernel bug). Even in these cases, the key is
* still unlinked from the keyslot management structures, and the caller
* is allowed and expected to free it right away. There's nothing
* callers can do to handle errors, so just log them and return void.
*/
return blk_crypto_fallback_evict_key(key);
if (err)
pr_warn_ratelimited("error %d evicting key\n", err);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
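For context on the new void return: a minimal caller-side sketch, assuming a filesystem tearing down a key once all I/O using it has finished. Apart from blk_crypto_evict_key() itself, the names and the kmalloc-based ownership here are illustrative assumptions, not taken from this commit:

	#include <linux/blk-crypto.h>
	#include <linux/blkdev.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/*
	 * Hedged sketch: eviction failures are now only logged by the block
	 * layer, so the caller frees the key unconditionally afterwards.
	 */
	static void example_destroy_crypto_key(struct request_queue *q,
					       struct blk_crypto_key *key)
	{
		/* No I/O using @key may still be in flight at this point. */
		blk_crypto_evict_key(q, key);
		memzero_explicit(key, sizeof(*key));	/* wipe raw key material */
		kfree(key);	/* assumes this caller kmalloc'ed the key */
	}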


@@ -803,6 +803,8 @@ static struct request *attempt_merge(struct request_queue *q,
if (!blk_discard_mergable(req))
elv_merge_requests(q, req, next);
blk_crypto_rq_put_keyslot(next);
/*
* 'next' is going away, so update stats accordingly
*/


@@ -2248,7 +2248,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
blk_mq_bio_to_request(rq, bio, nr_segs);
ret = blk_crypto_init_request(rq);
ret = blk_crypto_rq_get_keyslot(rq);
if (ret != BLK_STS_OK) {
bio->bi_status = ret;
bio_endio(bio);


@@ -350,25 +350,16 @@ bool blk_ksm_crypto_cfg_supported(struct blk_keyslot_manager *ksm,
return true;
}
/**
* blk_ksm_evict_key() - Evict a key from the lower layer device.
* @ksm: The keyslot manager to evict from
* @key: The key to evict
*
* Find the keyslot that the specified key was programmed into, and evict that
* slot from the lower layer device. The slot must not be in use by any
* in-flight IO when this function is called.
*
* Context: Process context. Takes and releases ksm->lock.
* Return: 0 on success or if there's no keyslot with the specified key, -EBUSY
* if the keyslot is still in use, or another -errno value on other
* error.
/*
* This is an internal function that evicts a key from an inline encryption
* device that can be either a real device or the blk-crypto-fallback "device".
* It is used only by blk_crypto_evict_key(); see that function for details.
*/
int blk_ksm_evict_key(struct blk_keyslot_manager *ksm,
const struct blk_crypto_key *key)
{
struct blk_ksm_keyslot *slot;
int err = 0;
int err;
if (blk_ksm_is_passthrough(ksm)) {
if (ksm->ksm_ll_ops.keyslot_evict) {
@@ -382,22 +373,30 @@ int blk_ksm_evict_key(struct blk_keyslot_manager *ksm,
blk_ksm_hw_enter(ksm);
slot = blk_ksm_find_keyslot(ksm, key);
if (!slot)
goto out_unlock;
if (!slot) {
/*
* Not an error, since a key not in use by I/O is not guaranteed
* to be in a keyslot. There can be more keys than keyslots.
*/
err = 0;
goto out;
}
if (WARN_ON_ONCE(atomic_read(&slot->slot_refs) != 0)) {
/* BUG: key is still in use by I/O */
err = -EBUSY;
goto out_unlock;
goto out_remove;
}
err = ksm->ksm_ll_ops.keyslot_evict(ksm, key,
blk_ksm_get_slot_idx(slot));
if (err)
goto out_unlock;
out_remove:
/*
* Callers free the key even on error, so unlink the key from the hash
* table and clear slot->key even on error.
*/
hlist_del(&slot->hash_node);
slot->key = NULL;
err = 0;
out_unlock:
out:
blk_ksm_hw_exit(ksm);
return err;
}


@@ -793,6 +793,18 @@ static void dd_prepare_request(struct request *rq)
rq->elv.priv[0] = NULL;
}
static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
{
struct deadline_data *dd = hctx->queue->elevator->elevator_data;
enum dd_prio p;
for (p = 0; p <= DD_PRIO_MAX; p++)
if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
return true;
return false;
}
/*
* Callback from inside blk_mq_free_request().
*
@@ -816,7 +828,6 @@ static void dd_finish_request(struct request *rq)
struct dd_blkcg *blkcg = rq->elv.priv[0];
const u8 ioprio_class = dd_rq_ioclass(rq);
const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
struct dd_per_prio *per_prio = &dd->per_prio[prio];
dd_count(dd, completed, prio);
ddcg_count(blkcg, completed, ioprio_class);
@@ -826,9 +837,10 @@ static void dd_finish_request(struct request *rq)
spin_lock_irqsave(&dd->zone_lock, flags);
blk_req_zone_write_unlock(rq);
if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
spin_unlock_irqrestore(&dd->zone_lock, flags);
if (dd_has_write_work(rq->mq_hctx))
blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
}
}


@@ -1399,7 +1399,8 @@ static struct binder_node *binder_get_node_from_ref(
*/
static void binder_free_ref(struct binder_ref *ref)
{
trace_android_vh_binder_del_ref(ref->proc ? ref->proc->tsk : 0, ref->data.desc);
trace_android_vh_binder_del_ref(ref->proc ? ref->proc->tsk : NULL,
ref->data.desc);
if (ref->node)
binder_free_node(ref->node);
kfree(ref->death);
@@ -2033,24 +2034,23 @@ static void binder_deferred_fd_close(int fd)
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_buffer *buffer,
binder_size_t failed_at,
binder_size_t off_end_offset,
bool is_failure)
{
int debug_id = buffer->debug_id;
binder_size_t off_start_offset, buffer_offset, off_end_offset;
binder_size_t off_start_offset, buffer_offset;
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d buffer release %d, size %zd-%zd, failed at %llx\n",
proc->pid, buffer->debug_id,
buffer->data_size, buffer->offsets_size,
(unsigned long long)failed_at);
(unsigned long long)off_end_offset);
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
off_end_offset = is_failure && failed_at ? failed_at :
off_start_offset + buffer->offsets_size;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
struct binder_object_header *hdr;
@@ -2210,6 +2210,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
}
}
/* Clean up all the objects in the buffer */
static inline void binder_release_entire_buffer(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_buffer *buffer,
bool is_failure)
{
binder_size_t off_end_offset;
off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
off_end_offset += buffer->offsets_size;
binder_transaction_buffer_release(proc, thread, buffer,
off_end_offset, is_failure);
}
static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
@ -2871,7 +2886,8 @@ static int binder_proc_transaction(struct binder_transaction *t,
thread = binder_select_thread_ilocked(proc);
trace_android_vh_binder_proc_transaction(current, proc->tsk,
thread ? thread->task : 0, node->debug_id, t->code, pending_async);
thread ? thread->task : NULL, node->debug_id, t->code,
pending_async);
if (thread) {
binder_transaction_priority(thread->task, t, node_prio,
@ -2914,7 +2930,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
t_outdated->buffer = NULL;
buffer->transaction = NULL;
trace_binder_transaction_update_buffer_release(buffer);
binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
binder_release_entire_buffer(proc, NULL, buffer, false);
binder_alloc_free_buf(&proc->alloc, buffer);
kfree(t_outdated);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
@ -3828,7 +3844,7 @@ binder_free_buf(struct binder_proc *proc,
binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
binder_release_entire_buffer(proc, thread, buffer, is_failure);
binder_alloc_free_buf(&proc->alloc, buffer);
}
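
Note: binder_release_entire_buffer() above recomputes the end of the offsets array instead of passing 0 as failed_at. The arithmetic in isolation, as a standalone userspace demo with hypothetical sizes:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            size_t data_size = 44, offsets_size = 16;
            /* offsets live right after the pointer-aligned data area */
            size_t off_start = ALIGN(data_size, sizeof(void *));

            printf("start=%zu end=%zu\n", off_start, off_start + offsets_size);
            return 0;       /* prints start=48 end=64 on LP64 */
    }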


@ -275,6 +275,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_commit_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_override_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_revert_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dm_bufio_shrink_scan_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cleanup_old_buffers_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_mutex_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rtmutex_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rwsem_lock_starttime);


@ -77,13 +77,14 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
const char *root_path, *filename = NULL;
char *root_path_buf;
size_t root_len;
size_t root_path_buf_len = 512;
root_path_buf = kzalloc(512, GFP_KERNEL);
root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
if (!root_path_buf)
goto out;
root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
sizeof(root_path_buf));
root_path_buf_len);
if (IS_ERR(root_path))
goto out;
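
Note: the bug fixed above is the classic sizeof-on-a-pointer mistake — root_path_buf is a char *, so sizeof(root_path_buf) yields the pointer size (8 on 64-bit), not the 512 bytes that were allocated. A standalone illustration:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            char on_stack[512];
            char *on_heap = malloc(512);

            printf("%zu\n", sizeof(on_stack));  /* 512: arrays keep their size */
            printf("%zu\n", sizeof(on_heap));   /* 8 on 64-bit: just the pointer */

            free(on_heap);
            return 0;
    }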


@ -459,9 +459,9 @@ static void dcn_bw_calc_rq_dlg_ttu(
struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs;
struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs;
struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs;
struct _vcs_dpi_display_rq_params_st rq_param = {0};
struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param = {0};
struct _vcs_dpi_display_e2e_pipe_params_st input = { { { 0 } } };
struct _vcs_dpi_display_rq_params_st *rq_param = &pipe->dml_rq_param;
struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param = &pipe->dml_dlg_sys_param;
struct _vcs_dpi_display_e2e_pipe_params_st *input = &pipe->dml_input;
float total_active_bw = 0;
float total_prefetch_bw = 0;
int total_flip_bytes = 0;
@ -470,45 +470,48 @@ static void dcn_bw_calc_rq_dlg_ttu(
memset(dlg_regs, 0, sizeof(*dlg_regs));
memset(ttu_regs, 0, sizeof(*ttu_regs));
memset(rq_regs, 0, sizeof(*rq_regs));
memset(rq_param, 0, sizeof(*rq_param));
memset(dlg_sys_param, 0, sizeof(*dlg_sys_param));
memset(input, 0, sizeof(*input));
for (i = 0; i < number_of_planes; i++) {
total_active_bw += v->read_bandwidth[i];
total_prefetch_bw += v->prefetch_bandwidth[i];
total_flip_bytes += v->total_immediate_flip_bytes[i];
}
dlg_sys_param.total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
if (dlg_sys_param.total_flip_bw < 0.0)
dlg_sys_param.total_flip_bw = 0;
dlg_sys_param->total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
if (dlg_sys_param->total_flip_bw < 0.0)
dlg_sys_param->total_flip_bw = 0;
dlg_sys_param.t_mclk_wm_us = v->dram_clock_change_watermark;
dlg_sys_param.t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
dlg_sys_param.t_urg_wm_us = v->urgent_watermark;
dlg_sys_param.t_extra_us = v->urgent_extra_latency;
dlg_sys_param.deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
dlg_sys_param.total_flip_bytes = total_flip_bytes;
dlg_sys_param->t_mclk_wm_us = v->dram_clock_change_watermark;
dlg_sys_param->t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
dlg_sys_param->t_urg_wm_us = v->urgent_watermark;
dlg_sys_param->t_extra_us = v->urgent_extra_latency;
dlg_sys_param->deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
dlg_sys_param->total_flip_bytes = total_flip_bytes;
pipe_ctx_to_e2e_pipe_params(pipe, &input.pipe);
input.clks_cfg.dcfclk_mhz = v->dcfclk;
input.clks_cfg.dispclk_mhz = v->dispclk;
input.clks_cfg.dppclk_mhz = v->dppclk;
input.clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
input.clks_cfg.socclk_mhz = v->socclk;
input.clks_cfg.voltage = v->voltage_level;
pipe_ctx_to_e2e_pipe_params(pipe, &input->pipe);
input->clks_cfg.dcfclk_mhz = v->dcfclk;
input->clks_cfg.dispclk_mhz = v->dispclk;
input->clks_cfg.dppclk_mhz = v->dppclk;
input->clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
input->clks_cfg.socclk_mhz = v->socclk;
input->clks_cfg.voltage = v->voltage_level;
// dc->dml.logger = pool->base.logger;
input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
input.dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
input->dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
input->dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
//input[in_idx].dout.output_standard;
/*todo: soc->sr_enter_plus_exit_time??*/
dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
dml1_rq_dlg_get_rq_params(dml, &rq_param, input.pipe.src);
dml1_rq_dlg_get_rq_params(dml, rq_param, input.pipe.src);
dml1_extract_rq_regs(dml, rq_regs, rq_param);
dml1_rq_dlg_get_dlg_params(
dml,
dlg_regs,
ttu_regs,
rq_param.dlg,
rq_param->dlg,
dlg_sys_param,
input,
true,


@ -3130,7 +3130,7 @@ void dcn20_calculate_dlg_params(
context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
&context->res_ctx.pipe_ctx[i].rq_regs,
pipes[pipe_idx].pipe);
&pipes[pipe_idx].pipe);
pipe_idx++;
}
}


@ -768,12 +768,12 @@ static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
void dml20_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st pipe_param)
const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src);
dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param->src);
extract_rq_regs(mode_lib, rq_regs, rq_param);
print__rq_regs_st(mode_lib, *rq_regs);
@ -1549,7 +1549,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
display_e2e_pipe_params_st *e2e_pipe_param,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,


@ -43,7 +43,7 @@ struct display_mode_lib;
void dml20_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st pipe_param);
const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
@ -61,7 +61,7 @@ void dml20_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
display_e2e_pipe_params_st *e2e_pipe_param,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,


@ -768,12 +768,12 @@ static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
void dml20v2_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st pipe_param)
const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src);
dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param->src);
extract_rq_regs(mode_lib, rq_regs, rq_param);
print__rq_regs_st(mode_lib, *rq_regs);
@ -1550,7 +1550,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
display_e2e_pipe_params_st *e2e_pipe_param,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,


@ -43,7 +43,7 @@ struct display_mode_lib;
void dml20v2_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st pipe_param);
const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
@ -61,7 +61,7 @@ void dml20v2_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
display_e2e_pipe_params_st *e2e_pipe_param,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,


@ -694,7 +694,7 @@ static void get_surf_rq_param(
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
const display_pipe_params_st pipe_param,
const display_pipe_params_st *pipe_param,
bool is_chroma)
{
bool mode_422 = false;
@ -706,30 +706,30 @@ static void get_surf_rq_param(
// FIXME check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
vp_width = pipe_param.src.viewport_width_c / ppe;
vp_height = pipe_param.src.viewport_height_c;
data_pitch = pipe_param.src.data_pitch_c;
meta_pitch = pipe_param.src.meta_pitch_c;
vp_width = pipe_param->src.viewport_width_c / ppe;
vp_height = pipe_param->src.viewport_height_c;
data_pitch = pipe_param->src.data_pitch_c;
meta_pitch = pipe_param->src.meta_pitch_c;
} else {
vp_width = pipe_param.src.viewport_width / ppe;
vp_height = pipe_param.src.viewport_height;
data_pitch = pipe_param.src.data_pitch;
meta_pitch = pipe_param.src.meta_pitch;
vp_width = pipe_param->src.viewport_width / ppe;
vp_height = pipe_param->src.viewport_height;
data_pitch = pipe_param->src.data_pitch;
meta_pitch = pipe_param->src.meta_pitch;
}
if (pipe_param.dest.odm_combine) {
if (pipe_param->dest.odm_combine) {
unsigned int access_dir;
unsigned int full_src_vp_width;
unsigned int hactive_half;
unsigned int src_hactive_half;
access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
hactive_half = pipe_param.dest.hactive / 2;
access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
hactive_half = pipe_param->dest.hactive / 2;
if (is_chroma) {
full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width;
src_hactive_half = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_half;
full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width;
src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_half;
} else {
full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width;
src_hactive_half = pipe_param.scale_ratio_depth.hscl_ratio * hactive_half;
full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width;
src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio * hactive_half;
}
if (access_dir == 0) {
@ -754,7 +754,7 @@ static void get_surf_rq_param(
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
if (pipe_param.src.hostvm)
if (pipe_param->src.hostvm)
rq_sizing_param->mpte_group_bytes = 512;
else
rq_sizing_param->mpte_group_bytes = 2048;
@ -768,23 +768,23 @@ static void get_surf_rq_param(
vp_height,
data_pitch,
meta_pitch,
pipe_param.src.source_format,
pipe_param.src.sw_mode,
pipe_param.src.macro_tile_size,
pipe_param.src.source_scan,
pipe_param.src.hostvm,
pipe_param->src.source_format,
pipe_param->src.sw_mode,
pipe_param->src.macro_tile_size,
pipe_param->src.source_scan,
pipe_param->src.hostvm,
is_chroma);
}
static void dml_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
const display_pipe_params_st pipe_param)
const display_pipe_params_st *pipe_param)
{
// get param for luma surface
rq_param->yuv420 = pipe_param.src.source_format == dm_420_8
|| pipe_param.src.source_format == dm_420_10;
rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10;
rq_param->yuv420 = pipe_param->src.source_format == dm_420_8
|| pipe_param->src.source_format == dm_420_10;
rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10;
get_surf_rq_param(
mode_lib,
@ -794,7 +794,7 @@ static void dml_rq_dlg_get_rq_params(
pipe_param,
0);
if (is_dual_plane((enum source_format_class) (pipe_param.src.source_format))) {
if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) {
// get param for chroma surface
get_surf_rq_param(
mode_lib,
@ -806,14 +806,14 @@ static void dml_rq_dlg_get_rq_params(
}
// calculate how to split the det buffer space between luma and chroma
handle_det_buf_split(mode_lib, rq_param, pipe_param.src);
handle_det_buf_split(mode_lib, rq_param, pipe_param->src);
print__rq_params_st(mode_lib, *rq_param);
}
void dml21_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st pipe_param)
const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
@ -1658,7 +1658,7 @@ void dml21_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
display_e2e_pipe_params_st *e2e_pipe_param,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@ -1696,7 +1696,7 @@ void dml21_rq_dlg_get_dlg_reg(
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_dlg_params(
mode_lib,
e2e_pipe_param,


@ -44,7 +44,7 @@ struct display_mode_lib;
void dml21_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st pipe_param);
const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
// Calculate and return DLG and TTU register struct given the system setting
@ -61,7 +61,7 @@ void dml21_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
display_e2e_pipe_params_st *e2e_pipe_param,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,


@ -747,7 +747,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
const display_pipe_params_st pipe_param,
const display_pipe_params_st *pipe_param,
bool is_chroma,
bool is_alpha)
{
@ -761,32 +761,32 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
// FIXME check if ppe apply for both luma and chroma in 422 case
if (is_chroma | is_alpha) {
vp_width = pipe_param.src.viewport_width_c / ppe;
vp_height = pipe_param.src.viewport_height_c;
data_pitch = pipe_param.src.data_pitch_c;
meta_pitch = pipe_param.src.meta_pitch_c;
surface_height = pipe_param.src.surface_height_y / 2.0;
vp_width = pipe_param->src.viewport_width_c / ppe;
vp_height = pipe_param->src.viewport_height_c;
data_pitch = pipe_param->src.data_pitch_c;
meta_pitch = pipe_param->src.meta_pitch_c;
surface_height = pipe_param->src.surface_height_y / 2.0;
} else {
vp_width = pipe_param.src.viewport_width / ppe;
vp_height = pipe_param.src.viewport_height;
data_pitch = pipe_param.src.data_pitch;
meta_pitch = pipe_param.src.meta_pitch;
surface_height = pipe_param.src.surface_height_y;
vp_width = pipe_param->src.viewport_width / ppe;
vp_height = pipe_param->src.viewport_height;
data_pitch = pipe_param->src.data_pitch;
meta_pitch = pipe_param->src.meta_pitch;
surface_height = pipe_param->src.surface_height_y;
}
if (pipe_param.dest.odm_combine) {
if (pipe_param->dest.odm_combine) {
unsigned int access_dir = 0;
unsigned int full_src_vp_width = 0;
unsigned int hactive_odm = 0;
unsigned int src_hactive_odm = 0;
access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
hactive_odm = pipe_param.dest.hactive / ((unsigned int)pipe_param.dest.odm_combine*2);
access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine*2);
if (is_chroma) {
full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width;
src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_odm;
full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width;
src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm;
} else {
full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width;
src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio * hactive_odm;
full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width;
src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm;
}
if (access_dir == 0) {
@ -815,7 +815,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
if (pipe_param.src.hostvm)
if (pipe_param->src.hostvm)
rq_sizing_param->mpte_group_bytes = 512;
else
rq_sizing_param->mpte_group_bytes = 2048;
@ -828,28 +828,28 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
vp_height,
data_pitch,
meta_pitch,
pipe_param.src.source_format,
pipe_param.src.sw_mode,
pipe_param.src.macro_tile_size,
pipe_param.src.source_scan,
pipe_param.src.hostvm,
pipe_param->src.source_format,
pipe_param->src.sw_mode,
pipe_param->src.macro_tile_size,
pipe_param->src.source_scan,
pipe_param->src.hostvm,
is_chroma,
surface_height);
}
static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
const display_pipe_params_st pipe_param)
const display_pipe_params_st *pipe_param)
{
// get param for luma surface
rq_param->yuv420 = pipe_param.src.source_format == dm_420_8
|| pipe_param.src.source_format == dm_420_10
|| pipe_param.src.source_format == dm_rgbe_alpha
|| pipe_param.src.source_format == dm_420_12;
rq_param->yuv420 = pipe_param->src.source_format == dm_420_8
|| pipe_param->src.source_format == dm_420_10
|| pipe_param->src.source_format == dm_rgbe_alpha
|| pipe_param->src.source_format == dm_420_12;
rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10;
rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10;
rq_param->rgbe_alpha = (pipe_param.src.source_format == dm_rgbe_alpha)?1:0;
rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha)?1:0;
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_l),
@ -859,7 +859,7 @@ static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
0,
0);
if (is_dual_plane((enum source_format_class)(pipe_param.src.source_format))) {
if (is_dual_plane((enum source_format_class)(pipe_param->src.source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_c),
@ -871,13 +871,13 @@ static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
}
// calculate how to split the det buffer space between luma and chroma
handle_det_buf_split(mode_lib, rq_param, pipe_param.src);
handle_det_buf_split(mode_lib, rq_param, pipe_param->src);
print__rq_params_st(mode_lib, *rq_param);
}
void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st pipe_param)
const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = { 0 };
@ -1831,7 +1831,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
display_e2e_pipe_params_st *e2e_pipe_param,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@ -1866,7 +1866,7 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_dlg_params(mode_lib,
e2e_pipe_param,
num_pipes,


@ -41,7 +41,7 @@ struct display_mode_lib;
// See also: <display_rq_regs_st>
void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st pipe_param);
const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
// Calculate and return DLG and TTU register struct given the system setting
@ -57,7 +57,7 @@ void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
display_e2e_pipe_params_st *e2e_pipe_param,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,


@ -49,7 +49,7 @@ struct dml_funcs {
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
display_e2e_pipe_params_st *e2e_pipe_param,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@ -60,7 +60,7 @@ struct dml_funcs {
void (*rq_dlg_get_rq_reg)(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st pipe_param);
const display_pipe_params_st *pipe_param);
void (*recalculate)(struct display_mode_lib *mode_lib);
void (*validate)(struct display_mode_lib *mode_lib);
};
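
Note: the signature changes across the DML hunks above all apply one pattern — large parameter structs such as display_pipe_params_st move from pass-by-value to pass-by-const-pointer, so calls no longer place a full copy of the struct on the stack. A sketch with an illustrative type, not the real DML structs:

    struct big_params {
            unsigned int v[256];    /* ~1 KiB */
    };

    /* Old style: the whole struct is copied for every call. */
    unsigned int sum_byval(const struct big_params p);

    /* New style: one 8-byte pointer; contents stay read-only. */
    static unsigned int sum_byptr(const struct big_params *p)
    {
            unsigned int s = 0;
            int i;

            for (i = 0; i < 256; i++)
                    s += p->v[i];
            return s;
    }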


@ -328,6 +328,9 @@ struct pipe_ctx {
struct _vcs_dpi_display_ttu_regs_st ttu_regs;
struct _vcs_dpi_display_rq_regs_st rq_regs;
struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
struct _vcs_dpi_display_rq_params_st dml_rq_param;
struct _vcs_dpi_display_dlg_sys_params_st dml_dlg_sys_param;
struct _vcs_dpi_display_e2e_pipe_params_st dml_input;
#endif
union pipe_update_flags update_flags;
struct dwbc *dwbc;
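
Note: the new dml_rq_param/dml_dlg_sys_param/dml_input members exist so dcn_bw_calc_rq_dlg_ttu() above can stop declaring those large structs as locals; moving them into the long-lived pipe_ctx avoids kernel stack overflows. The pattern, with hypothetical names:

    struct big_scratch {
            unsigned char bytes[2048];
    };

    struct pipe_demo {
            /* ... existing fields ... */
            struct big_scratch dml_scratch; /* storage moved off the stack */
    };

    static void calc(struct pipe_demo *pipe)
    {
            struct big_scratch *s = &pipe->dml_scratch;     /* was a local */

            memset(s, 0, sizeof(*s));   /* replaces the old "= {0}" initializer */
            /* ... use s->bytes ... */
    }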


@ -1030,6 +1030,7 @@ static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
{
cpuhp_state_remove_instance(drvdata->trbe_online, &drvdata->hotplug_node);
cpuhp_remove_multi_state(drvdata->trbe_online);
}
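
Note: the one-line fix above pairs the probe-time cpuhp_setup_state_multi() with the previously missing cpuhp_remove_multi_state() at remove time; removing only the instance leaks the dynamically allocated hotplug state. The expected pairing, with illustrative driver names (demo_cpu_online/demo_cpu_offline are assumed callbacks):

    static int demo_setup(struct demo_drvdata *d)
    {
            int state;

            state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "demo:online",
                                            demo_cpu_online, demo_cpu_offline);
            if (state < 0)
                    return state;
            d->online = state;

            return cpuhp_state_add_instance(d->online, &d->hotplug_node);
    }

    static void demo_teardown(struct demo_drvdata *d)
    {
            /* unwind in reverse: instance first, then the state itself */
            cpuhp_state_remove_instance(d->online, &d->hotplug_node);
            cpuhp_remove_multi_state(d->online);
    }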


@ -19,6 +19,8 @@
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
#include <trace/hooks/mm.h>
#define DM_MSG_PREFIX "bufio"
/*
@ -1683,6 +1685,13 @@ static void shrink_work(struct work_struct *w)
static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct dm_bufio_client *c;
bool bypass = false;
trace_android_vh_dm_bufio_shrink_scan_bypass(
dm_bufio_current_allocated,
&bypass);
if (bypass)
return 0;
c = container_of(shrink, struct dm_bufio_client, shrinker);
atomic_long_add(sc->nr_to_scan, &c->need_shrink);
@ -2009,6 +2018,14 @@ static void cleanup_old_buffers(void)
{
unsigned long max_age_hz = get_max_age_hz();
struct dm_bufio_client *c;
bool bypass = false;
trace_android_vh_cleanup_old_buffers_bypass(
dm_bufio_current_allocated,
&max_age_hz,
&bypass);
if (bypass)
return;
mutex_lock(&dm_bufio_clients_lock);


@ -67,13 +67,9 @@ lookup_cipher(const char *cipher_string)
static void default_key_dtr(struct dm_target *ti)
{
struct default_key_c *dkc = ti->private;
int err;
if (dkc->dev) {
err = blk_crypto_evict_key(bdev_get_queue(dkc->dev->bdev),
&dkc->key);
if (err && err != -ENOKEY)
DMWARN("Failed to evict crypto key: %d", err);
blk_crypto_evict_key(bdev_get_queue(dkc->dev->bdev), &dkc->key);
dm_put_device(ti, dkc->dev);
}
kfree_sensitive(dkc->cipher_string);


@ -1221,21 +1221,12 @@ struct dm_keyslot_manager {
struct mapped_device *md;
};
struct dm_keyslot_evict_args {
const struct blk_crypto_key *key;
int err;
};
static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct dm_keyslot_evict_args *args = data;
int err;
const struct blk_crypto_key *key = data;
err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
if (!args->err)
args->err = err;
/* Always try to evict the key from all devices. */
blk_crypto_evict_key(bdev_get_queue(dev->bdev), key);
return 0;
}
@ -1250,7 +1241,6 @@ static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
struct dm_keyslot_manager,
ksm);
struct mapped_device *md = dksm->md;
struct dm_keyslot_evict_args args = { key };
struct dm_table *t;
int srcu_idx;
int i;
@ -1263,10 +1253,11 @@ static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
ti = dm_table_get_target(t, i);
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
ti->type->iterate_devices(ti, dm_keyslot_evict_callback,
(void *)key);
}
dm_put_live_table(md, srcu_idx);
return args.err;
return 0;
}
struct dm_derive_raw_secret_args {


@ -1219,8 +1219,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
/* WQ_UNBOUND greatly improves performance when running on ramdisk */
v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
/*
* Using WQ_HIGHPRI improves throughput and completion latency by
* reducing wait times when reading from a dm-verity device.
*
* Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
* allows verify_wq to preempt softirq since verification in tasklet
* will fall-back to using it for error handling (or if the bufio cache
* doesn't have required hashes).
*/
v->verify_wq = alloc_workqueue("kverityd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!v->verify_wq) {
ti->error = "Cannot allocate workqueue";
r = -ENOMEM;


@ -77,12 +77,12 @@ struct uid_entry {
#endif
};
static u64 compute_write_bytes(struct task_struct *task)
static u64 compute_write_bytes(struct task_io_accounting *ioac)
{
if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
if (ioac->write_bytes <= ioac->cancelled_write_bytes)
return 0;
return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
return ioac->write_bytes - ioac->cancelled_write_bytes;
}
static void compute_io_bucket_stats(struct io_stats *io_bucket,
@ -239,17 +239,16 @@ static void set_io_uid_tasks_zero(struct uid_entry *uid_entry)
}
}
static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
struct task_struct *task, int slot)
static void add_uid_tasks_io_stats(struct task_entry *task_entry,
struct task_io_accounting *ioac, int slot)
{
struct task_entry *task_entry = find_or_register_task(uid_entry, task);
struct io_stats *task_io_slot = &task_entry->io[slot];
task_io_slot->read_bytes += task->ioac.read_bytes;
task_io_slot->write_bytes += compute_write_bytes(task);
task_io_slot->rchar += task->ioac.rchar;
task_io_slot->wchar += task->ioac.wchar;
task_io_slot->fsync += task->ioac.syscfs;
task_io_slot->read_bytes += ioac->read_bytes;
task_io_slot->write_bytes += compute_write_bytes(ioac);
task_io_slot->rchar += ioac->rchar;
task_io_slot->wchar += ioac->wchar;
task_io_slot->fsync += ioac->syscfs;
}
static void compute_io_uid_tasks(struct uid_entry *uid_entry)
@ -290,8 +289,6 @@ static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry)
#else
static void remove_uid_tasks(struct uid_entry *uid_entry) {};
static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {};
static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
struct task_struct *task, int slot) {};
static void compute_io_uid_tasks(struct uid_entry *uid_entry) {};
static void show_io_uid_tasks(struct seq_file *m,
struct uid_entry *uid_entry) {}
@ -446,23 +443,32 @@ static const struct proc_ops uid_remove_fops = {
.proc_write = uid_remove_write,
};
static void __add_uid_io_stats(struct uid_entry *uid_entry,
struct task_io_accounting *ioac, int slot)
{
struct io_stats *io_slot = &uid_entry->io[slot];
io_slot->read_bytes += ioac->read_bytes;
io_slot->write_bytes += compute_write_bytes(ioac);
io_slot->rchar += ioac->rchar;
io_slot->wchar += ioac->wchar;
io_slot->fsync += ioac->syscfs;
}
static void add_uid_io_stats(struct uid_entry *uid_entry,
struct task_struct *task, int slot)
{
struct io_stats *io_slot = &uid_entry->io[slot];
struct task_entry *task_entry __maybe_unused;
/* avoid double accounting of dying threads */
if (slot != UID_STATE_DEAD_TASKS && (task->flags & PF_EXITING))
return;
io_slot->read_bytes += task->ioac.read_bytes;
io_slot->write_bytes += compute_write_bytes(task);
io_slot->rchar += task->ioac.rchar;
io_slot->wchar += task->ioac.wchar;
io_slot->fsync += task->ioac.syscfs;
add_uid_tasks_io_stats(uid_entry, task, slot);
#ifdef CONFIG_UID_SYS_STATS_DEBUG
task_entry = find_or_register_task(uid_entry, task);
add_uid_tasks_io_stats(task_entry, &task->ioac, slot);
#endif
__add_uid_io_stats(uid_entry, &task->ioac, slot);
}
static void update_io_stats_all_locked(void)
@ -622,6 +628,48 @@ static const struct proc_ops uid_procstat_fops = {
.proc_write = uid_procstat_write,
};
struct update_stats_work {
struct work_struct work;
uid_t uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
struct task_struct *task;
#endif
struct task_io_accounting ioac;
u64 utime;
u64 stime;
};
static void update_stats_workfn(struct work_struct *work)
{
struct update_stats_work *usw =
container_of(work, struct update_stats_work, work);
struct uid_entry *uid_entry;
struct task_entry *task_entry __maybe_unused;
rt_mutex_lock(&uid_lock);
uid_entry = find_uid_entry(usw->uid);
if (!uid_entry)
goto exit;
uid_entry->utime += usw->utime;
uid_entry->stime += usw->stime;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
task_entry = find_task_entry(uid_entry, usw->task);
if (!task_entry)
goto exit;
add_uid_tasks_io_stats(task_entry, &usw->ioac,
UID_STATE_DEAD_TASKS);
#endif
__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
exit:
rt_mutex_unlock(&uid_lock);
#ifdef CONFIG_UID_SYS_STATS_DEBUG
put_task_struct(usw->task);
#endif
kfree(usw);
}
static int process_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
@ -633,8 +681,28 @@ static int process_notifier(struct notifier_block *self,
if (!task)
return NOTIFY_OK;
rt_mutex_lock(&uid_lock);
uid = from_kuid_munged(current_user_ns(), task_uid(task));
if (!rt_mutex_trylock(&uid_lock)) {
struct update_stats_work *usw;
usw = kmalloc(sizeof(struct update_stats_work), GFP_KERNEL);
if (usw) {
INIT_WORK(&usw->work, update_stats_workfn);
usw->uid = uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
usw->task = get_task_struct(task);
#endif
/*
* Copy task->ioac since task might be destroyed before
* the work is later performed.
*/
usw->ioac = task->ioac;
task_cputime_adjusted(task, &usw->utime, &usw->stime);
schedule_work(&usw->work);
}
return NOTIFY_OK;
}
uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
pr_err("%s: failed to find uid %d\n", __func__, uid);
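
Note: the process_notifier change above swaps an unconditional rt_mutex_lock() for a trylock; on contention it snapshots task->ioac into a heap allocation and defers the accounting to a workqueue, so a dying task never blocks on uid_lock. The shape of the pattern — account_locked and deferred_workfn are assumed helpers, not the driver's real names:

    struct deferred_update {
            struct work_struct work;
            struct task_io_accounting ioac; /* snapshot: task may be gone later */
            uid_t uid;
    };

    static int on_task_exit(struct task_struct *task, uid_t uid)
    {
            struct deferred_update *du;

            if (rt_mutex_trylock(&uid_lock)) {
                    account_locked(task, uid);      /* uncontended fast path */
                    rt_mutex_unlock(&uid_lock);
                    return NOTIFY_OK;
            }

            du = kmalloc(sizeof(*du), GFP_KERNEL);
            if (!du)
                    return NOTIFY_OK;       /* best effort: drop this sample */

            INIT_WORK(&du->work, deferred_workfn);
            du->uid = uid;
            du->ioac = task->ioac;          /* copy now, consume in the worker */
            schedule_work(&du->work);
            return NOTIFY_OK;
    }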


@ -627,6 +627,28 @@ bool of_device_is_available(const struct device_node *device)
}
EXPORT_SYMBOL(of_device_is_available);
/**
* __of_device_is_fail - check if a device has status "fail" or "fail-..."
*
* @device: Node to check status for, with locks already held
*
* Return: True if the status property is set to "fail" or "fail-..." (for any
* error code suffix), false otherwise
*/
static bool __of_device_is_fail(const struct device_node *device)
{
const char *status;
if (!device)
return false;
status = __of_get_property(device, "status", NULL);
if (status == NULL)
return false;
return !strcmp(status, "fail") || !strncmp(status, "fail-", 5);
}
/**
* of_device_is_big_endian - check if a device has BE registers
*
@ -775,6 +797,9 @@ EXPORT_SYMBOL(of_get_next_available_child);
* of_get_next_cpu_node - Iterate on cpu nodes
* @prev: previous child of the /cpus node, or NULL to get first
*
* Unusable CPUs (those with the status property set to "fail" or "fail-...")
* will be skipped.
*
* Returns a cpu node pointer with refcount incremented, use of_node_put()
* on it when done. Returns NULL when prev is the last child. Decrements
* the refcount of prev.
@ -796,6 +821,8 @@ struct device_node *of_get_next_cpu_node(struct device_node *prev)
of_node_put(node);
}
for (; next; next = next->sibling) {
if (__of_device_is_fail(next))
continue;
if (!(of_node_name_eq(next, "cpu") ||
__of_node_is_type(next, "cpu")))
continue;
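
Note: with __of_device_is_fail() wired into of_get_next_cpu_node() above, callers iterating CPU nodes transparently skip CPUs marked unusable. Usage sketch — the iterator manages the node refcounts itself:

    static void demo_list_usable_cpus(void)
    {
            struct device_node *cpu = NULL;

            /* drops the previous ref each step; "fail"/"fail-..." nodes skipped */
            while ((cpu = of_get_next_cpu_node(cpu)) != NULL)
                    pr_info("usable CPU node: %pOF\n", cpu);
    }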


@ -730,16 +730,6 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
/**
* ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
* @hba: per adapter instance
* @tag: position of the bit to be cleared
*/
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
clear_bit(tag, &hba->outstanding_reqs);
}
/**
* ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
* @reg: Register value of host controller status
@ -2882,37 +2872,76 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, int max_timeout)
{
int err = 0;
unsigned long time_left;
unsigned long time_left = msecs_to_jiffies(max_timeout);
unsigned long flags;
bool pending;
int err;
retry:
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
msecs_to_jiffies(max_timeout));
time_left);
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
hba->dev_cmd.complete = NULL;
if (likely(time_left)) {
/*
* The completion handler called complete() and the caller of
* this function still owns the @lrbp tag so the code below does
* not trigger any race conditions.
*/
hba->dev_cmd.complete = NULL;
err = ufshcd_get_tr_ocs(lrbp);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (!time_left) {
} else {
err = -ETIMEDOUT;
dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
__func__, lrbp->task_tag);
if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
/* successfully cleared the command, retry if needed */
err = -EAGAIN;
/*
* in case of an error, after clearing the doorbell,
* we also need to clear the outstanding_request
* field in hba
*/
ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
/*
* Since clearing the command succeeded we also need to
* clear the task tag bit from the outstanding_reqs
* variable.
*/
spin_lock_irqsave(hba->host->host_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
if (pending) {
hba->dev_cmd.complete = NULL;
__clear_bit(lrbp->task_tag,
&hba->outstanding_reqs);
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (!pending) {
/*
* The completion handler ran while we tried to
* clear the command.
*/
time_left = 1;
goto retry;
}
} else {
dev_err(hba->dev, "%s: failed to clear tag %d\n",
__func__, lrbp->task_tag);
spin_lock_irqsave(hba->host->host_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
if (pending)
hba->dev_cmd.complete = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (!pending) {
/*
* The completion handler ran while we tried to
* clear the command.
*/
time_left = 1;
goto retry;
}
}
}
return err;
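
Note: the rework above closes a race between device-command timeout handling and the completion interrupt — after trying to clear the doorbell, the code re-checks outstanding_reqs under host_lock and retries the wait if the completion already fired (the clear-failed branch re-checks the same way). Compressed to its skeleton; done, lock, outstanding, cancel_in_hw() and harvest_result() are stand-ins, not the driver's real code:

    static int wait_for_cmd(unsigned int tag, unsigned long time_left)
    {
            unsigned long flags;
            bool pending;

    retry:
            if (wait_for_completion_timeout(&done, time_left))
                    return harvest_result(tag);     /* normal completion */

            cancel_in_hw(tag);                      /* may lose against the IRQ */

            spin_lock_irqsave(&lock, flags);
            pending = test_and_clear_bit(tag, &outstanding);
            spin_unlock_irqrestore(&lock, flags);

            if (!pending) {
                    /* the completion fired while we were cancelling */
                    time_left = 1;
                    goto retry;
            }
            return -ETIMEDOUT;
    }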


@ -327,6 +327,11 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
unsigned int current_mode;
unsigned long flags;
u32 reg;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GSTS);
@ -345,6 +350,8 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
}
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -390,6 +397,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = s->private;
unsigned long flags;
u32 reg;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
@ -409,6 +421,8 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
}
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -458,6 +472,11 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = s->private;
unsigned long flags;
u32 reg;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@ -488,6 +507,8 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
seq_printf(s, "UNKNOWN %d\n", reg);
}
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -504,6 +525,7 @@ static ssize_t dwc3_testmode_write(struct file *file,
unsigned long flags;
u32 testmode = 0;
char buf[32];
int ret;
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@ -521,10 +543,16 @@ static ssize_t dwc3_testmode_write(struct file *file,
else
testmode = 0;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_set_test_mode(dwc, testmode);
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return count;
}
@ -543,12 +571,18 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
enum dwc3_link_state state;
u32 reg;
u8 speed;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GSTS);
if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
seq_puts(s, "Not available\n");
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -561,6 +595,8 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
dwc3_gadget_hs_link_string(state));
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -579,6 +615,7 @@ static ssize_t dwc3_link_state_write(struct file *file,
char buf[32];
u32 reg;
u8 speed;
int ret;
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@ -598,10 +635,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
else
return -EINVAL;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GSTS);
if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return -EINVAL;
}
@ -611,12 +653,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
if (speed < DWC3_DSTS_SUPERSPEED &&
state != DWC3_LINK_STATE_RECOV) {
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return -EINVAL;
}
dwc3_gadget_set_link_state(dwc, state);
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return count;
}
@ -640,6 +685,11 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
unsigned long flags;
u32 mdwidth;
u32 val;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
@ -652,6 +702,8 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -662,6 +714,11 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
unsigned long flags;
u32 mdwidth;
u32 val;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
@ -674,6 +731,8 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -683,12 +742,19 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -698,12 +764,19 @@ static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXREQQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -713,12 +786,19 @@ static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -728,12 +808,19 @@ static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -743,12 +830,19 @@ static int dwc3_event_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_EVENTQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -793,6 +887,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
int i;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
if (dep->number <= 1) {
@ -822,6 +921,8 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
out:
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -834,6 +935,11 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
u32 lower_32_bits;
u32 upper_32_bits;
u32 reg;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number);
@ -846,6 +952,8 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
seq_printf(s, "0x%016llx\n", ep_info);
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
@ -907,6 +1015,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->regs = dwc3_regs;
dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
dwc->regset->dev = dwc->dev;
root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
dwc->root = root;
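
Note: every debugfs handler above gains the same bracket — pm_runtime_resume_and_get() before touching registers, pm_runtime_put_sync() on every exit path — because reading DWC3 registers while the controller is runtime-suspended can hang the bus. The minimal shape of the pattern, with demo names (DEMO_REG and the demo struct are assumptions):

    static int demo_show(struct seq_file *s, void *unused)
    {
            struct demo *d = s->private;    /* d->regs: void __iomem * */
            int ret;

            ret = pm_runtime_resume_and_get(d->dev);
            if (ret < 0)
                    return ret;     /* on failure the usage count is already dropped */

            seq_printf(s, "%08x\n", readl(d->regs + DEMO_REG));

            pm_runtime_put_sync(d->dev);
            return 0;
    }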


@ -1013,6 +1013,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
u32 mask;
trace_dwc3_gadget_ep_disable(dep);
@ -1028,7 +1029,15 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->stream_capable = false;
dep->type = 0;
dep->flags &= DWC3_EP_TXFIFO_RESIZED;
mask = DWC3_EP_TXFIFO_RESIZED;
/*
* dwc3_remove_requests() can exit early if DWC3 EP delayed stop is
* set. Do not clear DEP flags, so that the end transfer command will
* be reattempted during the next SETUP stage.
*/
if (dep->flags & DWC3_EP_DELAY_STOP)
mask |= (DWC3_EP_DELAY_STOP | DWC3_EP_TRANSFER_STARTED);
dep->flags &= mask;
/* Clear out the ep descriptors for non-ep0 */
if (dep->number > 1) {
@ -1684,6 +1693,16 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
/*
* If the End Transfer command was timed out while the device is
* not in SETUP phase, it's possible that an incoming Setup packet
* may prevent the command's completion. Let's retry when the
* ep0state returns to EP0_SETUP_PHASE.
*/
if (ret == -ETIMEDOUT && dep->dwc->ep0state != EP0_SETUP_PHASE) {
dep->flags |= DWC3_EP_DELAY_STOP;
return 0;
}
WARN_ON_ONCE(ret);
dep->resource_index = 0;
@ -1695,6 +1714,7 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
}
dep->flags &= ~DWC3_EP_DELAY_STOP;
return ret;
}
@ -3702,8 +3722,10 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
if (dep->number <= 1 && dwc->ep0state != EP0_DATA_PHASE)
return;
if (interrupt && (dep->flags & DWC3_EP_DELAY_STOP))
return;
if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
(dep->flags & DWC3_EP_DELAY_STOP) ||
(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
return;
@ -3714,7 +3736,7 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
* timeout. Delay issuing the End Transfer command until the Setup TRB is
* prepared.
*/
if (dwc->ep0state != EP0_SETUP_PHASE) {
if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status) {
dep->flags |= DWC3_EP_DELAY_STOP;
return;
}


@ -1097,6 +1097,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
std_as_out_if0_desc.bInterfaceNumber = ret;
std_as_out_if1_desc.bInterfaceNumber = ret;
std_as_out_if1_desc.bNumEndpoints = 1;
uac2->as_out_intf = ret;
uac2->as_out_alt = 0;


@ -730,10 +730,11 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
}
ret = gadget->ops->pullup(gadget, 0);
if (!ret) {
if (!ret)
gadget->connected = 0;
if (gadget->udc->driver)
gadget->udc->driver->disconnect(gadget);
}
out:
trace_usb_gadget_disconnect(gadget, ret);


@ -80,20 +80,16 @@ DECLARE_EVENT_CLASS(xhci_log_ctx,
__field(dma_addr_t, ctx_dma)
__field(u8 *, ctx_va)
__field(unsigned, ctx_ep_num)
__field(int, slot_id)
__dynamic_array(u32, ctx_data,
((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
),
TP_fast_assign(
struct usb_device *udev;
udev = to_usb_device(xhci_to_hcd(xhci)->self.controller);
__entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
__entry->ctx_type = ctx->type;
__entry->ctx_dma = ctx->dma;
__entry->ctx_va = ctx->bytes;
__entry->slot_id = udev->slot_id;
__entry->ctx_ep_num = ep_num;
memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *


@ -1984,6 +1984,60 @@ typec_port_register_altmode(struct typec_port *port,
}
EXPORT_SYMBOL_GPL(typec_port_register_altmode);
void typec_port_register_altmodes(struct typec_port *port,
const struct typec_altmode_ops *ops, void *drvdata,
struct typec_altmode **altmodes, size_t n)
{
struct fwnode_handle *altmodes_node, *child;
struct typec_altmode_desc desc;
struct typec_altmode *alt;
size_t index = 0;
u32 svid, vdo;
int ret;
altmodes_node = device_get_named_child_node(&port->dev, "altmodes");
if (!altmodes_node)
return; /* No altmodes specified */
fwnode_for_each_child_node(altmodes_node, child) {
ret = fwnode_property_read_u32(child, "svid", &svid);
if (ret) {
dev_err(&port->dev, "Error reading svid for altmode %s\n",
fwnode_get_name(child));
continue;
}
ret = fwnode_property_read_u32(child, "vdo", &vdo);
if (ret) {
dev_err(&port->dev, "Error reading vdo for altmode %s\n",
fwnode_get_name(child));
continue;
}
if (index >= n) {
dev_err(&port->dev, "Error not enough space for altmode %s\n",
fwnode_get_name(child));
continue;
}
desc.svid = svid;
desc.vdo = vdo;
desc.mode = index + 1;
alt = typec_port_register_altmode(port, &desc);
if (IS_ERR(alt)) {
dev_err(&port->dev, "Error registering altmode %s\n",
fwnode_get_name(child));
continue;
}
alt->ops = ops;
typec_altmode_set_drvdata(alt, drvdata);
altmodes[index] = alt;
index++;
}
}
EXPORT_SYMBOL_GPL(typec_port_register_altmodes);
/**
* typec_register_port - Register a USB Type-C Port
* @parent: Parent device


@ -6521,6 +6521,10 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
goto out_role_sw_put;
}
typec_port_register_altmodes(port->typec_port,
&tcpm_altmode_ops, port,
port->port_altmode, ALTMODE_DISCOVERY_MAX);
mutex_lock(&port->lock);
tcpm_init(port);
mutex_unlock(&port->lock);


@ -76,3 +76,20 @@ config EROFS_FS_ZIP
If you don't want to enable compression feature, say N.
config EROFS_FS_PCPU_KTHREAD
bool "EROFS per-cpu decompression kthread workers"
depends on EROFS_FS_ZIP
help
Saying Y here enables per-CPU kthread workers pool to carry out
async decompression for low latencies on some architectures.
If unsure, say N.
config EROFS_FS_PCPU_KTHREAD_HIPRI
bool "EROFS high priority per-CPU kthread workers"
depends on EROFS_FS_ZIP && EROFS_FS_PCPU_KTHREAD
help
This permits EROFS to configure per-CPU kthread workers to run
at higher priority.
If unsure, say N.


@ -7,7 +7,7 @@
#include "zdata.h"
#include "compress.h"
#include <linux/prefetch.h>
#include <linux/cpuhotplug.h>
#include <trace/events/erofs.h>
/*
@ -125,24 +125,128 @@ typedef tagptr1_t compressed_page_t;
static struct workqueue_struct *z_erofs_workqueue __read_mostly;
void z_erofs_exit_zip_subsystem(void)
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static struct kthread_worker __rcu **z_erofs_pcpu_workers;
static void erofs_destroy_percpu_workers(void)
{
destroy_workqueue(z_erofs_workqueue);
z_erofs_destroy_pcluster_pool();
struct kthread_worker *worker;
unsigned int cpu;
for_each_possible_cpu(cpu) {
worker = rcu_dereference_protected(
z_erofs_pcpu_workers[cpu], 1);
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
if (worker)
kthread_destroy_worker(worker);
}
kfree(z_erofs_pcpu_workers);
}
static inline int z_erofs_init_workqueue(void)
static struct kthread_worker *erofs_init_percpu_worker(int cpu)
{
const unsigned int onlinecpus = num_possible_cpus();
struct kthread_worker *worker =
kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);
/*
* no need to spawn too many threads, limiting threads could minimum
* scheduling overhead, perhaps per-CPU threads should be better?
*/
z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
WQ_UNBOUND | WQ_HIGHPRI,
onlinecpus + onlinecpus / 4);
return z_erofs_workqueue ? 0 : -ENOMEM;
if (IS_ERR(worker))
return worker;
if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
sched_set_fifo_low(worker->task);
else
sched_set_normal(worker->task, 0);
return worker;
}
static int erofs_init_percpu_workers(void)
{
struct kthread_worker *worker;
unsigned int cpu;
z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
sizeof(struct kthread_worker *), GFP_ATOMIC);
if (!z_erofs_pcpu_workers)
return -ENOMEM;
for_each_online_cpu(cpu) { /* could miss cpu{off,on}line? */
worker = erofs_init_percpu_worker(cpu);
if (!IS_ERR(worker))
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
}
return 0;
}
#else
static inline void erofs_destroy_percpu_workers(void) {}
static inline int erofs_init_percpu_workers(void) { return 0; }
#endif
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
static enum cpuhp_state erofs_cpuhp_state;
static int erofs_cpu_online(unsigned int cpu)
{
struct kthread_worker *worker, *old;
worker = erofs_init_percpu_worker(cpu);
if (IS_ERR(worker))
return PTR_ERR(worker);
spin_lock(&z_erofs_pcpu_worker_lock);
old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
lockdep_is_held(&z_erofs_pcpu_worker_lock));
if (!old)
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
spin_unlock(&z_erofs_pcpu_worker_lock);
if (old)
kthread_destroy_worker(worker);
return 0;
}
static int erofs_cpu_offline(unsigned int cpu)
{
struct kthread_worker *worker;
spin_lock(&z_erofs_pcpu_worker_lock);
worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
lockdep_is_held(&z_erofs_pcpu_worker_lock));
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
spin_unlock(&z_erofs_pcpu_worker_lock);
synchronize_rcu();
if (worker)
kthread_destroy_worker(worker);
return 0;
}
static int erofs_cpu_hotplug_init(void)
{
int state;
state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
if (state < 0)
return state;
erofs_cpuhp_state = state;
return 0;
}
static void erofs_cpu_hotplug_destroy(void)
{
if (erofs_cpuhp_state)
cpuhp_remove_state_nocalls(erofs_cpuhp_state);
}
#else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
static inline int erofs_cpu_hotplug_init(void) { return 0; }
static inline void erofs_cpu_hotplug_destroy(void) {}
#endif
void z_erofs_exit_zip_subsystem(void)
{
erofs_cpu_hotplug_destroy();
erofs_destroy_percpu_workers();
destroy_workqueue(z_erofs_workqueue);
z_erofs_destroy_pcluster_pool();
}
int __init z_erofs_init_zip_subsystem(void)
@ -150,10 +254,31 @@ int __init z_erofs_init_zip_subsystem(void)
int err = z_erofs_create_pcluster_pool();
if (err)
return err;
err = z_erofs_init_workqueue();
goto out_error_pcluster_pool;
z_erofs_workqueue = alloc_workqueue("erofs_worker",
WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
if (!z_erofs_workqueue) {
err = -ENOMEM;
goto out_error_workqueue_init;
}
err = erofs_init_percpu_workers();
if (err)
z_erofs_destroy_pcluster_pool();
goto out_error_pcpu_worker;
err = erofs_cpu_hotplug_init();
if (err < 0)
goto out_error_cpuhp_init;
return err;
out_error_cpuhp_init:
erofs_destroy_percpu_workers();
out_error_pcpu_worker:
destroy_workqueue(z_erofs_workqueue);
out_error_workqueue_init:
z_erofs_destroy_pcluster_pool();
out_error_pcluster_pool:
return err;
}
@ -782,6 +907,12 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
}
static void z_erofs_decompressqueue_work(struct work_struct *work);
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
{
z_erofs_decompressqueue_work((struct work_struct *)work);
}
#endif
static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
bool sync, int bios)
{
@ -799,7 +930,22 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
return;
/* Use workqueue and sync decompression for atomic contexts only */
if (in_atomic() || irqs_disabled()) {
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
struct kthread_worker *worker;
rcu_read_lock();
worker = rcu_dereference(
z_erofs_pcpu_workers[raw_smp_processor_id()]);
if (!worker) {
INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
queue_work(z_erofs_workqueue, &io->u.work);
} else {
kthread_queue_work(worker, &io->u.kthread_work);
}
rcu_read_unlock();
#else
queue_work(z_erofs_workqueue, &io->u.work);
#endif
sbi->ctx.readahead_sync_decompress = true;
return;
}
@ -1207,7 +1353,12 @@ jobqueue_init(struct super_block *sb,
*fg = true;
goto fg_out;
}
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
kthread_init_work(&q->u.kthread_work,
z_erofs_decompressqueue_kthread_work);
#else
INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
#endif
} else {
fg_out:
q = fgq;
@ -1348,7 +1499,7 @@ static void z_erofs_submit_queue(struct super_block *sb,
/*
* although background is preferred, no one is pending for submission.
* don't issue workqueue for decompression but drop it directly instead.
* don't issue decompression but drop it directly instead.
*/
if (!*force_fg && !nr_bios) {
kvfree(q[JQ_SUBMIT]);


@@ -7,6 +7,7 @@
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H
#include <linux/kthread.h>
#include "internal.h"
#include "zpvec.h"
@@ -92,6 +93,7 @@ struct z_erofs_decompressqueue {
union {
struct completion done;
struct work_struct work;
struct kthread_work kthread_work;
} u;
};
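
Taken together, the kickoff hunk and this union show the dispatch pattern: look up the current CPU's kthread_worker under RCU and fall back to the shared workqueue when none is published. A minimal sketch of that pattern follows (illustrative names, not the erofs ones; whichever union member is used must be initialized first, as jobqueue_init() does):

	#include <linux/kthread.h>
	#include <linux/rcupdate.h>
	#include <linux/smp.h>
	#include <linux/workqueue.h>

	/* one worker per CPU, published/retired under RCU by hotplug callbacks */
	static struct kthread_worker __rcu **pcpu_workers;
	static struct workqueue_struct *fallback_wq;

	struct decompress_req {
		union {			/* queued on exactly one of the two paths */
			struct work_struct work;
			struct kthread_work kthread_work;
		} u;
	};

	static void decompress_workfn(struct work_struct *work)
	{
		/* ... decompress the queued request ... */
	}

	static void decompress_kthread_workfn(struct kthread_work *work)
	{
		/* both union members sit at offset 0, so the cast is safe */
		decompress_workfn((struct work_struct *)work);
	}

	static void dispatch(struct decompress_req *req)
	{
		struct kthread_worker *worker;

		rcu_read_lock();
		worker = rcu_dereference(pcpu_workers[raw_smp_processor_id()]);
		if (!worker) {
			/* CPU offline or kthreads disabled: use the workqueue */
			INIT_WORK(&req->u.work, decompress_workfn);
			queue_work(fallback_wq, &req->u.work);
		} else {
			/*
			 * kthread_init_work(&req->u.kthread_work,
			 * decompress_kthread_workfn) ran at request setup.
			 */
			kthread_queue_work(worker, &req->u.kthread_work);
		}
		rcu_read_unlock();
	}
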


@@ -207,7 +207,7 @@ static int ext4_read_inline_data(struct inode *inode, void *buffer,
/*
* write the buffer to the inline inode.
* If 'create' is set, we don't need to do the extra copy in the xattr
* value since it is already handled by ext4_xattr_ibody_inline_set.
* value since it is already handled by ext4_xattr_ibody_set.
* That saves us one memcpy.
*/
static void ext4_write_inline_data(struct inode *inode, struct ext4_iloc *iloc,
@@ -289,7 +289,7 @@ static int ext4_create_inline_data(handle_t *handle,
BUG_ON(!is.s.not_found);
error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
error = ext4_xattr_ibody_set(handle, inode, &i, &is);
if (error) {
if (error == -ENOSPC)
ext4_clear_inode_state(inode,
@@ -361,7 +361,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
i.value = value;
i.value_len = len;
error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
error = ext4_xattr_ibody_set(handle, inode, &i, &is);
if (error)
goto out;
@@ -434,7 +434,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
if (error)
goto out;
error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
error = ext4_xattr_ibody_set(handle, inode, &i, &is);
if (error)
goto out;
@@ -1949,8 +1949,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
i.value = value;
i.value_len = i_size > EXT4_MIN_INLINE_DATA_SIZE ?
i_size - EXT4_MIN_INLINE_DATA_SIZE : 0;
err = ext4_xattr_ibody_inline_set(handle, inode,
&i, &is);
err = ext4_xattr_ibody_set(handle, inode, &i, &is);
if (err)
goto out_error;
}


@@ -2831,11 +2831,9 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
crc = crc16(crc, (__u8 *)gdp, offset);
offset += sizeof(gdp->bg_checksum); /* skip checksum */
/* for checksum of struct ext4_group_desc do the rest...*/
if (ext4_has_feature_64bit(sb) &&
offset < le16_to_cpu(sbi->s_es->s_desc_size))
if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size)
crc = crc16(crc, (__u8 *)gdp + offset,
le16_to_cpu(sbi->s_es->s_desc_size) -
offset);
sbi->s_desc_size - offset);
out:
return cpu_to_le16(crc);


@@ -2215,7 +2215,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
return 0;
}
int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
struct ext4_xattr_info *i,
struct ext4_xattr_ibody_find *is)
{
@@ -2240,30 +2240,6 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
return 0;
}
static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
struct ext4_xattr_info *i,
struct ext4_xattr_ibody_find *is)
{
struct ext4_xattr_ibody_header *header;
struct ext4_xattr_search *s = &is->s;
int error;
if (EXT4_I(inode)->i_extra_isize == 0)
return -ENOSPC;
error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
if (error)
return error;
header = IHDR(inode, ext4_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
ext4_set_inode_state(inode, EXT4_STATE_XATTR);
} else {
header->h_magic = cpu_to_le32(0);
ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
}
return 0;
}
static int ext4_xattr_value_same(struct ext4_xattr_search *s,
struct ext4_xattr_info *i)
{
@@ -2578,6 +2554,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
.in_inode = !!entry->e_value_inum,
};
struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
int needs_kvfree = 0;
int error;
is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
@@ -2600,7 +2577,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
error = -ENOMEM;
goto out;
}
needs_kvfree = 1;
error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
if (error)
goto out;
@@ -2639,7 +2616,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
out:
kfree(b_entry_name);
if (entry->e_value_inum && buffer)
if (needs_kvfree && buffer)
kvfree(buffer);
if (is)
brelse(is->iloc.bh);
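
The new needs_kvfree flag is the whole fix: the old cleanup re-tested entry->e_value_inum, state that can change between allocation and cleanup, so the buffer could be freed (or leaked) under the wrong condition. A standalone sketch of the ownership-flag idiom, with a hypothetical function rather than the ext4 code:

	#include <linux/mm.h>	/* kvmalloc(), kvfree() */

	static int consume_value(void *prefetched, size_t size)
	{
		void *buffer = prefetched;
		int needs_kvfree = 0;
		int err = 0;

		if (!buffer) {
			buffer = kvmalloc(size, GFP_NOFS);
			if (!buffer)
				return -ENOMEM;
			needs_kvfree = 1;	/* record ownership at allocation */
		}

		/* ... use buffer; unrelated state may be modified here ... */

		/* free exactly what we allocated, however that state changed */
		if (needs_kvfree)
			kvfree(buffer);
		return err;
	}
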


@@ -200,9 +200,9 @@ extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
extern int ext4_xattr_ibody_get(struct inode *inode, int name_index,
const char *name,
void *buffer, size_t buffer_size);
extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
struct ext4_xattr_info *i,
struct ext4_xattr_ibody_find *is);
extern int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
struct ext4_xattr_info *i,
struct ext4_xattr_ibody_find *is);
extern struct mb_cache *ext4_xattr_create_cache(void);
extern void ext4_xattr_destroy_cache(struct mb_cache *);


@@ -469,12 +469,14 @@ static int stat_show(struct seq_file *s, void *v)
si->node_segs, si->bg_node_segs);
seq_printf(s, " - Reclaimed segs : Normal (%d), Idle CB (%d), "
"Idle Greedy (%d), Idle AT (%d), "
"Urgent High (%d), Urgent Low (%d)\n",
"Urgent High (%d), Urgent Mid (%d), "
"Urgent Low (%d)\n",
si->sbi->gc_reclaimed_segs[GC_NORMAL],
si->sbi->gc_reclaimed_segs[GC_IDLE_CB],
si->sbi->gc_reclaimed_segs[GC_IDLE_GREEDY],
si->sbi->gc_reclaimed_segs[GC_IDLE_AT],
si->sbi->gc_reclaimed_segs[GC_URGENT_HIGH],
si->sbi->gc_reclaimed_segs[GC_URGENT_MID],
si->sbi->gc_reclaimed_segs[GC_URGENT_LOW]);
seq_printf(s, "Try to move %d blocks (BG: %d)\n", si->tot_blks,
si->bg_data_blks + si->bg_node_blks);


@@ -1286,6 +1286,7 @@ enum {
GC_IDLE_AT,
GC_URGENT_HIGH,
GC_URGENT_LOW,
GC_URGENT_MID,
MAX_GC_MODE,
};
@@ -2754,6 +2755,9 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
if (is_inflight_io(sbi, type))
return false;
if (sbi->gc_mode == GC_URGENT_MID)
return true;
if (sbi->gc_mode == GC_URGENT_LOW &&
(type == DISCARD_TIME || type == GC_TIME))
return true;


@@ -90,7 +90,8 @@ static int gc_thread_func(void *data)
* invalidated soon after by user update or deletion.
* So, I'd like to wait some time to collect dirty segments.
*/
if (sbi->gc_mode == GC_URGENT_HIGH) {
if (sbi->gc_mode == GC_URGENT_HIGH ||
sbi->gc_mode == GC_URGENT_MID) {
wait_ms = gc_th->urgent_sleep_time;
f2fs_down_write(&sbi->gc_lock);
goto do_gc;
@@ -627,6 +628,54 @@ static void release_victim_entry(struct f2fs_sb_info *sbi)
f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}
static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
if (!dirty_i->enable_pin_section)
return false;
if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
dirty_i->pinned_secmap_cnt++;
return true;
}
static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
return dirty_i->pinned_secmap_cnt;
}
static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
unsigned int secno)
{
return dirty_i->enable_pin_section &&
f2fs_pinned_section_exists(dirty_i) &&
test_bit(secno, dirty_i->pinned_secmap);
}
static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
DIRTY_I(sbi)->pinned_secmap_cnt = 0;
}
DIRTY_I(sbi)->enable_pin_section = enable;
}
static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
unsigned int segno)
{
if (!f2fs_is_pinned_file(inode))
return 0;
if (gc_type != FG_GC)
return -EBUSY;
if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
f2fs_pin_file_control(inode, true);
return -EAGAIN;
}
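
f2fs_gc_pinned_control() centralizes the pinned-file policy: it returns 0 when the file is not pinned (GC proceeds), -EBUSY so background GC skips the block, and -EAGAIN for foreground GC after either pinning the section for later avoidance or, when pinning is disabled, dropping the file's pin via f2fs_pin_file_control(). The three call sites below replace open-coded copies of this logic.
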
/*
* This function is called from two paths.
* One is garbage collection and the other is SSR segment selection.
@@ -768,6 +817,9 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
goto next;
if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
goto next;
if (is_atgc) {
add_victim_entry(sbi, &p, segno);
goto next;
@@ -1196,12 +1248,9 @@ static int move_data_block(struct inode *inode, block_t bidx,
goto out;
}
if (f2fs_is_pinned_file(inode)) {
if (gc_type == FG_GC)
f2fs_pin_file_control(inode, true);
err = -EAGAIN;
err = f2fs_gc_pinned_control(inode, gc_type, segno);
if (err)
goto out;
}
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
@@ -1346,12 +1395,9 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
err = -EAGAIN;
goto out;
}
if (f2fs_is_pinned_file(inode)) {
if (gc_type == FG_GC)
f2fs_pin_file_control(inode, true);
err = -EAGAIN;
err = f2fs_gc_pinned_control(inode, gc_type, segno);
if (err)
goto out;
}
if (gc_type == BG_GC) {
if (PageWriteback(page)) {
@@ -1472,10 +1518,18 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
ofs_in_node = le16_to_cpu(entry->ofs_in_node);
if (phase == 3) {
int err;
inode = f2fs_iget(sb, dni.ino);
if (IS_ERR(inode) || is_bad_inode(inode))
continue;
err = f2fs_gc_pinned_control(inode, gc_type, segno);
if (err == -EAGAIN) {
iput(inode);
return submitted;
}
if (!f2fs_down_write_trylock(
&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
iput(inode);
@@ -1756,9 +1810,17 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
ret = -EINVAL;
goto stop;
}
retry:
ret = __get_victim(sbi, &segno, gc_type);
if (ret)
if (ret) {
/* allow to search victims in sections that have pinned data */
if (ret == -ENODATA && gc_type == FG_GC &&
f2fs_pinned_section_exists(DIRTY_I(sbi))) {
f2fs_unpin_all_sections(sbi, false);
goto retry;
}
goto stop;
}
seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
if (gc_type == FG_GC &&
@@ -1809,6 +1871,9 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
if (gc_type == FG_GC)
f2fs_unpin_all_sections(sbi, true);
trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
get_pages(sbi, F2FS_DIRTY_NODES),
get_pages(sbi, F2FS_DIRTY_DENTS),


@@ -4713,6 +4713,13 @@ static int init_victim_secmap(struct f2fs_sb_info *sbi)
dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
if (!dirty_i->victim_secmap)
return -ENOMEM;
dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
if (!dirty_i->pinned_secmap)
return -ENOMEM;
dirty_i->pinned_secmap_cnt = 0;
dirty_i->enable_pin_section = true;
return 0;
}
@@ -5301,6 +5308,7 @@ static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
kvfree(dirty_i->pinned_secmap);
kvfree(dirty_i->victim_secmap);
}


@@ -295,6 +295,9 @@ struct dirty_seglist_info {
struct mutex seglist_lock; /* lock for segment bitmaps */
int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */
unsigned long *victim_secmap; /* background GC victims */
unsigned long *pinned_secmap; /* pinned victims from foreground GC */
unsigned int pinned_secmap_cnt; /* count of victims that have pinned data */
bool enable_pin_section; /* enable pinning section */
};
/* victim selection function for cleaning and SSR */


@@ -466,6 +466,13 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
}
} else if (t == 2) {
sbi->gc_mode = GC_URGENT_LOW;
} else if (t == 3) {
sbi->gc_mode = GC_URGENT_MID;
if (sbi->gc_thread) {
sbi->gc_thread->gc_wake = 1;
wake_up_interruptible_all(
&sbi->gc_thread->gc_wait_queue_head);
}
} else {
return -EINVAL;
}
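
This hunk gives the gc_urgent sysfs knob a third urgent level: 0 reverts to normal GC, 1 selects GC_URGENT_HIGH, 2 GC_URGENT_LOW, and 3 the new GC_URGENT_MID, which also wakes the GC thread immediately; assuming the usual per-device node, writing "3" to /sys/fs/f2fs/<disk>/gc_urgent selects it. In mid mode the GC thread keeps the urgent sleep interval (see the gc_thread_func hunk above) and is_idle() always reports idle, while reclaimed segments are accounted separately under GC_URGENT_MID in the debugfs stats.
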


@@ -34,12 +34,14 @@ DECLARE_FEATURE_FLAG(corefs);
DECLARE_FEATURE_FLAG(zstd);
DECLARE_FEATURE_FLAG(v2);
DECLARE_FEATURE_FLAG(bugfix_throttling);
DECLARE_FEATURE_FLAG(bugfix_inode_eviction);
static struct attribute *attributes[] = {
&corefs_attr.attr,
&zstd_attr.attr,
&v2_attr.attr,
&bugfix_throttling_attr.attr,
&bugfix_inode_eviction_attr.attr,
NULL,
};


@@ -1928,6 +1928,13 @@ void incfs_kill_sb(struct super_block *sb)
pr_debug("incfs: unmount\n");
/*
* We must kill the super before freeing mi, since killing the super
* triggers inode eviction, which triggers the final update of the
* backing file, which uses certain information from mi.
*/
kill_anon_super(sb);
if (mi) {
if (mi->mi_backing_dir_path.dentry)
dinode = d_inode(mi->mi_backing_dir_path.dentry);
@@ -1943,7 +1950,6 @@ void incfs_kill_sb(struct super_block *sb)
incfs_free_mount_info(mi);
sb->s_fs_info = NULL;
}
kill_anon_super(sb);
}
static int show_options(struct seq_file *m, struct dentry *root)


@@ -104,8 +104,8 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key,
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
struct request_queue *q);
int blk_crypto_evict_key(struct request_queue *q,
const struct blk_crypto_key *key);
void blk_crypto_evict_key(struct request_queue *q,
const struct blk_crypto_key *key);
bool blk_crypto_config_supported(struct request_queue *q,
const struct blk_crypto_config *cfg);


@@ -73,12 +73,6 @@ static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
return -ENOSYS;
}
static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
unsigned mask)
{
return -ENOSYS;
}
static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
{


@@ -42,13 +42,7 @@ struct anon_vma {
*/
atomic_t refcount;
/*
* Count of child anon_vmas and VMAs which points to this anon_vma.
*
* This counter is used for making decision about reusing anon_vma
* instead of forking new one. See comments in function anon_vma_clone.
*/
unsigned degree;
unsigned degree; /* ANDROID: KABI preservation, DO NOT USE! */
struct anon_vma *parent; /* Parent of this anon_vma */
@@ -63,6 +57,25 @@ struct anon_vma {
/* Interval tree of private "related" vmas */
struct rb_root_cached rb_root;
/*
* ANDROID: KABI preservation, it's safe to put these at the end of this structure as it's
* only passed by a pointer everywhere, the size and internal structures are local to the
* core kernel.
*/
#ifndef __GENKSYMS__
/*
* Count of child anon_vmas. Equals the count of all anon_vmas that
* have ->parent pointing to this one, including itself.
*
* This counter is used when deciding whether to reuse an anon_vma
* instead of forking a new one. See comments in function anon_vma_clone.
*/
unsigned long num_children;
/* Count of VMAs whose ->anon_vma pointer points to this object. */
unsigned long num_active_vmas;
#endif
};
/*


@@ -18,6 +18,7 @@ struct typec_partner;
struct typec_cable;
struct typec_plug;
struct typec_port;
struct typec_altmode_ops;
struct fwnode_handle;
struct device;
@@ -139,6 +140,11 @@ struct typec_altmode
struct typec_altmode
*typec_port_register_altmode(struct typec_port *port,
const struct typec_altmode_desc *desc);
void typec_port_register_altmodes(struct typec_port *port,
const struct typec_altmode_ops *ops, void *drvdata,
struct typec_altmode **altmodes, size_t n);
void typec_unregister_altmode(struct typec_altmode *altmode);
struct typec_port *typec_altmode2port(struct typec_altmode *alt);


@@ -126,6 +126,14 @@ DECLARE_HOOK(android_vh_save_track_hash,
DECLARE_HOOK(android_vh_vmpressure,
TP_PROTO(struct mem_cgroup *memcg, bool *bypass),
TP_ARGS(memcg, bypass));
DECLARE_HOOK(android_vh_dm_bufio_shrink_scan_bypass,
TP_PROTO(unsigned long dm_bufio_current_allocated, bool *bypass),
TP_ARGS(dm_bufio_current_allocated, bypass));
DECLARE_HOOK(android_vh_cleanup_old_buffers_bypass,
TP_PROTO(unsigned long dm_bufio_current_allocated,
unsigned long *max_age_hz,
bool *bypass),
TP_ARGS(dm_bufio_current_allocated, max_age_hz, bypass));
DECLARE_HOOK(android_vh_mem_cgroup_alloc,
TP_PROTO(struct mem_cgroup *memcg),
TP_ARGS(memcg));


@@ -785,6 +785,12 @@ static ssize_t dbgfs_mk_context_write(struct file *file,
static int dbgfs_rm_context(char *name)
{
struct dentry *root, *dir, **new_dirs;
struct inode *inode;
struct damon_ctx **new_ctxs;
int i, j;
int ret = 0;
@@ -800,6 +801,12 @@ static int dbgfs_rm_context(char *name)
if (!dir)
return -ENOENT;
inode = d_inode(dir);
if (!S_ISDIR(inode->i_mode)) {
ret = -EINVAL;
goto out_dput;
}
new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
GFP_KERNEL);
if (!new_dirs) {


@@ -78,6 +78,9 @@ DEFINE_SPINLOCK(hugetlb_lock);
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
static inline bool PageHugeFreed(struct page *head)
{
return page_private(head + 4) == -1UL;
@@ -3698,6 +3701,25 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{
if (addr & ~(huge_page_mask(hstate_vma(vma))))
return -EINVAL;
/*
* PMD sharing is only possible for PUD_SIZE-aligned address ranges
* in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
* split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
*/
if (addr & ~PUD_MASK) {
/*
* hugetlb_vm_op_split is called right before we attempt to
* split the VMA. We will need to unshare PMDs in the old and
* new VMAs, so let's unshare before we split.
*/
unsigned long floor = addr & PUD_MASK;
unsigned long ceil = floor + PUD_SIZE;
if (floor >= vma->vm_start && ceil <= vma->vm_end)
hugetlb_unshare_pmds(vma, floor, ceil);
}
return 0;
}
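
To make the alignment math concrete, here is a tiny standalone illustration, assuming the common configuration where PUD_SIZE is 1 GiB (the constants and the address are made up):

	#include <stdio.h>

	#define PUD_SIZE 0x40000000UL		/* 1 GiB, illustrative */
	#define PUD_MASK (~(PUD_SIZE - 1))

	int main(void)
	{
		unsigned long addr = 0x7f2345600000UL;	/* hypothetical split point */

		if (addr & ~PUD_MASK) {		/* not PUD-aligned: sharing at risk */
			unsigned long floor = addr & PUD_MASK;
			unsigned long ceil  = floor + PUD_SIZE;

			printf("unshare PMDs in [%#lx, %#lx)\n", floor, ceil);
		}
		return 0;
	}

Here floor and ceil come out to 0x7f2340000000 and 0x7f2380000000: the whole 1 GiB interval around the split point loses its shared PMDs, exactly as the hunk does when that interval lies inside the VMA.
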
@@ -5756,26 +5778,21 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
}
}
/*
* This function will unconditionally remove all the shared pmd pgtable entries
* within the specific vma for a hugetlbfs memory range.
*/
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
struct mm_struct *mm = vma->vm_mm;
struct mmu_notifier_range range;
unsigned long address, start, end;
unsigned long address;
spinlock_t *ptl;
pte_t *ptep;
if (!(vma->vm_flags & VM_MAYSHARE))
return;
start = ALIGN(vma->vm_start, PUD_SIZE);
end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
if (start >= end)
return;
@@ -5808,6 +5825,16 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
mmu_notifier_invalidate_range_end(&range);
}
/*
* This function will unconditionally remove all the shared pmd pgtable entries
* within the specific vma for a hugetlbfs memory range.
*/
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}
#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;


@@ -4210,7 +4210,7 @@ static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
if (val > 100)
if (val > 200)
return -EINVAL;
if (css->parent)
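
The raised bound matches upstream, where per-cgroup swappiness may be set up to 200 since v5.8 ("mm: allow swappiness that prefers reclaiming anon over the file workingset"): values above 100 increasingly bias reclaim toward anonymous pages in the reclaim cost model.
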


@@ -148,7 +148,7 @@ struct page_ext *page_ext_get(struct page *page)
/**
* page_ext_put() - Working with page extended information is done.
* @page_ext - Page extended information received from page_ext_get().
* @page_ext: Page extended information received from page_ext_get().
*
* The page extended information of the page may not be valid after this
* function is called.


@@ -91,7 +91,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
if (anon_vma) {
atomic_set(&anon_vma->refcount, 1);
anon_vma->degree = 1; /* Reference for first vma */
anon_vma->num_children = 0;
anon_vma->num_active_vmas = 0;
anon_vma->parent = anon_vma;
/*
* Initialise the anon_vma root to point to itself. If called
@@ -199,6 +200,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
anon_vma = anon_vma_alloc();
if (unlikely(!anon_vma))
goto out_enomem_free_avc;
anon_vma->num_children++; /* self-parent link for new root */
allocated = anon_vma;
}
@@ -208,8 +210,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
anon_vma_chain_link(vma, avc, anon_vma);
/* vma reference or self-parent link for new root */
anon_vma->degree++;
anon_vma->num_active_vmas++;
allocated = NULL;
avc = NULL;
}
@@ -294,19 +295,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
anon_vma_chain_link(dst, avc, anon_vma);
/*
* Reuse existing anon_vma if its degree lower than two,
* that means it has no vma and only one anon_vma child.
* Reuse existing anon_vma if it has no vma and only one
* anon_vma child.
*
* Do not chose parent anon_vma, otherwise first child
* will always reuse it. Root anon_vma is never reused:
* Root anon_vma is never reused:
* it has self-parent reference and at least one child.
*/
if (!dst->anon_vma && src->anon_vma &&
anon_vma != src->anon_vma && anon_vma->degree < 2)
anon_vma->num_children < 2 &&
anon_vma->num_active_vmas == 0)
dst->anon_vma = anon_vma;
}
if (dst->anon_vma)
dst->anon_vma->degree++;
dst->anon_vma->num_active_vmas++;
unlock_anon_vma_root(root);
return 0;
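
The rewritten reuse check is the heart of the fix: the old degree < 2 test could not distinguish "self-parent child only, no VMAs" from "one VMA, no children", so anon_vma_clone() could hand out an anon_vma that a live VMA still pointed at, producing the double-reuse described by the reverted revert above. Requiring num_children < 2 && num_active_vmas == 0 states the intended condition explicitly.
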
@@ -356,6 +357,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
anon_vma = anon_vma_alloc();
if (!anon_vma)
goto out_error;
anon_vma->num_active_vmas++;
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto out_error_free_anon_vma;
@@ -376,7 +378,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
vma->anon_vma = anon_vma;
anon_vma_lock_write(anon_vma);
anon_vma_chain_link(vma, avc, anon_vma);
anon_vma->parent->degree++;
anon_vma->parent->num_children++;
anon_vma_unlock_write(anon_vma);
return 0;
@@ -408,7 +410,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
* to free them outside the lock.
*/
if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
anon_vma->parent->degree--;
anon_vma->parent->num_children--;
continue;
}
@@ -416,7 +418,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
anon_vma_chain_free(avc);
}
if (vma->anon_vma)
vma->anon_vma->degree--;
vma->anon_vma->num_active_vmas--;
unlock_anon_vma_root(root);
/*
@@ -427,7 +430,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
VM_WARN_ON(anon_vma->degree);
VM_WARN_ON(anon_vma->num_children);
VM_WARN_ON(anon_vma->num_active_vmas);
put_anon_vma(anon_vma);
list_del(&avc->same_vma);


@@ -63,7 +63,7 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
pte_t _dst_pte, *dst_pte;
bool writable = dst_vma->vm_flags & VM_WRITE;
bool vm_shared = dst_vma->vm_flags & VM_SHARED;
bool page_in_cache = page->mapping;
bool page_in_cache = page_mapping(page);
spinlock_t *ptl;
struct inode *inode;
pgoff_t offset, max_off;
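
Using page_mapping() here matters because an anonymous page's raw ->mapping field is overloaded: it holds an anon_vma pointer with PAGE_MAPPING_ANON set in the low bits, so it is non-NULL even though the page is not in the page cache. page_mapping() decodes those bits and returns the real address_space (or NULL), making page_in_cache accurate.
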


@@ -1128,6 +1128,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
LIST_HEAD(free_pages);
unsigned int nr_reclaimed = 0;
unsigned int pgactivate = 0;
bool page_trylock_result;
memset(stat, 0, sizeof(*stat));
cond_resched();
@@ -1527,6 +1528,18 @@ static unsigned int shrink_page_list(struct list_head *page_list,
count_memcg_page_event(page, PGACTIVATE);
}
keep_locked:
/*
* A page carrying the trylock bit will be added to ret_pages and
* handled in trace_android_vh_handle_failed_page_trylock.
* Between unlock_page() and that handling, a page still carrying
* the trylock bit can cause problems elsewhere, so clear the bit
* here. trace_android_vh_page_trylock_get_result clears the
* trylock bit and reports whether the page trylock failed during
* reclaim; here we only want the bit cleared, so
* page_trylock_result itself is ignored.
*/
trace_android_vh_page_trylock_get_result(page, &page_trylock_result);
unlock_page(page);
keep:
list_add(&page->lru, &ret_pages);