Merge keystone/android12-5.10-keystone-qcom-release.117+ (26604a5) into msm-5.10

* refs/heads/tmp-26604a5:
  UPSTREAM: usb: dwc3: gadget: Avoid duplicate requests to enable Run/Stop
  UPSTREAM: usb: typec: ucsi: Acknowledge the GET_ERROR_STATUS command completion
  BACKPORT: scsi: ufs: core: Increase fDeviceInit poll frequency
  FROMGIT: f2fs: increase the limit for reserve_root
  FROMGIT: f2fs: complete checkpoints during remount
  FROMGIT: f2fs: flush pending checkpoints when freezing super
  BACKPORT: f2fs: don't get FREEZE lock in f2fs_evict_inode in frozen fs
  BACKPORT: f2fs: introduce F2FS_IPU_HONOR_OPU_WRITE ipu policy
  Revert "ANDROID: GKI: signal: Export for __lock_task_sighand"
  BACKPORT: f2fs: invalidate meta pages only for post_read required inode
  BACKPORT: f2fs: fix to invalidate META_MAPPING before DIO write
  BACKPORT: f2fs: invalidate META_MAPPING before IPU/DIO write
  ANDROID: mm: page_pinner: use page_ext_get/put() to work with page_ext
  FROMLIST: mm: fix use-after free of page_ext after race with memory-offline
  ANDROID: vendor_hooks:vendor hook for __alloc_pages_slowpath.
  ANDROID: GKI: rockchip: add symbol netif_set_xps_queue
  ANDROID: GKI: Update symbol list
  Revert "ANDROID: vendor_hooks: tune reclaim scan type for specified mem_cgroup"
  ANDROID: Fix a build warning inside early_memblock_nomap
  ANDROID: mm/memory_hotplug: Fix error path handling
  Revert "ANDROID: add for tuning readahead size"
  Revert "ANDROID: vendor_hooks: Add hooks for mutex"
  ANDROID: fix execute bit on android/abi_gki_aarch64_asus
  ANDROID: avoid huge-page not to clear trylock-bit after shrink_page_list.
  ANDROID: vendor_hooks: Add hooks for oem futex optimization
  ANDROID: mm: memblock: avoid to create memmap for memblock nomap regions
  ANDROID: abi_gki_aarch64_qcom: Add android_vh_disable_thermal_cooling_stats
  ANDROID: thermal: vendor hook to disable thermal cooling stats
  ANDROID: GKI: Update symbols to symbol list
  ANDROID: GKI: rockchip: update fragment file
  ANDROID: GKI: rockchip: Enable symbols bcmdhd-sdio
  ANDROID: GKI: rockchip: Update symbols for rga driver
  BACKPORT: cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock
  UPSTREAM: cgroup: Elide write-locking threadgroup_rwsem when updating csses on an empty subtree
  ANDROID: GKI: Update symbol list for transsion
  ANDROID: vendor_hook: Add hook in __free_pages()
  ANDROID: vendor_hooks: Add hooks to extend the struct swap_info_struct
  ANDROID: vendor_hook: Add hook in si_swapinfo()
  ANDROID: GKI: Update symbols to symbol list
  ANDROID: Use rq_clock_task without CONFIG_SMP
  ANDROID: abi_gki_aarch64_qcom: Add skb and scatterlist helpers
  Revert "ANDROID: vendor_hook: Add hook in si_swapinfo()"
  Revert "ANDROID: vendor_hooks:vendor hook for pidfd_open"
  Revert "ANDROID: vendor_hooks: Add hooks to extend the struct swap_info_struct"
  Revert "ANDROID: vendor_hooks:vendor hook for mmput"
  ANDROID: GKI: Update symbols to symbol list
  ANDROID: Guard rq_clock_task_mult with CONFIG_SMP
  Revert "ANDROID: vendor_hook: Add hook in __free_pages()"
  Revert "ANDROID: vendor_hooks: Add hooks for binder"
  ANDROID: vendor_hook: add hooks to protect locking-tsk in cpu scheduler
  ANDROID: export reclaim_pages
  ANDROID: vendor_hook: Add hook to not be stuck ro rmap lock in kswapd or direct_reclaim

Change-Id: Id29a9448f424508e3b3e82c4f69959fa9da81699
Signed-off-by: Sivasri Kumar, Vanka <quic_svanka@quicinc.com>
Commit 86f9bee9c0 by Sivasri Kumar, Vanka, 2022-09-28 17:44:38 +05:30
55 changed files with 2153 additions and 929 deletions

View File

@ -56,8 +56,9 @@ Description: Controls the in-place-update policy.
0x04 F2FS_IPU_UTIL
0x08 F2FS_IPU_SSR_UTIL
0x10 F2FS_IPU_FSYNC
0x20 F2FS_IPU_ASYNC,
0x20 F2FS_IPU_ASYNC
0x40 F2FS_IPU_NOCACHE
0x80 F2FS_IPU_HONOR_OPU_WRITE
==== =================
Refer segment.h for details.
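As a quick illustration of the bitmask documented above (a sketch only, assuming the usual /sys/fs/f2fs/<disk>/ipu_policy node that this table describes, and that F2FS_IPU_FORCE occupies bit 0), the new F2FS_IPU_HONOR_OPU_WRITE bit would be combined with an existing policy like this:

# <disk> is a placeholder; 129 == 0x81 == F2FS_IPU_FORCE | F2FS_IPU_HONOR_OPU_WRITE
echo 129 > /sys/fs/f2fs/<disk>/ipu_policy
cat /sys/fs/f2fs/<disk>/ipu_policy

This mirrors the in-kernel default set later in this merge for small volumes (1 << F2FS_IPU_FORCE | 1 << F2FS_IPU_HONOR_OPU_WRITE).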
@ -431,6 +432,7 @@ Description: Show status of f2fs superblock in real time.
0x800 SBI_QUOTA_SKIP_FLUSH skip flushing quota in current CP
0x1000 SBI_QUOTA_NEED_REPAIR quota file may be corrupted
0x2000 SBI_IS_RESIZEFS resizefs is in process
0x4000 SBI_IS_FREEZING freefs is in process
====== ===================== =================================
What: /sys/fs/f2fs/<disk>/ckpt_thread_ioprio

View File

@ -2537,6 +2537,12 @@
memblock=debug [KNL] Enable memblock debug messages.
android12_only.will_be_removed_soon.memblock_nomap_remove= [KNL]
Setting this to true through kernel command line will
call memblock_remove on the regions marked with no-map
property thereby saving memory by removing page structs
for those regions. By default this is set to false.
load_ramdisk= [RAM] [Deprecated]
lockd.nlm_grace_period=P [NFS] Assign grace period.
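For illustration only (not part of this patch), the android12_only parameter documented above is simply appended to the kernel command line, for example alongside memblock=debug:

memblock=debug android12_only.will_be_removed_soon.memblock_nomap_remove=true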

View File

@ -1 +1,3 @@
91bfc78bc009e8afc8e6bbd153d3bd47118892cf
android12-5.10-2022-09
95279078149abf6684fadbc7bb6a2ab49efeea18

File diff suppressed because it is too large.

android/abi_gki_aarch64_asus Executable file → Normal file
View File

View File

@ -2100,6 +2100,7 @@
rdev_get_regmap
read_cache_page_gfp
reboot_mode
reclaim_pages
refcount_dec_and_lock
refcount_dec_not_one
refcount_warn_saturate
@ -2782,6 +2783,7 @@
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_cpu_up
__traceiter_android_vh_do_futex
__traceiter_android_vh_do_send_sig_info
__traceiter_android_vh_drain_all_pages_bypass
__traceiter_android_vh_em_cpu_energy
@ -2796,8 +2798,14 @@
__traceiter_android_vh_ftrace_oops_exit
__traceiter_android_vh_ftrace_size_check
__traceiter_android_vh_futex_sleep_start
__traceiter_android_vh_futex_wait_end
__traceiter_android_vh_futex_wait_start
__traceiter_android_vh_futex_wake_this
__traceiter_android_vh_futex_wake_traverse_plist
__traceiter_android_vh_futex_wake_up_q_finish
__traceiter_android_vh_get_from_fragment_pool
__traceiter_android_vh_gpio_block_read
__traceiter_android_vh_handle_failed_page_trylock
__traceiter_android_vh_include_reserved_zone
__traceiter_android_vh_iommu_alloc_iova
__traceiter_android_vh_iommu_free_iova
@ -2823,9 +2831,13 @@
__traceiter_android_vh_override_creds
__traceiter_android_vh_page_referenced_check_bypass
__traceiter_android_vh_page_should_be_protected
__traceiter_android_vh_page_trylock_set
__traceiter_android_vh_page_trylock_clear
__traceiter_android_vh_page_trylock_get_result
__traceiter_android_vh_mark_page_accessed
__traceiter_android_vh_show_mapcount_pages
__traceiter_android_vh_do_traversal_lruvec
__traceiter_android_vh_do_page_trylock
__traceiter_android_vh_update_page_mapcount
__traceiter_android_vh_add_page_to_lrulist
__traceiter_android_vh_del_page_from_lrulist
@ -2834,6 +2846,10 @@
__traceiter_android_vh_printk_hotplug
__traceiter_android_vh_process_killed
__traceiter_android_vh_revert_creds
__traceiter_android_vh_record_mutex_lock_starttime
__traceiter_android_vh_record_rtmutex_lock_starttime
__traceiter_android_vh_record_rwsem_lock_starttime
__traceiter_android_vh_record_percpu_rwsem_lock_starttime
__traceiter_android_vh_rmqueue
__traceiter_android_vh_rwsem_init
__traceiter_android_vh_rwsem_mark_wake_readers
@ -2846,6 +2862,7 @@
__traceiter_android_vh_rwsem_write_finished
__traceiter_android_vh_save_track_hash
__traceiter_android_vh_save_vmalloc_stack
__traceiter_android_vh_remove_vmalloc_stack
__traceiter_android_vh_sched_stat_runtime_rt
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_selinux_avc_insert
@ -3005,6 +3022,7 @@
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_cpu_up
__tracepoint_android_vh_do_futex
__tracepoint_android_vh_do_send_sig_info
__tracepoint_android_vh_drain_all_pages_bypass
__tracepoint_android_vh_em_cpu_energy
@ -3019,8 +3037,14 @@
__tracepoint_android_vh_ftrace_oops_exit
__tracepoint_android_vh_ftrace_size_check
__tracepoint_android_vh_futex_sleep_start
__tracepoint_android_vh_futex_wait_end
__tracepoint_android_vh_futex_wait_start
__tracepoint_android_vh_futex_wake_this
__tracepoint_android_vh_futex_wake_traverse_plist
__tracepoint_android_vh_futex_wake_up_q_finish
__tracepoint_android_vh_get_from_fragment_pool
__tracepoint_android_vh_gpio_block_read
__tracepoint_android_vh_handle_failed_page_trylock
__tracepoint_android_vh_include_reserved_zone
__tracepoint_android_vh_iommu_alloc_iova
__tracepoint_android_vh_iommu_free_iova
@ -3046,9 +3070,13 @@
__tracepoint_android_vh_override_creds
__tracepoint_android_vh_page_referenced_check_bypass
__tracepoint_android_vh_page_should_be_protected
__tracepoint_android_vh_page_trylock_set
__tracepoint_android_vh_page_trylock_clear
__tracepoint_android_vh_page_trylock_get_result
__tracepoint_android_vh_mark_page_accessed
__tracepoint_android_vh_show_mapcount_pages
__tracepoint_android_vh_do_traversal_lruvec
__tracepoint_android_vh_do_page_trylock
__tracepoint_android_vh_update_page_mapcount
__tracepoint_android_vh_add_page_to_lrulist
__tracepoint_android_vh_del_page_from_lrulist
@ -3057,6 +3085,10 @@
__tracepoint_android_vh_printk_hotplug
__tracepoint_android_vh_process_killed
__tracepoint_android_vh_revert_creds
__tracepoint_android_vh_record_mutex_lock_starttime
__tracepoint_android_vh_record_rtmutex_lock_starttime
__tracepoint_android_vh_record_rwsem_lock_starttime
__tracepoint_android_vh_record_percpu_rwsem_lock_starttime
__tracepoint_android_vh_rmqueue
__tracepoint_android_vh_rwsem_init
__tracepoint_android_vh_rwsem_mark_wake_readers
@ -3069,6 +3101,7 @@
__tracepoint_android_vh_rwsem_write_finished
__tracepoint_android_vh_save_track_hash
__tracepoint_android_vh_save_vmalloc_stack
__tracepoint_android_vh_remove_vmalloc_stack
__tracepoint_android_vh_sched_stat_runtime_rt
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_selinux_avc_insert

View File

@ -2685,6 +2685,7 @@
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_cpuidle_psci_enter
__tracepoint_android_vh_cpuidle_psci_exit
__tracepoint_android_vh_disable_thermal_cooling_stats
__tracepoint_android_vh_dump_throttled_rt_tasks
__tracepoint_android_vh_freq_table_limits
__tracepoint_android_vh_ftrace_dump_buffer

View File

@ -57,6 +57,7 @@
cancel_delayed_work
cancel_delayed_work_sync
cancel_work_sync
capable
cdev_add
cdev_del
cdev_device_add
@ -92,6 +93,7 @@
clk_gate_ops
clk_get
__clk_get_name
clk_get_parent
clk_get_rate
clk_hw_get_flags
clk_hw_get_name
@ -116,6 +118,7 @@
completion_done
__const_udelay
consume_skb
cpu_bit_bitmap
__cpufreq_driver_target
cpufreq_generic_suspend
cpufreq_register_governor
@ -188,6 +191,7 @@
devfreq_suspend_device
devfreq_unregister_opp_notifier
dev_fwnode
dev_get_regmap
device_add
device_add_disk
device_create
@ -241,6 +245,7 @@
devm_input_allocate_device
devm_ioremap
devm_ioremap_resource
devm_kasprintf
devm_kfree
devm_kmalloc
devm_kmemdup
@ -256,6 +261,7 @@
devm_phy_optional_get
devm_pinctrl_get
devm_pinctrl_register
devm_pinctrl_register_and_init
devm_platform_get_and_ioremap_resource
devm_platform_ioremap_resource
devm_platform_ioremap_resource_byname
@ -315,7 +321,10 @@
dma_buf_unmap_attachment
dma_buf_vmap
dma_buf_vunmap
dma_contiguous_default_area
dma_fence_add_callback
dma_fence_context_alloc
dma_fence_get_status
dma_fence_init
dma_fence_release
dma_fence_signal
@ -345,15 +354,18 @@
dma_unmap_page_attrs
dma_unmap_resource
dma_unmap_sg_attrs
down
down_read
down_write
driver_register
driver_unregister
drm_add_edid_modes
drm_atomic_get_crtc_state
drm_atomic_get_new_bridge_state
drm_atomic_get_new_connector_for_encoder
drm_atomic_helper_bridge_destroy_state
drm_atomic_helper_bridge_duplicate_state
drm_atomic_helper_bridge_propagate_bus_fmt
drm_atomic_helper_bridge_reset
drm_atomic_helper_connector_destroy_state
drm_atomic_helper_connector_duplicate_state
@ -417,6 +429,7 @@
drm_helper_probe_single_connector_modes
drm_ioctl
drm_kms_helper_hotplug_event
drm_kms_helper_is_poll_worker
drm_match_cea_mode
drm_mode_copy
drm_mode_create
@ -462,6 +475,7 @@
extcon_get_state
extcon_register_notifier
extcon_set_property_capability
extcon_set_state
extcon_set_state_sync
extcon_unregister_notifier
failure_tracking
@ -476,8 +490,8 @@
flush_workqueue
fpsimd_context_busy
fput
frame_vector_to_pages
free_irq
free_netdev
__free_pages
free_pages
free_percpu
@ -490,6 +504,7 @@
fwnode_property_read_string
fwnode_property_read_u32_array
gcd
generic_file_llseek
generic_handle_irq
generic_mii_ioctl
__genphy_config_aneg
@ -507,6 +522,7 @@
__get_free_pages
get_random_bytes
get_unused_fd_flags
get_user_pages_remote
get_zeroed_page
gic_nonsecure_priorities
gpiochip_add_pin_range
@ -526,6 +542,8 @@
gpiod_set_value
gpiod_set_value_cansleep
gpiod_to_irq
gpio_free
gpio_request
gpio_to_desc
handle_nested_irq
handle_simple_irq
@ -555,8 +573,10 @@
i2c_get_adapter
i2c_put_adapter
i2c_register_driver
i2c_smbus_read_byte
i2c_smbus_read_byte_data
i2c_smbus_read_i2c_block_data
i2c_smbus_read_word_data
i2c_smbus_write_byte_data
i2c_smbus_write_i2c_block_data
__i2c_smbus_xfer
@ -575,6 +595,7 @@
iio_device_attach_buffer
iio_push_to_buffers
iio_read_channel_processed
init_net
__init_rwsem
__init_swait_queue_head
init_timer_key
@ -589,17 +610,22 @@
input_register_device
input_set_abs_params
input_set_capability
input_unregister_device
iommu_attach_device
iommu_attach_group
iommu_detach_device
iommu_detach_group
iommu_device_register
iommu_device_sysfs_add
iommu_device_sysfs_remove
iommu_get_dma_cookie
iommu_get_domain_for_dev
iommu_group_alloc
iommu_group_get
iommu_group_put
iommu_group_ref_get
iommu_map
iommu_map_sg
iommu_put_dma_cookie
iommu_set_fault_handler
iommu_unmap
@ -645,6 +671,7 @@
kmem_cache_free
kmemdup
kobject_create_and_add
kobject_init_and_add
kobject_put
kobject_uevent_env
kstrdup
@ -670,6 +697,7 @@
led_trigger_unregister
__list_add_valid
__list_del_entry_valid
__local_bh_enable_ip
__log_post_read_mmio
__log_read_mmio
__log_write_mmio
@ -689,6 +717,7 @@
media_pipeline_start
media_pipeline_stop
memcpy
__memcpy_fromio
memdup_user
memmove
memset
@ -701,11 +730,14 @@
mipi_dsi_attach
mipi_dsi_create_packet
mipi_dsi_detach
mipi_dsi_device_register_full
mipi_dsi_device_unregister
mipi_dsi_host_register
mipi_dsi_host_unregister
misc_deregister
misc_register
mmc_of_parse
__mmdrop
mod_delayed_work_on
mod_timer
__module_get
@ -724,6 +756,13 @@
netdev_err
netdev_info
netdev_warn
netif_rx
netif_rx_ni
netif_tx_wake_queue
netlink_unicast
nla_memcpy
nla_put
nla_reserve
no_llseek
nonseekable_open
noop_llseek
@ -753,6 +792,7 @@
of_find_device_by_node
of_find_i2c_device_by_node
of_find_matching_node_and_match
of_find_mipi_dsi_host_by_node
of_find_node_by_name
of_find_node_opts_by_path
of_find_property
@ -801,6 +841,9 @@
param_ops_int
param_ops_string
param_ops_uint
pcie_capability_clear_and_set_word
pci_read_config_dword
pci_write_config_dword
PDE_DATA
__per_cpu_offset
perf_trace_buf_alloc
@ -814,8 +857,15 @@
phy_power_off
phy_power_on
phy_set_mode_ext
pinconf_generic_dt_free_map
pinconf_generic_dt_node_to_map
pinctrl_dev_get_drvdata
pinctrl_enable
pinctrl_generic_add_group
pinctrl_generic_get_group
pinctrl_generic_get_group_count
pinctrl_generic_get_group_name
pinctrl_generic_get_group_pins
pinctrl_gpio_direction_input
pinctrl_gpio_direction_output
pinctrl_lookup_state
@ -823,6 +873,11 @@
pinctrl_pm_select_sleep_state
pinctrl_select_state
pinctrl_utils_free_map
pinmux_generic_add_function
pinmux_generic_get_function
pinmux_generic_get_function_count
pinmux_generic_get_function_groups
pinmux_generic_get_function_name
platform_bus_type
platform_device_put
platform_device_register_full
@ -839,6 +894,7 @@
pm_clk_destroy
pm_power_off
__pm_relax
pm_relax
__pm_runtime_disable
pm_runtime_enable
pm_runtime_force_resume
@ -851,6 +907,7 @@
__pm_runtime_suspend
__pm_runtime_use_autosuspend
__pm_stay_awake
pm_stay_awake
pm_wakeup_ws_event
power_supply_am_i_supplied
power_supply_changed
@ -863,6 +920,8 @@
power_supply_register
power_supply_reg_notifier
power_supply_unregister
prandom_bytes
prandom_u32
preempt_schedule
preempt_schedule_notrace
prepare_to_wait_event
@ -904,6 +963,8 @@
regcache_sync
__register_chrdev
register_chrdev_region
register_inetaddr_notifier
register_netdevice
register_netdevice_notifier
register_pm_notifier
register_reboot_notifier
@ -958,6 +1019,7 @@
round_jiffies_relative
rtc_class_open
rtc_read_time
rtc_time64_to_tm
rtc_tm_to_time64
rtc_valid_tm
rtnl_is_locked
@ -1003,9 +1065,11 @@
simple_attr_release
simple_read_from_buffer
simple_strtol
simple_strtoul
single_open
single_release
skb_clone
skb_copy
skb_copy_bits
skb_copy_expand
skb_pull
@ -1054,27 +1118,33 @@
snd_soc_put_volsw
snd_soc_unregister_component
snprintf
sort
__spi_register_driver
spi_sync
sprintf
sscanf
__stack_chk_fail
__stack_chk_guard
strchr
strcmp
strcpy
strlcpy
strlen
strncasecmp
strncat
strncmp
strncpy
strnlen
strrchr
strscpy
strsep
strstr
__sw_hweight16
__sw_hweight32
__sw_hweight64
__sw_hweight8
sync_file_create
sync_file_get_fence
synchronize_irq
synchronize_rcu
syscon_node_to_regmap
@ -1122,7 +1192,9 @@
__udelay
__unregister_chrdev
unregister_chrdev_region
unregister_inetaddr_notifier
unregister_netdevice_notifier
unregister_netdevice_queue
unregister_reboot_notifier
unregister_shrinker
up
@ -1221,6 +1293,7 @@
v4l2_event_queue
v4l2_event_subdev_unsubscribe
v4l2_event_subscribe
v4l2_event_unsubscribe
v4l2_fh_open
v4l2_i2c_subdev_init
v4l2_match_dv_timings
@ -1237,10 +1310,6 @@
v4l2_valid_dv_timings
vabits_actual
vb2_buffer_done
vb2_common_vm_ops
vb2_create_framevec
vb2_destroy_framevec
vb2_dma_contig_memops
vb2_fop_mmap
vb2_fop_poll
vb2_fop_release
@ -1270,8 +1339,7 @@
vmap
vm_get_page_prot
vm_map_pages
vm_map_ram
vm_unmap_ram
vsnprintf
vunmap
vzalloc
wait_for_completion
@ -1281,6 +1349,7 @@
wakeup_source_add
wakeup_source_remove
__warn_printk
work_busy
# required by 8250_dw.ko
of_device_is_big_endian
@ -1312,6 +1381,73 @@
drm_dp_start_crc
drm_dp_stop_crc
# required by aspm_ext.ko
pci_find_capability
pci_find_ext_capability
# required by bcmdhd.ko
alloc_etherdev_mqs
complete_and_exit
dev_open
down_interruptible
down_timeout
iwe_stream_add_event
iwe_stream_add_point
iwe_stream_add_value
__kfifo_init
kobject_uevent
mmc_set_data_timeout
mmc_sw_reset
mmc_wait_for_req
netdev_update_features
netif_napi_add
__netif_napi_del
netif_set_xps_queue
__netlink_kernel_create
netlink_kernel_release
nla_append
nla_put_nohdr
__nlmsg_put
_raw_read_lock_bh
_raw_read_unlock_bh
register_netdev
sched_set_fifo_low
sdio_claim_host
sdio_disable_func
sdio_enable_func
sdio_f0_readb
sdio_f0_writeb
sdio_get_host_pm_caps
sdio_memcpy_fromio
sdio_memcpy_toio
sdio_readb
sdio_readl
sdio_readsb
sdio_readw
sdio_register_driver
sdio_release_host
sdio_retune_crc_disable
sdio_retune_crc_enable
sdio_retune_hold_now
sdio_retune_release
sdio_set_block_size
sdio_set_host_pm_flags
sdio_unregister_driver
sdio_writeb
sdio_writel
sdio_writew
set_cpus_allowed_ptr
__skb_pad
skb_realloc_headroom
sock_wfree
sprint_symbol
strcat
strspn
sys_tz
unregister_netdev
unregister_pm_notifier
wireless_send_event
# required by bifrost_kbase.ko
__arch_clear_user
__bitmap_andnot
@ -1326,28 +1462,23 @@
devfreq_cooling_unregister
devfreq_remove_device
dev_pm_opp_find_freq_exact
dma_fence_add_callback
dma_fence_default_wait
dma_fence_get_status
dma_fence_remove_callback
downgrade_write
down_read_trylock
dump_stack
find_get_pid
freezing_slow_path
generic_file_llseek
get_user_pages
get_user_pages_fast
hrtimer_active
iomem_resource
kobject_del
kobject_init_and_add
kstrndup
kstrtobool_from_user
ktime_get_raw
ktime_get_raw_ts64
memchr
__mmdrop
of_dma_is_coherent
of_property_read_variable_u64_array
pid_task
@ -1371,7 +1502,6 @@
shmem_file_setup
simple_open
strcspn
sync_file_get_fence
system_freezing_cnt
system_highpri_wq
_totalram_pages
@ -1418,25 +1548,19 @@
get_net_ns_by_fd
get_net_ns_by_pid
inet_csk_get_port
init_net
init_uts_ns
key_create_or_update
key_put
keyring_alloc
ktime_get_coarse_with_offset
memcmp
netif_rx_ni
netlink_broadcast
netlink_register_notifier
netlink_unicast
netlink_unregister_notifier
net_ns_type_operations
nla_find
nla_memcpy
__nla_parse
nla_put_64bit
nla_put
nla_reserve
__nla_validate
of_prop_next_u32
__put_net
@ -1482,7 +1606,6 @@
clk_fixed_factor_ops
clk_fractional_divider_ops
__clk_get_hw
clk_get_parent
clk_hw_register_composite
clk_hw_round_rate
clk_mux_ops
@ -1501,9 +1624,11 @@
scmi_driver_register
scmi_driver_unregister
# required by cm3218.ko
i2c_smbus_write_word_data
# required by cma_heap.ko
cma_get_name
dma_contiguous_default_area
dma_heap_get_drvdata
dma_heap_put
@ -1544,10 +1669,8 @@
mmc_cqe_request_done
# required by cryptodev.ko
__close_fd
crypto_ahash_final
crypto_alloc_akcipher
get_user_pages_remote
krealloc
proc_dointvec
register_sysctl_table
@ -1555,7 +1678,6 @@
unregister_sysctl_table
# required by display-connector.ko
drm_atomic_get_new_bridge_state
drm_probe_ddc
# required by dm9601.ko
@ -1646,7 +1768,6 @@
tcpm_register_port
tcpm_unregister_port
tcpm_vbus_change
vsnprintf
# required by gc2145.ko
v4l2_ctrl_subdev_log_status
@ -1672,14 +1793,7 @@
irq_get_domain_generic_chip
of_pinctrl_get
# required by grf.ko
of_find_matching_node_and_match
# required by gslx680-pad.ko
input_unregister_device
# required by hid-alps.ko
down
input_alloc_absinfo
input_mt_sync_frame
@ -1706,7 +1820,6 @@
hid_destroy_device
hid_input_report
hid_parse_report
i2c_smbus_read_byte
# required by i2c-mux.ko
i2c_add_numbered_adapter
@ -1768,7 +1881,6 @@
dev_queue_xmit
ether_setup
ethtool_op_get_link
free_netdev
get_random_u32
__hw_addr_init
__hw_addr_sync
@ -1777,24 +1889,17 @@
kernel_param_unlock
kfree_skb_list
ktime_get_seconds
__local_bh_enable_ip
napi_gro_receive
netdev_set_default_ethtool_ops
netif_carrier_off
netif_carrier_on
netif_receive_skb
netif_receive_skb_list
netif_rx
netif_tx_stop_all_queues
netif_tx_wake_queue
net_ratelimit
prandom_bytes
prandom_u32
___pskb_trim
rcu_barrier
register_inet6addr_notifier
register_inetaddr_notifier
register_netdevice
rhashtable_free_and_destroy
rhashtable_insert_slow
rhltable_init
@ -1806,7 +1911,6 @@
skb_checksum_help
skb_clone_sk
skb_complete_wifi_ack
skb_copy
skb_dequeue
skb_ensure_writable
__skb_get_hash
@ -1816,9 +1920,7 @@
skb_queue_tail
synchronize_net
unregister_inet6addr_notifier
unregister_inetaddr_notifier
unregister_netdevice_many
unregister_netdevice_queue
# required by nvme-core.ko
bd_set_nr_sectors
@ -1848,7 +1950,6 @@
blk_set_queue_dying
blk_status_to_errno
blk_sync_queue
capable
cleanup_srcu_struct
device_remove_file_self
dev_pm_qos_expose_latency_tolerance
@ -1974,7 +2075,6 @@
videomode_from_timing
# required by pcie-dw-rockchip.ko
cpu_bit_bitmap
cpumask_next_and
dw_pcie_find_ext_capability
dw_pcie_host_init
@ -1985,10 +2085,7 @@
dw_pcie_write
dw_pcie_write_dbi
pci_disable_link_state
pcie_capability_clear_and_set_word
pci_read_config_dword
pci_set_power_state
pci_write_config_dword
# required by pcierockchiphost.ko
devm_pci_alloc_host_bridge
@ -2006,7 +2103,6 @@
# required by phy-rockchip-inno-usb2.ko
devm_extcon_register_notifier
extcon_set_state
extcon_sync
# required by phy-rockchip-inno-usb3.ko
@ -2061,7 +2157,6 @@
pm_genpd_add_subdomain
pm_genpd_init
pm_genpd_remove
strrchr
# required by pwm-regulator.ko
regulator_map_voltage_iterate
@ -2083,12 +2178,17 @@
kernel_kobj
# required by rfkill-rk.ko
gpio_free
gpio_request
kstrtoll
rfkill_init_sw_state
rfkill_set_sw_state
# required by rga3.ko
alloc_iova_fast
dma_fence_wait_timeout
free_iova_fast
kstrdup_quotable_cmdline
mmput
# required by rk628.ko
irq_dispose_mapping
irq_domain_xlate_onetwocell
@ -2118,11 +2218,6 @@
regmap_del_irq_chip
unregister_syscore_ops
# required by rk818_battery.ko
blocking_notifier_call_chain
blocking_notifier_chain_register
blocking_notifier_chain_unregister
# required by rk860x-regulator.ko
regulator_suspend_enable
@ -2151,6 +2246,9 @@
# required by rk_headset_irq_hook_adc.ko
iio_read_channel_raw
# required by rk_ircut.ko
drain_workqueue
# required by rk_vcodec.ko
devfreq_remove_governor
devm_iounmap
@ -2160,11 +2258,8 @@
dma_buf_begin_cpu_access_partial
dma_buf_end_cpu_access_partial
__fdget
iommu_attach_group
iommu_detach_group
iommu_device_unregister
iommu_dma_reserve_iova
iommu_group_get
kthread_flush_worker
__kthread_init_worker
kthread_queue_work
@ -2174,15 +2269,11 @@
platform_device_del
pm_generic_runtime_resume
pm_generic_runtime_suspend
pm_relax
pm_stay_awake
proc_create_single_data
proc_remove
strncat
# required by rknpu.ko
dev_pm_domain_attach_by_name
drm_gem_create_mmap_offset
drm_gem_dumb_destroy
drm_gem_handle_delete
drm_gem_prime_export
@ -2253,10 +2344,7 @@
dev_pm_qos_add_request
dev_pm_qos_remove_request
dev_pm_qos_update_request
regulator_get
remove_cpu
strchr
strsep
thermal_zone_get_temp
# required by rockchip_thermal.ko
@ -2280,7 +2368,6 @@
drm_atomic_commit
drm_atomic_get_connector_state
drm_atomic_get_plane_state
drm_atomic_helper_bridge_propagate_bus_fmt
drm_atomic_helper_check
drm_atomic_helper_check_plane_state
drm_atomic_helper_cleanup_planes
@ -2421,7 +2508,6 @@
drm_writeback_signal_completion
iommu_domain_alloc
iommu_domain_free
iommu_map_sg
memblock_free
mipi_dsi_packet_format_is_short
of_find_backlight_by_node
@ -2432,21 +2518,14 @@
platform_find_device_by_driver
__platform_register_drivers
platform_unregister_drivers
sort
__vmalloc
# required by rohm-bu18tl82.ko
mipi_dsi_device_register_full
mipi_dsi_device_unregister
of_find_mipi_dsi_host_by_node
# required by rtc-hym8563.ko
devm_rtc_device_register
# required by rtc-rk808.ko
devm_rtc_allocate_device
__rtc_register_device
rtc_time64_to_tm
rtc_update_irq
# required by sdhci-of-arasan.ko
@ -2464,12 +2543,17 @@
sdhci_setup_host
# required by sdhci-of-dwcmshc.ko
device_get_match_data
devm_clk_bulk_get_optional
dma_get_required_mask
sdhci_adma_write_desc
sdhci_remove_host
sdhci_request
# required by sensor_dev.ko
class_create_file_ns
class_remove_file_ns
# required by sg.ko
blk_get_request
blk_put_request
@ -2498,9 +2582,6 @@
sg_scsi_ioctl
__task_pid_nr_ns
# required by sgm41542_charger.ko
regulator_unregister
# required by sha1-ce.ko
crypto_sha1_finup
crypto_sha1_update
@ -2534,6 +2615,9 @@
# required by snd-soc-es8316.ko
snd_pcm_hw_constraint_list
# required by snd-soc-es8326.ko
snd_soc_register_component
# required by snd-soc-hdmi-codec.ko
snd_ctl_add
snd_ctl_new1
@ -2568,7 +2652,6 @@
snd_soc_dapm_force_bias_level
# required by snd-soc-simple-card-utils.ko
devm_kasprintf
devm_kvasprintf
snd_soc_dai_set_tdm_slot
snd_soc_of_parse_audio_simple_widgets
@ -2607,7 +2690,6 @@
swiotlb_max_segment
# required by tcpci_husb311.ko
i2c_smbus_read_word_data
tcpci_get_tcpm_port
tcpci_irq
tcpci_register_port
@ -2706,18 +2788,21 @@
# required by video_rkcif.ko
media_entity_setup_link
work_busy
# required by video_rkisp.ko
media_device_cleanup
__memcpy_fromio
__memcpy_toio
param_ops_ullong
v4l2_event_unsubscribe
v4l2_pipeline_link_notify
# required by videobuf2-dma-sg.ko
# required by videobuf2-cma-sg.ko
frame_vector_to_pages
split_page
vb2_common_vm_ops
vb2_create_framevec
vb2_destroy_framevec
vm_map_ram
vm_unmap_ram
# required by vl6180.ko
iio_read_const_attr

View File

@ -1,20 +1,24 @@
CONFIG_AP6XXX=m
CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARM_ROCKCHIP_BUS_DEVFREQ=m
CONFIG_ARM_ROCKCHIP_CPUFREQ=m
# CONFIG_ATA_SFF is not set
CONFIG_ARM_ROCKCHIP_DMC_DEVFREQ=m
CONFIG_BACKLIGHT_PWM=m
CONFIG_BATTERY_CW2015=m
CONFIG_BATTERY_CW2017=m
CONFIG_BATTERY_CW221X=m
CONFIG_BATTERY_RK817=m
CONFIG_BATTERY_RK818=m
CONFIG_BLK_DEV_NVME=m
CONFIG_BMA2XX_ACC=m
CONFIG_CHARGER_BQ25700=m
CONFIG_CHARGER_BQ25890=m
CONFIG_CHARGER_RK817=m
CONFIG_CHARGER_RK818=m
CONFIG_CHARGER_SC89890=m
CONFIG_CHARGER_SGM41542=m
CONFIG_CHR_DEV_SCH=m
CONFIG_CHR_DEV_SG=m
# CONFIG_CLK_RK1808 is not set
# CONFIG_CLK_RK3308 is not set
CONFIG_COMMON_CLK_PWM=m
CONFIG_COMMON_CLK_RK808=m
CONFIG_COMMON_CLK_ROCKCHIP=m
@ -25,12 +29,10 @@ CONFIG_COMPASS_DEVICE=m
CONFIG_CPUFREQ_DT=m
CONFIG_CPU_FREQ_GOV_ONDEMAND=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m
CONFIG_CPU_PX30=y
CONFIG_CPU_RK3328=y
CONFIG_CPU_RK3368=y
CONFIG_CPU_RK3399=y
CONFIG_CPU_RK3568=y
CONFIG_CPU_RK3588=y
CONFIG_CRYPTO_AES_ARM64_CE_CCM=m
CONFIG_CRYPTO_DEV_ROCKCHIP=m
CONFIG_CRYPTO_DEV_ROCKCHIP_DEV=m
CONFIG_CRYPTO_GHASH_ARM64_CE=m
CONFIG_CRYPTO_SHA1_ARM64_CE=m
CONFIG_CRYPTO_TWOFISH=m
@ -38,13 +40,20 @@ CONFIG_DEVFREQ_EVENT_ROCKCHIP_NOCP=m
CONFIG_DMABUF_HEAPS_CMA=m
CONFIG_DMABUF_HEAPS_SYSTEM=m
CONFIG_DRAGONRISE_FF=y
CONFIG_DRM_DISPLAY_CONNECTOR=m
CONFIG_DRM_DW_HDMI_CEC=m
CONFIG_DRM_DW_HDMI_I2S_AUDIO=m
CONFIG_DRM_MAXIM_MAX96745=m
CONFIG_DRM_MAXIM_MAX96752F=m
CONFIG_DRM_MAXIM_MAX96755F=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_RK1000_TVE=m
CONFIG_DRM_RK630_TVE=m
CONFIG_DRM_ROCKCHIP=m
CONFIG_DRM_ROCKCHIP_RK628=m
CONFIG_DRM_ROHM_BU18XL82=m
CONFIG_DRM_SII902X=m
CONFIG_DTC_SYMBOLS=y
# CONFIG_DWMAC_GENERIC is not set
CONFIG_DW_WATCHDOG=m
CONFIG_GPIO_ROCKCHIP=m
CONFIG_GREENASIA_FF=y
@ -116,13 +125,12 @@ CONFIG_I2C_CHARDEV=m
CONFIG_I2C_GPIO=m
CONFIG_I2C_HID=m
CONFIG_I2C_RK3X=m
CONFIG_IEP=m
CONFIG_IIO_BUFFER_CB=m
CONFIG_INPUT_RK805_PWRKEY=m
CONFIG_ION=y
CONFIG_ION_SYSTEM_HEAP=y
CONFIG_JOLIET=y
CONFIG_KEYBOARD_ADC=m
CONFIG_LEDS_GPIO=m
CONFIG_LEDS_RGB13H=m
CONFIG_LEDS_TRIGGER_BACKLIGHT=m
CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
@ -131,7 +139,16 @@ CONFIG_LSM330_ACC=m
CONFIG_LS_CM3217=m
CONFIG_LS_CM3218=m
CONFIG_LS_STK3410=m
# CONFIG_MALI400_PROFILING is not set
CONFIG_LS_UCS14620=m
CONFIG_MALI_BIFROST=m
CONFIG_MALI_BIFROST_DEBUG=y
CONFIG_MALI_BIFROST_EXPERT=y
CONFIG_MALI_CSF_SUPPORT=y
CONFIG_MALI_PLATFORM_NAME="rk"
CONFIG_MALI_PWRSOFT_765=y
CONFIG_MFD_RK628=m
CONFIG_MFD_RK630_I2C=m
CONFIG_MFD_RK806_SPI=m
CONFIG_MFD_RK808=m
CONFIG_MMC_DW=m
CONFIG_MMC_DW_ROCKCHIP=m
@ -141,22 +158,33 @@ CONFIG_MPU6500_ACC=m
CONFIG_MPU6880_ACC=m
CONFIG_OPTEE=m
CONFIG_PANTHERLORD_FF=y
CONFIG_PCIEASPM_EXT=m
CONFIG_PCIE_DW_ROCKCHIP=m
CONFIG_PCIE_ROCKCHIP_HOST=m
CONFIG_PHY_ROCKCHIP_CSI2_DPHY=m
CONFIG_PHY_ROCKCHIP_DP=m
CONFIG_PHY_ROCKCHIP_EMMC=m
CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY=m
CONFIG_PHY_ROCKCHIP_INNO_HDMI=m
CONFIG_PHY_ROCKCHIP_INNO_USB2=m
CONFIG_PHY_ROCKCHIP_INNO_USB3=m
CONFIG_PHY_ROCKCHIP_NANENG_COMBO_PHY=m
CONFIG_PHY_ROCKCHIP_NANENG_EDP=m
CONFIG_PHY_ROCKCHIP_PCIE=m
CONFIG_PHY_ROCKCHIP_SAMSUNG_DCPHY=m
CONFIG_PHY_ROCKCHIP_SAMSUNG_HDPTX=m
CONFIG_PHY_ROCKCHIP_SAMSUNG_HDPTX_HDMI=m
CONFIG_PHY_ROCKCHIP_SNPS_PCIE3=m
CONFIG_PHY_ROCKCHIP_TYPEC=m
CONFIG_PHY_ROCKCHIP_USB=m
CONFIG_PHY_ROCKCHIP_USBDP=m
CONFIG_PINCTRL_RK805=m
CONFIG_PINCTRL_RK806=m
CONFIG_PINCTRL_ROCKCHIP=m
CONFIG_PL330_DMA=m
CONFIG_PROXIMITY_DEVICE=m
CONFIG_PS_STK3410=m
CONFIG_PS_UCS14620=m
CONFIG_PWM_ROCKCHIP=m
CONFIG_REGULATOR_ACT8865=m
CONFIG_REGULATOR_FAN53555=m
@ -164,25 +192,43 @@ CONFIG_REGULATOR_GPIO=m
CONFIG_REGULATOR_LP8752=m
CONFIG_REGULATOR_MP8865=m
CONFIG_REGULATOR_PWM=m
CONFIG_REGULATOR_RK806=m
CONFIG_REGULATOR_RK808=m
CONFIG_REGULATOR_RK860X=m
CONFIG_REGULATOR_TPS65132=m
CONFIG_REGULATOR_WL2868C=m
CONFIG_REGULATOR_XZ3216=m
CONFIG_RFKILL_RK=m
CONFIG_RK_CONSOLE_THREAD=y
CONFIG_RK_NAND=m
CONFIG_RK_HEADSET=m
CONFIG_ROCKCHIP_ANALOGIX_DP=y
CONFIG_ROCKCHIP_CDN_DP=y
CONFIG_ROCKCHIP_CPUINFO=m
CONFIG_ROCKCHIP_DEBUG=m
CONFIG_ROCKCHIP_DW_DP=y
CONFIG_ROCKCHIP_DW_HDCP2=m
CONFIG_ROCKCHIP_DW_HDMI=y
CONFIG_ROCKCHIP_DW_MIPI_DSI=y
CONFIG_ROCKCHIP_EFUSE=m
CONFIG_ROCKCHIP_FIQ_DEBUGGER=m
CONFIG_ROCKCHIP_GRF=m
CONFIG_ROCKCHIP_INNO_HDMI=y
CONFIG_ROCKCHIP_IODOMAIN=m
CONFIG_ROCKCHIP_IOMMU=m
CONFIG_ROCKCHIP_IPA=m
CONFIG_ROCKCHIP_LVDS=y
CONFIG_ROCKCHIP_MPP_AV1DEC=y
CONFIG_ROCKCHIP_MPP_IEP2=y
CONFIG_ROCKCHIP_MPP_JPGDEC=y
CONFIG_ROCKCHIP_MPP_RKVDEC=y
CONFIG_ROCKCHIP_MPP_RKVDEC2=y
CONFIG_ROCKCHIP_MPP_RKVENC=y
CONFIG_ROCKCHIP_MPP_RKVENC2=y
CONFIG_ROCKCHIP_MPP_SERVICE=m
CONFIG_ROCKCHIP_MPP_VDPU1=y
CONFIG_ROCKCHIP_MPP_VDPU2=y
CONFIG_ROCKCHIP_MPP_VEPU1=y
CONFIG_ROCKCHIP_MPP_VEPU2=y
CONFIG_ROCKCHIP_MULTI_RGA=m
CONFIG_ROCKCHIP_OPP=m
CONFIG_ROCKCHIP_OTP=m
CONFIG_ROCKCHIP_PHY=m
@ -191,14 +237,19 @@ CONFIG_ROCKCHIP_PVTM=m
CONFIG_ROCKCHIP_REMOTECTL=m
CONFIG_ROCKCHIP_REMOTECTL_PWM=m
CONFIG_ROCKCHIP_RGB=y
CONFIG_ROCKCHIP_RKNPU=m
CONFIG_ROCKCHIP_SARADC=m
CONFIG_ROCKCHIP_SIP=m
CONFIG_ROCKCHIP_SUSPEND_MODE=m
CONFIG_ROCKCHIP_SYSTEM_MONITOR=m
CONFIG_ROCKCHIP_THERMAL=m
CONFIG_ROCKCHIP_TIMER=m
CONFIG_ROCKCHIP_VENDOR_STORAGE=m
CONFIG_ROCKCHIP_VENDOR_STORAGE_UPDATE_LOADER=y
CONFIG_RTC_DRV_HYM8563=m
CONFIG_RTC_DRV_RK808=m
CONFIG_SENSOR_DEVICE=m
CONFIG_SERIAL_8250_DW=m
CONFIG_SMARTJOYPLUS_FF=y
CONFIG_SND_SIMPLE_CARD=m
CONFIG_SND_SOC_BT_SCO=m
@ -209,13 +260,20 @@ CONFIG_SND_SOC_ES7210=m
CONFIG_SND_SOC_ES7243E=m
CONFIG_SND_SOC_ES8311=m
CONFIG_SND_SOC_ES8316=m
CONFIG_SND_SOC_ES8323=m
CONFIG_SND_SOC_ES8326=m
CONFIG_SND_SOC_ES8396=m
CONFIG_SND_SOC_RK3328=m
CONFIG_SND_SOC_RK817=m
CONFIG_SND_SOC_RK_CODEC_DIGITAL=m
CONFIG_SND_SOC_ROCKCHIP=m
CONFIG_SND_SOC_ROCKCHIP_HDMI=m
CONFIG_SND_SOC_ROCKCHIP_I2S=m
CONFIG_SND_SOC_ROCKCHIP_I2S_TDM=m
CONFIG_SND_SOC_ROCKCHIP_MULTICODECS=m
CONFIG_SND_SOC_ROCKCHIP_PDM=m
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
CONFIG_SND_SOC_ROCKCHIP_SPDIFRX=m
CONFIG_SND_SOC_RT5640=m
CONFIG_SND_SOC_SPDIF=m
CONFIG_SPI_ROCKCHIP=m
@ -224,14 +282,61 @@ CONFIG_SW_SYNC=m
CONFIG_SYSCON_REBOOT_MODE=m
CONFIG_TEE=m
CONFIG_TEST_POWER=m
CONFIG_TOUCHSCREEN_ELAN5515=m
CONFIG_TOUCHSCREEN_GSL3673=m
CONFIG_TOUCHSCREEN_GSLX680_PAD=m
CONFIG_TOUCHSCREEN_GT1X=m
CONFIG_TYPEC_DP_ALTMODE=m
CONFIG_TYPEC_FUSB302=m
CONFIG_TYPEC_HUSB311=m
CONFIG_UCS12CM0=m
CONFIG_USB_DWC2=m
CONFIG_USB_NET_CDC_MBIM=m
CONFIG_USB_NET_DM9601=m
CONFIG_USB_NET_GL620A=m
CONFIG_USB_NET_KALMIA=m
CONFIG_USB_NET_MCS7830=m
CONFIG_USB_NET_PLUSB=m
CONFIG_USB_NET_SMSC75XX=m
CONFIG_USB_NET_SMSC95XX=m
CONFIG_USB_OHCI_HCD=m
# CONFIG_USB_OHCI_HCD_PCI is not set
CONFIG_USB_OHCI_HCD_PLATFORM=m
CONFIG_USB_PRINTER=m
CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_TRANCEVIBRATOR=m
CONFIG_VIDEO_AW36518=m
CONFIG_VIDEO_AW8601=m
CONFIG_VIDEO_CN3927V=m
CONFIG_VIDEO_DW9714=m
CONFIG_VIDEO_OV2680=m
CONFIG_VIDEO_FP5510=m
CONFIG_VIDEO_GC2145=m
CONFIG_VIDEO_GC2385=m
CONFIG_VIDEO_GC4C33=m
CONFIG_VIDEO_GC8034=m
CONFIG_VIDEO_IMX415=m
CONFIG_VIDEO_LT6911UXC=m
CONFIG_VIDEO_LT7911D=m
CONFIG_VIDEO_NVP6188=m
CONFIG_VIDEO_OV02B10=m
CONFIG_VIDEO_OV13850=m
CONFIG_VIDEO_OV13855=m
CONFIG_VIDEO_OV50C40=m
CONFIG_VIDEO_OV5695=m
CONFIG_ZISOFS=y
CONFIG_VIDEO_OV8858=m
CONFIG_VIDEO_RK628_BT1120=m
CONFIG_VIDEO_RK628_CSI=m
CONFIG_VIDEO_RK_IRCUT=m
CONFIG_VIDEO_ROCKCHIP_CIF=m
CONFIG_VIDEO_ROCKCHIP_ISP=m
CONFIG_VIDEO_ROCKCHIP_ISPP=m
CONFIG_VIDEO_S5K3L6XX=m
CONFIG_VIDEO_S5KJN1=m
CONFIG_VIDEO_SGM3784=m
CONFIG_VIDEO_THCV244=m
CONFIG_VL6180=m
CONFIG_WIFI_BUILD_MODULE=y
CONFIG_WL_ROCKCHIP=m
CONFIG_ZRAM=m
CONFIG_ZSMALLOC=m
# CONFIG_USB_DUMMY_HCD is not set

View File

@ -3,6 +3,6 @@
DEFCONFIG=rockchip_aarch64_gki_defconfig
KMI_SYMBOL_LIST=android/abi_gki_aarch64_rockchip
PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/common/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/common/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/common/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/common/arch/arm64/configs/rockchip_gki.config"
PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/common/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/common/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/common/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/common/arch/arm64/configs/rockchip_gki.fragment"
POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/common/arch/arm64/configs/${DEFCONFIG}"

View File

@ -2576,7 +2576,6 @@ static int binder_proc_transaction(struct binder_transaction *t,
struct binder_priority node_prio;
bool oneway = !!(t->flags & TF_ONE_WAY);
bool pending_async = false;
bool skip = false;
struct binder_transaction *t_outdated = NULL;
BUG_ON(!node);
@ -2605,10 +2604,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
}
trace_android_vh_binder_proc_transaction_entry(proc, t,
&thread, node->debug_id, pending_async, !oneway, &skip);
if (!thread && !pending_async && !skip)
if (!thread && !pending_async)
thread = binder_select_thread_ilocked(proc);
trace_android_vh_binder_proc_transaction(current, proc->tsk,
@ -4150,10 +4146,6 @@ static int binder_thread_read(struct binder_proc *proc,
size_t trsize = sizeof(*trd);
binder_inner_proc_lock(proc);
trace_android_vh_binder_select_worklist_ilocked(&list, thread,
proc, wait_for_proc_work);
if (list)
goto skip;
if (!binder_worklist_empty_ilocked(&thread->todo))
list = &thread->todo;
else if (!binder_worklist_empty_ilocked(&proc->todo) &&
@ -4167,7 +4159,7 @@ static int binder_thread_read(struct binder_proc *proc,
goto retry;
break;
}
skip:
if (end - ptr < sizeof(tr) + 4) {
binder_inner_proc_unlock(proc);
break;

View File

@ -106,6 +106,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_write_finished);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_rwsem_list_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_futex_plist_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_sleep_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_futex);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_wait_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_wait_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_wake_traverse_plist);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_wake_this);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_wake_up_q_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_wait_start);
@ -243,7 +249,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_build_sched_domains);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_mutex_list_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_unlock_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_unlock_slowpath_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_start_check_new_owner);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_undefinstr);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_ptrauth_fault);
@ -259,6 +264,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_commit_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_override_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_revert_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_mutex_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rtmutex_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rwsem_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_percpu_rwsem_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_x);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_nx);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_ro);
@ -290,10 +299,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_reply);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_trans);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_binder_transaction);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_preset);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_select_worklist_ilocked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_new_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_del_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_post_init_entity_util_avg);
@ -304,6 +311,11 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_logbuf_pr_cont);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_scan_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_swappiness);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_slab_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_failed_page_trylock);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_trylock_set);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_trylock_clear);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_trylock_get_result);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_page_trylock);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_check_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_all_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_drain_all_pages_bypass);
@ -394,6 +406,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_udp_recvmsg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tcp_recvmsg_stat);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_pci_d3_sleep);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kmalloc_slab);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_disable_thermal_cooling_stats);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmap_region);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_update_page_mapcount);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_add_page_to_lrulist);
@ -415,8 +428,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_has_work_ilocked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_read_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_tlb_conf);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node_memcgs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ra_tuning_max_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_memcg_scan_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_pte_fault_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cow_user_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swapin_add_anon_rmap);
@ -441,6 +452,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_swapinfo);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_si);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_pages);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_shmem_page_flag);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pidfd_open);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmput);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_pelt_multiplier);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_reclaim_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_failure_bypass);

View File

@ -1181,6 +1181,9 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
if (memblock_is_region_reserved(base, size))
return -EBUSY;
if (memblock_is_nomap_remove())
return memblock_remove(base, size);
return memblock_mark_nomap(base, size);
}
return memblock_reserve(base, size);

View File

@ -4379,7 +4379,7 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
if (!flag_res)
break;
usleep_range(5000, 10000);
usleep_range(500, 1000);
} while (ktime_before(ktime_get(), timeout));
if (err) {

View File

@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <trace/hooks/thermal.h>
#include "thermal_core.h"
@ -886,9 +887,22 @@ static struct attribute *cooling_device_stats_attrs[] = {
NULL
};
static umode_t cooling_device_stats_is_visible(struct kobject *kobj,
struct attribute *attr, int attrno)
{
struct thermal_cooling_device *cdev = to_cooling_device(
kobj_to_dev(kobj));
if (!cdev->stats)
return 0;
return attr->mode;
}
static const struct attribute_group cooling_device_stats_attr_group = {
.attrs = cooling_device_stats_attrs,
.name = "stats"
.name = "stats",
.is_visible = cooling_device_stats_is_visible,
};
static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
@ -896,6 +910,12 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
struct cooling_dev_stats *stats;
unsigned long states;
int var;
bool disable_cdev_stats = false;
trace_android_vh_disable_thermal_cooling_stats(cdev,
&disable_cdev_stats);
if (disable_cdev_stats)
return;
if (cdev->ops->get_max_state(cdev, &states))
return;

View File

@ -2472,9 +2472,6 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
is_on = !!is_on;
if (dwc->pullups_connected == is_on)
return 0;
vdwc->softconnect = is_on;
/*
@ -2512,6 +2509,11 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
return 0;
}
if (dwc->pullups_connected == is_on) {
pm_runtime_put(dwc->dev);
return 0;
}
/*
* Synchronize and disable any further event handling while controller
* is being enabled/disabled.

View File

@ -76,6 +76,10 @@ static int ucsi_read_error(struct ucsi *ucsi)
if (ret)
return ret;
ret = ucsi_acknowledge_command(ucsi);
if (ret)
return ret;
switch (error) {
case UCSI_ERROR_INCOMPATIBLE_PARTNER:
return -EOPNOTSUPP;

View File

@ -1863,15 +1863,27 @@ int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi)
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi)
{
struct ckpt_req_control *cprc = &sbi->cprc_info;
struct task_struct *ckpt_task;
if (cprc->f2fs_issue_ckpt) {
struct task_struct *ckpt_task = cprc->f2fs_issue_ckpt;
if (!cprc->f2fs_issue_ckpt)
return;
cprc->f2fs_issue_ckpt = NULL;
kthread_stop(ckpt_task);
ckpt_task = cprc->f2fs_issue_ckpt;
cprc->f2fs_issue_ckpt = NULL;
kthread_stop(ckpt_task);
flush_remained_ckpt_reqs(sbi, NULL);
}
f2fs_flush_ckpt_thread(sbi);
}
void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi)
{
struct ckpt_req_control *cprc = &sbi->cprc_info;
flush_remained_ckpt_reqs(sbi, NULL);
/* Let's wait for the previous dispatched checkpoint. */
while (atomic_read(&cprc->queued_ckpt))
io_schedule_timeout(DEFAULT_IO_TIMEOUT);
}
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi)

View File

@ -2479,6 +2479,9 @@ static inline bool check_inplace_update_policy(struct inode *inode,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int policy = SM_I(sbi)->ipu_policy;
if (policy & (0x1 << F2FS_IPU_HONOR_OPU_WRITE) &&
is_inode_flag_set(inode, FI_OPU_WRITE))
return false;
if (policy & (0x1 << F2FS_IPU_FORCE))
return true;
if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
@ -2549,6 +2552,9 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
return true;
if (is_inode_flag_set(inode, FI_OPU_WRITE))
return true;
if (fio) {
if (page_private_gcing(fio->page))
return true;
@ -2714,6 +2720,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.submitted = false,
.compr_blocks = compr_blocks,
.need_lock = LOCK_RETRY,
.post_read = f2fs_post_read_required(inode),
.io_type = io_type,
.io_wbc = wbc,
.bio = bio,
@ -3172,8 +3179,8 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
f2fs_available_free_memory(sbi, DIRTY_DENTS))
goto skip_write;
/* skip writing during file defragment */
if (is_inode_flag_set(inode, FI_DO_DEFRAG))
/* skip writing in file defragment preparing stage */
if (is_inode_flag_set(inode, FI_SKIP_WRITES))
goto skip_write;
trace_f2fs_writepages(mapping->host, wbc, DATA);
@ -3955,6 +3962,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
set_inode_flag(inode, FI_ALIGNED_WRITE);
set_inode_flag(inode, FI_OPU_WRITE);
for (; secidx < end_sec; secidx++) {
f2fs_down_write(&sbi->pin_sem);
@ -3963,7 +3971,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
f2fs_unlock_op(sbi);
set_inode_flag(inode, FI_DO_DEFRAG);
set_inode_flag(inode, FI_SKIP_WRITES);
for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
struct page *page;
@ -3980,7 +3988,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
f2fs_put_page(page, 1);
}
clear_inode_flag(inode, FI_DO_DEFRAG);
clear_inode_flag(inode, FI_SKIP_WRITES);
ret = filemap_fdatawrite(inode->i_mapping);
@ -3991,7 +3999,8 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
}
done:
clear_inode_flag(inode, FI_DO_DEFRAG);
clear_inode_flag(inode, FI_SKIP_WRITES);
clear_inode_flag(inode, FI_OPU_WRITE);
clear_inode_flag(inode, FI_ALIGNED_WRITE);
f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);

View File

@ -707,7 +707,8 @@ enum {
FI_DROP_CACHE, /* drop dirty page cache */
FI_DATA_EXIST, /* indicate data exists */
FI_INLINE_DOTS, /* indicate inline dot dentries */
FI_DO_DEFRAG, /* indicate defragment is running */
FI_SKIP_WRITES, /* should skip data page writeback */
FI_OPU_WRITE, /* used for opu per file */
FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
FI_NO_PREALLOC, /* indicate skipped preallocated blocks */
FI_HOT_DATA, /* indicate file is hot */
@ -1161,6 +1162,7 @@ struct f2fs_io_info {
bool retry; /* need to reallocate block address */
int compr_blocks; /* # of compressed block addresses */
bool encrypted; /* indicate file is encrypted */
bool post_read; /* require post read */
enum iostat_type io_type; /* io type */
struct writeback_control *io_wbc; /* writeback control */
struct bio **bio; /* bio for ipu */
@ -1245,6 +1247,7 @@ enum {
SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */
SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */
SBI_IS_RESIZEFS, /* resizefs is in process */
SBI_IS_FREEZING, /* freezefs is in process */
};
enum {
@ -3623,6 +3626,7 @@ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
* checkpoint.c
*/
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);

View File

@ -2592,10 +2592,6 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
bool fragmented = false;
int err;
/* if in-place-update policy is enabled, don't waste time here */
if (f2fs_should_update_inplace(inode, NULL))
return -EINVAL;
pg_start = range->start >> PAGE_SHIFT;
pg_end = (range->start + range->len) >> PAGE_SHIFT;
@ -2603,6 +2599,13 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
inode_lock(inode);
/* if in-place-update policy is enabled, don't waste time here */
set_inode_flag(inode, FI_OPU_WRITE);
if (f2fs_should_update_inplace(inode, NULL)) {
err = -EINVAL;
goto out;
}
/* writeback all dirty pages in the range */
err = filemap_write_and_wait_range(inode->i_mapping, range->start,
range->start + range->len - 1);
@ -2684,7 +2687,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
goto check;
}
set_inode_flag(inode, FI_DO_DEFRAG);
set_inode_flag(inode, FI_SKIP_WRITES);
idx = map.m_lblk;
while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
@ -2709,15 +2712,16 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
if (map.m_lblk < pg_end && cnt < blk_per_seg)
goto do_map;
clear_inode_flag(inode, FI_DO_DEFRAG);
clear_inode_flag(inode, FI_SKIP_WRITES);
err = filemap_fdatawrite(inode->i_mapping);
if (err)
goto out;
}
clear_out:
clear_inode_flag(inode, FI_DO_DEFRAG);
clear_inode_flag(inode, FI_SKIP_WRITES);
out:
clear_inode_flag(inode, FI_OPU_WRITE);
inode_unlock(inode);
if (!err)
range->len = (u64)total << PAGE_SHIFT;

View File

@ -764,7 +764,8 @@ void f2fs_evict_inode(struct inode *inode)
f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);
sb_start_intwrite(inode->i_sb);
if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
sb_start_intwrite(inode->i_sb);
set_inode_flag(inode, FI_NO_ALLOC);
i_size_write(inode, 0);
retry:
@ -795,7 +796,8 @@ void f2fs_evict_inode(struct inode *inode)
if (dquot_initialize_needed(inode))
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
}
sb_end_intwrite(inode->i_sb);
if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
sb_end_intwrite(inode->i_sb);
no_delete:
dquot_drop(inode);

View File

@ -3568,6 +3568,10 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
goto drop_bio;
}
if (fio->post_read)
invalidate_mapping_pages(META_MAPPING(sbi),
fio->new_blkaddr, fio->new_blkaddr);
stat_inc_inplace_blocks(fio->sbi);
if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
@ -3747,10 +3751,16 @@ void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
block_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
block_t i;
if (!f2fs_post_read_required(inode))
return;
for (i = 0; i < len; i++)
f2fs_wait_on_block_writeback(inode, blkaddr + i);
invalidate_mapping_pages(META_MAPPING(sbi), blkaddr, blkaddr + len - 1);
}
static int read_compacted_summaries(struct f2fs_sb_info *sbi)

View File

@ -650,7 +650,9 @@ static inline int utilization(struct f2fs_sb_info *sbi)
* pages over min_fsync_blocks. (=default option)
* F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
* F2FS_IPU_NOCACHE - disable IPU bio cache.
* F2FS_IPUT_DISABLE - disable IPU. (=default option in LFS mode)
* F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has
* FI_OPU_WRITE flag.
* F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
*/
#define DEF_MIN_IPU_UTIL 70
#define DEF_MIN_FSYNC_BLOCKS 8
@ -666,6 +668,7 @@ enum {
F2FS_IPU_FSYNC,
F2FS_IPU_ASYNC,
F2FS_IPU_NOCACHE,
F2FS_IPU_HONOR_OPU_WRITE,
};
static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,

View File

@ -299,10 +299,10 @@ static void f2fs_destroy_casefold_cache(void) { }
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
block_t limit = min((sbi->user_block_count << 1) / 1000,
block_t limit = min((sbi->user_block_count >> 3),
sbi->user_block_count - sbi->reserved_blocks);
/* limit is 0.2% */
/* limit is 12.5% */
if (test_opt(sbi, RESERVE_ROOT) &&
F2FS_OPTION(sbi).root_reserved_blocks > limit) {
F2FS_OPTION(sbi).root_reserved_blocks = limit;
@ -1555,14 +1555,17 @@ static int f2fs_freeze(struct super_block *sb)
if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
return -EINVAL;
/* ensure no checkpoint required */
if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list))
return -EINVAL;
/* Let's flush checkpoints and stop the thread. */
f2fs_flush_ckpt_thread(F2FS_SB(sb));
/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
return 0;
}
static int f2fs_unfreeze(struct super_block *sb)
{
clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
return 0;
}
@ -2025,6 +2028,9 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
f2fs_up_write(&sbi->gc_lock);
f2fs_sync_fs(sbi->sb, 1);
/* Let's ensure there's no pending checkpoint anymore */
f2fs_flush_ckpt_thread(sbi);
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
@ -2184,6 +2190,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
f2fs_stop_ckpt_thread(sbi);
need_restart_ckpt = true;
} else {
/* Flush if the prevous checkpoint, if exists. */
f2fs_flush_ckpt_thread(sbi);
err = f2fs_start_ckpt_thread(sbi);
if (err) {
f2fs_err(sbi,
@ -3771,7 +3780,8 @@ static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
sm_i->dcc_info->discard_granularity = 1;
sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
sm_i->ipu_policy = 1 << F2FS_IPU_FORCE |
1 << F2FS_IPU_HONOR_OPU_WRITE;
}
sbi->readdir_ra = 1;

View File

@ -487,6 +487,7 @@ bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
bool memblock_is_nomap_remove(void);
void memblock_dump_all(void);

View File

@ -56,8 +56,9 @@ static inline void page_ext_init(void)
{
}
#endif
struct page_ext *lookup_page_ext(const struct page *page);
extern struct page_ext *page_ext_get(struct page *page);
extern void page_ext_put(struct page_ext *page_ext);
static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
@ -73,11 +74,6 @@ static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
}
static inline struct page_ext *lookup_page_ext(const struct page *page)
{
return NULL;
}
static inline void page_ext_init(void)
{
}
@ -85,5 +81,14 @@ static inline void page_ext_init(void)
static inline void page_ext_init_flatmem(void)
{
}
static inline struct page_ext *page_ext_get(struct page *page)
{
return NULL;
}
static inline void page_ext_put(struct page_ext *page_ext)
{
}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */

View File

@ -47,62 +47,77 @@ extern struct page_ext_operations page_idle_ops;
static inline bool page_is_young(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
bool page_young;
if (unlikely(!page_ext))
return false;
return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
page_young = test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
page_ext_put(page_ext);
return page_young;
}
static inline void set_page_young(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
page_ext_put(page_ext);
}
static inline bool test_and_clear_page_young(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
bool page_young;
if (unlikely(!page_ext))
return false;
return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
page_young = test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
page_ext_put(page_ext);
return page_young;
}
static inline bool page_is_idle(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
bool page_idle;
if (unlikely(!page_ext))
return false;
return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags);
page_ext_put(page_ext);
return page_idle;
}
static inline void set_page_idle(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_IDLE, &page_ext->flags);
page_ext_put(page_ext);
}
static inline void clear_page_idle(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
page_ext_put(page_ext);
}
#endif /* CONFIG_64BIT */


@ -9,6 +9,9 @@
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
void _trace_android_vh_record_percpu_rwsem_lock_starttime(
struct task_struct *tsk, unsigned long settime);
struct percpu_rw_semaphore {
struct rcu_sync rss;
unsigned int __percpu *read_count;
@ -73,6 +76,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
* bleeding the critical section out.
*/
preempt_enable();
_trace_android_vh_record_percpu_rwsem_lock_starttime(current, jiffies);
}
static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
@ -93,14 +97,17 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
* bleeding the critical section out.
*/
if (ret)
if (ret) {
_trace_android_vh_record_percpu_rwsem_lock_starttime(current, jiffies);
rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
}
return ret;
}
static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
_trace_android_vh_record_percpu_rwsem_lock_starttime(current, 0);
rwsem_release(&sem->dep_map, _RET_IP_);
preempt_disable();


@ -75,11 +75,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_binder_transaction,
DECLARE_HOOK(android_vh_binder_preset,
TP_PROTO(struct hlist_head *hhead, struct mutex *lock),
TP_ARGS(hhead, lock));
DECLARE_HOOK(android_vh_binder_proc_transaction_entry,
TP_PROTO(struct binder_proc *proc, struct binder_transaction *t,
struct binder_thread **thread, int node_debug_id, bool pending_async,
bool sync, bool *skip),
TP_ARGS(proc, t, thread, node_debug_id, pending_async, sync, skip));
DECLARE_HOOK(android_vh_binder_proc_transaction,
TP_PROTO(struct task_struct *caller_task, struct task_struct *binder_proc_task,
struct task_struct *binder_th_task, int node_debug_id,
@ -90,10 +85,6 @@ DECLARE_HOOK(android_vh_binder_proc_transaction_end,
struct task_struct *binder_th_task, unsigned int code,
bool pending_async, bool sync),
TP_ARGS(caller_task, binder_proc_task, binder_th_task, code, pending_async, sync));
DECLARE_HOOK(android_vh_binder_select_worklist_ilocked,
TP_PROTO(struct list_head **list, struct binder_thread *thread, struct binder_proc *proc,
int wait_for_proc_work),
TP_ARGS(list, thread, proc, wait_for_proc_work));
DECLARE_HOOK(android_vh_binder_new_ref,
TP_PROTO(struct task_struct *proc, uint32_t ref_desc, int node_debug_id),
TP_ARGS(proc, ref_desc, node_debug_id));


@ -68,9 +68,18 @@ DECLARE_HOOK(android_vh_mutex_unlock_slowpath,
DECLARE_HOOK(android_vh_mutex_unlock_slowpath_end,
TP_PROTO(struct mutex *lock, struct task_struct *next),
TP_ARGS(lock, next));
DECLARE_HOOK(android_vh_mutex_start_check_new_owner,
TP_PROTO(struct mutex *lock),
TP_ARGS(lock));
DECLARE_HOOK(android_vh_record_mutex_lock_starttime,
TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies),
TP_ARGS(tsk, settime_jiffies));
DECLARE_HOOK(android_vh_record_rtmutex_lock_starttime,
TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies),
TP_ARGS(tsk, settime_jiffies));
DECLARE_HOOK(android_vh_record_rwsem_lock_starttime,
TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies),
TP_ARGS(tsk, settime_jiffies));
DECLARE_HOOK(android_vh_record_percpu_rwsem_lock_starttime,
TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies),
TP_ARGS(tsk, settime_jiffies));
/* macro versions of hooks are no longer required */
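Judging by the call sites added later in this commit, these hooks fire with jiffies when a lock is acquired and with 0 when it is released, so a probe can derive lock hold times. A minimal, illustrative probe follows; registration through the register_trace_android_vh_* helpers generated by DECLARE_HOOK is assumed, and the per-task bookkeeping a real vendor module would use is deliberately left out:

static void probe_mutex_starttime(void *data, struct task_struct *tsk,
				  unsigned long settime_jiffies)
{
	/* settime_jiffies != 0: lock acquired at that timestamp.
	 * settime_jiffies == 0: lock released; a real module would look up
	 * the start time it stashed for 'tsk' and report the delta. */
	if (!settime_jiffies)
		pr_debug("%s released a mutex\n", tsk->comm);
}

/* In the module init path (sketch):
 * register_trace_android_vh_record_mutex_lock_starttime(probe_mutex_starttime, NULL);
 */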


@ -8,6 +8,10 @@
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#include <linux/plist.h>
#ifndef __GENKSYMS__
#include <linux/futex.h>
#endif
/*
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
@ -22,6 +26,36 @@ DECLARE_HOOK(android_vh_futex_sleep_start,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
DECLARE_HOOK(android_vh_do_futex,
TP_PROTO(int cmd,
unsigned int *flags,
u32 __user *uaddr2),
TP_ARGS(cmd, flags, uaddr2));
DECLARE_HOOK(android_vh_futex_wait_start,
TP_PROTO(unsigned int flags,
u32 bitset),
TP_ARGS(flags, bitset));
DECLARE_HOOK(android_vh_futex_wait_end,
TP_PROTO(unsigned int flags,
u32 bitset),
TP_ARGS(flags, bitset));
DECLARE_HOOK(android_vh_futex_wake_traverse_plist,
TP_PROTO(struct plist_head *chain, int *target_nr,
union futex_key key, u32 bitset),
TP_ARGS(chain, target_nr, key, bitset));
DECLARE_HOOK(android_vh_futex_wake_this,
TP_PROTO(int ret, int nr_wake, int target_nr,
struct task_struct *p),
TP_ARGS(ret, nr_wake, target_nr, p));
DECLARE_HOOK(android_vh_futex_wake_up_q_finish,
TP_PROTO(int nr_wake, int target_nr),
TP_ARGS(nr_wake, target_nr));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_FUTEX_H */


@ -22,6 +22,7 @@
#include <linux/oom.h>
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#include <linux/rwsem.h>
#ifdef __GENKSYMS__
struct slabinfo;
@ -148,6 +149,10 @@ DECLARE_HOOK(android_vh_mmap_region,
DECLARE_HOOK(android_vh_try_to_unmap_one,
TP_PROTO(struct vm_area_struct *vma, struct page *page, unsigned long addr, bool ret),
TP_ARGS(vma, page, addr, ret));
DECLARE_HOOK(android_vh_do_page_trylock,
TP_PROTO(struct page *page, struct rw_semaphore *sem,
bool *got_lock, bool *success),
TP_ARGS(page, sem, got_lock, success));
DECLARE_HOOK(android_vh_drain_all_pages_bypass,
TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long alloc_flags,
int migratetype, unsigned long did_some_progress,
@ -184,9 +189,6 @@ DECLARE_HOOK(android_vh_pcplist_add_cma_pages_bypass,
DECLARE_HOOK(android_vh_subpage_dma_contig_alloc,
TP_PROTO(bool *allow_subpage_alloc, struct device *dev, size_t *size),
TP_ARGS(allow_subpage_alloc, dev, size));
DECLARE_HOOK(android_vh_ra_tuning_max_page,
TP_PROTO(struct readahead_control *ractl, unsigned long *max_page),
TP_ARGS(ractl, max_page));
DECLARE_HOOK(android_vh_handle_pte_fault_end,
TP_PROTO(struct vm_fault *vmf, unsigned long highest_memmap_pfn),
TP_ARGS(vmf, highest_memmap_pfn));
@ -258,6 +260,14 @@ DECLARE_HOOK(android_vh_set_shmem_page_flag,
DECLARE_HOOK(android_vh_remove_vmalloc_stack,
TP_PROTO(struct vm_struct *vm),
TP_ARGS(vm));
DECLARE_HOOK(android_vh_alloc_pages_reclaim_bypass,
TP_PROTO(gfp_t gfp_mask, int order, int alloc_flags,
int migratetype, struct page **page),
TP_ARGS(gfp_mask, order, alloc_flags, migratetype, page));
DECLARE_HOOK(android_vh_alloc_pages_failure_bypass,
TP_PROTO(gfp_t gfp_mask, int order, int alloc_flags,
int migratetype, struct page **page),
TP_ARGS(gfp_mask, order, alloc_flags, migratetype, page));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_MM_H */
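These two hooks let a vendor module short-circuit the allocator slowpath: as the mm/page_alloc.c hunk later in this diff shows, __alloc_pages_slowpath() jumps straight to got_pg if a probe fills in *page. A sketch of such a probe, assuming the usual register_trace_android_vh_* registration generated by DECLARE_HOOK; vendor_take_from_reserve() is a hypothetical helper standing in for whatever private pool a vendor maintains:

static void probe_alloc_pages_reclaim_bypass(void *data, gfp_t gfp_mask,
		int order, int alloc_flags, int migratetype, struct page **page)
{
	/* Hypothetical policy: satisfy small movable allocations from a
	 * vendor-managed reserve instead of entering direct reclaim. */
	if (order == 0 && migratetype == MIGRATE_MOVABLE)
		*page = vendor_take_from_reserve(gfp_mask);	/* hypothetical */
}

/* register_trace_android_vh_alloc_pages_reclaim_bypass(
 *	probe_alloc_pages_reclaim_bypass, NULL); */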


@ -391,14 +391,6 @@ DECLARE_HOOK(android_vh_setscheduler_uclamp,
TP_PROTO(struct task_struct *tsk, int clamp_id, unsigned int value),
TP_ARGS(tsk, clamp_id, value));
DECLARE_HOOK(android_vh_pidfd_open,
TP_PROTO(struct pid *p),
TP_ARGS(p));
DECLARE_HOOK(android_vh_mmput,
TP_PROTO(void *unused),
TP_ARGS(unused));
DECLARE_HOOK(android_vh_sched_pelt_multiplier,
TP_PROTO(unsigned int old, unsigned int cur, int *ret),
TP_ARGS(old, cur, ret));


@ -24,6 +24,11 @@ DECLARE_HOOK(android_vh_thermal_pm_notify_suspend,
TP_PROTO(struct thermal_zone_device *tz, int *irq_wakeable),
TP_ARGS(tz, irq_wakeable));
struct thermal_cooling_device;
DECLARE_HOOK(android_vh_disable_thermal_cooling_stats,
TP_PROTO(struct thermal_cooling_device *cdev, bool *disable_stats),
TP_ARGS(cdev, disable_stats));
#endif /* _TRACE_HOOK_THERMAL_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -28,12 +28,21 @@ DECLARE_RESTRICTED_HOOK(android_rvh_set_balance_anon_file_reclaim,
DECLARE_HOOK(android_vh_page_referenced_check_bypass,
TP_PROTO(struct page *page, unsigned long nr_to_scan, int lru, bool *bypass),
TP_ARGS(page, nr_to_scan, lru, bypass));
DECLARE_HOOK(android_vh_page_trylock_get_result,
TP_PROTO(struct page *page, bool *trylock_fail),
TP_ARGS(page, trylock_fail));
DECLARE_HOOK(android_vh_handle_failed_page_trylock,
TP_PROTO(struct list_head *page_list),
TP_ARGS(page_list));
DECLARE_HOOK(android_vh_page_trylock_set,
TP_PROTO(struct page *page),
TP_ARGS(page));
DECLARE_HOOK(android_vh_page_trylock_clear,
TP_PROTO(struct page *page),
TP_ARGS(page));
DECLARE_HOOK(android_vh_shrink_node_memcgs,
TP_PROTO(struct mem_cgroup *memcg, bool *skip),
TP_ARGS(memcg, skip));
DECLARE_HOOK(android_vh_tune_memcg_scan_type,
TP_PROTO(struct mem_cgroup *memcg, char *scan_type),
TP_ARGS(memcg, scan_type));
DECLARE_HOOK(android_vh_inactive_is_low,
TP_PROTO(unsigned long gb, unsigned long *inactive_ratio,
enum lru_list inactive_lru, bool *skip),


@ -2328,6 +2328,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
}
EXPORT_SYMBOL_GPL(task_cgroup_path);
/**
* cgroup_attach_lock - Lock for ->attach()
* @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
*
* cgroup migration sometimes needs to stabilize threadgroups against forks and
* exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
* implementations (e.g. cpuset) also need to disable CPU hotplug.
* Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
* lead to deadlocks.
*
* Bringing up a CPU may involve creating new tasks which requires read-locking
* threadgroup_rwsem, so threadgroup_rwsem nests inside cpus_read_lock(). If we
* call an ->attach() which acquires the cpus lock while write-locking
* threadgroup_rwsem, the locking order is reversed and we end up waiting for an
* on-going CPU hotplug operation which in turn is waiting for the
* threadgroup_rwsem to be released to create new tasks. For more details:
*
* http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
*
* Resolve the situation by always acquiring cpus_read_lock() before optionally
* write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
* CPU hotplug is disabled on entry.
*/
static void cgroup_attach_lock(bool lock_threadgroup)
{
cpus_read_lock();
if (lock_threadgroup)
percpu_down_write(&cgroup_threadgroup_rwsem);
}
/**
* cgroup_attach_unlock - Undo cgroup_attach_lock()
* @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
*/
static void cgroup_attach_unlock(bool lock_threadgroup)
{
if (lock_threadgroup)
percpu_up_write(&cgroup_threadgroup_rwsem);
cpus_read_unlock();
}
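Put differently, the fixed ordering is cpus_read_lock() first and cgroup_threadgroup_rwsem second, and every migration path below now gets that ordering by construction. A minimal sketch (not taken from this patch) of how a caller is expected to nest the two helpers:

static void sketch_attach_path(bool need_threadgroup_lock)
{
	cgroup_attach_lock(need_threadgroup_lock);	/* cpus_read_lock() outer,
							 * rwsem (optional) inner */

	/* ->attach() callbacks such as cpuset_attach() can now rely on
	 * lockdep_assert_cpus_held() instead of taking cpus_read_lock(). */

	cgroup_attach_unlock(need_threadgroup_lock);	/* inner lock first, outer last */
}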
/**
* cgroup_migrate_add_task - add a migration target task to a migration context
* @task: target task
@ -2812,9 +2853,8 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
}
struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
bool *locked,
bool *threadgroup_locked,
struct cgroup *dst_cgrp)
__acquires(&cgroup_threadgroup_rwsem)
{
struct task_struct *tsk;
pid_t pid;
@ -2832,12 +2872,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
* Therefore, we can skip the global lock.
*/
lockdep_assert_held(&cgroup_mutex);
if (pid || threadgroup) {
percpu_down_write(&cgroup_threadgroup_rwsem);
*locked = true;
} else {
*locked = false;
}
*threadgroup_locked = pid || threadgroup;
cgroup_attach_lock(*threadgroup_locked);
rcu_read_lock();
if (pid) {
@ -2871,17 +2907,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
goto out_unlock_rcu;
out_unlock_threadgroup:
if (*locked) {
percpu_up_write(&cgroup_threadgroup_rwsem);
*locked = false;
}
cgroup_attach_unlock(*threadgroup_locked);
*threadgroup_locked = false;
out_unlock_rcu:
rcu_read_unlock();
return tsk;
}
void cgroup_procs_write_finish(struct task_struct *task, bool locked)
__releases(&cgroup_threadgroup_rwsem)
void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked)
{
struct cgroup_subsys *ss;
int ssid;
@ -2889,8 +2922,8 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked)
/* release reference from cgroup_procs_write_start() */
put_task_struct(task);
if (locked)
percpu_up_write(&cgroup_threadgroup_rwsem);
cgroup_attach_unlock(threadgroup_locked);
for_each_subsys(ss, ssid)
if (ss->post_attach)
ss->post_attach();
@ -2945,12 +2978,11 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
struct cgroup_subsys_state *d_css;
struct cgroup *dsct;
struct ext_css_set *ext_src_set;
bool has_tasks;
int ret;
lockdep_assert_held(&cgroup_mutex);
percpu_down_write(&cgroup_threadgroup_rwsem);
/* look up all csses currently attached to @cgrp's subtree */
spin_lock_irq(&css_set_lock);
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
@ -2961,6 +2993,15 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
}
spin_unlock_irq(&css_set_lock);
/*
* We need to write-lock threadgroup_rwsem while migrating tasks.
* However, if there are no source csets for @cgrp, changing its
* controllers isn't gonna produce any task migrations and the
* write-locking can be skipped safely.
*/
has_tasks = !list_empty(&mgctx.preloaded_src_csets);
cgroup_attach_lock(has_tasks);
/* NULL dst indicates self on default hierarchy */
ret = cgroup_migrate_prepare_dst(&mgctx);
if (ret)
@ -2980,7 +3021,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
ret = cgroup_migrate_execute(&mgctx);
out_finish:
cgroup_migrate_finish(&mgctx);
percpu_up_write(&cgroup_threadgroup_rwsem);
cgroup_attach_unlock(has_tasks);
return ret;
}
@ -4855,13 +4896,13 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
struct task_struct *task;
const struct cred *saved_cred;
ssize_t ret;
bool locked;
bool threadgroup_locked;
dst_cgrp = cgroup_kn_lock_live(of->kn, false);
if (!dst_cgrp)
return -ENODEV;
task = cgroup_procs_write_start(buf, true, &locked, dst_cgrp);
task = cgroup_procs_write_start(buf, true, &threadgroup_locked, dst_cgrp);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
@ -4887,7 +4928,7 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
ret = cgroup_attach_task(dst_cgrp, task, true);
out_finish:
cgroup_procs_write_finish(task, locked);
cgroup_procs_write_finish(task, threadgroup_locked);
out_unlock:
cgroup_kn_unlock(of->kn);
@ -4907,7 +4948,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
struct task_struct *task;
const struct cred *saved_cred;
ssize_t ret;
bool locked;
bool threadgroup_locked;
buf = strstrip(buf);
@ -4915,7 +4956,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
if (!dst_cgrp)
return -ENODEV;
task = cgroup_procs_write_start(buf, false, &locked, dst_cgrp);
task = cgroup_procs_write_start(buf, false, &threadgroup_locked, dst_cgrp);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
@ -4941,7 +4982,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
ret = cgroup_attach_task(dst_cgrp, task, false);
out_finish:
cgroup_procs_write_finish(task, locked);
cgroup_procs_write_finish(task, threadgroup_locked);
out_unlock:
cgroup_kn_unlock(of->kn);


@ -2238,7 +2238,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
cpus_read_lock();
lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
mutex_lock(&cpuset_mutex);
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
@ -2292,7 +2292,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
wake_up(&cpuset_attach_wq);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
}
/* The various types of files and directories in a cpuset file system */


@ -1150,10 +1150,8 @@ void mmput(struct mm_struct *mm)
{
might_sleep();
if (atomic_dec_and_test(&mm->mm_users)) {
trace_android_vh_mmput(NULL);
if (atomic_dec_and_test(&mm->mm_users))
__mmput(mm);
}
}
EXPORT_SYMBOL_GPL(mmput);


@ -1594,6 +1594,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
struct futex_q *this, *next;
union futex_key key = FUTEX_KEY_INIT;
int ret;
int target_nr;
DEFINE_WAKE_Q(wake_q);
if (!bitset)
@ -1611,6 +1612,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
spin_lock(&hb->lock);
trace_android_vh_futex_wake_traverse_plist(&hb->chain, &target_nr, key, bitset);
plist_for_each_entry_safe(this, next, &hb->chain, list) {
if (match_futex (&this->key, &key)) {
if (this->pi_state || this->rt_waiter) {
@ -1622,6 +1624,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
if (!(this->bitset & bitset))
continue;
trace_android_vh_futex_wake_this(ret, nr_wake, target_nr, this->task);
mark_wake_futex(&wake_q, this);
if (++ret >= nr_wake)
break;
@ -1630,6 +1633,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
spin_unlock(&hb->lock);
wake_up_q(&wake_q);
trace_android_vh_futex_wake_up_q_finish(nr_wake, target_nr);
return ret;
}
@ -2699,6 +2703,7 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
if (!bitset)
return -EINVAL;
q.bitset = bitset;
trace_android_vh_futex_wait_start(flags, bitset);
to = futex_setup_timer(abs_time, &timeout, flags,
current->timer_slack_ns);
@ -2748,6 +2753,7 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
trace_android_vh_futex_wait_end(flags, bitset);
return ret;
}
@ -3733,6 +3739,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
return -ENOSYS;
}
trace_android_vh_do_futex(cmd, &flags, uaddr2);
switch (cmd) {
case FUTEX_WAIT:
val3 = FUTEX_BITSET_MATCH_ANY;


@ -170,8 +170,10 @@ static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
unsigned long curr = (unsigned long)current;
unsigned long zero = 0UL;
if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) {
trace_android_vh_record_mutex_lock_starttime(current, jiffies);
return true;
}
return false;
}
@ -748,6 +750,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
*/
void __sched mutex_unlock(struct mutex *lock)
{
trace_android_vh_record_mutex_lock_starttime(current, 0);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
if (__mutex_unlock_fast(lock))
return;
@ -978,6 +981,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
lock_acquired(&lock->dep_map, ip);
if (ww_ctx)
ww_mutex_set_context_fastpath(ww, ww_ctx);
trace_android_vh_record_mutex_lock_starttime(current, jiffies);
preempt_enable();
return 0;
}
@ -1049,7 +1053,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
goto err;
}
trace_android_vh_mutex_start_check_new_owner(lock);
spin_unlock(&lock->wait_lock);
schedule_preempt_disabled();
@ -1097,6 +1100,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
spin_unlock(&lock->wait_lock);
preempt_enable();
trace_android_vh_record_mutex_lock_starttime(current, jiffies);
return 0;
err:
@ -1433,8 +1437,10 @@ int __sched mutex_trylock(struct mutex *lock)
#endif
locked = __mutex_trylock(lock);
if (locked)
if (locked) {
trace_android_vh_record_mutex_lock_starttime(current, jiffies);
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
}
return locked;
}


@ -10,6 +10,21 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <trace/hooks/dtask.h>
/*
* trace_android_vh_record_percpu_rwsem_lock_starttime would have to be called
* from include/linux/percpu-rwsem.h, but including trace/hooks/dtask.h there
* results in a build error. So we provide the wrapper
* _trace_android_vh_record_percpu_rwsem_lock_starttime() for percpu-rwsem.h to call.
*/
void _trace_android_vh_record_percpu_rwsem_lock_starttime(struct task_struct *tsk,
unsigned long settime)
{
trace_android_vh_record_percpu_rwsem_lock_starttime(tsk, settime);
}
EXPORT_SYMBOL_GPL(_trace_android_vh_record_percpu_rwsem_lock_starttime);
int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
const char *name, struct lock_class_key *key)
{
@ -237,11 +252,13 @@ void percpu_down_write(struct percpu_rw_semaphore *sem)
/* Wait for all active readers to complete. */
rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
trace_android_vh_record_percpu_rwsem_lock_starttime(current, jiffies);
}
EXPORT_SYMBOL_GPL(percpu_down_write);
void percpu_up_write(struct percpu_rw_semaphore *sem)
{
trace_android_vh_record_percpu_rwsem_lock_starttime(current, 0);
rwsem_release(&sem->dep_map, _RET_IP_);
/*


@ -1471,6 +1471,7 @@ static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@ -1519,6 +1520,8 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
if (ret)
mutex_release(&lock->dep_map, _RET_IP_);
else
trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
return ret;
}
@ -1563,6 +1566,8 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
rt_mutex_slowlock);
if (ret)
mutex_release(&lock->dep_map, _RET_IP_);
else
trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
return ret;
}
@ -1589,6 +1594,8 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
else
trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
return ret;
}
@ -1603,6 +1610,7 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, _RET_IP_);
rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
trace_android_vh_record_rtmutex_lock_starttime(current, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);


@ -279,6 +279,10 @@ static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
if (WARN_ON_ONCE(cnt < 0))
rwsem_set_nonspinnable(sem);
if ((cnt & RWSEM_READ_FAILED_MASK) == 0)
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return !(cnt & RWSEM_READ_FAILED_MASK);
}
@ -1021,9 +1025,11 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
raw_spin_unlock_irq(&sem->wait_lock);
wake_up_q(&wake_q);
}
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return sem;
} else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
/* rwsem_reader_phase_trylock() implies ACQUIRE on success */
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return sem;
}
@ -1104,6 +1110,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
__set_current_state(TASK_RUNNING);
trace_android_vh_rwsem_read_wait_finish(sem);
lockevent_inc(rwsem_rlock);
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return sem;
out_nolock:
@ -1150,6 +1157,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
rwsem_optimistic_spin(sem, true)) {
/* rwsem_optimistic_spin() implies ACQUIRE on success */
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return sem;
}
@ -1280,7 +1288,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
rwsem_disable_reader_optspin(sem, disable_rspin);
raw_spin_unlock_irq(&sem->wait_lock);
lockevent_inc(rwsem_wlock);
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return ret;
out_nolock:
@ -1396,6 +1404,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
tmp + RWSEM_READER_BIAS)) {
rwsem_set_reader_owned(sem);
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return 1;
}
} while (!(tmp & RWSEM_READ_FAILED_MASK));
@ -1410,10 +1419,12 @@ static inline void __down_write(struct rw_semaphore *sem)
long tmp = RWSEM_UNLOCKED_VALUE;
if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
RWSEM_WRITER_LOCKED)))
RWSEM_WRITER_LOCKED))) {
rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
else
} else {
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
rwsem_set_owner(sem);
}
}
static inline int __down_write_killable(struct rw_semaphore *sem)
@ -1425,6 +1436,7 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
return -EINTR;
} else {
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
rwsem_set_owner(sem);
}
return 0;
@ -1440,6 +1452,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
RWSEM_WRITER_LOCKED)) {
rwsem_set_owner(sem);
trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
return true;
}
return false;
@ -1455,6 +1468,7 @@ static inline void __up_read(struct rw_semaphore *sem)
DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
trace_android_vh_record_rwsem_lock_starttime(current, 0);
rwsem_clear_reader_owned(sem);
tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
@ -1481,6 +1495,7 @@ static inline void __up_write(struct rw_semaphore *sem)
DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
!rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
trace_android_vh_record_rwsem_lock_starttime(current, 0);
rwsem_clear_owner(sem);
tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
if (unlikely(tmp & RWSEM_FLAG_WAITERS))


@ -45,9 +45,6 @@
#include <net/sock.h>
#include <uapi/linux/pidfd.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/sched.h>
struct pid init_struct_pid = {
.count = REFCOUNT_INIT(1),
.tasks = {
@ -605,7 +602,6 @@ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
else
fd = -EINVAL;
trace_android_vh_pidfd_open(p);
put_pid(p);
return fd;
}


@ -1400,7 +1400,6 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
return sighand;
}
EXPORT_SYMBOL_GPL(__lock_task_sighand);
/*
* send signal info to all the members of a group


@ -152,6 +152,7 @@ static __refdata struct memblock_type *memblock_memory = &memblock.memory;
} while (0)
static int memblock_debug __initdata_memblock;
static bool memblock_nomap_remove __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
@ -1904,6 +1905,17 @@ static int __init early_memblock(char *p)
}
early_param("memblock", early_memblock);
static int __init early_memblock_nomap(char *str)
{
return kstrtobool(str, &memblock_nomap_remove);
}
early_param("android12_only.will_be_removed_soon.memblock_nomap_remove", early_memblock_nomap);
bool __init memblock_is_nomap_remove(void)
{
return memblock_nomap_remove;
}
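For reference, the new behaviour is opted into from the kernel command line; kstrtobool() accepts the usual boolean spellings (1/y/yes/true). A sketch of both sides, where the consumer shown is illustrative rather than quoted from this patch:

/* Kernel command line:
 *   android12_only.will_be_removed_soon.memblock_nomap_remove=1
 *
 * Illustrative consumer in the arch mapping code: skip NOMAP regions
 * entirely when the option is set. */
if (memblock_is_nomap_remove() && memblock_is_nomap(region))
	continue;	/* no linear mapping, no struct pages for this region */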
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
int order;


@ -1156,14 +1156,22 @@ int add_memory_subsection(int nid, u64 start, u64 size)
ret = arch_add_memory(nid, start, size, &params);
if (ret) {
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
memblock_remove(start, size);
pr_err("%s failed to add subsection start 0x%llx size 0x%llx\n",
__func__, start, size);
goto err_add_memory;
}
mem_hotplug_done();
return ret;
err_add_memory:
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
memblock_remove(start, size);
mem_hotplug_done();
release_memory_resource(res);
return ret;
}
EXPORT_SYMBOL_GPL(add_memory_subsection);


@ -4924,6 +4924,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
if (current->flags & PF_MEMALLOC)
goto nopage;
trace_android_vh_alloc_pages_reclaim_bypass(gfp_mask, order,
alloc_flags, ac->migratetype, &page);
if (page)
goto got_pg;
/* Try direct reclaim and then allocating */
page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
&did_some_progress);
@ -5031,6 +5037,11 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
goto retry;
}
fail:
trace_android_vh_alloc_pages_failure_bypass(gfp_mask, order,
alloc_flags, ac->migratetype, &page);
if (page)
goto got_pg;
warn_alloc(gfp_mask, ac->nodemask,
"page allocation failure: order:%u", order);
got_pg:


@ -8,7 +8,7 @@
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>
#include <linux/rcupdate.h>
/*
* struct page extension
*
@ -58,6 +58,10 @@
* can utilize this callback to initialize the state of it correctly.
*/
#ifdef CONFIG_SPARSEMEM
#define PAGE_EXT_INVALID (0x1)
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
@ -117,6 +121,49 @@ static inline struct page_ext *get_entry(void *base, unsigned long index)
return base + page_ext_size * index;
}
/**
* page_ext_get() - Get the extended information for a page.
* @page: The page we're interested in.
*
* Ensures that the page_ext will remain valid until page_ext_put()
* is called.
*
* Return: NULL if no page_ext exists for this page.
* Context: Any context. Caller may not sleep until they have called
* page_ext_put().
*/
struct page_ext *page_ext_get(struct page *page)
{
struct page_ext *page_ext;
rcu_read_lock();
page_ext = lookup_page_ext(page);
if (!page_ext) {
rcu_read_unlock();
return NULL;
}
return page_ext;
}
/**
* page_ext_put() - Working with page extended information is done.
* @page_ext: Page extended information received from page_ext_get().
*
* The page extended information of the page may not be valid after this
* function is called.
*
* Return: None.
* Context: Any context in which the corresponding page_ext_get() was called.
*/
void page_ext_put(struct page_ext *page_ext)
{
if (unlikely(!page_ext))
return;
rcu_read_unlock();
}
#if !defined(CONFIG_SPARSEMEM)
@ -131,6 +178,7 @@ struct page_ext *lookup_page_ext(const struct page *page)
unsigned long index;
struct page_ext *base;
WARN_ON_ONCE(!rcu_read_lock_held());
base = NODE_DATA(page_to_nid(page))->node_page_ext;
/*
* The sanity checks the page allocator does upon freeing a
@ -200,20 +248,27 @@ void __init page_ext_init_flatmem(void)
}
#else /* CONFIG_FLAT_NODE_MEM_MAP */
static bool page_ext_invalid(struct page_ext *page_ext)
{
return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
}
struct page_ext *lookup_page_ext(const struct page *page)
{
unsigned long pfn = page_to_pfn(page);
struct mem_section *section = __pfn_to_section(pfn);
struct page_ext *page_ext = READ_ONCE(section->page_ext);
WARN_ON_ONCE(!rcu_read_lock_held());
/*
* The sanity checks the page allocator does upon freeing a
* page can reach here before the page_ext arrays are
* allocated when feeding a range of pages to the allocator
* for the first time during bootup or memory hotplug.
*/
if (!section->page_ext)
if (page_ext_invalid(page_ext))
return NULL;
return get_entry(section->page_ext, pfn);
return get_entry(page_ext, pfn);
}
EXPORT_SYMBOL_GPL(lookup_page_ext);
@ -293,9 +348,30 @@ static void __free_page_ext(unsigned long pfn)
ms = __pfn_to_section(pfn);
if (!ms || !ms->page_ext)
return;
base = get_entry(ms->page_ext, pfn);
base = READ_ONCE(ms->page_ext);
/*
* page_ext here can be valid while doing the roll back
* operation in online_page_ext().
*/
if (page_ext_invalid(base))
base = (void *)base - PAGE_EXT_INVALID;
WRITE_ONCE(ms->page_ext, NULL);
base = get_entry(base, pfn);
free_page_ext(base);
ms->page_ext = NULL;
}
static void __invalidate_page_ext(unsigned long pfn)
{
struct mem_section *ms;
void *val;
ms = __pfn_to_section(pfn);
if (!ms || !ms->page_ext)
return;
val = (void *)ms->page_ext + PAGE_EXT_INVALID;
WRITE_ONCE(ms->page_ext, val);
}
static int __meminit online_page_ext(unsigned long start_pfn,
@ -338,6 +414,20 @@ static int __meminit offline_page_ext(unsigned long start_pfn,
start = SECTION_ALIGN_DOWN(start_pfn);
end = SECTION_ALIGN_UP(start_pfn + nr_pages);
/*
* Freeing of page_ext is done in 3 steps to avoid
* use-after-free of it:
* 1) Traverse all the sections and mark their page_ext
* as invalid.
* 2) Wait for all the existing users of page_ext who
* started before invalidation to finish.
* 3) Free the page_ext.
*/
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
__invalidate_page_ext(pfn);
synchronize_rcu();
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
__free_page_ext(pfn);
return 0;
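The three-step teardown above only works because every reader now brackets its access with page_ext_get()/page_ext_put(), i.e. an RCU read-side critical section. A condensed sketch of the two sides of the protocol (this is the pattern, not literal code from the patch):

/* Reader side (any context; must not sleep between get and put): */
struct page_ext *pe = page_ext_get(page);	/* rcu_read_lock() + lookup */
if (pe) {
	/* ... read or update the page_ext ... */
	page_ext_put(pe);			/* rcu_read_unlock() */
}

/* Writer side (memory offline), as implemented above:
 *   1) __invalidate_page_ext(pfn)  - tag ms->page_ext with PAGE_EXT_INVALID
 *   2) synchronize_rcu()           - wait out readers that saw the old pointer
 *   3) __free_page_ext(pfn)        - now safe to actually free the array
 */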


@ -173,7 +173,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
page_ext = lookup_page_ext(page);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
for (i = 0; i < (1 << order); i++) {
@ -183,6 +183,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
page_owner->free_ts_nsec = free_ts_nsec;
page_ext = page_ext_next(page_ext);
}
page_ext_put(page_ext);
}
static inline void __set_page_owner_handle(struct page *page,
@ -210,19 +211,21 @@ static inline void __set_page_owner_handle(struct page *page,
noinline void __set_page_owner(struct page *page, unsigned int order,
gfp_t gfp_mask)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext;
depot_stack_handle_t handle;
handle = save_stack(gfp_mask);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
handle = save_stack(gfp_mask);
__set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
page_ext_put(page_ext);
}
void __set_page_owner_migrate_reason(struct page *page, int reason)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
struct page_owner *page_owner;
if (unlikely(!page_ext))
@ -230,12 +233,13 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
page_owner = get_page_owner(page_ext);
page_owner->last_migrate_reason = reason;
page_ext_put(page_ext);
}
void __split_page_owner(struct page *page, unsigned int nr)
{
int i;
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
struct page_owner *page_owner;
if (unlikely(!page_ext))
@ -246,17 +250,25 @@ void __split_page_owner(struct page *page, unsigned int nr)
page_owner->order = 0;
page_ext = page_ext_next(page_ext);
}
page_ext_put(page_ext);
}
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
struct page_ext *old_ext = lookup_page_ext(oldpage);
struct page_ext *new_ext = lookup_page_ext(newpage);
struct page_ext *old_ext;
struct page_ext *new_ext;
struct page_owner *old_page_owner, *new_page_owner;
if (unlikely(!old_ext || !new_ext))
old_ext = page_ext_get(oldpage);
if (unlikely(!old_ext))
return;
new_ext = page_ext_get(newpage);
if (unlikely(!new_ext)) {
page_ext_put(old_ext);
return;
}
old_page_owner = get_page_owner(old_ext);
new_page_owner = get_page_owner(new_ext);
new_page_owner->order = old_page_owner->order;
@ -279,6 +291,8 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
*/
__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
page_ext_put(new_ext);
page_ext_put(old_ext);
}
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@ -335,12 +349,12 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
if (PageReserved(page))
continue;
page_ext = lookup_page_ext(page);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
continue;
if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
continue;
goto ext_put_continue;
page_owner = get_page_owner(page_ext);
page_mt = gfp_migratetype(page_owner->gfp_mask);
@ -351,9 +365,12 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
count[pageblock_mt]++;
pfn = block_end_pfn;
page_ext_put(page_ext);
break;
}
pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
page_ext_put(page_ext);
}
}
@ -432,7 +449,7 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
void __dump_page_owner(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get((void *)page);
struct page_owner *page_owner;
depot_stack_handle_t handle;
unsigned long *entries;
@ -451,6 +468,7 @@ void __dump_page_owner(struct page *page)
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
pr_alert("page_owner info is not present (never set?)\n");
page_ext_put(page_ext);
return;
}
@ -483,6 +501,7 @@ void __dump_page_owner(struct page *page)
if (page_owner->last_migrate_reason != -1)
pr_alert("page has been migrated, last migrate reason: %s\n",
migrate_reason_names[page_owner->last_migrate_reason]);
page_ext_put(page_ext);
}
static ssize_t
@ -508,6 +527,14 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
/* Find an allocated page */
for (; pfn < max_pfn; pfn++) {
/*
* This temporary page_owner copy lets us drop the RCU read lock
* (page_ext_put()) before print_page_owner(), which may sleep in
* copy_to_user() or GFP_KERNEL allocations.
*/
struct page_owner page_owner_tmp;
/*
* If the new page is in a new MAX_ORDER_NR_PAGES area,
* validate the area as existing, skip it if not
@ -530,7 +557,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
continue;
}
page_ext = lookup_page_ext(page);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
continue;
@ -539,14 +566,14 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
* because we don't hold the zone lock.
*/
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;
goto ext_put_continue;
/*
* Although we do have the info about past allocation of free
* pages, it's not relevant for current memory usage.
*/
if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
continue;
goto ext_put_continue;
page_owner = get_page_owner(page_ext);
@ -555,7 +582,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
* would inflate the stats.
*/
if (!IS_ALIGNED(pfn, 1 << page_owner->order))
continue;
goto ext_put_continue;
/*
* Access to page_ext->handle isn't synchronous so we should
@ -563,13 +590,17 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
*/
handle = READ_ONCE(page_owner->handle);
if (!handle)
continue;
goto ext_put_continue;
/* Record the next PFN to read in the file offset */
*ppos = (pfn - min_low_pfn) + 1;
page_owner_tmp = *page_owner;
page_ext_put(page_ext);
return print_page_owner(buf, count, pfn, page,
page_owner, handle);
&page_owner_tmp, handle);
ext_put_continue:
page_ext_put(page_ext);
}
return 0;
@ -627,18 +658,20 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
if (PageReserved(page))
continue;
page_ext = lookup_page_ext(page);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
continue;
/* Maybe overlapping zone */
if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;
goto ext_put_continue;
/* Found early allocated page */
__set_page_owner_handle(page, page_ext, early_handle,
0, 0);
count++;
ext_put_continue:
page_ext_put(page_ext);
}
cond_resched();
}


@ -162,7 +162,7 @@ void __reset_page_pinner(struct page *page, unsigned int order, bool free)
struct page_ext *page_ext;
int i;
page_ext = lookup_page_ext(page);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
@ -184,6 +184,7 @@ void __reset_page_pinner(struct page *page, unsigned int order, bool free)
clear_bit(PAGE_EXT_GET, &page_ext->flags);
page_ext = page_ext_next(page_ext);
}
page_ext_put(page_ext);
}
static inline void __set_page_pinner_handle(struct page *page,
@ -206,14 +207,16 @@ static inline void __set_page_pinner_handle(struct page *page,
noinline void __set_page_pinner(struct page *page, unsigned int order)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext;
depot_stack_handle_t handle;
handle = save_stack(GFP_NOWAIT|__GFP_NOWARN);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
handle = save_stack(GFP_NOWAIT|__GFP_NOWARN);
__set_page_pinner_handle(page, page_ext, handle, order);
page_ext_put(page_ext);
}
static ssize_t
@ -279,7 +282,7 @@ print_page_pinner(bool longterm, char __user *buf, size_t count, struct captured
void __dump_page_pinner(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
struct page_pinner *page_pinner;
depot_stack_handle_t handle;
unsigned long *entries;
@ -300,6 +303,7 @@ void __dump_page_pinner(struct page *page)
count = atomic_read(&page_pinner->count);
if (!count) {
pr_alert("page_pinner info is not present (never set?)\n");
page_ext_put(page_ext);
return;
}
@ -323,11 +327,12 @@ void __dump_page_pinner(struct page *page)
nr_entries = stack_depot_fetch(handle, &entries);
stack_trace_print(entries, nr_entries, 0);
}
page_ext_put(page_ext);
}
void __page_pinner_migration_failed(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
struct captured_pinner record;
unsigned long flags;
unsigned int idx;
@ -335,9 +340,12 @@ void __page_pinner_migration_failed(struct page *page)
if (unlikely(!page_ext))
return;
if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags))
if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) {
page_ext_put(page_ext);
return;
}
page_ext_put(page_ext);
record.handle = save_stack(GFP_NOWAIT|__GFP_NOWARN);
record.ts_usec = ktime_to_us(ktime_get_boottime());
capture_page_state(page, &record);
@ -359,10 +367,11 @@ void __page_pinner_mark_migration_failed_pages(struct list_head *page_list)
/* The page will be freed by putback_movable_pages soon */
if (page_count(page) == 1)
continue;
page_ext = lookup_page_ext(page);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
continue;
__set_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags);
page_ext_put(page_ext);
__page_pinner_migration_failed(page);
}
}


@ -459,8 +459,6 @@ static void ondemand_readahead(struct readahead_control *ractl,
if (req_size > max_pages && bdi->io_pages > max_pages)
max_pages = min(req_size, bdi->io_pages);
trace_android_vh_ra_tuning_max_page(ractl, &max_pages);
/*
* start of file
*/


@ -525,6 +525,7 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
struct anon_vma *anon_vma = NULL;
struct anon_vma *root_anon_vma;
unsigned long anon_mapping;
bool success = false;
rcu_read_lock();
anon_mapping = (unsigned long)READ_ONCE(page->mapping);
@ -547,7 +548,11 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
}
goto out;
}
trace_android_vh_do_page_trylock(page, NULL, NULL, &success);
if (success) {
anon_vma = NULL;
goto out;
}
/* trylock failed, we got to sleep */
if (!atomic_inc_not_zero(&anon_vma->refcount)) {
anon_vma = NULL;
@ -1981,6 +1986,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
struct address_space *mapping = page_mapping(page);
pgoff_t pgoff_start, pgoff_end;
struct vm_area_struct *vma;
bool got_lock = false, success = false;
/*
* The page lock not only makes sure that page->mapping cannot
@ -1995,8 +2001,16 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
pgoff_start = page_to_pgoff(page);
pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
if (!locked)
i_mmap_lock_read(mapping);
if (!locked) {
trace_android_vh_do_page_trylock(page,
&mapping->i_mmap_rwsem, &got_lock, &success);
if (success) {
if (!got_lock)
return;
} else {
i_mmap_lock_read(mapping);
}
}
vma_interval_tree_foreach(vma, &mapping->i_mmap,
pgoff_start, pgoff_end) {
unsigned long address = vma_address(page, vma);


@ -1021,15 +1021,19 @@ static enum page_references page_check_references(struct page *page,
int referenced_ptes, referenced_page;
unsigned long vm_flags;
bool should_protect = false;
bool trylock_fail = false;
trace_android_vh_page_should_be_protected(page, &should_protect);
if (unlikely(should_protect))
return PAGEREF_ACTIVATE;
trace_android_vh_page_trylock_set(page);
referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
&vm_flags);
referenced_page = TestClearPageReferenced(page);
trace_android_vh_page_trylock_get_result(page, &trylock_fail);
if (trylock_fail)
return PAGEREF_KEEP;
/*
* Mlock lost the isolation race with us. Let try_to_unmap()
* move the page to the unevictable list.
@ -1341,6 +1345,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
if (unlikely(PageTransHuge(page)))
flags |= TTU_SPLIT_HUGE_PMD;
trace_android_vh_page_trylock_set(page);
if (!try_to_unmap(page, flags)) {
stat->nr_unmap_fail += nr_pages;
if (!was_swapbacked && PageSwapBacked(page))
@ -1451,6 +1456,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
* increment nr_reclaimed here (and
* leave it off the LRU).
*/
trace_android_vh_page_trylock_clear(page);
nr_reclaimed++;
continue;
}
@ -1484,6 +1490,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
* Is there need to periodically free_page_list? It would
* appear not as the counts should be low
*/
trace_android_vh_page_trylock_clear(page);
if (unlikely(PageTransHuge(page)))
destroy_compound_page(page);
else
@ -1999,6 +2006,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
return 0;
nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
trace_android_vh_handle_failed_page_trylock(&page_list);
spin_lock_irq(&pgdat->lru_lock);
@ -2011,7 +2019,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
__count_vm_events(item, nr_reclaimed);
__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
spin_unlock_irq(&pgdat->lru_lock);
mem_cgroup_uncharge_list(&page_list);
@ -2107,7 +2114,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
trace_android_vh_page_referenced_check_bypass(page, nr_to_scan, lru, &bypass);
if (bypass)
goto skip_page_referenced;
trace_android_vh_page_trylock_set(page);
if (page_referenced(page, 0, sc->target_mem_cgroup,
&vm_flags)) {
/*
@ -2120,11 +2127,13 @@ static void shrink_active_list(unsigned long nr_to_scan,
* so we ignore them here.
*/
if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
trace_android_vh_page_trylock_clear(page);
nr_rotated += thp_nr_pages(page);
list_add(&page->lru, &l_active);
continue;
}
}
trace_android_vh_page_trylock_clear(page);
skip_page_referenced:
ClearPageActive(page); /* we are de-activating */
SetPageWorkingset(page);
@ -2206,6 +2215,7 @@ unsigned long reclaim_pages(struct list_head *page_list)
return nr_reclaimed;
}
EXPORT_SYMBOL_GPL(reclaim_pages);
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
struct lruvec *lruvec, struct scan_control *sc)
@ -2387,7 +2397,6 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
denominator = ap + fp;
out:
trace_android_vh_tune_scan_type((char *)(&scan_balance));
trace_android_vh_tune_memcg_scan_type(memcg, (char *)(&scan_balance));
for_each_evictable_lru(lru) {
int file = is_file_lru(lru);
unsigned long lruvec_size;