Snap for 11749377 from 3e84e27a4e to android14-6.1-keystone-qcom-release

Change-Id: I96f45e27e80b8769dad0377c635ed3a83dd976ce
Signed-off-by: Coastguard Worker <android-build-coastguard-worker@google.com>
Android Build Coastguard Worker 2024-04-23 08:00:48 +00:00
commit 7cb0122d66
60 changed files with 3308 additions and 473 deletions

View File

@ -104,11 +104,13 @@ filegroup(
"android/abi_gki_aarch64_db845c",
"android/abi_gki_aarch64_exynos",
"android/abi_gki_aarch64_exynosauto",
"android/abi_gki_aarch64_fips140",
"android/abi_gki_aarch64_galaxy",
"android/abi_gki_aarch64_honda",
"android/abi_gki_aarch64_honor",
"android/abi_gki_aarch64_imx",
"android/abi_gki_aarch64_meizu",
"android/abi_gki_aarch64_moto",
"android/abi_gki_aarch64_mtk",
"android/abi_gki_aarch64_nothing",
"android/abi_gki_aarch64_oplus",
@ -718,9 +720,16 @@ kernel_build(
outs = [],
base_kernel = ":kernel_aarch64",
build_config = "build.config.gki.aarch64.fips140",
kmi_symbol_list = "android/abi_gki_aarch64_fips140",
module_outs = ["crypto/fips140.ko"],
)
kernel_abi(
name = "fips140_abi",
kernel_build = ":fips140",
kmi_symbol_list_add_only = True,
)
copy_to_dist_dir(
name = "fips140_dist",
data = [
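For context, the new targets in this hunk would typically be exercised through Kleaf. The invocations below are a sketch based on the usual targets that kernel_abi() and copy_to_dist_dir() generate (fips140_abi_update_symbol_list, fips140_dist); the exact names and the dist directory are assumptions, not something this excerpt spells out.
    # Build the fips140 module and copy its artifacts to a dist directory (path is a placeholder)
    tools/bazel run //common:fips140_dist -- --dist_dir=out/dist
    # Refresh android/abi_gki_aarch64_fips140 via the kernel_abi() target added above
    tools/bazel run //common:fips140_abi_update_symbol_list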

View File

@ -686,29 +686,30 @@ Description: Support configuring fault injection type, should be
enabled with fault_injection option, fault type value
is shown below, it supports single or combined type.
=================== ===========
Type_Name Type_Value
=================== ===========
FAULT_KMALLOC 0x000000001
FAULT_KVMALLOC 0x000000002
FAULT_PAGE_ALLOC 0x000000004
FAULT_PAGE_GET 0x000000008
FAULT_ALLOC_BIO 0x000000010 (obsolete)
FAULT_ALLOC_NID 0x000000020
FAULT_ORPHAN 0x000000040
FAULT_BLOCK 0x000000080
FAULT_DIR_DEPTH 0x000000100
FAULT_EVICT_INODE 0x000000200
FAULT_TRUNCATE 0x000000400
FAULT_READ_IO 0x000000800
FAULT_CHECKPOINT 0x000001000
FAULT_DISCARD 0x000002000
FAULT_WRITE_IO 0x000004000
FAULT_SLAB_ALLOC 0x000008000
FAULT_DQUOT_INIT 0x000010000
FAULT_LOCK_OP 0x000020000
FAULT_BLKADDR 0x000040000
=================== ===========
=========================== ===========
Type_Name Type_Value
=========================== ===========
FAULT_KMALLOC 0x000000001
FAULT_KVMALLOC 0x000000002
FAULT_PAGE_ALLOC 0x000000004
FAULT_PAGE_GET 0x000000008
FAULT_ALLOC_BIO 0x000000010 (obsolete)
FAULT_ALLOC_NID 0x000000020
FAULT_ORPHAN 0x000000040
FAULT_BLOCK 0x000000080
FAULT_DIR_DEPTH 0x000000100
FAULT_EVICT_INODE 0x000000200
FAULT_TRUNCATE 0x000000400
FAULT_READ_IO 0x000000800
FAULT_CHECKPOINT 0x000001000
FAULT_DISCARD 0x000002000
FAULT_WRITE_IO 0x000004000
FAULT_SLAB_ALLOC 0x000008000
FAULT_DQUOT_INIT 0x000010000
FAULT_LOCK_OP 0x000020000
FAULT_BLKADDR_VALIDITY 0x000040000
FAULT_BLKADDR_CONSISTENCE 0x000080000
=========================== ===========
What: /sys/fs/f2fs/<disk>/discard_io_aware_gran
Date: January 2023

View File

@ -184,29 +184,30 @@ fault_type=%d Support configuring fault injection type, should be
enabled with fault_injection option, fault type value
is shown below, it supports single or combined type.
=================== ===========
Type_Name Type_Value
=================== ===========
FAULT_KMALLOC 0x000000001
FAULT_KVMALLOC 0x000000002
FAULT_PAGE_ALLOC 0x000000004
FAULT_PAGE_GET 0x000000008
FAULT_ALLOC_BIO 0x000000010 (obsolete)
FAULT_ALLOC_NID 0x000000020
FAULT_ORPHAN 0x000000040
FAULT_BLOCK 0x000000080
FAULT_DIR_DEPTH 0x000000100
FAULT_EVICT_INODE 0x000000200
FAULT_TRUNCATE 0x000000400
FAULT_READ_IO 0x000000800
FAULT_CHECKPOINT 0x000001000
FAULT_DISCARD 0x000002000
FAULT_WRITE_IO 0x000004000
FAULT_SLAB_ALLOC 0x000008000
FAULT_DQUOT_INIT 0x000010000
FAULT_LOCK_OP 0x000020000
FAULT_BLKADDR 0x000040000
=================== ===========
=========================== ===========
Type_Name Type_Value
=========================== ===========
FAULT_KMALLOC 0x000000001
FAULT_KVMALLOC 0x000000002
FAULT_PAGE_ALLOC 0x000000004
FAULT_PAGE_GET 0x000000008
FAULT_ALLOC_BIO 0x000000010 (obsolete)
FAULT_ALLOC_NID 0x000000020
FAULT_ORPHAN 0x000000040
FAULT_BLOCK 0x000000080
FAULT_DIR_DEPTH 0x000000100
FAULT_EVICT_INODE 0x000000200
FAULT_TRUNCATE 0x000000400
FAULT_READ_IO 0x000000800
FAULT_CHECKPOINT 0x000001000
FAULT_DISCARD 0x000002000
FAULT_WRITE_IO 0x000004000
FAULT_SLAB_ALLOC 0x000008000
FAULT_DQUOT_INIT 0x000010000
FAULT_LOCK_OP 0x000020000
FAULT_BLKADDR_VALIDITY 0x000040000
FAULT_BLKADDR_CONSISTENCE 0x000080000
=========================== ===========
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
writes towards main area.
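The "single or combined type" wording above means the listed values are bit flags that are OR-ed into one fault_type mask. A minimal illustration (not part of this patch) under the assumption that CONFIG_F2FS_FAULT_INJECTION is enabled; the device and mount point are placeholders:
    # FAULT_KMALLOC only (0x1), with injection rate 1
    mount -t f2fs -o fault_injection=1,fault_type=1 /dev/sdX /mnt/f2fs
    # FAULT_KMALLOC | FAULT_KVMALLOC (0x1 | 0x2 = 0x3), passed as decimal 3
    mount -t f2fs -o fault_injection=1,fault_type=3 /dev/sdX /mnt/f2fs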

File diff suppressed because it is too large

View File

@ -1,9 +1,11 @@
[abi_symbol_list]
# commonly used symbols
alloc_chrdev_region
__alloc_pages
__alloc_percpu
alloc_workqueue
alt_cb_patch_nops
anon_inode_getfile
__arch_copy_from_user
__arch_copy_to_user
arm64_use_ng_mappings
@ -13,6 +15,7 @@
atomic_notifier_chain_unregister
__bitmap_and
bitmap_find_free_region
bitmap_parselist
bitmap_release_region
__bitmap_weight
blocking_notifier_call_chain
@ -22,19 +25,25 @@
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
bpf_trace_run5
bpf_trace_run6
call_rcu
cancel_delayed_work
cancel_delayed_work_sync
cancel_work_sync
cdev_add
cdev_del
cdev_init
__check_object_size
__class_create
class_destroy
__class_register
class_unregister
clk_disable
clk_enable
clk_get
clk_get_rate
__clk_is_enabled
clk_prepare
clk_set_rate
clk_unprepare
@ -52,6 +61,7 @@
cpu_pm_register_notifier
__cpu_possible_mask
crc32_le
_ctype
debugfs_create_dir
debugfs_create_file
debugfs_create_u32
@ -116,6 +126,8 @@
devm_snd_soc_register_component
devm_thermal_of_zone_register
_dev_notice
dev_pm_opp_put
_dev_printk
devres_add
__devres_alloc_node
dev_set_name
@ -141,6 +153,7 @@
dma_fence_release
dma_fence_remove_callback
dma_fence_signal
dma_fence_signal_locked
dma_fence_wait_timeout
dma_free_attrs
dma_heap_buffer_alloc
@ -172,6 +185,8 @@
drm_atomic_helper_connector_destroy_state
drm_atomic_helper_connector_duplicate_state
drm_atomic_helper_connector_reset
__drm_atomic_helper_private_obj_duplicate_state
drm_atomic_private_obj_init
drm_bridge_add
drm_bridge_remove
drm_compat_ioctl
@ -226,7 +241,6 @@
drm_kms_helper_poll_init
drmm_mode_config_init
drm_mode_config_cleanup
drm_mode_copy
drm_mode_create
drm_mode_duplicate
drm_mode_object_put
@ -235,6 +249,7 @@
drm_modeset_unlock
drm_mode_vrefresh
drm_object_attach_property
drm_object_property_set_value
drm_open
drm_poll
drm_prime_gem_destroy
@ -285,7 +300,6 @@
gpiod_set_raw_value
gpiod_set_value_cansleep
gpio_free
gpio_request
gpio_request_one
gpio_to_desc
handle_edge_irq
@ -312,6 +326,7 @@
iomem_resource
iommu_get_domain_for_dev
iommu_map
iommu_map_sg
iommu_register_device_fault_handler
iommu_unmap
iommu_unregister_device_fault_handler
@ -390,7 +405,9 @@
loops_per_jiffy
mdiobus_read
mdiobus_write
memchr_inv
memcpy
__memcpy_fromio
memdup_user
mem_section
memset
@ -507,6 +524,7 @@
platform_get_irq_optional
platform_get_resource
platform_get_resource_byname
__pm_relax
__pm_runtime_disable
pm_runtime_enable
pm_runtime_forbid
@ -518,7 +536,9 @@
__pm_runtime_set_status
__pm_runtime_suspend
__pm_runtime_use_autosuspend
__pm_stay_awake
pm_wakeup_dev_event
pm_wakeup_ws_event
preempt_schedule
preempt_schedule_notrace
prepare_to_wait_event
@ -541,6 +561,7 @@
_raw_spin_unlock_irqrestore
_raw_write_lock_irqsave
_raw_write_unlock_irqrestore
rb_next
rcu_barrier
rdev_get_drvdata
rdev_get_id
@ -548,6 +569,7 @@
regcache_mark_dirty
regcache_sync
__register_chrdev
register_chrdev_region
register_console
register_pm_notifier
register_reboot_notifier
@ -579,6 +601,7 @@
seq_puts
seq_read
set_cpus_allowed_ptr
set_normalized_timespec64
sg_alloc_table
sg_free_table
sg_init_table
@ -589,6 +612,7 @@
simple_attr_write
simple_open
simple_read_from_buffer
simple_strtoul
simple_write_to_buffer
single_open
single_release
@ -631,6 +655,7 @@
strstr
__sw_hweight32
sync_file_create
synchronize_irq
syscon_regmap_lookup_by_phandle
sysfs_create_file_ns
sysfs_create_group
@ -651,6 +676,7 @@
ttm_bo_mmap_obj
__udelay
__unregister_chrdev
unregister_chrdev_region
unregister_pm_notifier
up
up_write
@ -689,6 +715,7 @@
video_unregister_device
vmalloc
vmap
vscnprintf
vsnprintf
vunmap
vzalloc
@ -697,6 +724,8 @@
wait_for_completion_timeout
__wake_up
wake_up_process
wakeup_source_register
wakeup_source_unregister
__warn_printk
ww_mutex_lock
ww_mutex_unlock
@ -712,6 +741,7 @@
cec_transmit_done_ts
cec_unregister_adapter
drm_bridge_hpd_notify
drm_mode_copy
hdmi_avi_infoframe_init
hdmi_avi_infoframe_pack
i2c_new_ancillary_device
@ -749,15 +779,12 @@
# required by drm_display_helper.ko
devm_backlight_device_register
_dev_printk
drm_atomic_get_connector_state
drm_atomic_get_crtc_state
drm_atomic_get_new_private_obj_state
drm_atomic_get_old_private_obj_state
drm_atomic_get_private_obj_state
__drm_atomic_helper_private_obj_duplicate_state
drm_atomic_private_obj_fini
drm_atomic_private_obj_init
drm_crtc_commit_wait
drm_crtc_wait_one_vblank
__drm_debug
@ -766,9 +793,7 @@
drm_edid_duplicate
drm_edid_get_monitor_name
drm_modeset_lock_single_interruptible
drm_object_property_set_value
__drm_printfn_debug
memchr_inv
__sw_hweight8
system_long_wq
@ -794,6 +819,7 @@
nr_irqs
proc_create
register_die_notifier
register_module_notifier
register_restart_handler
return_address
rtc_time64_to_tm
@ -855,9 +881,7 @@
platform_device_alloc
platform_device_del
platform_device_put
__pm_relax
pm_runtime_allow
__pm_stay_awake
typec_register_partner
typec_register_port
typec_set_data_role
@ -867,11 +891,8 @@
typec_unregister_port
usb_gadget_set_state
usb_otg_state_string
wakeup_source_register
wakeup_source_unregister
# required by exynos-acme.ko
bitmap_parselist
__cpu_active_mask
cpufreq_cpu_get
cpufreq_cpu_get_raw
@ -890,9 +911,6 @@
get_cpu_device
system_state
# required by exynos-adv-tracer-s2d.ko
simple_strtoul
# required by exynos-chipid_v2.ko
of_find_node_opts_by_path
soc_device_register
@ -904,7 +922,6 @@
__cpu_present_mask
# required by exynos-debug-test.ko
_ctype
remove_cpu
smp_call_function
smp_call_function_single
@ -961,6 +978,7 @@
drm_atomic_helper_update_plane
drm_atomic_helper_wait_for_vblanks
drm_bridge_attach
drm_connector_set_path_property
drm_crtc_arm_vblank_event
drm_crtc_handle_vblank
drm_crtc_init_with_planes
@ -978,9 +996,11 @@
drm_gem_vm_open
drm_get_connector_status_name
drm_get_format_info
drm_helper_probe_detect
drm_mode_config_helper_resume
drm_mode_config_helper_suspend
drm_mode_config_reset
drm_mode_object_find
drm_mode_object_get
drm_of_component_match_add
drm_of_crtc_port_mask
@ -996,6 +1016,7 @@
drm_plane_create_zpos_immutable_property
drm_plane_create_zpos_property
__drm_printfn_info
drm_property_blob_put
drm_property_create
drm_property_create_blob
drm_rotation_simplify
@ -1004,7 +1025,6 @@
drm_writeback_connector_init
drm_writeback_queue_job
drm_writeback_signal_completion
flush_signals
gpiod_to_irq
mipi_dsi_host_register
mipi_dsi_host_unregister
@ -1012,6 +1032,7 @@
of_drm_find_panel
of_find_i2c_device_by_node
of_graph_get_endpoint_by_regs
of_graph_get_endpoint_count
of_graph_get_next_endpoint
of_graph_get_port_by_id
of_graph_get_remote_port
@ -1023,8 +1044,6 @@
platform_find_device_by_driver
seq_hex_dump
seq_release
strnstr
synchronize_irq
vmalloc_to_page
# required by exynos_mfc.ko
@ -1032,7 +1051,6 @@
dma_buf_begin_cpu_access_partial
dma_buf_end_cpu_access_partial
hex_dump_to_buffer
iommu_map_sg
__sw_hweight64
_totalram_pages
@ -1091,8 +1109,6 @@
hrtimer_try_to_cancel
# required by hwmon.ko
__class_register
class_unregister
device_property_read_string
devres_free
devres_release
@ -1112,8 +1128,6 @@
i2c_smbus_xfer
i2c_transfer_buffer_flags
i2c_verify_client
register_chrdev_region
unregister_chrdev_region
# required by lontium-lt9611.ko
drm_hdmi_avi_infoframe_from_display_mode
@ -1121,6 +1135,28 @@
regmap_multi_reg_write
regulator_set_load
# required by npu.ko
__bitmap_clear
__bitmap_set
cdev_alloc
console_printk
cpuidle_pause_and_lock
cpuidle_resume_and_unlock
devm_clk_put
dev_pm_opp_find_freq_ceil
dev_pm_opp_find_freq_floor
dma_fence_free
fget
_find_next_zero_bit
firmware_request_nowarn
__kfifo_in
__kfifo_out
of_irq_parse_one
rb_erase
rb_insert_color
strrchr
sysfs_remove_file_ns
# required by panel-samsung-dummy.ko
drm_panel_add
drm_panel_init
@ -1138,7 +1174,7 @@
phylink_mii_c22_pcs_encode_advertisement
# required by phy-exynosauto-usbdrd-super.ko
__clk_is_enabled
gpio_request
# required by pinctrl-samsung-core.ko
device_get_next_child_node
@ -1209,7 +1245,6 @@
# required by rtc-s2vps02.ko
devm_rtc_device_register
pm_wakeup_ws_event
rtc_update_irq
rtc_valid_tm
@ -1241,7 +1276,6 @@
regulator_set_voltage
unregister_reboot_notifier
vb2_mmap
vscnprintf
# required by samsung-bridge-dummy.ko
drm_atomic_helper_bridge_propagate_bus_fmt
@ -1257,7 +1291,6 @@
# required by samsung_dma_heap.ko
adjust_managed_page_count
anon_inode_getfile
deferred_free
dma_buf_get_flags
dmabuf_page_pool_alloc
@ -1298,8 +1331,11 @@
# required by scaler.ko
clk_set_parent
devm_get_free_pages
dma_fence_default_wait
dma_heap_buffer_free
flush_signals
kill_pid
sync_file_get_fence
v4l2_ctrl_handler_free
v4l2_ctrl_handler_init_class
@ -1334,14 +1370,14 @@
backlight_device_unregister
__bitmap_andnot
__bitmap_xor
bpf_trace_run5
bpf_trace_run9
capable
devfreq_recommended_opp
devfreq_resume_device
devfreq_suspend_device
__devm_drm_dev_alloc
devm_iounmap
dev_pm_opp_get_voltage
dev_pm_opp_put
dev_pm_qos_update_request
dma_buf_dynamic_attach
dma_buf_move_notify
@ -1352,7 +1388,6 @@
dma_fence_chain_walk
dma_fence_get_status
dma_fence_get_stub
dma_fence_signal_locked
dma_fence_wait_any_timeout
dma_get_required_mask
dma_resv_add_fence
@ -1433,7 +1468,6 @@
jiffies64_to_msecs
jiffies_to_usecs
kobject_put
__memcpy_fromio
__memcpy_toio
memremap
memunmap
@ -1475,12 +1509,10 @@
__rb_erase_color
rb_first_postorder
__rb_insert_augmented
rb_next
rb_next_postorder
__rcu_read_lock
__rcu_read_unlock
seq_write
set_normalized_timespec64
sg_alloc_table_from_pages_segment
si_meminfo
sysfs_create_files
@ -1547,12 +1579,13 @@
snd_soc_jack_report
# required by snd-soc-samsung-abox-core.ko
__devm_regmap_init
devm_ioremap_wc
__devm_regmap_init_mmio_clk
__devm_request_region
dma_mmap_attrs
irq_set_affinity
kthread_bind
of_property_match_string
__platform_register_drivers
platform_unregister_drivers
pm_runtime_no_callbacks
@ -1569,10 +1602,10 @@
snd_soc_dapm_mux_update_power
snd_soc_dapm_new_control
snd_soc_dapm_new_controls
snd_soc_new_compress
snd_soc_set_runtime_hwparams
snd_soc_unregister_component
stpcpy
__tasklet_hi_schedule
# required by snd-soc-tas6424.ko
regcache_cache_only
@ -1748,14 +1781,20 @@
xsk_uses_need_wakeup
# required by sxgmac.ko
alloc_chrdev_region
cdev_add
gpiod_direction_output
gpiod_set_value
# required by ufs-exynosauto-core.ko
__crypto_memneq
devm_blk_crypto_profile_init
dmam_free_coherent
scsi_dma_unmap
scsi_done
__traceiter_android_vh_ufs_fill_prdt
__traceiter_android_vh_ufs_prepare_command
__tracepoint_android_vh_ufs_fill_prdt
__tracepoint_android_vh_ufs_prepare_command
trace_print_symbols_seq
ufshcd_auto_hibern8_update
ufshcd_config_pwr_mode
ufshcd_dme_get_attr
@ -1774,10 +1813,8 @@
console_suspend_enabled
current_work
default_wake_function
__devm_regmap_init
dma_buf_fd
drm_connector_set_path_property
drm_helper_probe_detect
drm_property_blob_put
irq_create_of_mapping
irq_dispose_mapping
irq_find_matching_fwspec
@ -1791,7 +1828,6 @@
nonseekable_open
of_clk_del_provider
of_find_node_by_type
of_graph_get_endpoint_count
of_irq_find_parent
pci_disable_device
pci_disable_msi
@ -1809,8 +1845,10 @@
remove_proc_entry
remove_wait_queue
sigprocmask
strnstr
sysfs_create_bin_file
sysfs_remove_bin_file
__tasklet_hi_schedule
__task_pid_nr_ns
thermal_of_zone_register
thermal_of_zone_unregister

View File

@ -0,0 +1,145 @@
[abi_symbol_list]
# commonly used symbols
module_layout
__put_task_struct
# required by fips140.ko
aead_register_instance
ahash_register_instance
arch_timer_read_counter
bcmp
complete_all
completion_done
cpu_have_feature
crypto_aead_decrypt
crypto_aead_encrypt
crypto_aead_setauthsize
crypto_aead_setkey
crypto_ahash_finup
crypto_ahash_setkey
crypto_alg_list
crypto_alg_sem
crypto_alloc_aead
crypto_alloc_base
crypto_alloc_rng
crypto_alloc_shash
crypto_alloc_skcipher
crypto_attr_alg_name
crypto_check_attr_type
crypto_cipher_encrypt_one
crypto_cipher_setkey
crypto_destroy_tfm
crypto_drop_spawn
crypto_get_default_null_skcipher
crypto_grab_aead
crypto_grab_ahash
crypto_grab_shash
crypto_grab_skcipher
crypto_grab_spawn
crypto_inst_setname
crypto_put_default_null_skcipher
crypto_register_aead
crypto_register_aeads
crypto_register_ahash
crypto_register_ahashes
crypto_register_alg
crypto_register_algs
crypto_register_rng
crypto_register_rngs
crypto_register_shash
crypto_register_shashes
crypto_register_skcipher
crypto_register_skciphers
crypto_register_template
crypto_register_templates
crypto_remove_spawns
crypto_req_done
crypto_rng_reset
crypto_shash_alg_has_setkey
crypto_shash_digest
crypto_shash_final
crypto_shash_finup
crypto_shash_setkey
crypto_shash_tfm_digest
crypto_shash_update
crypto_skcipher_decrypt
crypto_skcipher_encrypt
crypto_skcipher_setkey
crypto_spawn_tfm
crypto_spawn_tfm2
crypto_unregister_aead
crypto_unregister_alg
crypto_unregister_rng
crypto_unregister_rngs
crypto_unregister_shash
crypto_unregister_shashes
crypto_unregister_skciphers
crypto_unregister_template
crypto_unregister_templates
down_write
fortify_panic
fpsimd_context_busy
get_random_bytes
__init_swait_queue_head
irq_stat
jiffies
kasan_flag_enabled
kernel_neon_begin
kernel_neon_end
kfree
kfree_sensitive
__kmalloc
kmalloc_caches
kmalloc_trace
kmemdup
ktime_get
__list_add_valid
__list_del_entry_valid
memcpy
memset
__mutex_init
mutex_lock
mutex_unlock
panic
preempt_schedule
preempt_schedule_notrace
_printk
___ratelimit
_raw_spin_lock
_raw_spin_unlock
refcount_warn_saturate
rng_is_initialized
scatterwalk_ffwd
scatterwalk_map_and_copy
sg_init_one
sg_init_table
sg_next
shash_free_singlespawn_instance
shash_register_instance
skcipher_alloc_instance_simple
skcipher_register_instance
skcipher_walk_aead_decrypt
skcipher_walk_aead_encrypt
skcipher_walk_done
skcipher_walk_virt
snprintf
__stack_chk_fail
strcmp
strlcat
strlen
strncmp
strnlen
strscpy
__traceiter_android_vh_aes_decrypt
__traceiter_android_vh_aes_encrypt
__traceiter_android_vh_aes_expandkey
__traceiter_android_vh_sha256
__tracepoint_android_vh_aes_decrypt
__tracepoint_android_vh_aes_encrypt
__tracepoint_android_vh_aes_expandkey
__tracepoint_android_vh_sha256
tracepoint_probe_register
up_write
wait_for_completion
xa_load
xa_store

View File

@ -0,0 +1,3 @@
[abi_symbol_list]
__traceiter_android_vh_tune_mmap_readaround
__tracepoint_android_vh_tune_mmap_readaround

View File

@ -171,6 +171,7 @@
clk_hw_get_rate
clk_hw_is_enabled
clk_hw_is_prepared
clk_hw_register
clk_hw_round_rate
clk_hw_set_parent
__clk_is_enabled
@ -188,6 +189,7 @@
clk_set_parent
clk_set_rate
clk_unprepare
clk_unregister
clockevents_config_and_register
clocks_calc_mult_shift
clocksource_mmio_init
@ -361,6 +363,7 @@
dev_close_many
dev_coredumpv
dev_driver_string
_dev_emerg
_dev_err
dev_err_probe
dev_fetch_sw_netstats
@ -473,6 +476,7 @@
dev_mc_unsync
devm_devfreq_add_device
devm_devfreq_register_notifier
devm_devfreq_register_opp_notifier
devm_devfreq_remove_device
devm_devfreq_unregister_notifier
devm_devfreq_unregister_opp_notifier
@ -679,6 +683,7 @@
dma_resv_iter_first_unlocked
dma_resv_iter_next_unlocked
dma_resv_reserve_fences
dma_resv_wait_timeout
dma_run_dependencies
dma_set_coherent_mask
dma_set_mask
@ -828,6 +833,7 @@
drm_kms_helper_poll_disable
drm_kms_helper_poll_enable
drm_kms_helper_poll_fini
drm_kms_helper_poll_init
drmm_mode_config_init
drm_mode_config_cleanup
drm_mode_config_reset
@ -881,6 +887,7 @@
dst_cache_set_ip4
dst_cache_set_ip6
dst_release
dummy_irq_chip
dump_stack
efi
em_cpu_get
@ -911,6 +918,7 @@
extcon_get_edev_by_phandle
extcon_get_state
extcon_set_state_sync
fasync_helper
__fdget
fd_install
fget
@ -999,7 +1007,11 @@
genlmsg_put
genl_register_family
genl_unregister_family
__genphy_config_aneg
genphy_read_status
genphy_resume
genphy_soft_reset
genphy_suspend
gen_pool_add_owner
gen_pool_alloc_algo_owner
gen_pool_avail
@ -1060,6 +1072,7 @@
gpiod_direction_input
gpiod_direction_output
gpiod_direction_output_raw
gpiod_get_optional
gpiod_get_raw_value
gpiod_get_value
gpiod_get_value_cansleep
@ -1263,7 +1276,9 @@
irq_create_of_mapping
irq_dispose_mapping
__irq_domain_add
__irq_domain_alloc_fwnode
irq_domain_create_simple
irq_domain_free_fwnode
irq_domain_free_irqs_common
irq_domain_get_irq_data
irq_domain_remove
@ -1335,6 +1350,7 @@
kfree_skb_list_reason
kfree_skb_partial
kfree_skb_reason
kill_fasync
kimage_vaddr
kimage_voffset
__kmalloc
@ -1402,6 +1418,7 @@
kthread_unpark
kthread_worker_fn
ktime_get
ktime_get_coarse_ts64
ktime_get_coarse_with_offset
ktime_get_mono_fast_ns
ktime_get_raw
@ -1458,8 +1475,12 @@
mdiobus_alloc_size
mdiobus_free
mdiobus_get_phy
__mdiobus_read
mdiobus_read
__mdiobus_register
mdiobus_unregister
__mdiobus_write
mdiobus_write
media_create_ancillary_link
media_create_intf_link
media_create_pad_link
@ -1723,6 +1744,7 @@
of_machine_is_compatible
of_match_device
of_match_node
__of_mdiobus_register
of_n_addr_cells
of_n_size_cells
__of_parse_phandle_with_args
@ -1865,6 +1887,9 @@
phy_connect_direct
phy_disconnect
phy_do_ioctl_running
phy_drivers_register
phy_drivers_unregister
phy_error
phy_ethtool_get_eee
phy_ethtool_get_link_ksettings
phy_ethtool_get_wol
@ -1877,8 +1902,10 @@
phy_exit
phy_find_first
phy_get
phy_get_pause
phy_init
phy_init_eee
phy_init_hw
phylink_connect_phy
phylink_create
phylink_destroy
@ -1895,8 +1922,10 @@
phy_power_on
phy_print_status
phy_put
phy_read_mmd
phy_register_fixup_for_uid
phy_remove_link_mode
phy_restore_page
phy_save_page
phy_set_mode_ext
phy_start
@ -1904,7 +1933,9 @@
phy_stop
phy_support_asym_pause
phy_suspend
phy_trigger_machine
phy_unregister_fixup_for_uid
phy_write_mmd
pick_migrate_task
pid_task
pinconf_generic_parse_dt_config
@ -1938,6 +1969,7 @@
__platform_driver_register
platform_driver_unregister
platform_find_device_by_driver
platform_get_ethdev_address
platform_get_irq
platform_get_irq_byname
platform_get_irq_byname_optional
@ -2134,9 +2166,12 @@
regmap_field_read
regmap_field_update_bits_base
regmap_irq_get_domain
regmap_multi_reg_write
regmap_raw_read
regmap_raw_write
regmap_read
regmap_register_patch
regmap_test_bits
regmap_update_bits_base
regmap_write
regulator_bulk_disable
@ -2182,6 +2217,7 @@
remove_proc_entry
remove_proc_subtree
remove_wait_queue
report_iommu_fault
request_firmware
request_firmware_direct
request_firmware_nowait
@ -2457,19 +2493,25 @@
snd_soc_dapm_get_pin_switch
snd_soc_dapm_get_volsw
snd_soc_dapm_info_pin_switch
snd_soc_dapm_kcontrol_widget
snd_soc_dapm_new_controls
snd_soc_dapm_new_widgets
snd_soc_dapm_put_enum_double
snd_soc_dapm_put_pin_switch
snd_soc_dapm_put_volsw
snd_soc_dapm_sync
snd_soc_dpcm_get_substream
snd_soc_get_enum_double
snd_soc_get_volsw
snd_soc_info_enum_double
snd_soc_info_volsw
snd_soc_jack_report
snd_soc_new_compress
snd_soc_of_get_dai_link_codecs
snd_soc_of_parse_audio_routing
snd_soc_pm_ops
snd_soc_poweroff
snd_soc_put_enum_double
snd_soc_put_volsw
snd_soc_register_component
snd_soc_resume
@ -2642,6 +2684,7 @@
thermal_zone_get_zone_by_name
thread_group_cputime_adjusted
tick_nohz_get_sleep_length
time64_to_tm
timecounter_init
timecounter_read
timer_of_init
@ -3135,10 +3178,12 @@
usb_role_string
usb_role_switch_get
usb_role_switch_get_drvdata
usb_role_switch_put
usb_role_switch_register
usb_role_switch_set_role
usb_role_switch_unregister
usb_root_hub_lost_power
usb_scuttle_anchored_urbs
usb_set_interface
usb_show_dynids
usb_speed_string
@ -3154,6 +3199,13 @@
uuid_gen
uuid_null
uuid_parse
v4l2_async_nf_cleanup
v4l2_async_nf_init
v4l2_async_nf_parse_fwnode_endpoints
v4l2_async_nf_register
v4l2_async_nf_unregister
v4l2_async_register_subdev
v4l2_async_unregister_subdev
v4l2_compat_ioctl32
v4l2_ctrl_find
v4l2_ctrl_g_ctrl
@ -3190,6 +3242,7 @@
v4l2_fh_open
__v4l2_find_nearest_size
v4l2_format_info
v4l2_fwnode_endpoint_parse
v4l2_i2c_subdev_init
v4l2_m2m_buf_copy_metadata
v4l2_m2m_buf_queue
@ -3203,7 +3256,9 @@
v4l2_m2m_get_vq
v4l2_m2m_init
v4l2_m2m_ioctl_create_bufs
v4l2_m2m_ioctl_decoder_cmd
v4l2_m2m_ioctl_dqbuf
v4l2_m2m_ioctl_encoder_cmd
v4l2_m2m_ioctl_expbuf
v4l2_m2m_ioctl_prepare_buf
v4l2_m2m_ioctl_qbuf
@ -3211,7 +3266,10 @@
v4l2_m2m_ioctl_reqbufs
v4l2_m2m_ioctl_streamoff
v4l2_m2m_ioctl_streamon
v4l2_m2m_ioctl_try_decoder_cmd
v4l2_m2m_ioctl_try_encoder_cmd
v4l2_m2m_job_finish
v4l2_m2m_last_buffer_done
v4l2_m2m_next_buf
v4l2_m2m_qbuf
v4l2_m2m_register_media_controller

View File

@ -1,4 +1,219 @@
[abi_symbol_list]
# commonly used symbols
module_layout
__put_task_struct
# required by ntfs3.ko
__alloc_pages
alt_cb_patch_nops
__arch_copy_from_user
__arch_copy_to_user
arm64_use_ng_mappings
balance_dirty_pages_ratelimited
bcmp
__bh_read
bh_uptodate_or_lock
bio_add_page
bio_alloc_bioset
bio_chain
bio_put
__bitmap_clear
__bitmap_set
__bitmap_weight
blkdev_issue_discard
blk_finish_plug
blk_start_plug
__blockdev_direct_IO
block_dirty_folio
block_invalidate_folio
block_truncate_page
block_write_begin
block_write_end
block_write_full_page
__bread_gfp
__brelse
call_rcu
capable
clean_bdev_aliases
clear_inode
clear_nlink
copy_page_from_iter_atomic
cpu_hwcaps
create_empty_buffers
current_time
current_umask
d_find_alias
d_instantiate
discard_new_inode
d_make_root
d_obtain_alias
down_read
down_write
down_write_trylock
dput
drop_nlink
d_splice_alias
end_buffer_read_sync
fault_in_iov_iter_readable
fiemap_fill_next_extent
fiemap_prep
filemap_fdatawait_range
filemap_fdatawrite
filemap_fdatawrite_range
filemap_write_and_wait_range
file_remove_privs
file_update_time
_find_next_bit
_find_next_zero_bit
flush_dcache_page
__folio_lock
__folio_put
fs_bio_set
fs_param_is_string
fs_param_is_u32
__fs_parse
generic_block_bmap
generic_fh_to_dentry
generic_fh_to_parent
generic_file_fsync
generic_file_llseek
generic_file_mmap
generic_file_open
generic_file_read_iter
generic_file_splice_read
__generic_file_write_iter
generic_fillattr
generic_permission
generic_read_dir
generic_write_checks
generic_write_end
generic_writepages
__getblk_gfp
get_random_u32
get_tree_bdev
gic_nonsecure_priorities
grab_cache_page_write_begin
hex_asc
iget5_locked
iget_failed
ihold
ilookup
inc_nlink
__init_rwsem
init_special_inode
inode_dio_wait
inode_get_bytes
inode_init_once
inode_init_owner
inode_needs_sync
inode_newsize_ok
inode_nohighmem
inode_set_bytes
inode_to_bdi
insert_inode_locked
invalidate_bdev
invalidate_inode_buffers
iov_iter_revert
iov_iter_zero
iput
is_bad_inode
iter_file_splice_write
kasan_flag_enabled
kfree
kfree_link
kill_block_super
__kmalloc
kmalloc_caches
kmalloc_trace
kmem_cache_alloc
kmem_cache_alloc_lru
kmem_cache_create
kmem_cache_destroy
kmem_cache_free
kmemdup
kvfree
kvmalloc_node
load_nls
load_nls_default
__lock_buffer
lockref_get
logfc
make_bad_inode
mark_buffer_dirty
__mark_inode_dirty
memcpy
memmove
memset
mpage_readahead
mpage_read_folio
mpage_writepages
__mutex_init
mutex_lock
mutex_trylock
mutex_unlock
names_cachep
new_inode
overflowgid
overflowuid
pagecache_get_page
page_pinner_inited
__page_pinner_put_page
posix_acl_chmod
preempt_schedule
_printk
___ratelimit
_raw_spin_lock
_raw_spin_unlock
rb_erase
rb_first
rb_insert_color
rb_last
rb_next
rb_prev
rcu_barrier
read_cache_page
register_filesystem
sb_set_blocksize
seq_printf
seq_puts
setattr_copy
setattr_prepare
set_bh_page
set_nlink
set_page_dirty
snprintf
__stack_chk_fail
strcmp
strlen
submit_bh
submit_bio
submit_bio_wait
sync_blockdev
sync_blockdev_nowait
sync_dirty_buffer
sync_filesystem
sync_inode_metadata
sync_mapping_buffers
truncate_inode_pages_final
truncate_pagecache
truncate_setsize
unload_nls
unlock_buffer
unlock_new_inode
unlock_page
unregister_filesystem
up_read
up_write
utf16s_to_utf8s
utf8_to_utf32
vfs_fsync_range
vmap
vm_zone_stat
vunmap
__wait_on_buffer
__warn_printk
write_inode_now
# required by mount_state.ko
get_fs_type
iterate_supers_type
get_fs_type

View File

@ -212,6 +212,7 @@
__traceiter_android_vh_free_oem_binder_struct
__traceiter_android_vh_binder_special_task
__traceiter_android_vh_binder_free_buf
__traceiter_android_vh_binder_buffer_release
__traceiter_android_vh_copy_process
__tracepoint_android_rvh_post_init_entity_util_avg
__tracepoint_android_rvh_rtmutex_force_update
@ -320,6 +321,7 @@
__tracepoint_android_vh_free_oem_binder_struct
__tracepoint_android_vh_binder_special_task
__tracepoint_android_vh_binder_free_buf
__tracepoint_android_vh_binder_buffer_release
__tracepoint_android_vh_copy_process
__trace_puts
try_to_free_mem_cgroup_pages
@ -332,4 +334,6 @@
wakeup_source_remove
wake_up_state
wq_worker_comm
xt_register_match
xt_unregister_match
zero_pfn

View File

@ -370,6 +370,7 @@
devfreq_recommended_opp
devfreq_register_opp_notifier
devfreq_remove_device
devfreq_remove_governor
devfreq_unregister_opp_notifier
devfreq_update_interval
dev_fwnode
@ -692,6 +693,7 @@
drm_bridge_remove
drm_compat_ioctl
drm_connector_attach_encoder
drm_connector_attach_max_bpc_property
drm_connector_cleanup
drm_connector_init
drm_connector_list_iter_begin
@ -784,6 +786,7 @@
drm_modeset_lock_all_ctx
drm_modeset_lock_single_interruptible
drm_modeset_unlock
drm_mode_sort
drm_mode_vrefresh
drm_object_attach_property
drm_object_property_set_value
@ -1492,6 +1495,7 @@
of_genpd_add_provider_simple
of_get_child_by_name
of_get_cpu_node
of_get_drm_panel_display_mode
of_get_named_gpio_flags
of_get_next_available_child
of_get_next_child
@ -1749,6 +1753,7 @@
radix_tree_lookup
radix_tree_next_chunk
radix_tree_preload
raise_softirq
___ratelimit
raw_notifier_call_chain
raw_notifier_chain_register
@ -2324,6 +2329,7 @@
__traceiter_android_rvh_set_user_nice_locked
__traceiter_android_rvh_typec_tcpci_get_vbus
__traceiter_android_rvh_uclamp_eff_get
__traceiter_android_rvh_ufs_complete_init
__traceiter_android_rvh_ufs_reprogram_all_keys
__traceiter_android_rvh_update_blocked_fair
__traceiter_android_rvh_update_load_avg
@ -2373,7 +2379,10 @@
__traceiter_android_vh_ufs_update_sysfs
__traceiter_android_vh_use_amu_fie
__traceiter_clock_set_rate
__traceiter_cma_alloc_finish
__traceiter_cma_alloc_start
__traceiter_cpu_frequency
__traceiter_cpu_idle
__traceiter_device_pm_callback_end
__traceiter_device_pm_callback_start
__traceiter_dwc3_readl
@ -2439,6 +2448,7 @@
__tracepoint_android_rvh_set_user_nice_locked
__tracepoint_android_rvh_typec_tcpci_get_vbus
__tracepoint_android_rvh_uclamp_eff_get
__tracepoint_android_rvh_ufs_complete_init
__tracepoint_android_rvh_ufs_reprogram_all_keys
__tracepoint_android_rvh_update_blocked_fair
__tracepoint_android_rvh_update_load_avg
@ -2488,7 +2498,10 @@
__tracepoint_android_vh_ufs_update_sysfs
__tracepoint_android_vh_use_amu_fie
__tracepoint_clock_set_rate
__tracepoint_cma_alloc_finish
__tracepoint_cma_alloc_start
__tracepoint_cpu_frequency
__tracepoint_cpu_idle
__tracepoint_device_pm_callback_end
__tracepoint_device_pm_callback_start
__tracepoint_dwc3_readl
@ -2762,6 +2775,8 @@
woken_wake_function
work_busy
__write_overflow_field
ww_mutex_lock
ww_mutex_unlock
__xa_alloc
xa_clear_mark
xa_destroy

View File

@ -3449,6 +3449,7 @@
__traceiter_android_vh_show_suspend_epoch_val
__traceiter_android_vh_skip_swap_map_write
__traceiter_android_vh_timer_calc_index
__traceiter_android_vh_try_fixup_sea
__traceiter_android_vh_ufs_check_int_errors
__traceiter_android_vh_ufs_compl_command
__traceiter_android_vh_ufs_send_command
@ -3597,6 +3598,7 @@
__tracepoint_android_vh_show_suspend_epoch_val
__tracepoint_android_vh_skip_swap_map_write
__tracepoint_android_vh_timer_calc_index
__tracepoint_android_vh_try_fixup_sea
__tracepoint_android_vh_ufs_check_int_errors
__tracepoint_android_vh_ufs_compl_command
__tracepoint_android_vh_ufs_send_command

View File

@ -0,0 +1,37 @@
[abi_symbol_list]
# required by delayacct
set_delayacct_enabled
__traceiter_android_rvh_delayacct_init
__traceiter_android_rvh_delayacct_tsk_init
__traceiter_android_rvh_delayacct_tsk_free
__traceiter_android_vh_delayacct_blkio_start
__traceiter_android_vh_delayacct_blkio_end
__traceiter_android_vh_delayacct_add_tsk
__traceiter_android_vh_delayacct_blkio_ticks
__traceiter_android_vh_delayacct_freepages_start
__traceiter_android_vh_delayacct_freepages_end
__traceiter_android_vh_delayacct_thrashing_start
__traceiter_android_vh_delayacct_thrashing_end
__traceiter_android_vh_delayacct_swapin_start
__traceiter_android_vh_delayacct_swapin_end
__traceiter_android_vh_delayacct_compact_start
__traceiter_android_vh_delayacct_compact_end
__traceiter_android_vh_delayacct_wpcopy_start
__traceiter_android_vh_delayacct_wpcopy_end
__tracepoint_android_rvh_delayacct_init
__tracepoint_android_rvh_delayacct_tsk_init
__tracepoint_android_rvh_delayacct_tsk_free
__tracepoint_android_vh_delayacct_blkio_start
__tracepoint_android_vh_delayacct_blkio_end
__tracepoint_android_vh_delayacct_add_tsk
__tracepoint_android_vh_delayacct_blkio_ticks
__tracepoint_android_vh_delayacct_freepages_start
__tracepoint_android_vh_delayacct_freepages_end
__tracepoint_android_vh_delayacct_thrashing_start
__tracepoint_android_vh_delayacct_thrashing_end
__tracepoint_android_vh_delayacct_swapin_start
__tracepoint_android_vh_delayacct_swapin_end
__tracepoint_android_vh_delayacct_compact_start
__tracepoint_android_vh_delayacct_compact_end
__tracepoint_android_vh_delayacct_wpcopy_start
__tracepoint_android_vh_delayacct_wpcopy_end

View File

@ -350,3 +350,12 @@
pagecache_get_page
filemap_get_folios
find_get_pages_range_tag
#required by bcmdhd.ko
nla_append
sdio_writew
sdio_readw
#required by speed_ui.ko
__tracepoint_android_rvh_update_cpus_allowed
__traceiter_android_rvh_update_cpus_allowed

View File

@ -304,12 +304,12 @@ CONFIG_FW_LOADER_USER_HELPER=y
# CONFIG_SUN50I_DE2_BUS is not set
# CONFIG_SUNXI_RSB is not set
CONFIG_ARM_SCMI_PROTOCOL=y
CONFIG_ARM_SCMI_TRANSPORT_VIRTIO=y
# CONFIG_ARM_SCMI_POWER_DOMAIN is not set
CONFIG_ARM_SCPI_PROTOCOL=y
# CONFIG_ARM_SCPI_POWER_DOMAIN is not set
# CONFIG_EFI_ARMSTUB_DTB_LOADER is not set
CONFIG_GNSS=y
CONFIG_BLK_DEV_NULL_BLK=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
@ -323,7 +323,6 @@ CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_DEBUG=m
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y

View File

@ -746,6 +746,11 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
const struct fault_info *inf;
unsigned long siaddr;
bool can_fixup = false;
trace_android_vh_try_fixup_sea(far, esr, regs, &can_fixup);
if (can_fixup && fixup_exception(regs))
return 0;
inf = esr_to_fault_info(esr);

View File

@ -295,7 +295,6 @@ CONFIG_FW_LOADER_USER_HELPER=y
# CONFIG_FW_CACHE is not set
CONFIG_GNSS=y
CONFIG_OF=y
CONFIG_BLK_DEV_NULL_BLK=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
@ -309,7 +308,6 @@ CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_DEBUG=m
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y

View File

@ -10,11 +10,6 @@ MAKE_GOALS="
modules
"
if [ "${LTO}" = "none" ]; then
echo "The FIPS140 module needs LTO to be enabled."
exit 1
fi
MODULES_ORDER=android/gki_aarch64_fips140_modules
KERNEL_DIR=common

View File

@ -79,6 +79,33 @@ config CRYPTO_FIPS140_MOD_EVAL_TESTING
errors and support for a userspace interface to some of the module's
services. This option should not be enabled in production builds.
config CRYPTO_FIPS140_MOD_DEBUG_INTEGRITY_CHECK
bool "Debug the integrity check in FIPS 140 module"
depends on CRYPTO_FIPS140_MOD
help
This option makes the FIPS 140 module provide debugfs files containing
the text and rodata that were used for the integrity check, i.e. the
runtime text and rodata with relocations and code patches unapplied.
This option also makes the module load even if the integrity check
fails so that these files can be used to debug the failure. (A
possible failure mode is that the kernel has added a new type of code
patching and the module needs to be updated to disable or unapply it.)
This option must not be enabled in production builds.
Example commands for debugging an integrity check failure:
adb root
adb shell mount debugfs -t debugfs /sys/kernel/debug
adb shell cp /sys/kernel/debug/fips140/{text,rodata} /data/local/tmp/
adb pull /data/local/tmp/text text.checked
adb pull /data/local/tmp/rodata rodata.checked
llvm-objcopy -O binary --only-section=.text fips140.ko text.orig
llvm-objcopy -O binary --only-section=.rodata fips140.ko rodata.orig
for f in {text,rodata}.{orig,checked}; do xxd -g1 $f > $f.xxd; done
vimdiff text.{orig,checked}.xxd
vimdiff rodata.{orig,checked}.xxd
config CRYPTO_ALGAPI
tristate
select CRYPTO_ALGAPI2

View File

@ -23,6 +23,7 @@
#undef __DISABLE_EXPORTS
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
@ -357,6 +358,94 @@ static void __init unapply_rodata_relocations(void *section, int section_size,
}
}
enum {
PACIASP = 0xd503233f,
AUTIASP = 0xd50323bf,
SCS_PUSH = 0xf800865e,
SCS_POP = 0xf85f8e5e,
};
/*
* To make the integrity check work with dynamic Shadow Call Stack (SCS),
* replace all instructions that push or pop from the SCS with the Pointer
* Authentication Code (PAC) instructions that were present originally.
*/
static void __init unapply_scs_patch(void *section, int section_size)
{
#if defined(CONFIG_ARM64) && defined(CONFIG_UNWIND_PATCH_PAC_INTO_SCS)
u32 *insns = section;
int i;
for (i = 0; i < section_size / sizeof(insns[0]); i++) {
if (insns[i] == SCS_PUSH)
insns[i] = PACIASP;
else if (insns[i] == SCS_POP)
insns[i] = AUTIASP;
}
#endif
}
#ifdef CONFIG_CRYPTO_FIPS140_MOD_DEBUG_INTEGRITY_CHECK
static struct {
const void *text;
int textsize;
const void *rodata;
int rodatasize;
} saved_integrity_check_info;
static ssize_t fips140_text_read(struct file *file, char __user *to,
size_t count, loff_t *ppos)
{
return simple_read_from_buffer(to, count, ppos,
saved_integrity_check_info.text,
saved_integrity_check_info.textsize);
}
static ssize_t fips140_rodata_read(struct file *file, char __user *to,
size_t count, loff_t *ppos)
{
return simple_read_from_buffer(to, count, ppos,
saved_integrity_check_info.rodata,
saved_integrity_check_info.rodatasize);
}
static const struct file_operations fips140_text_fops = {
.read = fips140_text_read,
};
static const struct file_operations fips140_rodata_fops = {
.read = fips140_rodata_read,
};
static void fips140_init_integrity_debug_files(const void *text, int textsize,
const void *rodata,
int rodatasize)
{
struct dentry *dir;
dir = debugfs_create_dir("fips140", NULL);
saved_integrity_check_info.text = kmemdup(text, textsize, GFP_KERNEL);
saved_integrity_check_info.textsize = textsize;
if (saved_integrity_check_info.text)
debugfs_create_file("text", 0400, dir, NULL,
&fips140_text_fops);
saved_integrity_check_info.rodata = kmemdup(rodata, rodatasize,
GFP_KERNEL);
saved_integrity_check_info.rodatasize = rodatasize;
if (saved_integrity_check_info.rodata)
debugfs_create_file("rodata", 0400, dir, NULL,
&fips140_rodata_fops);
}
#else /* CONFIG_CRYPTO_FIPS140_MOD_DEBUG_INTEGRITY_CHECK */
static void fips140_init_integrity_debug_files(const void *text, int textsize,
const void *rodata,
int rodatasize)
{
}
#endif /* !CONFIG_CRYPTO_FIPS140_MOD_DEBUG_INTEGRITY_CHECK */
extern struct {
u32 offset;
u32 count;
@ -398,6 +487,11 @@ static bool __init check_fips140_module_hmac(void)
offset_to_ptr(&fips140_rela_rodata.offset),
fips140_rela_rodata.count);
unapply_scs_patch(textcopy, textsize);
fips140_init_integrity_debug_files(textcopy, textsize,
rodatacopy, rodatasize);
fips140_inject_integrity_failure(textcopy);
tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
@ -538,10 +632,14 @@ fips140_init(void)
*/
if (!check_fips140_module_hmac()) {
pr_crit("integrity check failed -- giving up!\n");
goto panic;
if (!IS_ENABLED(CONFIG_CRYPTO_FIPS140_MOD_DEBUG_INTEGRITY_CHECK)) {
pr_crit("integrity check failed -- giving up!\n");
goto panic;
}
pr_crit("ignoring integrity check failure due to debug mode\n");
} else {
pr_info("integrity check passed\n");
}
pr_info("integrity check passed\n");
complete_all(&fips140_tests_done);

View File

@ -1891,8 +1891,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
if (offset > buffer->data_size || read_size < sizeof(*hdr))
if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
!IS_ALIGNED(offset, sizeof(u32)))
return 0;
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;
@ -3989,12 +3991,14 @@ binder_free_buf(struct binder_proc *proc,
struct binder_buffer *buffer, bool is_failure)
{
bool enqueue_task = true;
bool has_transaction = false;
trace_android_vh_binder_free_buf(proc, thread, buffer);
binder_inner_proc_lock(proc);
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
buffer->transaction = NULL;
has_transaction = true;
}
binder_inner_proc_unlock(proc);
if (buffer->async_transaction && buffer->target_node) {
@ -4018,6 +4022,8 @@ binder_free_buf(struct binder_proc *proc,
}
binder_node_inner_unlock(buf_node);
}
trace_android_vh_binder_buffer_release(proc, thread, buffer,
has_transaction);
trace_binder_transaction_buffer_release(buffer);
binder_release_entire_buffer(proc, thread, buffer, is_failure);
binder_alloc_free_buf(&proc->alloc, buffer);

View File

@ -70,6 +70,8 @@
#include <trace/hooks/regmap.h>
#include <trace/hooks/compaction.h>
#include <trace/hooks/suspend.h>
#include <trace/hooks/delayacct.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
* associated with them) to allow external modules to probe them.
@ -136,6 +138,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_format_check);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_dump_buffer);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_fill_prdt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ufs_reprogram_all_keys);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ufs_complete_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_prepare_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_update_sysfs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command);
@ -243,6 +246,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_to_freeze_todo_unfrozen);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_die_kernel_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sea);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sp_pc_abort);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_fixup_sea);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_undefinstr);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_el1_bti);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_el1_fpac);
@ -351,6 +355,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_received);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_oem_binder_struct);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_special_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_free_buf);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_buffer_release);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_compaction_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_compaction_try_to_compact_pages_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_alloc_pages_direct_reclaim_enter);
@ -377,3 +382,21 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_looper_exited);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_spawn_new_thread);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_has_special_work_ilocked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_detect_low_async_space);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_delayacct_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_delayacct_tsk_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_delayacct_tsk_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_blkio_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_blkio_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_add_tsk);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_blkio_ticks);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_freepages_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_freepages_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_thrashing_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_thrashing_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_swapin_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_swapin_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_compact_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_compact_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_wpcopy_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_wpcopy_end);

View File

@ -93,6 +93,7 @@ static void update_pm_runtime_accounting(struct device *dev)
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
update_pm_runtime_accounting(dev);
trace_rpm_status(dev, status);
dev->power.runtime_status = status;
}

View File

@ -51,12 +51,9 @@ struct io_stats {
#define UID_STATE_FOREGROUND 0
#define UID_STATE_BACKGROUND 1
#define UID_STATE_BUCKET_SIZE 2
#define UID_STATE_TOTAL_CURR 2
#define UID_STATE_TOTAL_LAST 3
#define UID_STATE_DEAD_TASKS 4
#define UID_STATE_SIZE 5
#define UID_STATE_TOTAL_LAST 2
#define UID_STATE_DEAD_TASKS 3
#define UID_STATE_SIZE 4
#define MAX_TASK_COMM_LEN 256
@ -71,8 +68,6 @@ struct uid_entry {
uid_t uid;
u64 utime;
u64 stime;
u64 active_utime;
u64 active_stime;
int state;
struct io_stats io[UID_STATE_SIZE];
struct hlist_node hash;
@ -173,58 +168,47 @@ static struct uid_entry *find_or_register_uid(uid_t uid)
return uid_entry;
}
static void calc_uid_cputime(struct uid_entry *uid_entry,
u64 *total_utime, u64 *total_stime)
{
struct user_namespace *user_ns = current_user_ns();
struct task_struct *p, *t;
u64 utime, stime;
uid_t uid;
rcu_read_lock();
for_each_process(p) {
uid = from_kuid_munged(user_ns, task_uid(p));
if (uid != uid_entry->uid)
continue;
for_each_thread(p, t) {
/* avoid double accounting of dying threads */
if (!(t->flags & PF_EXITING)) {
task_cputime_adjusted(t, &utime, &stime);
*total_utime += utime;
*total_stime += stime;
}
}
}
rcu_read_unlock();
}
static int uid_cputime_show(struct seq_file *m, void *v)
{
struct uid_entry *uid_entry = NULL;
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
u64 utime;
u64 stime;
u32 bkt;
uid_t uid;
for (bkt = 0, uid_entry = NULL; uid_entry == NULL &&
bkt < HASH_SIZE(hash_table); bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
uid_entry->active_stime = 0;
uid_entry->active_utime = 0;
}
unlock_uid_by_bkt(bkt);
}
u64 total_utime = uid_entry->utime;
u64 total_stime = uid_entry->stime;
rcu_read_lock();
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
lock_uid(uid);
if (!uid_entry || uid_entry->uid != uid)
uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
rcu_read_unlock();
unlock_uid(uid);
pr_err("%s: failed to find the uid_entry for uid %d\n",
__func__, uid);
return -ENOMEM;
}
/* avoid double accounting of dying threads */
if (!(task->flags & PF_EXITING)) {
task_cputime_adjusted(task, &utime, &stime);
uid_entry->active_utime += utime;
uid_entry->active_stime += stime;
}
unlock_uid(uid);
} while_each_thread(temp, task);
rcu_read_unlock();
for (bkt = 0, uid_entry = NULL; uid_entry == NULL &&
bkt < HASH_SIZE(hash_table); bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
u64 total_utime = uid_entry->utime +
uid_entry->active_utime;
u64 total_stime = uid_entry->stime +
uid_entry->active_stime;
calc_uid_cputime(uid_entry, &total_utime, &total_stime);
seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
ktime_to_us(total_utime), ktime_to_us(total_stime));
}
@ -323,86 +307,52 @@ static void add_uid_io_stats(struct uid_entry *uid_entry,
__add_uid_io_stats(uid_entry, &task->ioac, slot);
}
static void update_io_stats_all(void)
static void update_io_stats_uid(struct uid_entry *uid_entry)
{
struct uid_entry *uid_entry = NULL;
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
struct task_struct *p, *t;
struct io_stats io;
memset(&io, 0, sizeof(struct io_stats));
rcu_read_lock();
for_each_process(p) {
uid_t uid = from_kuid_munged(user_ns, task_uid(p));
if (uid != uid_entry->uid)
continue;
for_each_thread(p, t) {
/* avoid double accounting of dying threads */
if (!(t->flags & PF_EXITING)) {
io.read_bytes += t->ioac.read_bytes;
io.write_bytes += compute_write_bytes(&t->ioac);
io.rchar += t->ioac.rchar;
io.wchar += t->ioac.wchar;
io.fsync += t->ioac.syscfs;
}
}
}
rcu_read_unlock();
compute_io_bucket_stats(&uid_entry->io[uid_entry->state], &io,
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
}
static int uid_io_show(struct seq_file *m, void *v)
{
struct uid_entry *uid_entry = NULL;
u32 bkt;
uid_t uid;
for (bkt = 0, uid_entry = NULL; uid_entry == NULL && bkt < HASH_SIZE(hash_table);
bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
}
unlock_uid_by_bkt(bkt);
}
rcu_read_lock();
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
lock_uid(uid);
if (!uid_entry || uid_entry->uid != uid)
uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
unlock_uid(uid);
continue;
}
add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
unlock_uid(uid);
} while_each_thread(temp, task);
rcu_read_unlock();
update_io_stats_uid(uid_entry);
for (bkt = 0, uid_entry = NULL; uid_entry == NULL && bkt < HASH_SIZE(hash_table);
bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
&uid_entry->io[UID_STATE_TOTAL_CURR],
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
}
unlock_uid_by_bkt(bkt);
}
}
static void update_io_stats_uid(struct uid_entry *uid_entry)
{
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
rcu_read_lock();
do_each_thread(temp, task) {
if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
continue;
add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
} while_each_thread(temp, task);
rcu_read_unlock();
compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
&uid_entry->io[UID_STATE_TOTAL_CURR],
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
}
static int uid_io_show(struct seq_file *m, void *v)
{
struct uid_entry *uid_entry;
u32 bkt;
update_io_stats_all();
for (bkt = 0, uid_entry = NULL; uid_entry == NULL && bkt < HASH_SIZE(hash_table);
bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
uid_entry->uid,
uid_entry->io[UID_STATE_FOREGROUND].rchar,
@ -446,7 +396,6 @@ static ssize_t uid_procstat_write(struct file *file,
uid_t uid;
int argc, state;
char input[128];
struct uid_entry uid_entry_tmp;
if (count >= sizeof(input))
return -EINVAL;
@ -475,29 +424,8 @@ static ssize_t uid_procstat_write(struct file *file,
return count;
}
/*
* Update_io_stats_uid_locked would take a long lock-time of uid_lock
* due to call do_each_thread to compute uid_entry->io, which would
* cause to lock competition sometime.
*
* Using uid_entry_tmp to get the result of Update_io_stats_uid,
* so that we can unlock_uid during update_io_stats_uid, in order
* to avoid the unnecessary lock-time of uid_lock.
*/
uid_entry_tmp = *uid_entry;
unlock_uid(uid);
update_io_stats_uid(&uid_entry_tmp);
lock_uid(uid);
hlist_for_each_entry(uid_entry, &hash_table[hash_min(uid, HASH_BITS(hash_table))], hash) {
if (uid_entry->uid == uid_entry_tmp.uid) {
memcpy(uid_entry->io, uid_entry_tmp.io,
sizeof(struct io_stats) * UID_STATE_SIZE);
uid_entry->state = state;
break;
}
}
update_io_stats_uid(uid_entry);
uid_entry->state = state;
unlock_uid(uid);
return count;

View File

@ -483,10 +483,12 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
ret = dw_pcie_start_link(pci);
if (ret)
goto err_free_msi;
}
/* Ignore errors, the link may come up later */
dw_pcie_wait_for_link(pci);
if (pci->ops && pci->ops->start_link) {
/* Ignore errors, the link may come up later */
dw_pcie_wait_for_link(pci);
}
}
bridge->sysdata = pp;

View File

@ -645,6 +645,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
unsigned long flags;
int err = FAILED;
if (!ufshcd_cmd_inflight(lrbp->cmd)) {
@ -685,8 +686,10 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
}
err = SUCCESS;
spin_lock_irqsave(&hwq->cq_lock, flags);
if (ufshcd_cmd_inflight(lrbp->cmd))
ufshcd_release_scsi_cmd(hba, lrbp);
spin_unlock_irqrestore(&hwq->cq_lock, flags);
out:
return err;

View File

@ -2248,7 +2248,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
lrbp->compl_time_stamp_local_clock = 0;
trace_android_vh_ufs_send_command(hba, lrbp);
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
if (lrbp->cmd)
ufshcd_clk_scaling_start_busy(hba);
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
@ -5544,7 +5545,6 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
}
ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
complete(hba->dev_cmd.complete);
ufshcd_clk_scaling_update_busy(hba);
}
}
}
@ -8882,6 +8882,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
ufshcd_auto_hibern8_enable(hba);
ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT);
trace_android_rvh_ufs_complete_init(hba);
out:
spin_lock_irqsave(hba->host->host_lock, flags);
if (ret)

View File

@ -552,16 +552,21 @@ static ssize_t hpd_show(struct device *dev, struct device_attribute *attr, char
}
static DEVICE_ATTR_RO(hpd);
static struct attribute *dp_altmode_attrs[] = {
static struct attribute *displayport_attrs[] = {
&dev_attr_configuration.attr,
&dev_attr_pin_assignment.attr,
&dev_attr_hpd.attr,
NULL
};
static const struct attribute_group dp_altmode_group = {
static const struct attribute_group displayport_group = {
.name = "displayport",
.attrs = dp_altmode_attrs,
.attrs = displayport_attrs,
};
static const struct attribute_group *displayport_groups[] = {
&displayport_group,
NULL,
};
int dp_altmode_probe(struct typec_altmode *alt)
@ -569,7 +574,6 @@ int dp_altmode_probe(struct typec_altmode *alt)
const struct typec_altmode *port = typec_altmode_get_partner(alt);
struct fwnode_handle *fwnode;
struct dp_altmode *dp;
int ret;
/* FIXME: Port can only be DFP_U. */
@ -580,10 +584,6 @@ int dp_altmode_probe(struct typec_altmode *alt)
DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
return -ENODEV;
ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
if (ret)
return ret;
dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
@ -614,7 +614,6 @@ void dp_altmode_remove(struct typec_altmode *alt)
{
struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group);
cancel_work_sync(&dp->work);
if (dp->connector_fwnode) {
@ -639,6 +638,7 @@ static struct typec_altmode_driver dp_altmode_driver = {
.driver = {
.name = "typec_displayport",
.owner = THIS_MODULE,
.dev_groups = displayport_groups,
},
};
module_typec_altmode_driver(dp_altmode_driver);

View File

@ -263,11 +263,13 @@ static void typec_altmode_put_partner(struct altmode *altmode)
{
struct altmode *partner = altmode->partner;
struct typec_altmode *adev;
struct typec_altmode *partner_adev;
if (!partner)
return;
adev = &altmode->adev;
partner_adev = &partner->adev;
if (is_typec_plug(adev->dev.parent)) {
struct typec_plug *plug = to_typec_plug(adev->dev.parent);
@ -276,7 +278,7 @@ static void typec_altmode_put_partner(struct altmode *altmode)
} else {
partner->partner = NULL;
}
put_device(&adev->dev);
put_device(&partner_adev->dev);
}
/**

View File

@ -59,13 +59,7 @@
* we need a lock that will allow us to sleep. This lock is a
* mutex (ep->mtx). It is acquired during the event transfer loop,
* during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
* Then we also need a global mutex to serialize eventpoll_release_file()
* and ep_free().
* This mutex is acquired by ep_free() during the epoll file
* cleanup path and it is also acquired by eventpoll_release_file()
* if a file has been pushed inside an epoll set and it is then
* close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
* It is also acquired when inserting an epoll fd onto another epoll
* The epmutex is acquired when inserting an epoll fd onto another epoll
* fd. We do this so that we walk the epoll tree and ensure that this
* insertion does not create a cycle of epoll file descriptors, which
* could lead to deadlock. We need a global mutex to prevent two
@ -155,6 +149,13 @@ struct epitem {
/* The file descriptor information this item refers to */
struct epoll_filefd ffd;
/*
* Protected by file->f_lock, true for to-be-released epitem already
* removed from the "struct file" items list; together with
* eventpoll->refcount orchestrates "struct eventpoll" disposal
*/
bool dying;
/* List containing poll wait queues */
struct eppoll_entry *pwqlist;
@ -219,6 +220,12 @@ struct eventpoll {
u64 gen;
struct hlist_head refs;
/*
* usage count, used together with epitem->dying to
* orchestrate the disposal of this struct
*/
refcount_t refcount;
#ifdef CONFIG_NET_RX_BUSY_POLL
/* used to track busy poll napi_id */
unsigned int napi_id;
@ -242,9 +249,7 @@ struct ep_pqueue {
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;
/*
* This mutex is used to serialize ep_free() and eventpoll_release_file().
*/
/* Used for cycles detection */
static DEFINE_MUTEX(epmutex);
static u64 loop_check_gen = 0;
@ -559,8 +564,7 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
/*
* This function unregisters poll callbacks from the associated file
* descriptor. Must be called with "mtx" held (or "epmutex" if called from
* ep_free).
* descriptor. Must be called with "mtx" held.
*/
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
@ -683,11 +687,40 @@ static void epi_rcu_free(struct rcu_head *head)
kmem_cache_free(epi_cache, epi);
}
static void ep_get(struct eventpoll *ep)
{
refcount_inc(&ep->refcount);
}
/*
* Returns true if the event poll can be disposed
*/
static bool ep_refcount_dec_and_test(struct eventpoll *ep)
{
if (!refcount_dec_and_test(&ep->refcount))
return false;
WARN_ON_ONCE(!RB_EMPTY_ROOT(&ep->rbr.rb_root));
return true;
}
static void ep_free(struct eventpoll *ep)
{
mutex_destroy(&ep->mtx);
free_uid(ep->user);
wakeup_source_unregister(ep->ws);
kfree(ep);
}
/*
* Removes a "struct epitem" from the eventpoll RB tree and deallocates
* all the associated resources. Must be called with "mtx" held.
* If the dying flag is set, do the removal only if force is true.
* This prevents ep_clear_and_put() from dropping all the ep references
* while running concurrently with eventpoll_release_file().
* Returns true if the eventpoll can be disposed.
*/
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
{
struct file *file = epi->ffd.file;
struct epitems_head *to_free;
@ -702,6 +735,11 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
/* Remove the current item from the list of epoll hooks */
spin_lock(&file->f_lock);
if (epi->dying && !force) {
spin_unlock(&file->f_lock);
return false;
}
to_free = NULL;
head = file->f_ep;
if (head->first == &epi->fllink && !epi->fllink.next) {
@ -735,28 +773,28 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
call_rcu(&epi->rcu, epi_rcu_free);
percpu_counter_dec(&ep->user->epoll_watches);
return 0;
return ep_refcount_dec_and_test(ep);
}
static void ep_free(struct eventpoll *ep)
/*
* ep_remove variant for callers owning an additional reference to the ep
*/
static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
{
struct rb_node *rbp;
WARN_ON_ONCE(__ep_remove(ep, epi, false));
}
static void ep_clear_and_put(struct eventpoll *ep)
{
struct rb_node *rbp, *next;
struct epitem *epi;
bool dispose;
/* We need to release all tasks waiting for this file */
if (waitqueue_active(&ep->poll_wait))
ep_poll_safewake(ep, NULL, 0);
/*
* We need to lock this because we could be hit by
* eventpoll_release_file() while we're freeing the "struct eventpoll".
* We do not need to hold "ep->mtx" here because the epoll file
* is on the way to be removed and no one has references to it
* anymore. The only hit might come from eventpoll_release_file() but
* holding "epmutex" is sufficient here.
*/
mutex_lock(&epmutex);
mutex_lock(&ep->mtx);
/*
* Walks through the whole tree by unregistering poll callbacks.
@ -769,26 +807,25 @@ static void ep_free(struct eventpoll *ep)
}
/*
* Walks through the whole tree by freeing each "struct epitem". At this
* point we are sure no poll callbacks will be lingering around, and also by
* holding "epmutex" we can be sure that no file cleanup code will hit
* us during this operation. So we can avoid the lock on "ep->lock".
* We do not need to lock ep->mtx, either, we only do it to prevent
* a lockdep warning.
* Walks through the whole tree and tries to free each "struct epitem".
* Note that ep_remove_safe() will not remove the epitem in case of a
* racing eventpoll_release_file(); the latter will do the removal.
* At this point we are sure no poll callbacks will be lingering around.
* Since we still own a reference to the eventpoll struct, the loop can't
* dispose it.
*/
mutex_lock(&ep->mtx);
while ((rbp = rb_first_cached(&ep->rbr)) != NULL) {
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = next) {
next = rb_next(rbp);
epi = rb_entry(rbp, struct epitem, rbn);
ep_remove(ep, epi);
ep_remove_safe(ep, epi);
cond_resched();
}
dispose = ep_refcount_dec_and_test(ep);
mutex_unlock(&ep->mtx);
mutex_unlock(&epmutex);
mutex_destroy(&ep->mtx);
free_uid(ep->user);
wakeup_source_unregister(ep->ws);
kfree(ep);
if (dispose)
ep_free(ep);
}
static int ep_eventpoll_release(struct inode *inode, struct file *file)
@ -796,7 +833,7 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
struct eventpoll *ep = file->private_data;
if (ep)
ep_free(ep);
ep_clear_and_put(ep);
return 0;
}
@ -908,33 +945,34 @@ void eventpoll_release_file(struct file *file)
{
struct eventpoll *ep;
struct epitem *epi;
struct hlist_node *next;
bool dispose;
/*
* We don't want to get "file->f_lock" because it is not
* necessary. It is not necessary because we're in the "struct file"
* cleanup path, and this means that no one is using this file anymore.
* So, for example, epoll_ctl() cannot hit here since if we reach this
* point, the file counter already went to zero and fget() would fail.
* The only hit might come from ep_free() but by holding the mutex
* will correctly serialize the operation. We do need to acquire
* "ep->mtx" after "epmutex" because ep_remove() requires it when called
* from anywhere but ep_free().
*
* Besides, ep_remove() acquires the lock, so we can't hold it here.
* Use the 'dying' flag to prevent a concurrent ep_clear_and_put() from
* touching the epitems list before eventpoll_release_file() can access
* the ep->mtx.
*/
mutex_lock(&epmutex);
if (unlikely(!file->f_ep)) {
mutex_unlock(&epmutex);
return;
}
hlist_for_each_entry_safe(epi, next, file->f_ep, fllink) {
again:
spin_lock(&file->f_lock);
if (file->f_ep && file->f_ep->first) {
epi = hlist_entry(file->f_ep->first, struct epitem, fllink);
epi->dying = true;
spin_unlock(&file->f_lock);
/*
* ep access is safe as we still own a reference to the ep
* struct
*/
ep = epi->ep;
mutex_lock_nested(&ep->mtx, 0);
ep_remove(ep, epi);
mutex_lock(&ep->mtx);
dispose = __ep_remove(ep, epi, true);
mutex_unlock(&ep->mtx);
if (dispose)
ep_free(ep);
goto again;
}
mutex_unlock(&epmutex);
spin_unlock(&file->f_lock);
}
static int ep_alloc(struct eventpoll **pep)
@ -957,6 +995,7 @@ static int ep_alloc(struct eventpoll **pep)
ep->rbr = RB_ROOT_CACHED;
ep->ovflist = EP_UNACTIVE_PTR;
ep->user = user;
refcount_set(&ep->refcount, 1);
*pep = ep;
@ -1225,10 +1264,10 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
*/
list_del_init(&wait->entry);
/*
* ->whead != NULL protects us from the race with ep_free()
* or ep_remove(), ep_remove_wait_queue() takes whead->lock
* held by the caller. Once we nullify it, nothing protects
* ep/epi or even wait.
* ->whead != NULL protects us from the race with
* ep_clear_and_put() or ep_remove(), ep_remove_wait_queue()
* takes whead->lock held by the caller. Once we nullify it,
* nothing protects ep/epi or even wait.
*/
smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
}
@ -1503,16 +1542,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
if (tep)
mutex_unlock(&tep->mtx);
/*
* ep_remove_safe() calls in the later error paths can't lead to
* ep_free() as the ep file itself still holds an ep reference.
*/
ep_get(ep);
/* now check if we've created too many backpaths */
if (unlikely(full_check && reverse_path_check())) {
ep_remove(ep, epi);
ep_remove_safe(ep, epi);
return -EINVAL;
}
if (epi->event.events & EPOLLWAKEUP) {
error = ep_create_wakeup_source(epi);
if (error) {
ep_remove(ep, epi);
ep_remove_safe(ep, epi);
return error;
}
}
@ -1536,7 +1581,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
* high memory pressure.
*/
if (unlikely(!epq.epi)) {
ep_remove(ep, epi);
ep_remove_safe(ep, epi);
return -ENOMEM;
}
@ -2036,7 +2081,7 @@ static int do_epoll_create(int flags)
out_free_fd:
put_unused_fd(fd);
out_free_ep:
ep_free(ep);
ep_clear_and_put(ep);
return error;
}
@ -2178,10 +2223,16 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
error = -EEXIST;
break;
case EPOLL_CTL_DEL:
if (epi)
error = ep_remove(ep, epi);
else
if (epi) {
/*
* The eventpoll itself is still alive: the refcount
* can't go to zero here.
*/
ep_remove_safe(ep, epi);
error = 0;
} else {
error = -ENOENT;
}
break;
case EPOLL_CTL_MOD:
if (epi) {

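The eventpoll hunks above drop the global epmutex serialization between the epoll-file release path and eventpoll_release_file(), replacing it with a per-eventpoll refcount plus a per-epitem dying flag: whichever path drops the last reference calls ep_free(). A minimal user-space sketch of that disposal pattern, using C11 atomics rather than the kernel's refcount_t (the names below are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the refcounted eventpoll object. */
struct ep_like {
	atomic_int refcount;		/* 1 reference owned by the epoll "file" */
};

static void ep_like_dispose(struct ep_like *ep)
{
	printf("last reference gone, disposing\n");
	free(ep);
}

static void ep_like_get(struct ep_like *ep)
{
	atomic_fetch_add(&ep->refcount, 1);
}

/* Returns true when the caller dropped the final reference. */
static bool ep_like_put(struct ep_like *ep)
{
	return atomic_fetch_sub(&ep->refcount, 1) == 1;
}

int main(void)
{
	struct ep_like *ep = malloc(sizeof(*ep));

	atomic_init(&ep->refcount, 1);	/* held by the epoll "file" */
	ep_like_get(ep);		/* held by one inserted "epitem" */

	if (ep_like_put(ep))		/* epitem removal path */
		ep_like_dispose(ep);
	if (ep_like_put(ep))		/* file release path drops the last one */
		ep_like_dispose(ep);
	return 0;
}

The dying flag in the real patch then arbitrates which of the two racing paths actually removes a given epitem, which is what the force parameter of __ep_remove() encodes.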

@ -173,12 +173,9 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
return exist;
}
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
static bool __f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type)
{
if (time_to_inject(sbi, FAULT_BLKADDR))
return false;
switch (type) {
case META_NAT:
break;
@ -233,6 +230,20 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
return true;
}
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type)
{
if (time_to_inject(sbi, FAULT_BLKADDR_VALIDITY))
return false;
return __f2fs_is_valid_blkaddr(sbi, blkaddr, type);
}
bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
block_t blkaddr, int type)
{
return __f2fs_is_valid_blkaddr(sbi, blkaddr, type);
}
/*
* Readahead CP/NAT/SIT/SSA/POR pages
*/

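The checkpoint.c hunk splits the block-address check into a core __f2fs_is_valid_blkaddr(), a public f2fs_is_valid_blkaddr() that keeps a FAULT_BLKADDR_VALIDITY injection point, and f2fs_is_valid_blkaddr_raw(), which bypasses injection for callers that inject FAULT_BLKADDR_CONSISTENCE themselves (see the later f2fs_truncate_data_blocks_range() hunk). A standalone sketch of that checked/raw split; the names and the range check are illustrative, not the f2fs code:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative fault toggle; f2fs drives this from the fault_type mask. */
static bool inject_validity_fault;

static bool __blkaddr_ok(unsigned int blkaddr, unsigned int max)
{
	return blkaddr < max;		/* the real check depends on the type */
}

/* Checked variant: may report a spurious failure for fault testing. */
static bool blkaddr_ok(unsigned int blkaddr, unsigned int max)
{
	if (inject_validity_fault)
		return false;
	return __blkaddr_ok(blkaddr, max);
}

/* Raw variant: for callers that handle fault injection on their own. */
static bool blkaddr_ok_raw(unsigned int blkaddr, unsigned int max)
{
	return __blkaddr_ok(blkaddr, max);
}

int main(void)
{
	inject_validity_fault = true;
	printf("checked=%d raw=%d\n",
	       blkaddr_ok(10, 100), blkaddr_ok_raw(10, 100));
	return 0;
}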

@ -1390,6 +1390,8 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
struct f2fs_sb_info *sbi = bio->bi_private;
struct compress_io_ctx *cic =
(struct compress_io_ctx *)page_private(page);
enum count_type type = WB_DATA_TYPE(page,
f2fs_is_compressed_page(page));
int i;
if (unlikely(bio->bi_status))
@ -1397,7 +1399,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
f2fs_compress_free_page(page);
dec_page_count(sbi, F2FS_WB_DATA);
dec_page_count(sbi, type);
if (atomic_dec_return(&cic->pending_pages))
return;
@ -1413,12 +1415,14 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
}
static int f2fs_write_raw_pages(struct compress_ctx *cc,
int *submitted,
int *submitted_p,
struct writeback_control *wbc,
enum iostat_type io_type)
{
struct address_space *mapping = cc->inode->i_mapping;
int _submitted, compr_blocks, ret, i;
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
int submitted, compr_blocks, i;
int ret = 0;
compr_blocks = f2fs_compressed_blocks(cc);
@ -1433,6 +1437,10 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
if (compr_blocks < 0)
return compr_blocks;
/* overwrite compressed cluster w/ normal cluster */
if (compr_blocks > 0)
f2fs_lock_op(sbi);
for (i = 0; i < cc->cluster_size; i++) {
if (!cc->rpages[i])
continue;
@ -1457,7 +1465,7 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
if (!clear_page_dirty_for_io(cc->rpages[i]))
goto continue_unlock;
ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
NULL, NULL, wbc, io_type,
compr_blocks, false);
if (ret) {
@ -1465,26 +1473,29 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
unlock_page(cc->rpages[i]);
ret = 0;
} else if (ret == -EAGAIN) {
ret = 0;
/*
* for quota files, just redirty the remaining pages to
* avoid deadlock caused by cluster update race
* from foreground operation.
*/
if (IS_NOQUOTA(cc->inode))
return 0;
ret = 0;
goto out;
f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
goto retry_write;
}
return ret;
goto out;
}
*submitted += _submitted;
*submitted_p += submitted;
}
f2fs_balance_fs(F2FS_M_SB(mapping), true);
out:
if (compr_blocks > 0)
f2fs_unlock_op(sbi);
return 0;
f2fs_balance_fs(sbi, true);
return ret;
}
int f2fs_write_multi_pages(struct compress_ctx *cc,

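In the rewritten f2fs_write_raw_pages(), f2fs_lock_op() is taken only when a compressed cluster is being overwritten, and every exit funnels through the single out: label so the unlock exactly mirrors the conditional lock. A tiny pthread sketch of that shape; the loop body is a placeholder and none of this is f2fs code:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t op_lock = PTHREAD_MUTEX_INITIALIZER;

static int write_pages(int compr_blocks, int fail_at)
{
	int i, ret = 0;

	if (compr_blocks > 0)			/* lock only for the overwrite case */
		pthread_mutex_lock(&op_lock);

	for (i = 0; i < 4; i++) {
		if (i == fail_at) {
			ret = -EIO;		/* every failure goes through "out" */
			goto out;
		}
	}
out:
	if (compr_blocks > 0)			/* unlock exactly when it was taken */
		pthread_mutex_unlock(&op_lock);
	return ret;
}

int main(void)
{
	printf("%d %d\n", write_pages(2, -1), write_pages(0, 1));
	return 0;
}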

@ -49,7 +49,7 @@ void f2fs_destroy_bioset(void)
bioset_exit(&f2fs_bioset);
}
static bool __is_cp_guaranteed(struct page *page)
bool f2fs_is_cp_guaranteed(struct page *page)
{
struct address_space *mapping = page->mapping;
struct inode *inode;
@ -66,8 +66,6 @@ static bool __is_cp_guaranteed(struct page *page)
S_ISDIR(inode->i_mode))
return true;
if (f2fs_is_compressed_page(page))
return false;
if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
page_private_gcing(page))
return true;
@ -339,7 +337,7 @@ static void f2fs_write_end_io(struct bio *bio)
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
enum count_type type = WB_DATA_TYPE(page);
enum count_type type = WB_DATA_TYPE(page, false);
if (page_private_dummy(page)) {
clear_page_private_dummy(page);
@ -767,7 +765,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
__read_io_type(page) : WB_DATA_TYPE(fio->page));
__read_io_type(page) : WB_DATA_TYPE(fio->page, false));
if (is_read_io(bio_op(bio)))
f2fs_submit_read_bio(fio->sbi, bio, fio->type);
@ -978,7 +976,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
if (fio->io_wbc)
wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
inc_page_count(fio->sbi, WB_DATA_TYPE(page));
inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
*fio->last_block = fio->new_blkaddr;
*fio->bio = bio;
@ -1012,6 +1010,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
struct page *bio_page;
enum count_type type;
f2fs_bug_on(sbi, is_read_io(fio->op));
@ -1051,7 +1050,8 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
/* set submitted = true as a return value */
fio->submitted = 1;
inc_page_count(sbi, WB_DATA_TYPE(bio_page));
type = WB_DATA_TYPE(bio_page, fio->compressed_page);
inc_page_count(sbi, type);
if (io->bio &&
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
@ -1064,7 +1064,8 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
if (F2FS_IO_ALIGNED(sbi) &&
(fio->type == DATA || fio->type == NODE) &&
fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
dec_page_count(sbi, WB_DATA_TYPE(bio_page));
dec_page_count(sbi, WB_DATA_TYPE(bio_page,
fio->compressed_page));
fio->retry = 1;
goto skip;
}
@ -1227,7 +1228,8 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
err = inc_valid_block_count(sbi, dn->inode, &count, true);
if (unlikely(err))
return err;
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
@ -1485,7 +1487,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
dn->data_blkaddr = f2fs_data_blkaddr(dn);
if (dn->data_blkaddr == NULL_ADDR) {
err = inc_valid_block_count(sbi, dn->inode, &count);
err = inc_valid_block_count(sbi, dn->inode, &count, true);
if (unlikely(err))
return err;
}
@ -2859,7 +2861,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.encrypted_page = NULL,
.submitted = 0,
.compr_blocks = compr_blocks,
.need_lock = LOCK_RETRY,
.need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
.post_read = f2fs_post_read_required(inode) ? 1 : 0,
.io_type = io_type,
.io_wbc = wbc,
@ -2939,6 +2941,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
if (err == -EAGAIN) {
err = f2fs_do_write_data_page(&fio);
if (err == -EAGAIN) {
f2fs_bug_on(sbi, compr_blocks);
fio.need_lock = LOCK_REQ;
err = f2fs_do_write_data_page(&fio);
}

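The data.c changes thread a compressed-page flag into WB_DATA_TYPE() so compressed writeback pages are counted as F2FS_WB_CP_DATA on both the submit and the completion side. The invariant is simply that inc_page_count() and dec_page_count() must classify the same page identically; a toy standalone analogue of that symmetry (illustrative counters, not the f2fs ones):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum count_type { WB_DATA, WB_CP_DATA, NR_TYPES };

static long counters[NR_TYPES];

/* Classification must be identical at submit and completion time. */
static enum count_type wb_type(bool cp_guaranteed, bool compressed)
{
	return (compressed || cp_guaranteed) ? WB_CP_DATA : WB_DATA;
}

static void submit_page(bool cp, bool compressed)
{
	counters[wb_type(cp, compressed)]++;
}

static void complete_page(bool cp, bool compressed)
{
	counters[wb_type(cp, compressed)]--;
}

int main(void)
{
	submit_page(false, true);	/* compressed page counted as CP data */
	complete_page(false, true);
	assert(counters[WB_DATA] == 0 && counters[WB_CP_DATA] == 0);
	printf("writeback counters balanced\n");
	return 0;
}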

@ -74,40 +74,14 @@ static void __set_extent_info(struct extent_info *ei,
}
}
static bool __may_read_extent_tree(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (!test_opt(sbi, READ_EXTENT_CACHE))
return false;
if (is_inode_flag_set(inode, FI_NO_EXTENT))
return false;
if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
!f2fs_sb_has_readonly(sbi))
return false;
return S_ISREG(inode->i_mode);
}
static bool __may_age_extent_tree(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (!test_opt(sbi, AGE_EXTENT_CACHE))
return false;
if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
return false;
if (file_is_cold(inode))
return false;
return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}
static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
{
if (type == EX_READ)
return __may_read_extent_tree(inode);
else if (type == EX_BLOCK_AGE)
return __may_age_extent_tree(inode);
return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
S_ISREG(inode->i_mode);
if (type == EX_BLOCK_AGE)
return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
return false;
}
@ -120,7 +94,22 @@ static bool __may_extent_tree(struct inode *inode, enum extent_type type)
if (list_empty(&F2FS_I_SB(inode)->s_list))
return false;
return __init_may_extent_tree(inode, type);
if (!__init_may_extent_tree(inode, type))
return false;
if (type == EX_READ) {
if (is_inode_flag_set(inode, FI_NO_EXTENT))
return false;
if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
!f2fs_sb_has_readonly(F2FS_I_SB(inode)))
return false;
} else if (type == EX_BLOCK_AGE) {
if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
return false;
if (file_is_cold(inode))
return false;
}
return true;
}
static void __try_update_largest_extent(struct extent_tree *et,


@ -60,7 +60,8 @@ enum {
FAULT_SLAB_ALLOC,
FAULT_DQUOT_INIT,
FAULT_LOCK_OP,
FAULT_BLKADDR,
FAULT_BLKADDR_VALIDITY,
FAULT_BLKADDR_CONSISTENCE,
FAULT_MAX,
};
@ -1074,7 +1075,8 @@ struct f2fs_sm_info {
* f2fs monitors the number of several block types such as on-writeback,
* dirty dentry blocks, dirty node blocks, and dirty meta blocks.
*/
#define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
#define WB_DATA_TYPE(p, f) \
(f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
F2FS_DIRTY_DENTS,
F2FS_DIRTY_DATA,
@ -2221,7 +2223,7 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
struct inode *inode, blkcnt_t *count)
struct inode *inode, blkcnt_t *count, bool partial)
{
blkcnt_t diff = 0, release = 0;
block_t avail_user_block_count;
@ -2261,6 +2263,11 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
avail_user_block_count = 0;
}
if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
if (!partial) {
spin_unlock(&sbi->stat_lock);
goto enospc;
}
diff = sbi->total_valid_block_count - avail_user_block_count;
if (diff > *count)
diff = *count;
@ -3424,11 +3431,9 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type)
{
if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type))
f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
blkaddr, type);
f2fs_bug_on(sbi, 1);
}
}
static inline bool __is_valid_data_blkaddr(block_t blkaddr)
@ -3723,6 +3728,8 @@ struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type);
bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
@ -3763,6 +3770,7 @@ void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
*/
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
bool f2fs_is_cp_guaranteed(struct page *page);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,

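inc_valid_block_count() now takes a partial argument: normal block allocation keeps the old behaviour of trimming the request to the space that is left, while callers that need all-or-nothing semantics (such as compressed-cluster reservation) pass partial=false and get -ENOSPC. A standalone sketch of that policy switch, under the simplifying assumption of one global free-space counter:

#include <errno.h>
#include <stdio.h>

static long valid_blocks;		/* blocks already accounted */
static const long avail_blocks = 100;	/* user-visible capacity */

/* Returns 0 on success; may shrink *count when partial is allowed. */
static int reserve_blocks(long *count, int partial)
{
	long new_total = valid_blocks + *count;

	if (new_total > avail_blocks) {
		long diff = new_total - avail_blocks;

		if (!partial)
			return -ENOSPC;	/* all-or-nothing caller */
		if (diff >= *count)
			return -ENOSPC;	/* nothing can be granted at all */
		*count -= diff;		/* trim to what actually fits */
	}
	valid_blocks += *count;
	return 0;
}

int main(void)
{
	long want = 150;

	printf("strict: %d\n", reserve_blocks(&want, 0));
	printf("partial: %d, granted %ld\n", reserve_blocks(&want, 1), want);
	return 0;
}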

@ -593,9 +593,13 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
f2fs_set_data_blkaddr(dn);
if (__is_valid_data_blkaddr(blkaddr)) {
if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
DATA_GENERIC_ENHANCE))
if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
continue;
if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
DATA_GENERIC_ENHANCE)) {
f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
continue;
}
if (compressed_cluster)
valid_blocks++;
}
@ -3598,32 +3602,46 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
blkcnt_t reserved;
int ret;
for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
blkaddr = f2fs_data_blkaddr(dn);
for (i = 0; i < cluster_size; i++) {
blkaddr = data_blkaddr(dn->inode, dn->node_page,
dn->ofs_in_node + i);
if (i == 0) {
if (blkaddr == COMPRESS_ADDR)
continue;
dn->ofs_in_node += cluster_size;
goto next;
}
if (__is_valid_data_blkaddr(blkaddr)) {
compr_blocks++;
if (blkaddr != COMPRESS_ADDR) {
dn->ofs_in_node += cluster_size;
goto next;
}
continue;
}
dn->data_blkaddr = NEW_ADDR;
f2fs_set_data_blkaddr(dn);
/*
* compressed cluster was not released because it
* failed in release_compress_blocks(), so NEW_ADDR
* is a possible case.
*/
if (blkaddr == NEW_ADDR ||
__is_valid_data_blkaddr(blkaddr)) {
compr_blocks++;
continue;
}
}
reserved = cluster_size - compr_blocks;
ret = inc_valid_block_count(sbi, dn->inode, &reserved);
if (ret)
/* for the case all blocks in cluster were reserved */
if (reserved == 1)
goto next;
ret = inc_valid_block_count(sbi, dn->inode, &reserved, false);
if (unlikely(ret))
return ret;
if (reserved != cluster_size - compr_blocks)
return -ENOSPC;
for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
if (f2fs_data_blkaddr(dn) == NULL_ADDR) {
dn->data_blkaddr = NEW_ADDR;
f2fs_set_data_blkaddr(dn);
}
}
f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);

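The reworked reserve_compress_blocks() scans the whole cluster without advancing ofs_in_node, counts slots that are already backed (valid or NEW_ADDR), reserves the remainder all-or-nothing through inc_valid_block_count(..., false), and only then rewrites the NULL_ADDR slots to NEW_ADDR. A compact count-then-commit sketch with toy block states (these are not f2fs's real address values):

#include <errno.h>
#include <stdio.h>

enum { BLK_NULL, BLK_NEW, BLK_VALID };	/* toy stand-ins for the block states */

static int budget = 3;			/* free blocks left to reserve */

static int reserve_cluster(int *cluster, int size)
{
	int i, backed = 0, need;

	for (i = 0; i < size; i++)	/* pass 1: count only, no mutation */
		if (cluster[i] != BLK_NULL)
			backed++;

	need = size - backed;
	if (need == 0)
		return 0;		/* nothing left to reserve */
	if (need > budget)
		return -ENOSPC;		/* all-or-nothing reservation */
	budget -= need;

	for (i = 0; i < size; i++)	/* pass 2: commit the reservation */
		if (cluster[i] == BLK_NULL)
			cluster[i] = BLK_NEW;
	return 0;
}

int main(void)
{
	int cluster[4] = { BLK_VALID, BLK_NULL, BLK_NULL, BLK_NEW };

	printf("ret=%d remaining budget=%d\n",
	       reserve_cluster(cluster, 4), budget);
	return 0;
}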

@ -248,7 +248,7 @@ static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
} else {
blkcnt_t count = 1;
err = inc_valid_block_count(sbi, inode, &count);
err = inc_valid_block_count(sbi, inode, &count, true);
if (err) {
f2fs_put_dnode(&dn);
return err;


@ -45,24 +45,25 @@ static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION
const char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
[FAULT_KVMALLOC] = "kvmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
[FAULT_PAGE_GET] = "page get",
[FAULT_ALLOC_NID] = "alloc nid",
[FAULT_ORPHAN] = "orphan",
[FAULT_BLOCK] = "no more block",
[FAULT_DIR_DEPTH] = "too big dir depth",
[FAULT_EVICT_INODE] = "evict_inode fail",
[FAULT_TRUNCATE] = "truncate fail",
[FAULT_READ_IO] = "read IO error",
[FAULT_CHECKPOINT] = "checkpoint error",
[FAULT_DISCARD] = "discard error",
[FAULT_WRITE_IO] = "write IO error",
[FAULT_SLAB_ALLOC] = "slab alloc",
[FAULT_DQUOT_INIT] = "dquot initialize",
[FAULT_LOCK_OP] = "lock_op",
[FAULT_BLKADDR] = "invalid blkaddr",
[FAULT_KMALLOC] = "kmalloc",
[FAULT_KVMALLOC] = "kvmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
[FAULT_PAGE_GET] = "page get",
[FAULT_ALLOC_NID] = "alloc nid",
[FAULT_ORPHAN] = "orphan",
[FAULT_BLOCK] = "no more block",
[FAULT_DIR_DEPTH] = "too big dir depth",
[FAULT_EVICT_INODE] = "evict_inode fail",
[FAULT_TRUNCATE] = "truncate fail",
[FAULT_READ_IO] = "read IO error",
[FAULT_CHECKPOINT] = "checkpoint error",
[FAULT_DISCARD] = "discard error",
[FAULT_WRITE_IO] = "write IO error",
[FAULT_SLAB_ALLOC] = "slab alloc",
[FAULT_DQUOT_INIT] = "dquot initialize",
[FAULT_LOCK_OP] = "lock_op",
[FAULT_BLKADDR_VALIDITY] = "invalid blkaddr",
[FAULT_BLKADDR_CONSISTENCE] = "inconsistent blkaddr",
};
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,

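FAULT_BLKADDR is replaced by FAULT_BLKADDR_VALIDITY and FAULT_BLKADDR_CONSISTENCE, each with its own entry in f2fs_fault_name[] and its own bit in the fault_type mask. Assuming the usual f2fs convention that bit n of fault_type enables the fault whose enum value is n, and that these two entries sit at bits 18 and 19 (check the enum in f2fs.h for the exact positions), a combined mask for both faults can be computed like this; it is a sketch, not f2fs code:

#include <stdio.h>

/* Assumed positions mirroring the tail of the fault enum shown earlier. */
enum {
	FAULT_BLKADDR_VALIDITY = 18,
	FAULT_BLKADDR_CONSISTENCE,	/* 19 */
};

int main(void)
{
	unsigned long mask = (1UL << FAULT_BLKADDR_VALIDITY) |
			     (1UL << FAULT_BLKADDR_CONSISTENCE);

	/* Value to echo into the fault_injection/fault_type knob. */
	printf("fault_type mask: 0x%lx\n", mask);
	return 0;
}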

@ -216,43 +216,141 @@ static inline void delayacct_wpcopy_end(void)
}
#else
extern void _trace_android_rvh_delayacct_init(void);
extern void _trace_android_rvh_delayacct_tsk_init(struct task_struct *tsk);
extern void _trace_android_rvh_delayacct_tsk_free(struct task_struct *tsk);
extern void _trace_android_vh_delayacct_blkio_start(void);
extern void _trace_android_vh_delayacct_blkio_end(struct task_struct *p);
extern void _trace_android_vh_delayacct_add_tsk(struct taskstats *d,
struct task_struct *tsk,
int *ret);
extern void _trace_android_vh_delayacct_blkio_ticks(struct task_struct *tsk, __u64 *ret);
extern void _trace_android_vh_delayacct_freepages_start(void);
extern void _trace_android_vh_delayacct_freepages_end(void);
extern void _trace_android_vh_delayacct_thrashing_start(bool *in_thrashing);
extern void _trace_android_vh_delayacct_thrashing_end(bool *in_thrashing);
extern void _trace_android_vh_delayacct_swapin_start(void);
extern void _trace_android_vh_delayacct_swapin_end(void);
extern void _trace_android_vh_delayacct_compact_start(void);
extern void _trace_android_vh_delayacct_compact_end(void);
extern void _trace_android_vh_delayacct_wpcopy_start(void);
extern void _trace_android_vh_delayacct_wpcopy_end(void);
extern void set_delayacct_enabled(bool enabled);
extern bool get_delayacct_enabled(void);
static inline void delayacct_init(void)
{}
{
if (get_delayacct_enabled())
_trace_android_rvh_delayacct_init();
}
static inline void delayacct_tsk_init(struct task_struct *tsk)
{}
{
if (get_delayacct_enabled())
_trace_android_rvh_delayacct_tsk_init(tsk);
}
static inline void delayacct_tsk_free(struct task_struct *tsk)
{}
{
if (get_delayacct_enabled())
_trace_android_rvh_delayacct_tsk_free(tsk);
}
static inline void delayacct_blkio_start(void)
{}
{
if (get_delayacct_enabled())
_trace_android_vh_delayacct_blkio_start();
}
static inline void delayacct_blkio_end(struct task_struct *p)
{}
{
if (get_delayacct_enabled())
_trace_android_vh_delayacct_blkio_end(p);
}
static inline int delayacct_add_tsk(struct taskstats *d,
struct task_struct *tsk)
{ return 0; }
{
int ret = 0;
if (get_delayacct_enabled())
_trace_android_vh_delayacct_add_tsk(d, tsk, &ret);
return ret;
}
static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
{ return 0; }
{
__u64 ret = 0;
if (get_delayacct_enabled())
_trace_android_vh_delayacct_blkio_ticks(tsk, &ret);
return ret;
}
static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
{ return 0; }
static inline void delayacct_freepages_start(void)
{}
{
if (get_delayacct_enabled())
_trace_android_vh_delayacct_freepages_start();
}
static inline void delayacct_freepages_end(void)
{}
{
if (get_delayacct_enabled())
_trace_android_vh_delayacct_freepages_end();
}
static inline void delayacct_thrashing_start(bool *in_thrashing)
{}
{
if (get_delayacct_enabled())
_trace_android_vh_delayacct_thrashing_start(in_thrashing);
}
static inline void delayacct_thrashing_end(bool *in_thrashing)
{}
{
if (get_delayacct_enabled())
_trace_android_vh_delayacct_thrashing_end(in_thrashing);
}
static inline void delayacct_swapin_start(void)
{}
{
if (get_delayacct_enabled())
_trace_android_vh_delayacct_swapin_start();
}
static inline void delayacct_swapin_end(void)
{}
{
if (get_delayacct_enabled())
_trace_android_vh_delayacct_swapin_end();
}
static inline void delayacct_compact_start(void)
{}
{
if (get_delayacct_enabled())
_trace_android_vh_delayacct_compact_start();
}
static inline void delayacct_compact_end(void)
{}
{
if (get_delayacct_enabled())
_trace_android_vh_delayacct_compact_end();
}
static inline void delayacct_wpcopy_start(void)
{}
{
if (get_delayacct_enabled())
_trace_android_vh_delayacct_wpcopy_start();
}
static inline void delayacct_wpcopy_end(void)
{}
{
if (get_delayacct_enabled())
_trace_android_vh_delayacct_wpcopy_end();
}
#endif /* CONFIG_TASK_DELAY_ACCT */

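With CONFIG_TASK_DELAY_ACCT disabled, the delayacct stubs above now forward into Android vendor hooks, but only after get_delayacct_enabled() reports that a vendor module armed them via set_delayacct_enabled(), so the default cost stays close to the old empty inlines. A user-space analogue of gating an optional hook behind a runtime flag (illustrative names, not the vendor-hook machinery):

#include <stdbool.h>
#include <stdio.h>

static bool hooks_enabled;
static void (*blkio_start_hook)(void);

static inline void delay_blkio_start(void)
{
	/* Cheap flag test when disabled; call the hook only when armed. */
	if (hooks_enabled && blkio_start_hook)
		blkio_start_hook();
}

static void vendor_blkio_start(void)
{
	printf("vendor hook: blkio start\n");
}

int main(void)
{
	delay_blkio_start();			/* no output: gate is closed */

	blkio_start_hook = vendor_blkio_start;	/* "register" the hook */
	hooks_enabled = true;			/* set_delayacct_enabled(true) */
	delay_blkio_start();			/* now the hook fires */
	return 0;
}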

@ -304,6 +304,44 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->change_ownership)
);
TRACE_EVENT(mm_alloc_contig_migrate_range_info,
TP_PROTO(unsigned long start,
unsigned long end,
unsigned long nr_migrated,
unsigned long nr_reclaimed,
unsigned long nr_mapped,
int migratetype),
TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, migratetype),
TP_STRUCT__entry(
__field(unsigned long, start)
__field(unsigned long, end)
__field(unsigned long, nr_migrated)
__field(unsigned long, nr_reclaimed)
__field(unsigned long, nr_mapped)
__field(int, migratetype)
),
TP_fast_assign(
__entry->start = start;
__entry->end = end;
__entry->nr_migrated = nr_migrated;
__entry->nr_reclaimed = nr_reclaimed;
__entry->nr_mapped = nr_mapped;
__entry->migratetype = migratetype;
),
TP_printk("start=0x%lx end=0x%lx migratetype=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu",
__entry->start,
__entry->end,
__entry->migratetype,
__entry->nr_migrated,
__entry->nr_reclaimed,
__entry->nr_mapped)
);
/*
* Required for uniquely and securely identifying mm in rss_stat tracepoint.
*/


@ -101,6 +101,48 @@ TRACE_EVENT(rpm_return_int,
__entry->ret)
);
#define RPM_STATUS_STRINGS \
EM(RPM_INVALID, "RPM_INVALID") \
EM(RPM_ACTIVE, "RPM_ACTIVE") \
EM(RPM_RESUMING, "RPM_RESUMING") \
EM(RPM_SUSPENDED, "RPM_SUSPENDED") \
EMe(RPM_SUSPENDING, "RPM_SUSPENDING")
/* Enums require being exported to userspace, for user tool parsing. */
#undef EM
#undef EMe
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define EMe(a, b) TRACE_DEFINE_ENUM(a);
RPM_STATUS_STRINGS
/*
* Now redefine the EM() and EMe() macros to map the enums to the strings that
* will be printed in the output.
*/
#undef EM
#undef EMe
#define EM(a, b) { a, b },
#define EMe(a, b) { a, b }
TRACE_EVENT(rpm_status,
TP_PROTO(struct device *dev, enum rpm_status status),
TP_ARGS(dev, status),
TP_STRUCT__entry(
__string(name, dev_name(dev))
__field(int, status)
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
__entry->status = status;
),
TP_printk("%s status=%s", __get_str(name),
__print_symbolic(__entry->status, RPM_STATUS_STRINGS))
);
#endif /* _TRACE_RUNTIME_POWER_H */
/* This part must be outside protection */

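The RPM_STATUS_STRINGS block is the usual tracepoint X-macro trick: the same EM()/EMe() list is expanded once as TRACE_DEFINE_ENUM() so user tools can resolve the enum, and once as the value/name table handed to __print_symbolic(). The technique also works in plain C; the sketch below reuses the RPM_* names purely for illustration (the real enum rpm_status is defined in the PM headers, not here):

#include <stdio.h>

#define RPM_STATUS_STRINGS \
	EM(RPM_INVALID,    "RPM_INVALID")    \
	EM(RPM_ACTIVE,     "RPM_ACTIVE")     \
	EM(RPM_RESUMING,   "RPM_RESUMING")   \
	EM(RPM_SUSPENDED,  "RPM_SUSPENDED")  \
	EMe(RPM_SUSPENDING, "RPM_SUSPENDING")

/* First expansion: declare the enumerators. */
#define EM(a, b)  a,
#define EMe(a, b) a
enum rpm_status { RPM_STATUS_STRINGS };
#undef EM
#undef EMe

/* Second expansion: build the value -> string table. */
#define EM(a, b)  { a, b },
#define EMe(a, b) { a, b }
static const struct { int val; const char *name; } rpm_names[] = {
	RPM_STATUS_STRINGS
};
#undef EM
#undef EMe

int main(void)
{
	enum rpm_status s = RPM_SUSPENDED;

	for (unsigned int i = 0; i < sizeof(rpm_names) / sizeof(rpm_names[0]); i++)
		if (rpm_names[i].val == (int)s)
			printf("status=%s\n", rpm_names[i].name);
	return 0;
}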

@ -126,6 +126,10 @@ DECLARE_HOOK(android_vh_binder_free_buf,
TP_PROTO(struct binder_proc *proc, struct binder_thread *thread,
struct binder_buffer *buffer),
TP_ARGS(proc, thread, buffer));
DECLARE_HOOK(android_vh_binder_buffer_release,
TP_PROTO(struct binder_proc *proc, struct binder_thread *thread,
struct binder_buffer *buffer, bool has_transaction),
TP_ARGS(proc, thread, buffer, has_transaction));
DECLARE_HOOK(android_vh_binder_ioctl_end,
TP_PROTO(struct task_struct *caller_task,


@ -0,0 +1,76 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifdef PROTECT_TRACE_INCLUDE_PATH
#undef PROTECT_TRACE_INCLUDE_PATH
#else /* PROTECT_TRACE_INCLUDE_PATH */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM delayacct
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_DELAYACCT_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_DELAYACCT_H
#include <trace/hooks/vendor_hooks.h>
struct task_struct;
struct taskstats;
DECLARE_RESTRICTED_HOOK(android_rvh_delayacct_init,
TP_PROTO(void *unused),
TP_ARGS(unused), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_delayacct_tsk_init,
TP_PROTO(struct task_struct *tsk),
TP_ARGS(tsk), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_delayacct_tsk_free,
TP_PROTO(struct task_struct *tsk),
TP_ARGS(tsk), 1);
DECLARE_HOOK(android_vh_delayacct_blkio_start,
TP_PROTO(void *unused),
TP_ARGS(unused));
DECLARE_HOOK(android_vh_delayacct_blkio_end,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
DECLARE_HOOK(android_vh_delayacct_add_tsk,
TP_PROTO(struct taskstats *d, struct task_struct *tsk, int *ret),
TP_ARGS(d, tsk, ret));
DECLARE_HOOK(android_vh_delayacct_blkio_ticks,
TP_PROTO(struct task_struct *tsk, __u64 *ret),
TP_ARGS(tsk, ret));
DECLARE_HOOK(android_vh_delayacct_freepages_start,
TP_PROTO(void *unused),
TP_ARGS(unused));
DECLARE_HOOK(android_vh_delayacct_freepages_end,
TP_PROTO(void *unused),
TP_ARGS(unused));
DECLARE_HOOK(android_vh_delayacct_thrashing_start,
TP_PROTO(bool *in_thrashing),
TP_ARGS(in_thrashing));
DECLARE_HOOK(android_vh_delayacct_thrashing_end,
TP_PROTO(bool *in_thrashing),
TP_ARGS(in_thrashing));
DECLARE_HOOK(android_vh_delayacct_swapin_start,
TP_PROTO(void *unused),
TP_ARGS(unused));
DECLARE_HOOK(android_vh_delayacct_swapin_end,
TP_PROTO(void *unused),
TP_ARGS(unused));
DECLARE_HOOK(android_vh_delayacct_compact_start,
TP_PROTO(void *unused),
TP_ARGS(unused));
DECLARE_HOOK(android_vh_delayacct_compact_end,
TP_PROTO(void *unused),
TP_ARGS(unused));
DECLARE_HOOK(android_vh_delayacct_wpcopy_start,
TP_PROTO(void *unused),
TP_ARGS(unused));
DECLARE_HOOK(android_vh_delayacct_wpcopy_end,
TP_PROTO(void *unused),
TP_ARGS(unused));
#endif /* _TRACE_HOOK_DELAYACCT_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
#endif /* PROTECT_TRACE_INCLUDE_PATH */


@ -21,6 +21,11 @@ DECLARE_RESTRICTED_HOOK(android_rvh_do_sp_pc_abort,
TP_ARGS(addr, esr, regs),
TP_CONDITION(!user_mode(regs)));
DECLARE_HOOK(android_vh_try_fixup_sea,
TP_PROTO(unsigned long addr, unsigned long esr, struct pt_regs *regs,
bool *can_fixup),
TP_ARGS(addr, esr, regs, can_fixup));
#endif /* _TRACE_HOOK_FAULT_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -207,6 +207,11 @@ DECLARE_RESTRICTED_HOOK(android_rvh_util_fits_cpu,
int cpu, bool *fits, bool *done),
TP_ARGS(util, uclamp_min, uclamp_max, cpu, fits, done), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_update_cpus_allowed,
TP_PROTO(struct task_struct *p, cpumask_var_t cpus_requested,
const struct cpumask *new_mask, int *ret),
TP_ARGS(p, cpus_requested, new_mask, ret), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork_init,
TP_PROTO(struct task_struct *p),
TP_ARGS(p), 1);


@ -23,6 +23,10 @@ DECLARE_RESTRICTED_HOOK(android_rvh_ufs_reprogram_all_keys,
TP_PROTO(struct ufs_hba *hba, int *err),
TP_ARGS(hba, err), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_ufs_complete_init,
TP_PROTO(struct ufs_hba *hba),
TP_ARGS(hba), 1);
DECLARE_HOOK(android_vh_ufs_prepare_command,
TP_PROTO(struct ufs_hba *hba, struct request *rq,
struct ufshcd_lrb *lrbp, int *err),


@ -10,7 +10,7 @@ obj-y = fork.o exec_domain.o panic.o \
extable.o params.o \
kthread.o sys_ni.o nsproxy.o \
notifier.o ksysfs.o cred.o reboot.o \
async.o range.o smpboot.o ucount.o regset.o
async.o range.o smpboot.o ucount.o regset.o delayacct.o
obj-$(CONFIG_USERMODE_DRIVER) += usermode_driver.o
obj-$(CONFIG_MODULES) += kmod.o
@ -94,7 +94,6 @@ obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
obj-$(CONFIG_LATENCYTOP) += latencytop.o


@ -68,6 +68,7 @@
#include <linux/wait.h>
#include <trace/hooks/cgroup.h>
#include <trace/hooks/sched.h>
DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
@ -1241,6 +1242,18 @@ void rebuild_sched_domains(void)
}
EXPORT_SYMBOL_GPL(rebuild_sched_domains);
static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
const struct cpumask *new_mask)
{
int ret = -EINVAL;
trace_android_rvh_update_cpus_allowed(p, cs->cpus_requested, new_mask, &ret);
if (!ret)
return ret;
return set_cpus_allowed_ptr(p, new_mask);
}
/**
* update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
@ -1267,7 +1280,7 @@ static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
cpumask_and(new_cpus, cs->effective_cpus,
task_cpu_possible_mask(task));
set_cpus_allowed_ptr(task, new_cpus);
update_cpus_allowed(cs, task, new_cpus);
}
css_task_iter_end(&it);
}
@ -2640,7 +2653,7 @@ static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
* can_attach beforehand should guarantee that this doesn't
* fail. TODO: have a better way to handle failure here
*/
WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach));
cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
cpuset_update_task_spread_flags(cs, task);

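update_cpus_allowed() gives the android_rvh_update_cpus_allowed hook first refusal: the hook reports that it handled the affinity change by clearing ret, in which case set_cpus_allowed_ptr() is skipped; otherwise the default path runs unchanged. A small standalone sketch of that override-then-fallback flow (the callback below is illustrative, not the restricted-hook API):

#include <errno.h>
#include <stdio.h>

/* Optional override that a "vendor module" may install. */
static void (*update_affinity_hook)(unsigned int mask, int *ret);

static int default_set_affinity(unsigned int mask)
{
	printf("default path: mask=0x%x\n", mask);
	return 0;
}

static int update_cpus_allowed_like(unsigned int mask)
{
	int ret = -EINVAL;		/* "not handled" unless the hook clears it */

	if (update_affinity_hook)
		update_affinity_hook(mask, &ret);
	if (!ret)
		return ret;		/* hook handled the request */
	return default_set_affinity(mask);
}

static void vendor_hook(unsigned int mask, int *ret)
{
	printf("vendor path: mask=0x%x\n", mask);
	*ret = 0;
}

int main(void)
{
	update_cpus_allowed_like(0x3);		/* falls back to the default */
	update_affinity_hook = vendor_hook;
	update_cpus_allowed_like(0xc);		/* handled by the override */
	return 0;
}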

@ -14,6 +14,8 @@
#include <linux/delayacct.h>
#include <linux/module.h>
#ifdef CONFIG_TASK_DELAY_ACCT
DEFINE_STATIC_KEY_FALSE(delayacct_key);
int delayacct_on __read_mostly; /* Delay accounting turned on/off */
struct kmem_cache *delayacct_cache;
@ -274,3 +276,105 @@ void __delayacct_wpcopy_end(void)
&current->delays->wpcopy_delay,
&current->delays->wpcopy_count);
}
#else
#include <trace/hooks/delayacct.h>
int delayacct_enabled __read_mostly; /* Delay accounting turned on/off */
bool get_delayacct_enabled(void)
{
return delayacct_enabled;
}
void set_delayacct_enabled(bool enabled)
{
delayacct_enabled = enabled;
}
EXPORT_SYMBOL_GPL(set_delayacct_enabled);
void _trace_android_rvh_delayacct_init(void)
{
trace_android_rvh_delayacct_init(NULL);
}
void _trace_android_rvh_delayacct_tsk_init(struct task_struct *tsk)
{
trace_android_rvh_delayacct_tsk_init(tsk);
}
void _trace_android_rvh_delayacct_tsk_free(struct task_struct *tsk)
{
trace_android_rvh_delayacct_tsk_free(tsk);
}
void _trace_android_vh_delayacct_blkio_start(void)
{
trace_android_vh_delayacct_blkio_start(NULL);
}
void _trace_android_vh_delayacct_blkio_end(struct task_struct *p)
{
trace_android_vh_delayacct_blkio_end(p);
}
void _trace_android_vh_delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk, int *ret)
{
trace_android_vh_delayacct_add_tsk(d, tsk, ret);
}
void _trace_android_vh_delayacct_blkio_ticks(struct task_struct *tsk, __u64 *ret)
{
trace_android_vh_delayacct_blkio_ticks(tsk, ret);
}
void _trace_android_vh_delayacct_freepages_start(void)
{
trace_android_vh_delayacct_freepages_start(NULL);
}
void _trace_android_vh_delayacct_freepages_end(void)
{
trace_android_vh_delayacct_freepages_end(NULL);
}
void _trace_android_vh_delayacct_thrashing_start(bool *in_thrashing)
{
trace_android_vh_delayacct_thrashing_start(in_thrashing);
}
void _trace_android_vh_delayacct_thrashing_end(bool *in_thrashing)
{
trace_android_vh_delayacct_thrashing_end(in_thrashing);
}
void _trace_android_vh_delayacct_swapin_start(void)
{
trace_android_vh_delayacct_swapin_start(NULL);
}
void _trace_android_vh_delayacct_swapin_end(void)
{
trace_android_vh_delayacct_swapin_end(NULL);
}
void _trace_android_vh_delayacct_compact_start(void)
{
trace_android_vh_delayacct_compact_start(NULL);
}
void _trace_android_vh_delayacct_compact_end(void)
{
trace_android_vh_delayacct_compact_end(NULL);
}
void _trace_android_vh_delayacct_wpcopy_start(void)
{
trace_android_vh_delayacct_wpcopy_start(NULL);
}
void _trace_android_vh_delayacct_wpcopy_end(void)
{
trace_android_vh_delayacct_wpcopy_end(NULL);
}
#endif


@ -37,6 +37,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_migrate_queued_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_overutilized);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_setaffinity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_cpus_allowed);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_build_sched_domains);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_tick);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_wakeup_ignore);


@ -755,6 +755,7 @@ void raise_softirq(unsigned int nr)
raise_softirq_irqoff(nr);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(raise_softirq);
void __raise_softirq_irqoff(unsigned int nr)
{


@ -38,6 +38,12 @@
#include "cma.h"
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(cma_alloc_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(cma_alloc_finish);
struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);


@ -444,7 +444,9 @@ isolate_migratepages_range(struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn);
int __alloc_contig_migrate_range(struct compact_control *cc,
unsigned long start, unsigned long end);
unsigned long start, unsigned long end,
int migratetype);
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
int migratetype, bool only_stealable, bool *can_steal);


@ -428,8 +428,12 @@ int folio_migrate_mapping(struct address_space *mapping,
if (folio_test_swapbacked(folio)) {
__folio_set_swapbacked(newfolio);
if (folio_test_swapcache(folio)) {
int i;
folio_set_swapcache(newfolio);
newfolio->private = folio_get_private(folio);
for (i = 0; i < nr; i++)
set_page_private(folio_page(newfolio, i),
page_private(folio_page(folio, i)));
}
entries = nr;
} else {


@ -9328,9 +9328,14 @@ static inline void alloc_contig_dump_pages(struct list_head *page_list)
}
#endif
/* [start, end) must belong to a single zone. */
/*
* [start, end) must belong to a single zone.
* @migratetype: migratetype reported by the
* trace_mm_alloc_contig_migrate_range_info event.
*/
int __alloc_contig_migrate_range(struct compact_control *cc,
unsigned long start, unsigned long end)
unsigned long start, unsigned long end,
int migratetype)
{
/* This function is based on compact_zone() from compaction.c. */
unsigned int nr_reclaimed;
@ -9342,6 +9347,10 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
.nid = zone_to_nid(cc->zone),
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
};
struct page *page;
unsigned long total_mapped = 0;
unsigned long total_migrated = 0;
unsigned long total_reclaimed = 0;
if (cc->gfp_mask & __GFP_NORETRY)
max_tries = 1;
@ -9370,9 +9379,18 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
&cc->migratepages);
cc->nr_migratepages -= nr_reclaimed;
if (trace_mm_alloc_contig_migrate_range_info_enabled()) {
total_reclaimed += nr_reclaimed;
list_for_each_entry(page, &cc->migratepages, lru)
total_mapped += page_mapcount(page);
}
ret = migrate_pages(&cc->migratepages, alloc_migration_target,
NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
if (trace_mm_alloc_contig_migrate_range_info_enabled() && !ret)
total_migrated += cc->nr_migratepages;
/*
* On -ENOMEM, migrate_pages() bails out right away. It is pointless
* to retry again over this error, so do the same here.
@ -9395,9 +9413,13 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
}
}
putback_movable_pages(&cc->migratepages);
return ret;
}
return 0;
trace_mm_alloc_contig_migrate_range_info(start, end, migratetype,
total_migrated,
total_reclaimed,
total_mapped);
return (ret < 0) ? ret : 0;
}
/**
@ -9481,7 +9503,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
* allocated. So, if we fall through be sure to clear ret so that
* -EBUSY is not accidentally used or returned to caller.
*/
ret = __alloc_contig_migrate_range(&cc, start, end);
ret = __alloc_contig_migrate_range(&cc, start, end, migratetype);
if (ret && (ret != -EBUSY || (gfp_mask & __GFP_NORETRY)))
goto done;
ret = 0;

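__alloc_contig_migrate_range() only accumulates the mapped/reclaimed/migrated totals when trace_mm_alloc_contig_migrate_range_info_enabled() says the tracepoint is live, then emits one summary event for the whole range. A standalone sketch of gathering statistics only when a tracing flag is set; the flag and the per-batch numbers are stand-ins:

#include <stdbool.h>
#include <stdio.h>

static bool trace_enabled;	/* stand-in for trace_..._enabled() */

static void trace_summary(long migrated, long reclaimed, long mapped)
{
	if (!trace_enabled)	/* a disabled tracepoint is a no-op */
		return;
	printf("migrated=%ld reclaimed=%ld mapped=%ld\n",
	       migrated, reclaimed, mapped);
}

static int migrate_range(int batches)
{
	long migrated = 0, reclaimed = 0, mapped = 0;

	for (int i = 0; i < batches; i++) {
		/* ... isolate, reclaim and migrate one batch ... */
		if (trace_enabled) {	/* pay for bookkeeping only if needed */
			reclaimed += 2;
			mapped += 3;
			migrated += 5;
		}
	}
	trace_summary(migrated, reclaimed, mapped);
	return 0;
}

int main(void)
{
	migrate_range(2);	/* tracing off: no bookkeeping, no output */
	trace_enabled = true;
	migrate_range(2);	/* totals gathered and reported once */
	return 0;
}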

@ -434,7 +434,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
}
ret = __alloc_contig_migrate_range(&cc, head_pfn,
head_pfn + nr_pages);
head_pfn + nr_pages, page_mt);
/*
* restore the page's migratetype so that it can


@ -8,9 +8,6 @@ This module contains a full list of kernel modules
_COMMON_GKI_MODULES_LIST = [
# keep sorted
"crypto/crct10dif_common.ko",
"crypto/crct10dif_generic.ko",
"drivers/block/null_blk/null_blk.ko",
"drivers/block/zram/zram.ko",
"drivers/bluetooth/btbcm.ko",
"drivers/bluetooth/btqca.ko",
@ -38,12 +35,10 @@ _COMMON_GKI_MODULES_LIST = [
"drivers/net/usb/rtl8150.ko",
"drivers/net/usb/usbnet.ko",
"drivers/net/wwan/wwan.ko",
"drivers/scsi/scsi_debug.ko",
"drivers/usb/class/cdc-acm.ko",
"drivers/usb/serial/ftdi_sio.ko",
"drivers/usb/serial/usbserial.ko",
"kernel/kheaders.ko",
"lib/crc-t10dif.ko",
"lib/crypto/libarc4.ko",
"mm/zsmalloc.ko",
"net/6lowpan/6lowpan.ko",


@ -4741,6 +4741,9 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (!(flags & NFT_SET_TIMEOUT))
return -EINVAL;
if (flags & NFT_SET_ANONYMOUS)
return -EOPNOTSUPP;
err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &desc.timeout);
if (err)
return err;
@ -4749,6 +4752,10 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
if (!(flags & NFT_SET_TIMEOUT))
return -EINVAL;
if (flags & NFT_SET_ANONYMOUS)
return -EOPNOTSUPP;
desc.gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL]));
}


@ -235,7 +235,7 @@ static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
static const struct nft_rbtree_elem *
nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
struct nft_rbtree_elem *rbe, u8 genmask)
struct nft_rbtree_elem *rbe)
{
struct nft_set *set = (struct nft_set *)__set;
struct rb_node *prev = rb_prev(&rbe->node);
@ -254,7 +254,7 @@ nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
while (prev) {
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
if (nft_rbtree_interval_end(rbe_prev) &&
nft_set_elem_active(&rbe_prev->ext, genmask))
nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
break;
prev = rb_prev(prev);
@ -365,7 +365,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
nft_set_elem_active(&rbe->ext, cur_genmask)) {
const struct nft_rbtree_elem *removed_end;
removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
removed_end = nft_rbtree_gc_elem(set, priv, rbe);
if (IS_ERR(removed_end))
return PTR_ERR(removed_end);


@ -62,6 +62,7 @@ struct tls_decrypt_ctx {
u8 iv[MAX_IV_SIZE];
u8 aad[TLS_MAX_AAD_SIZE];
u8 tail;
bool free_sgout;
struct scatterlist sg[];
};
@ -186,7 +187,6 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err)
struct aead_request *aead_req = crypto_get_completion_data(data);
struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
struct scatterlist *sgout = aead_req->dst;
struct scatterlist *sgin = aead_req->src;
struct tls_sw_context_rx *ctx;
struct tls_decrypt_ctx *dctx;
struct tls_context *tls_ctx;
@ -212,7 +212,7 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err)
}
/* Free the destination pages if skb was not decrypted inplace */
if (sgout != sgin) {
if (dctx->free_sgout) {
/* Skip the first S/G entry as it points to AAD */
for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
if (!sg)
@ -1591,6 +1591,7 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
} else if (out_sg) {
memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
}
dctx->free_sgout = !!pages;
/* Prepare and submit AEAD request */
err = tls_do_decryption(sk, sgin, sgout, dctx->iv,