Merge branch 'android14-6.1' into branch 'android14-6.1-lts'

Backmerge the latest android14-6.1 changes into the lts branch to keep
up to date.  Contains the following commits:

* 3578913b2e UPSTREAM: net/rose: Fix Use-After-Free in rose_ioctl
* 8fbed1ea00 UPSTREAM: ida: Fix crash in ida_free when the bitmap is empty
* 6ce5bb744e ANDROID: GKI: Update symbol list for mtk
* 7cbad58851 Reapply "perf: Disallow mis-matched inherited group reads"
* 067a03c44e ANDROID: GKI: Add Pasa symbol list
* b6be1a36f7 FROMGIT: mm: memcg: don't periodically flush stats when memcg is disabled
* d0e2d333f9 ANDROID: Update the ABI symbol list
* 10558542a1 ANDROID: sched: export update_misfit_status symbol
* a0b3b39898 ANDROID: GKI: Add ASR KMI symbol list
* 599710db0f FROMGIT: usb: dwc3: gadget: Fix NULL pointer dereference in dwc3_gadget_suspend
* 9265fa90c1 FROMLIST: usb: core: Prevent null pointer dereference in update_port_device_state
* 2730733d54 ANDROID: gki_defconfig: Enable CONFIG_NVME_MULTIPATH
* 4f668f5682 BACKPORT: irqchip/gic-v3: Work around affinity issues on ASR8601
* 473a871315 BACKPORT: irqchip/gic-v3: Improve affinity helper
* 6c32acf537 UPSTREAM: sched/fair: Limit sched slice duration
* 7088d250bf ANDROID: Update the ABI symbol list
* c249740414 ANDROID: idle_inject: Export function symbols
* 990d341477 ANDROID: Update the ABI symbol list
* be92a6a1b4 ANDROID: GKI: Remove CONFIG_MEDIA_CEC_RC
* fa9ac43f16 BACKPORT: usb: host: xhci: Avoid XHCI resume delay if SSUSB device is not present
*   f27fc6ba23 Merge "Merge tag 'android14-6.1.68_r00' into branch 'android14-6.1'" into android14-6.1
|\
| * 0177cfb2a2 Merge tag 'android14-6.1.68_r00' into branch 'android14-6.1'
* c96cea1a3c ANDROID: Update the ABI symbol list
* c2fbc12180 ANDROID: uid_sys_stats: Drop CONFIG_UID_SYS_STATS_DEBUG logic
* 90bd30bdef ANDROID: Update the ABI symbol list
* 3280560843 ANDROID: Update the ABI symbol list
* 427210e440 UPSTREAM: usb: gadget: uvc: Remove nested locking
* 9267e267be ANDROID: uid_sys_stats: Fully initialize uid_entry_tmp value
* 2d3f0c9d41 ANDROID: Roll back some code to fix system_server registers psi trigger failed.
* bd77c97c76 UPSTREAM: usb: gadget: uvc: Fix use are free during STREAMOFF
* 21c71a7d0e ANDROID: GKI: Add symbol list for Nothing
* aba5a3fe09 ANDROID: Enable CONFIG_LAZY_RCU in x86 gki_defconfig
* 204160394a ANDROID: fuse-bpf: Fix the issue of abnormal lseek system calls
* 947708f1ff ANDROID: ABI: Update symbol list for imx
* 7eedea7abf BACKPORT: PM: sleep: Fix possible deadlocks in core system-wide PM code
* e1a20dd9ff UPSTREAM: async: Introduce async_schedule_dev_nocall()
* e4b0e14f83 UPSTREAM: async: Split async_schedule_node_domain()
* 6b4c816d17 FROMGIT: BACKPORT: mm: update mark_victim tracepoints fields
* d97ea65296 ANDROID: Enable CONFIG_LAZY_RCU in arm64 gki_defconfig
* 90d68cedd1 FROMLIST: rcu: Provide a boot time parameter to control lazy RCU
* a079cc5876 ANDROID: rcu: Add a minimum time for marking boot as completed
* ffe09c06a8 UPSTREAM: rcu: Disable laziness if lazy-tracking says so
* d07488d26e UPSTREAM: rcu: Track laziness during boot and suspend
* 4316bd568b UPSTREAM: net: Use call_rcu_hurry() for dst_release()
* b9427245f0 UPSTREAM: workqueue: Make queue_rcu_work() use call_rcu_hurry()
* 72fdf7f606 UPSTREAM: percpu-refcount: Use call_rcu_hurry() for atomic switch
* ced65a053b UPSTREAM: io_uring: use call_rcu_hurry if signaling an eventfd
* 84c8157d06 UPSTREAM: rcu: Update synchronize_rcu_mult() comment for call_rcu_hurry()
* 3751416eeb UPSTREAM: scsi/scsi_error: Use call_rcu_hurry() instead of call_rcu()
* 52193e9489 UPSTREAM: rcu/rcutorture: Use call_rcu_hurry() where needed
* 83f8ba569f UPSTREAM: rcu/rcuscale: Use call_rcu_hurry() for async reader test
* 9b625f4978 UPSTREAM: rcu/sync: Use call_rcu_hurry() instead of call_rcu
* c570c8fea3 BACKPORT: rcu: Shrinker for lazy rcu
* 4957579439 UPSTREAM: rcu: Refactor code a bit in rcu_nocb_do_flush_bypass()
* 66a832fe38 UPSTREAM: rcu: Make call_rcu() lazy to save power
* 4fb09fb4f7 UPSTREAM: rcu: Fix missing nocb gp wake on rcu_barrier()
* 64c59ad2c3 UPSTREAM: rcu: Fix late wakeup when flush of bypass cblist happens
* 0799ace265 ANDROID: Update the ABI symbol list
* 65db2f8ed3 ANDROID: GKI: add GKI symbol list for Exynosauto SoC
* cfe8cce4e8 UPSTREAM: coresight: tmc: Don't enable TMC when it's not ready.
* 899194d7e9 UPSTREAM: netfilter: nf_tables: bail out on mismatching dynset and set expressions
* e6712ed4f0 ANDROID: ABI: Update oplus symbol list
* 24bb8fc82e ANDROID: vendor_hooks: add hooks in driver/android/binder.c
* 55930b39ca ANDROID: GKI: Update honda symbol list for xt_LOG
* 3160b69e20 ANDROID: GKI: Update honda symbol list for ebt filter
* 4dc7f98815 ANDROID: GKI: Update honda symbol list for ebtables
* 39a0823340 ANDROID: GKI: Update honda symbol list for net scheduler
* dd0098bdb4 ANDROID: GKI: Update honda symbol list for led-trigger
* 66a20ed4b8 ANDROID: GKI: Add initial symbol list for honda
* 28dbe4d613 ANDROID: GKI: add symbols to ABI
* 97100e867e FROMGIT: usb: dwc: ep0: Update request status in dwc3_ep0_stall_restart
* 36248a15a7 FROMGIT: usb: dwc3: set pm runtime active before resume common

Change-Id: I8d9586a94c3182cd365d1e3b651a7552c7c9949b
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman on 2024-02-01 16:45:30 +00:00 as commit c3a1a1e97e.
64 changed files with 4243 additions and 574 deletions.


@ -99,16 +99,20 @@ filegroup(
name = "aarch64_additional_kmi_symbol_lists",
srcs = [
# keep sorted
"android/abi_gki_aarch64_asr",
"android/abi_gki_aarch64_asus",
"android/abi_gki_aarch64_db845c",
"android/abi_gki_aarch64_exynos",
"android/abi_gki_aarch64_exynosauto",
"android/abi_gki_aarch64_galaxy",
"android/abi_gki_aarch64_honda",
"android/abi_gki_aarch64_honor",
"android/abi_gki_aarch64_imx",
"android/abi_gki_aarch64_meizu",
"android/abi_gki_aarch64_mtk",
"android/abi_gki_aarch64_nothing",
"android/abi_gki_aarch64_oplus",
"android/abi_gki_aarch64_pasa",
"android/abi_gki_aarch64_pixel",
"android/abi_gki_aarch64_qcom",
"android/abi_gki_aarch64_rockchip",


@ -4882,6 +4882,11 @@
rcu_node tree with an eye towards determining
why a new grace period has not yet started.
rcutree.enable_rcu_lazy= [KNL]
To save power, batch RCU callbacks and flush after
delay, memory pressure or callback list growing too
big.
rcuscale.gp_async= [KNL]
Measure performance of asynchronous
grace-period primitives such as call_rcu().
@ -5130,6 +5135,21 @@
rcutorture.verbose= [KNL]
Enable additional printk() statements.
rcupdate.rcu_boot_end_delay= [KNL]
Minimum time in milliseconds from the start of boot
that must elapse before the boot sequence can be marked
complete from RCU's perspective, after which RCU's
behavior becomes more relaxed. The default value is also
configurable via CONFIG_RCU_BOOT_END_DELAY.
Userspace can also mark the boot as completed
sooner by writing the time in milliseconds, say once
userspace considers the system as booted, to:
/sys/module/rcupdate/parameters/rcu_boot_end_delay
Or even just writing a value of 0 to this sysfs node.
The sysfs node can also be used to extend the delay
to be larger than the default, assuming the marking
of boot complete has not yet occurred.
rcupdate.rcu_cpu_stall_ftrace_dump= [KNL]
Dump ftrace buffer after reporting RCU CPU
stall warning.

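As a quick, hedged illustration of the interface documented above (not part of the patch itself): a userspace init step could end RCU's boot-time grace period early by writing 0 to the documented module parameter. Minimal sketch in C, with only basic error handling.

/* Minimal sketch: mark boot complete for RCU early (see text above). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/module/rcupdate/parameters/rcu_boot_end_delay";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* 0 ms: consider boot complete immediately. */
	if (write(fd, "0", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}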

@ -217,3 +217,7 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| ASR | ASR8601 | #8601001 | N/A |
+----------------+-----------------+-----------------+-----------------------------+

(File diff suppressed because it is too large.)


@ -0,0 +1,43 @@
[abi_symbol_list]
# required by asr5803.ko
sdhci_enable_sdio_irq
# required by asr_serial.ko
uart_get_divisor
uart_handle_cts_change
uart_handle_dcd_change
uart_insert_char
# required by ehci-asr-ci.ko
ehci_init_driver
ehci_setup
# required by phy-asr-ci-usb2.ko
usb_add_phy_dev
usb_remove_phy
# required by pvrsrvkm.ko
call_rcu
devm_devfreq_remove_device
dev_pm_opp_remove
dma_fence_array_ops
dma_fence_enable_sw_signaling
idr_replace
kthread_freezable_should_stop
rcu_barrier
# required by sdhci_asr.ko
sdhci_resume_host
sdhci_send_tuning
sdhci_set_clock
sdhci_set_uhs_signaling
sdhci_suspend_host
# required by vh_sched.ko
__traceiter_android_vh_map_util_freq
__tracepoint_android_vh_map_util_freq
# required by asr_drm.ko
clk_set_rate_exclusive
clk_rate_exclusive_put


@ -34,14 +34,8 @@
clk_disable
clk_enable
clk_get
__clk_get_hw
clk_get_rate
clk_hw_get_parent
clk_prepare
clk_register
clk_register_clkdev
clk_register_fixed_factor
clk_register_fixed_rate
clk_set_rate
clk_unprepare
cma_alloc
@ -90,6 +84,7 @@
_dev_info
devm_add_action
devm_clk_get
devm_clk_get_optional
devm_free_irq
devm_gen_pool_create
devm_gpiod_get
@ -177,8 +172,6 @@
drm_atomic_helper_connector_destroy_state
drm_atomic_helper_connector_duplicate_state
drm_atomic_helper_connector_reset
__drm_atomic_helper_private_obj_duplicate_state
drm_atomic_private_obj_init
drm_bridge_add
drm_bridge_remove
drm_compat_ioctl
@ -242,7 +235,6 @@
drm_modeset_unlock
drm_mode_vrefresh
drm_object_attach_property
drm_object_property_set_value
drm_open
drm_poll
drm_prime_gem_destroy
@ -252,14 +244,18 @@
drm_release
drm_vblank_init
dump_stack
dw_pcie_ep_init
dw_pcie_host_init
dw_pcie_own_conf_map_bus
dw_pcie_read
dw_pcie_read_dbi
dw_pcie_setup_rc
dw_pcie_write
dw_pcie_write_dbi
enable_irq
fd_install
_find_first_bit
_find_first_zero_bit
_find_next_bit
finish_wait
flush_delayed_work
@ -315,7 +311,9 @@
__init_waitqueue_head
iomem_resource
iommu_get_domain_for_dev
iommu_map
iommu_register_device_fault_handler
iommu_unmap
iommu_unregister_device_fault_handler
ioremap_prot
iounmap
@ -403,6 +401,7 @@
mipi_dsi_device_unregister
mipi_dsi_driver_register_full
mipi_dsi_driver_unregister
misc_deregister
misc_register
mod_timer
module_layout
@ -419,13 +418,12 @@
netdev_warn
noop_llseek
nr_cpu_ids
nsecs_to_jiffies
ns_to_timespec64
__num_online_cpus
of_address_to_resource
of_alias_get_id
of_clk_add_provider
of_clk_get
of_clk_src_onecell_get
of_count_phandle_with_args
of_device_get_match_data
of_device_is_available
@ -467,10 +465,15 @@
oops_in_progress
panic
panic_notifier_list
param_ops_bool
param_ops_int
param_ops_string
param_ops_uint
pcie_get_mps
pci_epc_mem_alloc_addr
pci_epc_mem_free_addr
pci_epc_set_bar
pci_epc_unmap_addr
pcie_set_mps
pci_find_bus
pci_generic_config_read
@ -547,6 +550,7 @@
__register_chrdev
register_console
register_pm_notifier
register_reboot_notifier
register_syscore_ops
regmap_read
regmap_update_bits_base
@ -647,6 +651,7 @@
ttm_bo_mmap_obj
__udelay
__unregister_chrdev
unregister_pm_notifier
up
up_write
__usecs_to_jiffies
@ -687,6 +692,7 @@
vsnprintf
vunmap
vzalloc
wait_for_completion
wait_for_completion_interruptible_timeout
wait_for_completion_timeout
__wake_up
@ -712,9 +718,18 @@
regmap_bulk_write
regmap_register_patch
# required by clk_exynosauto.ko
__clk_get_hw
clk_hw_get_parent
clk_register
clk_register_clkdev
clk_register_fixed_factor
clk_register_fixed_rate
of_clk_add_provider
of_clk_src_onecell_get
# required by cmupmucal.ko
debugfs_create_x32
of_find_node_by_type
# required by dmatest.ko
__dma_request_channel
@ -726,7 +741,6 @@
mempool_free_slab
param_get_bool
param_get_string
param_ops_bool
param_set_bool
param_set_copystring
set_freezable
@ -741,7 +755,9 @@
drm_atomic_get_new_private_obj_state
drm_atomic_get_old_private_obj_state
drm_atomic_get_private_obj_state
__drm_atomic_helper_private_obj_duplicate_state
drm_atomic_private_obj_fini
drm_atomic_private_obj_init
drm_crtc_commit_wait
drm_crtc_wait_one_vblank
__drm_debug
@ -750,6 +766,7 @@
drm_edid_duplicate
drm_edid_get_monitor_name
drm_modeset_lock_single_interruptible
drm_object_property_set_value
__drm_printfn_debug
memchr_inv
__sw_hweight8
@ -848,7 +865,6 @@
typec_set_pwr_role
typec_unregister_partner
typec_unregister_port
unregister_pm_notifier
usb_gadget_set_state
usb_otg_state_string
wakeup_source_register
@ -945,7 +961,6 @@
drm_atomic_helper_update_plane
drm_atomic_helper_wait_for_vblanks
drm_bridge_attach
drm_connector_set_path_property
drm_crtc_arm_vblank_event
drm_crtc_handle_vblank
drm_crtc_init_with_planes
@ -963,7 +978,6 @@
drm_gem_vm_open
drm_get_connector_status_name
drm_get_format_info
drm_helper_probe_detect
drm_mode_config_helper_resume
drm_mode_config_helper_suspend
drm_mode_config_reset
@ -982,7 +996,6 @@
drm_plane_create_zpos_immutable_property
drm_plane_create_zpos_property
__drm_printfn_info
drm_property_blob_put
drm_property_create
drm_property_create_blob
drm_rotation_simplify
@ -991,6 +1004,7 @@
drm_writeback_connector_init
drm_writeback_queue_job
drm_writeback_signal_completion
flush_signals
gpiod_to_irq
mipi_dsi_host_register
mipi_dsi_host_unregister
@ -998,7 +1012,6 @@
of_drm_find_panel
of_find_i2c_device_by_node
of_graph_get_endpoint_by_regs
of_graph_get_endpoint_count
of_graph_get_next_endpoint
of_graph_get_port_by_id
of_graph_get_remote_port
@ -1010,15 +1023,16 @@
platform_find_device_by_driver
seq_hex_dump
seq_release
strnstr
synchronize_irq
vmalloc_to_page
# required by exynos_mfc.ko
clk_put
dma_buf_begin_cpu_access_partial
dma_buf_end_cpu_access_partial
hex_dump_to_buffer
iommu_map
iommu_map_sg
iommu_unmap
__sw_hweight64
_totalram_pages
@ -1056,10 +1070,12 @@
strncasecmp
thermal_cdev_update
# required by exyswd-rng.ko
devm_hwrng_register
# required by gpu-sched.ko
mod_delayed_work_on
sched_set_fifo_low
wait_for_completion
__xa_alloc
xa_destroy
xa_erase
@ -1113,6 +1129,9 @@
# required by panic-fingerprint.ko
stack_trace_save
# required by pcie-exynos-dw-ep.ko
pci_epc_map_addr
# required by pcs_xpcs.ko
mdiobus_modify_changed
phylink_mii_c22_pcs_decode_state
@ -1198,6 +1217,12 @@
__devm_irq_alloc_descs
handle_nested_irq
# required by s3c2410_wdt.ko
watchdog_init_timeout
watchdog_register_device
watchdog_set_restart_priority
watchdog_unregister_device
# required by sam-is.ko
down
down_trylock
@ -1206,9 +1231,9 @@
kernel_neon_end
kobject_del
kthread_flush_worker
pfn_is_map_memory
pm_relax
pm_stay_awake
register_reboot_notifier
regulator_get_optional
regulator_get_voltage
regulator_is_enabled
@ -1246,7 +1271,6 @@
dma_heap_get_name
is_dma_buf_file
iterate_fd
misc_deregister
remap_pfn_range
__sg_page_iter_next
__sg_page_iter_start
@ -1393,7 +1417,6 @@
drm_syncobj_get_handle
drm_syncobj_replace_fence
__fdget
_find_first_zero_bit
__folio_put
__get_task_comm
handle_simple_irq
@ -1415,7 +1438,6 @@
memremap
memunmap
mmu_notifier_synchronize
nsecs_to_jiffies
page_pinner_inited
__page_pinner_put_page
param_ops_charp
@ -1579,7 +1601,6 @@
stream_open
# required by stmmac-platform.ko
devm_clk_get_optional
of_get_mac_address
of_phy_is_fixed_link
@ -1754,9 +1775,9 @@
current_work
default_wake_function
dma_buf_fd
dw_pcie_ep_init
dw_pcie_read_dbi
dw_pcie_write_dbi
drm_connector_set_path_property
drm_helper_probe_detect
drm_property_blob_put
irq_create_of_mapping
irq_dispose_mapping
irq_find_matching_fwspec
@ -1769,21 +1790,17 @@
kthread_complete_and_exit
nonseekable_open
of_clk_del_provider
of_find_node_by_type
of_graph_get_endpoint_count
of_irq_find_parent
pci_disable_device
pci_disable_msi
pci_enable_device
pci_epc_map_addr
pci_epc_mem_alloc_addr
pci_epc_mem_free_addr
pci_epc_set_bar
pci_epc_unmap_addr
pci_ioremap_wc_bar
pci_num_vf
__pci_register_driver
pci_request_regions
pci_unregister_driver
pfn_is_map_memory
__platform_driver_probe
pm_power_off
proc_create_data
@ -1792,12 +1809,9 @@
remove_proc_entry
remove_wait_queue
sigprocmask
strnstr
sysfs_create_bin_file
sysfs_remove_bin_file
__task_pid_nr_ns
thermal_of_zone_register
thermal_of_zone_unregister
vprintk
watchdog_register_device
watchdog_unregister_device


@ -0,0 +1,91 @@
[abi_symbol_list]
# commonly used by custom vendor modules
__bitmap_subset
devres_find
input_device_enabled
pvclock_gtod_register_notifier
refcount_dec_if_one
# commonly required by ledtrig-heartbeat.ko modules
avenrun
# commonly required by ledtrig-*.ko modules
led_blink_set_oneshot
led_set_brightness_nosleep
led_stop_software_blink
# required by cls_flower.ko module
flow_rule_alloc
idr_get_next_ul
radix_tree_tagged
skb_flow_dissect_ct
skb_flow_dissect_hash
skb_flow_dissect_meta
skb_flow_dissect_tunnel_info
skb_flow_dissector_init
tc_setup_cb_call
tc_setup_cb_destroy
tc_setup_cb_reoffload
tc_setup_cb_add
tcf_exts_num_actions
tcf_exts_terse_dump
# required by act_vlan.ko module
jiffies_to_clock_t
skb_eth_push
skb_eth_pop
__skb_vlan_pop
skb_vlan_push
skb_vlan_pop
tcf_action_check_ctrlact
tcf_action_set_ctrlact
tcf_action_update_stats
tcf_chain_put_by_act
tcf_generic_walker
tcf_idr_check_alloc
tcf_idr_cleanup
tcf_idr_create_from_flags
tcf_idr_release
tcf_idr_search
tcf_idrinfo_destroy
tcf_register_action
tcf_unregister_action
# required by ebtables.ko module
__audit_log_nfcfg
audit_enabled
nf_register_sockopt
nf_unregister_sockopt
vmalloc_node
xt_check_match
xt_check_target
xt_compat_add_offset
xt_compat_calc_jump
xt_compat_flush_offsets
xt_compat_init_offsets
xt_compat_lock
xt_compat_match_offset
xt_compat_target_offset
xt_compat_unlock
xt_data_to_user
xt_find_match
xt_request_find_match
xt_request_find_target
# required by ebt_arpreply.ko module
arp_send
# required by ebt_log.ko module
nf_log_packet
sysctl_nf_log_all_netns
# required by xt_LOG.ko module
nf_log_buf_add
nf_log_buf_close
nf_log_buf_open
nf_log_register
nf_log_set
nf_log_unregister
nf_log_unset
nf_logger_find_get
nf_logger_put


@ -2253,6 +2253,14 @@
usb_udc_vbus_handler
__usecs_to_jiffies
usleep_range_state
__v4l2_async_nf_add_fwnode
v4l2_async_nf_cleanup
v4l2_async_nf_init
v4l2_async_nf_register
v4l2_async_nf_unregister
v4l2_async_register_subdev
v4l2_async_register_subdev_sensor
v4l2_async_unregister_subdev
v4l2_ctrl_auto_cluster
v4l2_ctrl_handler_free
v4l2_ctrl_handler_init_class
@ -2289,6 +2297,9 @@
v4l2_fh_init
v4l2_fh_open
__v4l2_find_nearest_size
v4l2_fwnode_device_parse
v4l2_fwnode_endpoint_alloc_parse
v4l2_fwnode_endpoint_parse
v4l2_get_link_freq
v4l2_g_parm_cap
v4l2_i2c_subdev_init


@ -152,6 +152,8 @@
clk_bulk_put
clk_bulk_put_all
clk_bulk_unprepare
clkdev_add
clkdev_drop
clk_disable
clk_divider_ops
clk_enable
@ -473,6 +475,7 @@
devm_devfreq_register_notifier
devm_devfreq_remove_device
devm_devfreq_unregister_notifier
devm_devfreq_unregister_opp_notifier
devm_extcon_dev_allocate
devm_extcon_dev_register
devm_extcon_register_notifier
@ -928,6 +931,8 @@
find_vpid
finish_wait
firmware_request_nowarn
fixed_phy_register
fixed_phy_unregister
flush_dcache_page
flush_delayed_fput
flush_delayed_work
@ -989,6 +994,7 @@
generic_file_llseek
generic_handle_domain_irq
generic_handle_irq
generic_handle_irq_safe
genlmsg_multicast_allns
genlmsg_put
genl_register_family
@ -1257,6 +1263,7 @@
irq_create_of_mapping
irq_dispose_mapping
__irq_domain_add
irq_domain_create_simple
irq_domain_free_irqs_common
irq_domain_get_irq_data
irq_domain_remove
@ -1861,6 +1868,8 @@
phy_ethtool_get_eee
phy_ethtool_get_link_ksettings
phy_ethtool_get_wol
phy_ethtool_ksettings_get
phy_ethtool_ksettings_set
phy_ethtool_nway_reset
phy_ethtool_set_eee
phy_ethtool_set_link_ksettings
@ -1886,13 +1895,16 @@
phy_power_on
phy_print_status
phy_put
phy_register_fixup_for_uid
phy_remove_link_mode
phy_save_page
phy_set_mode_ext
phy_start
phy_start_aneg
phy_stop
phy_support_asym_pause
phy_suspend
phy_unregister_fixup_for_uid
pick_migrate_task
pid_task
pinconf_generic_parse_dt_config


@ -0,0 +1,4 @@
[abi_symbol_list]
# required by mount_state.ko
iterate_supers_type
get_fs_type


@ -123,7 +123,10 @@
__traceiter_android_vh_record_rwsem_lock_starttime
__traceiter_android_vh_alter_mutex_list_add
__traceiter_android_vh_binder_free_proc
__traceiter_android_vh_binder_has_special_work_ilocked
__traceiter_android_vh_binder_has_work_ilocked
__traceiter_android_vh_binder_ioctl_end
__traceiter_android_vh_binder_looper_exited
__traceiter_android_vh_binder_looper_state_registered
__traceiter_android_vh_binder_new_ref
__traceiter_android_vh_binder_del_ref
@ -134,6 +137,8 @@
__traceiter_android_vh_binder_proc_transaction_finish
__traceiter_android_vh_binder_read_done
__traceiter_android_vh_binder_select_worklist_ilocked
__traceiter_android_vh_binder_select_special_worklist
__traceiter_android_vh_binder_spawn_new_thread
__traceiter_android_vh_binder_thread_read
__traceiter_android_vh_binder_thread_release
__traceiter_android_vh_binder_wait_for_work
@ -209,7 +214,10 @@
__tracepoint_android_vh_account_task_time
__tracepoint_android_vh_alter_mutex_list_add
__tracepoint_android_vh_binder_free_proc
__tracepoint_android_vh_binder_has_special_work_ilocked
__tracepoint_android_vh_binder_has_work_ilocked
__tracepoint_android_vh_binder_ioctl_end
__tracepoint_android_vh_binder_looper_exited
__tracepoint_android_vh_binder_looper_state_registered
__tracepoint_android_vh_binder_new_ref
__tracepoint_android_vh_binder_del_ref
@ -220,6 +228,8 @@
__tracepoint_android_vh_binder_proc_transaction_finish
__tracepoint_android_vh_binder_read_done
__tracepoint_android_vh_binder_select_worklist_ilocked
__tracepoint_android_vh_binder_select_special_worklist
__tracepoint_android_vh_binder_spawn_new_thread
__tracepoint_android_vh_binder_thread_read
__tracepoint_android_vh_binder_thread_release
__tracepoint_android_vh_binder_wait_for_work


@ -0,0 +1,4 @@
[abi_symbol_list]
iio_trigger_generic_data_rdy_poll
input_device_enabled


@ -543,6 +543,7 @@
dma_buf_fd
dma_buf_get
dma_buf_map_attachment
dma_buf_map_attachment_unlocked
dma_buf_mmap
dmabuf_page_pool_alloc
dmabuf_page_pool_create
@ -552,6 +553,7 @@
dma_buf_put
dma_buf_set_name
dma_buf_unmap_attachment
dma_buf_unmap_attachment_unlocked
dma_buf_vmap
dma_buf_vunmap
dma_direct_alloc
@ -995,6 +997,7 @@
gpiod_get_raw_value_cansleep
gpiod_get_value
gpiod_get_value_cansleep
gpiod_set_debounce
gpiod_set_raw_value
gpiod_set_raw_value_cansleep
gpiod_set_value
@ -1054,6 +1057,12 @@
ida_alloc_range
ida_destroy
ida_free
idle_inject_get_duration
idle_inject_register
idle_inject_set_duration
idle_inject_set_latency
idle_inject_start
idle_inject_stop
idr_alloc
idr_alloc_cyclic
idr_destroy
@ -1314,6 +1323,7 @@
loops_per_jiffy
mac_pton
mas_empty_area_rev
max_load_balance_interval
mbox_chan_received_data
mbox_controller_register
mbox_controller_unregister
@ -1788,6 +1798,7 @@
register_inet6addr_notifier
register_inetaddr_notifier
register_kernel_break_hook
register_kretprobe
register_netdev
register_netdevice
register_netdevice_notifier
@ -1826,6 +1837,7 @@
regulator_enable_regmap
regulator_get
regulator_get_optional
regulator_get_voltage
regulator_get_voltage_sel_regmap
regulator_is_enabled
regulator_is_enabled_regmap
@ -1931,6 +1943,7 @@
scsi_set_sense_information
scsi_unblock_requests
sdev_prefix_printk
send_sig_info
seq_hex_dump
seq_lseek
seq_open
@ -2133,6 +2146,7 @@
srcu_notifier_chain_unregister
sscanf
__stack_chk_fail
static_key_count
static_key_disable
static_key_enable
static_key_slow_dec
@ -2277,6 +2291,9 @@
__traceiter_android_rvh_enqueue_task
__traceiter_android_rvh_enqueue_task_fair
__traceiter_android_rvh_find_lowest_rq
__traceiter_android_rvh_iommu_alloc_insert_iova
__traceiter_android_rvh_iommu_iovad_init_alloc_algo
__traceiter_android_rvh_iommu_limit_align_shift
__traceiter_android_rvh_irqs_disable
__traceiter_android_rvh_irqs_enable
__traceiter_android_rvh_post_init_entity_util_avg
@ -2386,6 +2403,9 @@
__tracepoint_android_rvh_enqueue_task
__tracepoint_android_rvh_enqueue_task_fair
__tracepoint_android_rvh_find_lowest_rq
__tracepoint_android_rvh_iommu_alloc_insert_iova
__tracepoint_android_rvh_iommu_iovad_init_alloc_algo
__tracepoint_android_rvh_iommu_limit_align_shift
__tracepoint_android_rvh_irqs_disable
__tracepoint_android_rvh_irqs_enable
__tracepoint_android_rvh_post_init_entity_util_avg
@ -2541,6 +2561,7 @@
unregister_chrdev_region
unregister_inet6addr_notifier
unregister_inetaddr_notifier
unregister_kretprobe
unregister_netdev
unregister_netdevice_many
unregister_netdevice_notifier
@ -2560,6 +2581,7 @@
update_devfreq
___update_load_avg
___update_load_sum
update_misfit_status
update_rq_clock
up_read
up_write


@ -345,3 +345,8 @@
#required by mi_asap.ko
__traceiter_android_vh_read_pages
__tracepoint_android_vh_read_pages
page_cache_sync_ra
page_cache_async_ra
pagecache_get_page
filemap_get_folios
find_get_pages_range_tag


@ -15,6 +15,8 @@ CONFIG_PSI=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_BOOST=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_RCU_LAZY=y
CONFIG_RCU_LAZY_DEFAULT_OFF=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=m
@ -314,6 +316,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_BLK_DEV_UBLK=y
CONFIG_BLK_DEV_NVME=y
CONFIG_NVME_MULTIPATH=y
CONFIG_SRAM=y
CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
@ -445,7 +448,6 @@ CONFIG_LIRC=y
# CONFIG_RC_MAP is not set
CONFIG_RC_DECODERS=y
CONFIG_RC_DEVICES=y
CONFIG_MEDIA_CEC_RC=y
# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
# CONFIG_MEDIA_RADIO_SUPPORT is not set


@ -17,6 +17,8 @@ CONFIG_PSI=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_BOOST=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_RCU_LAZY=y
CONFIG_RCU_LAZY_DEFAULT_OFF=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=m
@ -300,6 +302,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_BLK_DEV_UBLK=y
CONFIG_BLK_DEV_NVME=y
CONFIG_NVME_MULTIPATH=y
CONFIG_SRAM=y
CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
@ -411,7 +414,6 @@ CONFIG_LIRC=y
# CONFIG_RC_MAP is not set
CONFIG_RC_DECODERS=y
CONFIG_RC_DEVICES=y
CONFIG_MEDIA_CEC_RC=y
# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
# CONFIG_MEDIA_RADIO_SUPPORT is not set


@ -560,6 +560,7 @@ static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
binder_inner_proc_lock(thread->proc);
has_work = binder_has_work_ilocked(thread, do_proc_work);
trace_android_vh_binder_has_special_work_ilocked(thread, do_proc_work, &has_work);
binder_inner_proc_unlock(thread->proc);
return has_work;
@ -4280,6 +4281,7 @@ static int binder_thread_write(struct binder_proc *proc,
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
break;
case BC_EXIT_LOOPER:
trace_android_vh_binder_looper_exited(thread, proc);
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_EXIT_LOOPER\n",
proc->pid, thread->pid);
@ -4607,6 +4609,8 @@ static int binder_thread_read(struct binder_proc *proc,
void __user *end = buffer + size;
int ret = 0;
bool nothing_to_do = false;
bool force_spawn = false;
int wait_for_proc_work;
if (*consumed == 0) {
@ -4662,14 +4666,20 @@ static int binder_thread_read(struct binder_proc *proc,
binder_inner_proc_lock(proc);
trace_android_vh_binder_select_worklist_ilocked(&list, thread,
proc, wait_for_proc_work);
trace_android_vh_binder_select_special_worklist(&list, thread,
proc, wait_for_proc_work, &nothing_to_do);
if (list)
goto skip;
else if (nothing_to_do)
goto no_work;
if (!binder_worklist_empty_ilocked(&thread->todo))
list = &thread->todo;
else if (!binder_worklist_empty_ilocked(&proc->todo) &&
wait_for_proc_work)
list = &proc->todo;
else {
no_work:
binder_inner_proc_unlock(proc);
/* no data added */
@ -4987,11 +4997,14 @@ static int binder_thread_read(struct binder_proc *proc,
*consumed = ptr - buffer;
binder_inner_proc_lock(proc);
if (proc->requested_threads == 0 &&
trace_android_vh_binder_spawn_new_thread(thread, proc, &force_spawn);
if ((proc->requested_threads == 0 &&
list_empty(&thread->proc->waiting_threads) &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
BINDER_LOOPER_STATE_ENTERED))) ||
force_spawn /* the user-space code fails to */
/*spawn a new thread if we leave this out */) {
proc->requested_threads++;
binder_inner_proc_unlock(proc);
@ -5781,6 +5794,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
}
ret = 0;
trace_android_vh_binder_ioctl_end(current, cmd, arg, thread, proc, &ret);
err:
if (thread)
thread->looper_need_return = false;


@ -164,6 +164,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wait_for_work);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_select_worklist_ilocked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_select_special_worklist);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sync_txn_recvd);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpufreq_transition);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_qos_add_request);
@ -370,3 +371,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kmalloc_large_alloced);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_netlink_poll);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ep_create_wakeup_source);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_timerfd_create);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_ioctl_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_looper_exited);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_spawn_new_thread);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_has_special_work_ilocked);

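The hooks added in binder.c and exported above follow the usual Android vendor-hook pattern: a vendor module registers a probe whose first parameter is the private-data pointer, followed by the arguments visible at the call site. The sketch below is hypothetical (module name, probe body and the trace/hooks/binder.h include path are assumptions); the prototype is inferred from the trace_android_vh_binder_looper_exited(thread, proc) call site shown earlier.

/* Hypothetical vendor module attaching to one of the new binder hooks. */
#include <linux/module.h>
#include <trace/hooks/binder.h>	/* assumed location of the DECLARE_HOOK()s */

static void probe_looper_exited(void *data, struct binder_thread *thread,
				struct binder_proc *proc)
{
	/* vendor-specific accounting would go here */
}

static int __init vendor_binder_init(void)
{
	/* register_trace_android_vh_*() is generated by DECLARE_HOOK(). */
	return register_trace_android_vh_binder_looper_exited(
			probe_looper_exited, NULL);
}

static void __exit vendor_binder_exit(void)
{
	unregister_trace_android_vh_binder_looper_exited(
			probe_looper_exited, NULL);
}

module_init(vendor_binder_init);
module_exit(vendor_binder_exit);
MODULE_LICENSE("GPL");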

@ -580,7 +580,7 @@ bool dev_pm_skip_resume(struct device *dev)
}
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* __device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
@ -588,7 +588,7 @@ bool dev_pm_skip_resume(struct device *dev)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@ -656,7 +656,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
if (error) {
suspend_stats.failed_resume_noirq++;
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
}
static bool is_async(struct device *dev)
@ -669,11 +675,15 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
{
reinit_completion(&dev->power.completion);
if (is_async(dev)) {
get_device(dev);
async_schedule_dev(func, dev);
if (!is_async(dev))
return false;
get_device(dev);
if (async_schedule_dev_nocall(func, dev))
return true;
}
put_device(dev);
return false;
}
@ -681,15 +691,19 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = (struct device *)data;
int error;
error = device_resume_noirq(dev, pm_transition, true);
if (error)
pm_dev_err(dev, pm_transition, " async", error);
__device_resume_noirq(dev, pm_transition, true);
put_device(dev);
}
static void device_resume_noirq(struct device *dev)
{
if (dpm_async_fn(dev, async_resume_noirq))
return;
__device_resume_noirq(dev, pm_transition, false);
}
static void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
@ -699,14 +713,6 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
pm_transition = state;
/*
* Advanced the async threads upfront,
* in case the starting of async threads is
* delayed by non-async resuming devices.
*/
list_for_each_entry(dev, &dpm_noirq_list, power.entry)
dpm_async_fn(dev, async_resume_noirq);
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
get_device(dev);
@ -714,17 +720,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
if (!is_async(dev)) {
int error;
error = device_resume_noirq(dev, state, false);
if (error) {
suspend_stats.failed_resume_noirq++;
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, " noirq", error);
}
}
device_resume_noirq(dev);
put_device(dev);
@ -752,14 +748,14 @@ void dpm_resume_noirq(pm_message_t state)
}
/**
* device_resume_early - Execute an "early resume" callback for given device.
* __device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@ -812,21 +808,31 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
pm_runtime_enable(dev);
complete_all(&dev->power.completion);
return error;
if (error) {
suspend_stats.failed_resume_early++;
dpm_save_failed_step(SUSPEND_RESUME_EARLY);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
struct device *dev = (struct device *)data;
int error;
error = device_resume_early(dev, pm_transition, true);
if (error)
pm_dev_err(dev, pm_transition, " async", error);
__device_resume_early(dev, pm_transition, true);
put_device(dev);
}
static void device_resume_early(struct device *dev)
{
if (dpm_async_fn(dev, async_resume_early))
return;
__device_resume_early(dev, pm_transition, false);
}
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
@ -840,14 +846,6 @@ void dpm_resume_early(pm_message_t state)
mutex_lock(&dpm_list_mtx);
pm_transition = state;
/*
* Advanced the async threads upfront,
* in case the starting of async threads is
* delayed by non-async resuming devices.
*/
list_for_each_entry(dev, &dpm_late_early_list, power.entry)
dpm_async_fn(dev, async_resume_early);
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
get_device(dev);
@ -855,17 +853,7 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
if (!is_async(dev)) {
int error;
error = device_resume_early(dev, state, false);
if (error) {
suspend_stats.failed_resume_early++;
dpm_save_failed_step(SUSPEND_RESUME_EARLY);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, " early", error);
}
}
device_resume_early(dev);
put_device(dev);
@ -889,12 +877,12 @@ void dpm_resume_start(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
* device_resume - Execute "resume" callbacks for given device.
* __device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*/
static int device_resume(struct device *dev, pm_message_t state, bool async)
static void __device_resume(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@ -976,20 +964,30 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
return error;
if (error) {
suspend_stats.failed_resume++;
dpm_save_failed_step(SUSPEND_RESUME);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
}
static void async_resume(void *data, async_cookie_t cookie)
{
struct device *dev = (struct device *)data;
int error;
error = device_resume(dev, pm_transition, true);
if (error)
pm_dev_err(dev, pm_transition, " async", error);
__device_resume(dev, pm_transition, true);
put_device(dev);
}
static void device_resume(struct device *dev)
{
if (dpm_async_fn(dev, async_resume))
return;
__device_resume(dev, pm_transition, false);
}
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@ -1009,27 +1007,17 @@ void dpm_resume(pm_message_t state)
pm_transition = state;
async_error = 0;
list_for_each_entry(dev, &dpm_suspended_list, power.entry)
dpm_async_fn(dev, async_resume);
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
get_device(dev);
if (!is_async(dev)) {
int error;
mutex_unlock(&dpm_list_mtx);
mutex_unlock(&dpm_list_mtx);
error = device_resume(dev, state, false);
if (error) {
suspend_stats.failed_resume++;
dpm_save_failed_step(SUSPEND_RESUME);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, "", error);
}
device_resume(dev);
mutex_lock(&dpm_list_mtx);
mutex_lock(&dpm_list_mtx);
}
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);


@ -31,7 +31,7 @@ DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
struct coresight_device *csdev = drvdata->csdev;
struct csdev_access *csa = &csdev->access;
@ -40,7 +40,9 @@ void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
dev_err(&csdev->dev,
"timeout while waiting for TMC to be Ready\n");
return -EBUSY;
}
return 0;
}
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)


@ -16,12 +16,20 @@
static int tmc_set_etf_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle);
static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
int rc = 0;
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
tmc_wait_for_tmcready(drvdata);
rc = tmc_wait_for_tmcready(drvdata);
if (rc) {
dev_err(&drvdata->csdev->dev,
"Failed to enable: TMC not ready\n");
CS_LOCK(drvdata->base);
return rc;
}
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
@ -33,6 +41,7 @@ static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
tmc_enable_hw(drvdata);
CS_LOCK(drvdata->base);
return rc;
}
static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
@ -42,8 +51,10 @@ static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
if (rc)
return rc;
__tmc_etb_enable_hw(drvdata);
return 0;
rc = __tmc_etb_enable_hw(drvdata);
if (rc)
coresight_disclaim_device(drvdata->csdev);
return rc;
}
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
@ -91,12 +102,20 @@ static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
coresight_disclaim_device(drvdata->csdev);
}
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
static int __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
int rc = 0;
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
tmc_wait_for_tmcready(drvdata);
rc = tmc_wait_for_tmcready(drvdata);
if (rc) {
dev_err(&drvdata->csdev->dev,
"Failed to enable : TMC is not ready\n");
CS_LOCK(drvdata->base);
return rc;
}
writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
@ -105,6 +124,7 @@ static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
tmc_enable_hw(drvdata);
CS_LOCK(drvdata->base);
return rc;
}
static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
@ -114,8 +134,10 @@ static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
if (rc)
return rc;
__tmc_etf_enable_hw(drvdata);
return 0;
rc = __tmc_etf_enable_hw(drvdata);
if (rc)
coresight_disclaim_device(drvdata->csdev);
return rc;
}
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
@ -639,6 +661,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
char *buf = NULL;
enum tmc_mode mode;
unsigned long flags;
int rc = 0;
/* config types are set a boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
@ -664,7 +687,11 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
* can't be NULL.
*/
memset(drvdata->buf, 0, drvdata->size);
__tmc_etb_enable_hw(drvdata);
rc = __tmc_etb_enable_hw(drvdata);
if (rc) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return rc;
}
} else {
/*
* The ETB/ETF is not tracing and the buffer was just read.


@ -985,15 +985,22 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
etr_buf->ops->sync(etr_buf, rrp, rwp);
}
static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
static int __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
u32 axictl, sts;
struct etr_buf *etr_buf = drvdata->etr_buf;
int rc = 0;
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
tmc_wait_for_tmcready(drvdata);
rc = tmc_wait_for_tmcready(drvdata);
if (rc) {
dev_err(&drvdata->csdev->dev,
"Failed to enable : TMC not ready\n");
CS_LOCK(drvdata->base);
return rc;
}
writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
@ -1034,6 +1041,7 @@ static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
tmc_enable_hw(drvdata);
CS_LOCK(drvdata->base);
return rc;
}
static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
@ -1062,7 +1070,12 @@ static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
rc = coresight_claim_device(drvdata->csdev);
if (!rc) {
drvdata->etr_buf = etr_buf;
__tmc_etr_enable_hw(drvdata);
rc = __tmc_etr_enable_hw(drvdata);
if (rc) {
drvdata->etr_buf = NULL;
coresight_disclaim_device(drvdata->csdev);
tmc_etr_disable_catu(drvdata);
}
}
return rc;


@ -255,7 +255,7 @@ struct tmc_sg_table {
};
/* Generic functions */
void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
void tmc_enable_hw(struct tmc_drvdata *drvdata);
void tmc_disable_hw(struct tmc_drvdata *drvdata);


@ -39,6 +39,7 @@
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
#define FLAGS_WORKAROUND_MTK_GICR_SAVE (1ULL << 2)
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 3)
#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
@ -648,10 +649,16 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
return 0;
}
static u64 gic_mpidr_to_affinity(unsigned long mpidr)
static u64 gic_cpu_to_affinity(int cpu)
{
u64 mpidr = cpu_logical_map(cpu);
u64 aff;
/* ASR8601 needs to have its affinities shifted down... */
if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) |
(MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));
aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
@ -906,7 +913,7 @@ void gic_v3_dist_init(void)
* Set all global interrupts to the boot CPU only. ARE must be
* enabled.
*/
affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
affinity = gic_cpu_to_affinity(smp_processor_id());
for (i = 32; i < GIC_LINE_NR; i++) {
trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTER, &affinity);
gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
@ -960,7 +967,7 @@ static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
unsigned long mpidr = cpu_logical_map(smp_processor_id());
unsigned long mpidr;
u64 typer;
u32 aff;
@ -968,6 +975,8 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
* Convert affinity to a 32bit value that can be matched to
* GICR_TYPER bits [63:32].
*/
mpidr = gic_cpu_to_affinity(smp_processor_id());
aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
@ -1081,7 +1090,7 @@ static inline bool gic_dist_security_disabled(void)
static void gic_cpu_sys_reg_init(void)
{
int i, cpu = smp_processor_id();
u64 mpidr = cpu_logical_map(cpu);
u64 mpidr = gic_cpu_to_affinity(cpu);
u64 need_rss = MPIDR_RS(mpidr);
bool group0;
u32 pribits;
@ -1180,11 +1189,11 @@ static void gic_cpu_sys_reg_init(void)
for_each_online_cpu(i) {
bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
need_rss |= MPIDR_RS(cpu_logical_map(i));
need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
if (need_rss && (!have_rss))
pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
cpu, (unsigned long)mpidr,
i, (unsigned long)cpu_logical_map(i));
i, (unsigned long)gic_cpu_to_affinity(i));
}
/**
@ -1261,9 +1270,11 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
unsigned long cluster_id)
{
int next_cpu, cpu = *base_cpu;
unsigned long mpidr = cpu_logical_map(cpu);
unsigned long mpidr;
u16 tlist = 0;
mpidr = gic_cpu_to_affinity(cpu);
while (cpu < nr_cpu_ids) {
tlist |= 1 << (mpidr & 0xf);
@ -1272,7 +1283,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
goto out;
cpu = next_cpu;
mpidr = cpu_logical_map(cpu);
mpidr = gic_cpu_to_affinity(cpu);
if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
cpu--;
@ -1317,7 +1328,7 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
dsb(ishst);
for_each_cpu(cpu, mask) {
u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
u16 tlist;
tlist = gic_compute_target_list(&cpu, mask, cluster_id);
@ -1377,7 +1388,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
offset = convert_offset_index(d, GICD_IROUTER, &index);
reg = gic_dist_base(d) + offset + (index * 8);
val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
val = gic_cpu_to_affinity(cpu);
trace_android_rvh_gic_v3_set_affinity(d, mask_val, &val, force, gic_dist_base(d),
gic_data.redist_regions[0].redist_base,
@ -1798,6 +1809,15 @@ static bool gic_enable_quirk_arm64_2941627(void *data)
return true;
}
static bool gic_enable_quirk_asr8601(void *data)
{
struct gic_chip_data_v3 *d = data;
d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001;
return true;
}
static const struct gic_quirk gic_quirks[] = {
{
.desc = "GICv3: Qualcomm MSM8996 broken firmware",
@ -1809,6 +1829,11 @@ static const struct gic_quirk gic_quirks[] = {
.property = "mediatek,broken-save-restore-fw",
.init = gic_enable_quirk_mtk_gicr,
},
{
.desc = "GICv3: ASR erratum 8601001",
.compatible = "asr,asr8601-gic-v3",
.init = gic_enable_quirk_asr8601,
},
{
.desc = "GICv3: HIP06 erratum 161010803",
.iidr = 0x0204043b,

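To make the ASR8601 workaround above concrete, here is a small standalone sketch of the shift that gic_cpu_to_affinity() applies when FLAGS_WORKAROUND_ASR_ERRATUM_8601001 is set: the thread level in Aff0 is dropped and Aff1/Aff2 move down into Aff0/Aff1. The affinity macro is reproduced in simplified form (levels 0-2 only) and the MPIDR value is made up for illustration.

/* Illustration only: the ASR8601 affinity shift, outside the kernel. */
#include <stdint.h>
#include <stdio.h>

/* Simplified MPIDR_AFFINITY_LEVEL(): levels 0-2 sit in bits 7:0, 15:8, 23:16. */
#define AFF_LEVEL(mpidr, level)	(((mpidr) >> ((level) * 8)) & 0xff)

int main(void)
{
	/* Hypothetical CPU: cluster 1 in Aff2, core 3 in Aff1, thread 0 in Aff0. */
	uint64_t mpidr = (1ULL << 16) | (3ULL << 8);

	/* Same expression as the quirk: Aff1 becomes Aff0, Aff2 becomes Aff1. */
	uint64_t aff = AFF_LEVEL(mpidr, 1) | (AFF_LEVEL(mpidr, 2) << 8);

	printf("mpidr=0x%llx -> affinity=0x%llx\n",
	       (unsigned long long)mpidr, (unsigned long long)aff);
	/* Prints: mpidr=0x10300 -> affinity=0x103 */
	return 0;
}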

@ -469,13 +469,6 @@ config UID_SYS_STATS
Per UID based io statistics exported to /proc/uid_io
Per UID based procstat control in /proc/uid_procstat
config UID_SYS_STATS_DEBUG
bool "Per-TASK statistics"
depends on UID_SYS_STATS
default n
help
Per TASK based io statistics exported to /proc/uid_io
config HISI_HIKEY_USB
tristate "USB GPIO Hub on HiSilicon Hikey 960/970 Platform"
depends on (OF && GPIOLIB) || COMPILE_TEST


@ -76,9 +76,6 @@ struct uid_entry {
int state;
struct io_stats io[UID_STATE_SIZE];
struct hlist_node hash;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
DECLARE_HASHTABLE(task_entries, UID_HASH_BITS);
#endif
};
static inline int trylock_uid(uid_t uid)
@ -148,182 +145,6 @@ static void compute_io_bucket_stats(struct io_stats *io_bucket,
memset(io_dead, 0, sizeof(struct io_stats));
}
#ifdef CONFIG_UID_SYS_STATS_DEBUG
static void get_full_task_comm(struct task_entry *task_entry,
struct task_struct *task)
{
int i = 0, offset = 0, len = 0;
/* save one byte for terminating null character */
int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1;
char buf[MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1];
struct mm_struct *mm = task->mm;
/* fill the first TASK_COMM_LEN bytes with thread name */
__get_task_comm(task_entry->comm, TASK_COMM_LEN, task);
i = strlen(task_entry->comm);
while (i < TASK_COMM_LEN)
task_entry->comm[i++] = ' ';
/* next the executable file name */
if (mm) {
mmap_write_lock(mm);
if (mm->exe_file) {
char *pathname = d_path(&mm->exe_file->f_path, buf,
unused_len);
if (!IS_ERR(pathname)) {
len = strlcpy(task_entry->comm + i, pathname,
unused_len);
i += len;
task_entry->comm[i++] = ' ';
unused_len--;
}
}
mmap_write_unlock(mm);
}
unused_len -= len;
/* fill the rest with command line argument
* replace each null or new line character
* between args in argv with whitespace */
len = get_cmdline(task, buf, unused_len);
while (offset < len) {
if (buf[offset] != '\0' && buf[offset] != '\n')
task_entry->comm[i++] = buf[offset];
else
task_entry->comm[i++] = ' ';
offset++;
}
/* get rid of trailing whitespaces in case when arg is memset to
* zero before being reset in userspace
*/
while (task_entry->comm[i-1] == ' ')
i--;
task_entry->comm[i] = '\0';
}
static struct task_entry *find_task_entry(struct uid_entry *uid_entry,
struct task_struct *task)
{
struct task_entry *task_entry;
hash_for_each_possible(uid_entry->task_entries, task_entry, hash,
task->pid) {
if (task->pid == task_entry->pid) {
/* if thread name changed, update the entire command */
int len = strnchr(task_entry->comm, ' ', TASK_COMM_LEN)
- task_entry->comm;
if (strncmp(task_entry->comm, task->comm, len))
get_full_task_comm(task_entry, task);
return task_entry;
}
}
return NULL;
}
static struct task_entry *find_or_register_task(struct uid_entry *uid_entry,
struct task_struct *task)
{
struct task_entry *task_entry;
pid_t pid = task->pid;
task_entry = find_task_entry(uid_entry, task);
if (task_entry)
return task_entry;
task_entry = kzalloc(sizeof(struct task_entry), GFP_ATOMIC);
if (!task_entry)
return NULL;
get_full_task_comm(task_entry, task);
task_entry->pid = pid;
hash_add(uid_entry->task_entries, &task_entry->hash, (unsigned int)pid);
return task_entry;
}
static void remove_uid_tasks(struct uid_entry *uid_entry)
{
struct task_entry *task_entry;
unsigned long bkt_task;
struct hlist_node *tmp_task;
hash_for_each_safe(uid_entry->task_entries, bkt_task,
tmp_task, task_entry, hash) {
hash_del(&task_entry->hash);
kfree(task_entry);
}
}
static void set_io_uid_tasks_zero(struct uid_entry *uid_entry)
{
struct task_entry *task_entry;
unsigned long bkt_task;
hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
memset(&task_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
}
}
static void add_uid_tasks_io_stats(struct task_entry *task_entry,
struct task_io_accounting *ioac, int slot)
{
struct io_stats *task_io_slot = &task_entry->io[slot];
task_io_slot->read_bytes += ioac->read_bytes;
task_io_slot->write_bytes += compute_write_bytes(ioac);
task_io_slot->rchar += ioac->rchar;
task_io_slot->wchar += ioac->wchar;
task_io_slot->fsync += ioac->syscfs;
}
static void compute_io_uid_tasks(struct uid_entry *uid_entry)
{
struct task_entry *task_entry;
unsigned long bkt_task;
hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
compute_io_bucket_stats(&task_entry->io[uid_entry->state],
&task_entry->io[UID_STATE_TOTAL_CURR],
&task_entry->io[UID_STATE_TOTAL_LAST],
&task_entry->io[UID_STATE_DEAD_TASKS]);
}
}
static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry)
{
struct task_entry *task_entry;
unsigned long bkt_task;
hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
/* Separated by comma because space exists in task comm */
seq_printf(m, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n",
task_entry->comm,
(unsigned long)task_entry->pid,
task_entry->io[UID_STATE_FOREGROUND].rchar,
task_entry->io[UID_STATE_FOREGROUND].wchar,
task_entry->io[UID_STATE_FOREGROUND].read_bytes,
task_entry->io[UID_STATE_FOREGROUND].write_bytes,
task_entry->io[UID_STATE_BACKGROUND].rchar,
task_entry->io[UID_STATE_BACKGROUND].wchar,
task_entry->io[UID_STATE_BACKGROUND].read_bytes,
task_entry->io[UID_STATE_BACKGROUND].write_bytes,
task_entry->io[UID_STATE_FOREGROUND].fsync,
task_entry->io[UID_STATE_BACKGROUND].fsync);
}
}
#else
static void remove_uid_tasks(struct uid_entry *uid_entry) {};
static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {};
static void compute_io_uid_tasks(struct uid_entry *uid_entry) {};
static void show_io_uid_tasks(struct seq_file *m,
struct uid_entry *uid_entry) {}
#endif
static struct uid_entry *find_uid_entry(uid_t uid)
{
struct uid_entry *uid_entry;
@ -347,9 +168,6 @@ static struct uid_entry *find_or_register_uid(uid_t uid)
return NULL;
uid_entry->uid = uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
hash_init(uid_entry->task_entries);
#endif
hash_add(hash_table, &uid_entry->hash, uid);
return uid_entry;
@ -465,7 +283,6 @@ static ssize_t uid_remove_write(struct file *file,
hash_for_each_possible_safe(hash_table, uid_entry, tmp,
hash, (uid_t)uid_start) {
if (uid_start == uid_entry->uid) {
remove_uid_tasks(uid_entry);
hash_del(&uid_entry->hash);
kfree(uid_entry);
}
@ -503,10 +320,6 @@ static void add_uid_io_stats(struct uid_entry *uid_entry,
if (slot != UID_STATE_DEAD_TASKS && (task->flags & PF_EXITING))
return;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
task_entry = find_or_register_task(uid_entry, task);
add_uid_tasks_io_stats(task_entry, &task->ioac, slot);
#endif
__add_uid_io_stats(uid_entry, &task->ioac, slot);
}
@ -524,7 +337,6 @@ static void update_io_stats_all(void)
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
set_io_uid_tasks_zero(uid_entry);
}
unlock_uid_by_bkt(bkt);
}
@ -552,24 +364,18 @@ static void update_io_stats_all(void)
&uid_entry->io[UID_STATE_TOTAL_CURR],
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
compute_io_uid_tasks(uid_entry);
}
unlock_uid_by_bkt(bkt);
}
}
#ifndef CONFIG_UID_SYS_STATS_DEBUG
static void update_io_stats_uid(struct uid_entry *uid_entry)
#else
static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
#endif
{
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
set_io_uid_tasks_zero(uid_entry);
rcu_read_lock();
do_each_thread(temp, task) {
@ -583,7 +389,6 @@ static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
&uid_entry->io[UID_STATE_TOTAL_CURR],
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
compute_io_uid_tasks(uid_entry);
}
@ -610,8 +415,6 @@ static int uid_io_show(struct seq_file *m, void *v)
uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
uid_entry->io[UID_STATE_FOREGROUND].fsync,
uid_entry->io[UID_STATE_BACKGROUND].fsync);
show_io_uid_tasks(m, uid_entry);
}
unlock_uid_by_bkt(bkt);
}
@ -643,9 +446,7 @@ static ssize_t uid_procstat_write(struct file *file,
uid_t uid;
int argc, state;
char input[128];
#ifndef CONFIG_UID_SYS_STATS_DEBUG
struct uid_entry uid_entry_tmp;
#endif
if (count >= sizeof(input))
return -EINVAL;
@ -674,7 +475,6 @@ static ssize_t uid_procstat_write(struct file *file,
return count;
}
#ifndef CONFIG_UID_SYS_STATS_DEBUG
/*
* Update_io_stats_uid_locked would take a long lock-time of uid_lock
* due to call do_each_thread to compute uid_entry->io, which would
@ -684,9 +484,8 @@ static ssize_t uid_procstat_write(struct file *file,
* so that we can unlock_uid during update_io_stats_uid, in order
* to avoid the unnecessary lock-time of uid_lock.
*/
uid_entry_tmp.uid = uid_entry->uid;
memcpy(uid_entry_tmp.io, uid_entry->io,
sizeof(struct io_stats) * UID_STATE_SIZE);
uid_entry_tmp = *uid_entry;
unlock_uid(uid);
update_io_stats_uid(&uid_entry_tmp);
@ -700,13 +499,6 @@ static ssize_t uid_procstat_write(struct file *file,
}
}
unlock_uid(uid);
#else
update_io_stats_uid_locked(uid_entry);
uid_entry->state = state;
unlock_uid(uid);
#endif
return count;
}
@ -719,9 +511,6 @@ static const struct proc_ops uid_procstat_fops = {
struct update_stats_work {
uid_t uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
struct task_struct *task;
#endif
struct task_io_accounting ioac;
u64 utime;
u64 stime;
@ -747,19 +536,9 @@ static void update_stats_workfn(struct work_struct *work)
uid_entry->utime += usw->utime;
uid_entry->stime += usw->stime;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
task_entry = find_task_entry(uid_entry, usw->task);
if (!task_entry)
goto next;
add_uid_tasks_io_stats(task_entry, &usw->ioac,
UID_STATE_DEAD_TASKS);
#endif
__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
next:
unlock_uid(usw->uid);
#ifdef CONFIG_UID_SYS_STATS_DEBUG
put_task_struct(usw->task);
#endif
kfree(usw);
}
@ -784,9 +563,6 @@ static int process_notifier(struct notifier_block *self,
usw = kmalloc(sizeof(struct update_stats_work), GFP_KERNEL);
if (usw) {
usw->uid = uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
usw->task = get_task_struct(task);
#endif
/*
* Copy task->ioac since task might be destroyed before
* the work is later performed.


@ -159,6 +159,7 @@ void idle_inject_set_duration(struct idle_inject_device *ii_dev,
WRITE_ONCE(ii_dev->idle_duration_us, idle_duration_us);
}
}
EXPORT_SYMBOL_GPL(idle_inject_set_duration);
/**
* idle_inject_get_duration - idle and run duration retrieval helper
@ -172,6 +173,7 @@ void idle_inject_get_duration(struct idle_inject_device *ii_dev,
*run_duration_us = READ_ONCE(ii_dev->run_duration_us);
*idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
}
EXPORT_SYMBOL_GPL(idle_inject_get_duration);
/**
* idle_inject_set_latency - set the maximum latency allowed
@ -182,6 +184,7 @@ void idle_inject_set_latency(struct idle_inject_device *ii_dev,
{
WRITE_ONCE(ii_dev->latency_us, latency_us);
}
EXPORT_SYMBOL_GPL(idle_inject_set_latency);
/**
* idle_inject_start - start idle injections
@ -213,6 +216,7 @@ int idle_inject_start(struct idle_inject_device *ii_dev)
return 0;
}
EXPORT_SYMBOL_GPL(idle_inject_start);
/**
* idle_inject_stop - stops idle injections
@ -259,6 +263,7 @@ void idle_inject_stop(struct idle_inject_device *ii_dev)
cpu_hotplug_enable();
}
EXPORT_SYMBOL_GPL(idle_inject_stop);
/**
* idle_inject_setup - prepare the current task for idle injection
@ -334,6 +339,7 @@ struct idle_inject_device *idle_inject_register(struct cpumask *cpumask)
return NULL;
}
EXPORT_SYMBOL_GPL(idle_inject_register);
/**
* idle_inject_unregister - unregister idle injection control device
@ -354,6 +360,7 @@ void idle_inject_unregister(struct idle_inject_device *ii_dev)
kfree(ii_dev);
}
EXPORT_SYMBOL_GPL(idle_inject_unregister);
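/*
 * Editorial sketch, not part of the merge diff: roughly how a vendor module
 * could consume the idle_inject symbols exported above. The CPU choice and
 * the duration/latency values are arbitrary, and error handling is trimmed.
 */
#include <linux/cpumask.h>
#include <linux/idle_inject.h>
#include <linux/module.h>

static struct idle_inject_device *example_ii_dev;
static struct cpumask example_cpus;

static int __init example_idle_inject_init(void)
{
	cpumask_clear(&example_cpus);
	cpumask_set_cpu(0, &example_cpus);	/* inject idle on CPU0 only */

	example_ii_dev = idle_inject_register(&example_cpus);
	if (!example_ii_dev)
		return -ENOMEM;

	/* 40 ms of run time followed by 10 ms of forced idle, repeatedly. */
	idle_inject_set_duration(example_ii_dev, 40000, 10000);
	idle_inject_set_latency(example_ii_dev, 20000);	/* tolerate 20 ms latency */

	return idle_inject_start(example_ii_dev);
}

static void __exit example_idle_inject_exit(void)
{
	idle_inject_stop(example_ii_dev);
	idle_inject_unregister(example_ii_dev);
}

module_init(example_idle_inject_init);
module_exit(example_idle_inject_exit);
MODULE_LICENSE("GPL");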
static struct smp_hotplug_thread idle_inject_threads = {
.store = &idle_inject_thread.tsk,


@ -317,7 +317,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
* Ensure that all tasks observe the host state change before the
* host_failed change.
*/
call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);
call_rcu_hurry(&scmd->rcu, scsi_eh_inc_host_failed);
}
/**


@ -2033,9 +2033,19 @@ static void update_port_device_state(struct usb_device *udev)
if (udev->parent) {
hub = usb_hub_to_struct_hub(udev->parent);
port_dev = hub->ports[udev->portnum - 1];
WRITE_ONCE(port_dev->state, udev->state);
sysfs_notify_dirent(port_dev->state_kn);
/*
* The Link Layer Validation System Driver (lvstest)
* has a test step to unbind the hub before running the
* rest of the procedure. This triggers hub_disconnect
* which will set the hub's maxchild to 0, further
* resulting in usb_hub_to_struct_hub returning NULL.
*/
if (hub) {
port_dev = hub->ports[udev->portnum - 1];
WRITE_ONCE(port_dev->state, udev->state);
sysfs_notify_dirent(port_dev->state_kn);
}
}
}


@ -2362,12 +2362,15 @@ static int dwc3_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
ret = dwc3_resume_common(dwc, PMSG_RESUME);
if (ret)
return ret;
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
ret = dwc3_resume_common(dwc, PMSG_RESUME);
if (ret) {
pm_runtime_set_suspended(dev);
return ret;
}
pm_runtime_enable(dev);
return 0;


@ -236,7 +236,10 @@ void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
struct dwc3_request *req;
req = next_request(&dep->pending_list);
dwc3_gadget_giveback(dep, req, -ECONNRESET);
if (!dwc->connected)
dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
else
dwc3_gadget_giveback(dep, req, -ECONNRESET);
}
dwc->eps[0]->trb_enqueue = 0;


@ -4590,15 +4590,13 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
unsigned long flags;
int ret;
if (!dwc->gadget_driver)
return 0;
ret = dwc3_gadget_soft_disconnect(dwc);
if (ret)
goto err;
spin_lock_irqsave(&dwc->lock, flags);
dwc3_disconnect_gadget(dwc);
if (dwc->gadget_driver)
dwc3_disconnect_gadget(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;


@ -276,10 +276,9 @@ static int uvcg_video_usb_req_queue(struct uvc_video *video,
bool is_bulk = video->max_payload_size;
struct list_head *list = NULL;
if (!video->is_enabled) {
uvc_video_free_request(req->context, video->ep);
if (!video->is_enabled)
return -ENODEV;
}
if (queue_to_ep) {
struct uvc_request *ureq = req->context;
/*
@ -464,12 +463,21 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
* and this thread for isoc endpoints.
*/
ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
if (ret < 0)
uvcg_queue_cancel(queue, 0);
if (ret < 0) {
/*
* Endpoint error, but the stream is still enabled.
* Put request back in req_free for it to be cleaned
* up later.
*/
list_add_tail(&to_queue->list, &video->req_free);
}
} else {
uvc_video_free_request(ureq, ep);
ret = 0;
}
spin_unlock_irqrestore(&video->req_lock, flags);
if (ret < 0)
uvcg_queue_cancel(queue, 0);
}
static int


@ -1133,6 +1133,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
int retval = 0;
bool comp_timer_running = false;
bool pending_portevent = false;
bool suspended_usb3_devs = false;
bool reinit_xhc = false;
if (!hcd->state)
@ -1282,10 +1283,16 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/*
* Resume roothubs only if there are pending events.
* USB 3 devices resend U3 LFPS wake after a 100ms delay if
* the first wake signalling failed, give it that chance.
* the first wake signalling failed, give it that chance if
* there are suspended USB 3 devices.
*/
if (xhci->usb3_rhub.bus_state.suspended_ports ||
xhci->usb3_rhub.bus_state.bus_suspended)
suspended_usb3_devs = true;
pending_portevent = xhci_pending_portevent(xhci);
if (!pending_portevent) {
if (suspended_usb3_devs && !pending_portevent) {
msleep(120);
pending_portevent = xhci_pending_portevent(xhci);
}


@ -401,23 +401,26 @@ int fuse_lseek_backing(struct fuse_bpf_args *fa, struct file *file, loff_t offse
struct file *backing_file = fuse_file->backing_file;
loff_t ret;
/* TODO: Handle changing of the file handle */
if (offset == 0) {
if (whence == SEEK_CUR) {
flo->offset = file->f_pos;
return flo->offset;
return 0;
}
if (whence == SEEK_SET) {
flo->offset = vfs_setpos(file, 0, 0);
return flo->offset;
return 0;
}
}
inode_lock(file->f_inode);
backing_file->f_pos = file->f_pos;
ret = vfs_llseek(backing_file, fli->offset, fli->whence);
flo->offset = ret;
if (!IS_ERR(ERR_PTR(ret))) {
flo->offset = ret;
ret = 0;
}
inode_unlock(file->f_inode);
return ret;
}


@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev)
return async_schedule_node(func, dev, dev_to_node(dev));
}
bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
/**
* async_schedule_dev_domain - A device specific version of async_schedule_domain
* @func: function to execute asynchronously


@ -694,6 +694,9 @@ struct perf_event {
/* The cumulative AND of all event_caps for events in this group. */
int group_caps;
#ifndef __GENKSYMS__
unsigned int group_generation;
#endif
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;


@ -108,6 +108,15 @@ static inline int rcu_preempt_depth(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_RCU_LAZY
void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func);
#else
static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
{
call_rcu(head, func);
}
#endif
/* Internal to kernel */
void rcu_init(void);
extern int rcu_scheduler_active;
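/*
 * Editorial sketch, not part of the merge diff: a typical call_rcu_hurry()
 * caller. "struct example_obj" and its list are hypothetical; the point is
 * that a latency-sensitive free opts out of lazy batching while ordinary
 * frees keep using call_rcu().
 */
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	struct list_head list;
	struct rcu_head rcu;
};

static void example_obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_obj, rcu));
}

static void example_obj_release(struct example_obj *obj)
{
	list_del_rcu(&obj->list);
	/* Do not let CONFIG_RCU_LAZY defer this reclaim for seconds. */
	call_rcu_hurry(&obj->rcu, example_obj_free_rcu);
}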


@ -42,6 +42,11 @@ do { \
* call_srcu() function, with this wrapper supplying the pointer to the
* corresponding srcu_struct.
*
* Note that call_rcu_hurry() should be used instead of call_rcu()
* because in kernels built with CONFIG_RCU_LAZY=y the delay between the
* invocation of call_rcu() and that of the corresponding RCU callback
* can be multiple seconds.
*
* The first argument tells Tiny RCU's _wait_rcu_gp() not to
* bother waiting for RCU. The reason for this is because anywhere
* synchronize_rcu_mult() can be called is automatically already a full


@ -72,19 +72,30 @@ TRACE_EVENT(reclaim_retry_zone,
);
TRACE_EVENT(mark_victim,
TP_PROTO(int pid),
TP_PROTO(struct task_struct *task, uid_t uid),
TP_ARGS(pid),
TP_ARGS(task, uid),
TP_STRUCT__entry(
__field(int, pid)
__field(uid_t, uid)
__string(comm, task->comm)
__field(short, oom_score_adj)
),
TP_fast_assign(
__entry->pid = pid;
__entry->pid = task->pid;
__entry->uid = uid;
__assign_str(comm, task->comm);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
TP_printk("pid=%d", __entry->pid)
TP_printk("pid=%d uid=%u comm=%s oom_score_adj=%hd",
__entry->pid,
__entry->uid,
__get_str(comm),
__entry->oom_score_adj
)
);
TRACE_EVENT(wake_reaper,


@ -74,6 +74,11 @@ DECLARE_HOOK(android_vh_binder_select_worklist_ilocked,
TP_PROTO(struct list_head **list, struct binder_thread *thread, struct binder_proc *proc,
int wait_for_proc_work),
TP_ARGS(list, thread, proc, wait_for_proc_work));
DECLARE_HOOK(android_vh_binder_select_special_worklist,
TP_PROTO(struct list_head **list, struct binder_thread *thread, struct binder_proc *proc,
int wait_for_proc_work, bool *nothing_to_do),
TP_ARGS(list, thread, proc, wait_for_proc_work, nothing_to_do));
DECLARE_HOOK(android_vh_binder_alloc_new_buf_locked,
TP_PROTO(size_t size, size_t *free_async_space, int is_async),
TP_ARGS(size, free_async_space, is_async));
@ -118,6 +123,23 @@ DECLARE_HOOK(android_vh_binder_free_buf,
struct binder_buffer *buffer),
TP_ARGS(proc, thread, buffer));
DECLARE_HOOK(android_vh_binder_ioctl_end,
TP_PROTO(struct task_struct *caller_task,
unsigned int cmd,
unsigned long arg,
struct binder_thread *thread,
struct binder_proc *proc,
int *ret),
TP_ARGS(caller_task, cmd, arg, thread, proc, ret));
DECLARE_HOOK(android_vh_binder_looper_exited,
TP_PROTO(struct binder_thread *thread, struct binder_proc *proc),
TP_ARGS(thread, proc));
DECLARE_HOOK(android_vh_binder_spawn_new_thread,
TP_PROTO(struct binder_thread *thread, struct binder_proc *proc, bool *force_spawn),
TP_ARGS(thread, proc, force_spawn));
DECLARE_HOOK(android_vh_binder_has_special_work_ilocked,
TP_PROTO(struct binder_thread *thread, bool do_proc_work, bool *has_work),
TP_ARGS(thread, do_proc_work, has_work));
#endif /* _TRACE_HOOK_BINDER_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
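/*
 * Editorial sketch, not part of the merge diff: attaching a probe to one of
 * the vendor hooks declared above from a module that includes
 * <trace/hooks/binder.h>. DECLARE_HOOK() builds on tracepoints, so the
 * generated register_trace_android_vh_<name>() helper is assumed here; the
 * probe receives the registration cookie followed by the hook arguments.
 */
static void example_looper_exited_probe(void *unused,
					struct binder_thread *thread,
					struct binder_proc *proc)
{
	pr_debug("android_vh_binder_looper_exited fired\n");
}

static int __init example_binder_hooks_init(void)
{
	return register_trace_android_vh_binder_looper_exited(
			example_looper_exited_probe, NULL);
}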


@ -536,7 +536,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
} else {
atomic_inc(&ev_fd->refs);
if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
call_rcu(&ev_fd->rcu, io_eventfd_ops);
call_rcu_hurry(&ev_fd->rcu, io_eventfd_ops);
else
atomic_dec(&ev_fd->refs);
}


@ -145,6 +145,39 @@ static void async_run_entry_fn(struct work_struct *work)
wake_up(&async_done);
}
static async_cookie_t __async_schedule_node_domain(async_func_t func,
void *data, int node,
struct async_domain *domain,
struct async_entry *entry)
{
async_cookie_t newcookie;
unsigned long flags;
INIT_LIST_HEAD(&entry->domain_list);
INIT_LIST_HEAD(&entry->global_list);
INIT_WORK(&entry->work, async_run_entry_fn);
entry->func = func;
entry->data = data;
entry->domain = domain;
spin_lock_irqsave(&async_lock, flags);
/* allocate cookie and queue */
newcookie = entry->cookie = next_cookie++;
list_add_tail(&entry->domain_list, &domain->pending);
if (domain->registered)
list_add_tail(&entry->global_list, &async_global_pending);
atomic_inc(&entry_count);
spin_unlock_irqrestore(&async_lock, flags);
/* schedule for execution */
queue_work_node(node, system_unbound_wq, &entry->work);
return newcookie;
}
/**
* async_schedule_node_domain - NUMA specific version of async_schedule_domain
* @func: function to execute asynchronously
@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
func(data, newcookie);
return newcookie;
}
INIT_LIST_HEAD(&entry->domain_list);
INIT_LIST_HEAD(&entry->global_list);
INIT_WORK(&entry->work, async_run_entry_fn);
entry->func = func;
entry->data = data;
entry->domain = domain;
spin_lock_irqsave(&async_lock, flags);
/* allocate cookie and queue */
newcookie = entry->cookie = next_cookie++;
list_add_tail(&entry->domain_list, &domain->pending);
if (domain->registered)
list_add_tail(&entry->global_list, &async_global_pending);
atomic_inc(&entry_count);
spin_unlock_irqrestore(&async_lock, flags);
/* schedule for execution */
queue_work_node(node, system_unbound_wq, &entry->work);
return newcookie;
return __async_schedule_node_domain(func, data, node, domain, entry);
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
@ -231,6 +243,35 @@ async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
}
EXPORT_SYMBOL_GPL(async_schedule_node);
/**
* async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
* @func: function to execute asynchronously
* @dev: device argument to be passed to function
*
* @dev is used as both the argument for the function and to provide NUMA
* context for where to run the function.
*
* If the asynchronous execution of @func is scheduled successfully, return
* true. Otherwise, do nothing and return false, unlike async_schedule_dev(),
* which would then run the function synchronously.
*/
bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
{
struct async_entry *entry;
entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
/* Give up if there is no memory or too much work. */
if (!entry || atomic_read(&entry_count) > MAX_WORK) {
kfree(entry);
return false;
}
__async_schedule_node_domain(func, dev, dev_to_node(dev),
&async_dfl_domain, entry);
return true;
}
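/*
 * Editorial sketch, not part of the merge diff: the intended calling pattern
 * for async_schedule_dev_nocall(). Because it does not fall back to a
 * synchronous call, the caller handles that case itself. example_resume_fn()
 * is hypothetical.
 */
static void example_resume_fn(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	dev_dbg(dev, "resumed (cookie %llu)\n", (unsigned long long)cookie);
}

static void example_async_resume(struct device *dev)
{
	/* Scheduling can fail on OOM or when too much async work is queued. */
	if (!async_schedule_dev_nocall(example_resume_fn, dev))
		example_resume_fn(dev, 0);	/* run it synchronously instead */
}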
/**
* async_synchronize_full - synchronize all asynchronous function calls
*


@ -1987,6 +1987,7 @@ static void perf_group_attach(struct perf_event *event)
list_add_tail(&event->sibling_list, &group_leader->sibling_list);
group_leader->nr_siblings++;
group_leader->group_generation++;
perf_event__header_size(group_leader);
@ -2181,6 +2182,7 @@ static void perf_group_detach(struct perf_event *event)
if (leader != event) {
list_del_init(&event->sibling_list);
event->group_leader->nr_siblings--;
event->group_leader->group_generation++;
goto out;
}
@ -5305,7 +5307,7 @@ static int __perf_read_group_add(struct perf_event *leader,
u64 read_format, u64 *values)
{
struct perf_event_context *ctx = leader->ctx;
struct perf_event *sub;
struct perf_event *sub, *parent;
unsigned long flags;
int n = 1; /* skip @nr */
int ret;
@ -5315,6 +5317,33 @@ static int __perf_read_group_add(struct perf_event *leader,
return ret;
raw_spin_lock_irqsave(&ctx->lock, flags);
/*
* Verify the grouping between the parent and child (inherited)
* events is still intact.
*
* Specifically:
* - leader->ctx->lock pins leader->sibling_list
* - parent->child_mutex pins parent->child_list
* - parent->ctx->mutex pins parent->sibling_list
*
* Because parent->ctx != leader->ctx (and child_list nests inside
* ctx->mutex), group destruction is not atomic between children, also
* see perf_event_release_kernel(). Additionally, parent can grow the
* group.
*
* Therefore it is possible to have parent and child groups in a
* different configuration and summing over such a beast makes no sense
* what so ever.
*
* Reject this.
*/
parent = leader->parent;
if (parent &&
(parent->group_generation != leader->group_generation ||
parent->nr_siblings != leader->nr_siblings)) {
ret = -ECHILD;
goto unlock;
}
/*
* Since we co-schedule groups, {enabled,running} times of siblings
@ -5348,8 +5377,9 @@ static int __perf_read_group_add(struct perf_event *leader,
values[n++] = atomic64_read(&sub->lost_samples);
}
unlock:
raw_spin_unlock_irqrestore(&ctx->lock, flags);
return 0;
return ret;
}
static int perf_read_group(struct perf_event *event,
@ -5368,10 +5398,6 @@ static int perf_read_group(struct perf_event *event,
values[0] = 1 + leader->nr_siblings;
/*
* By locking the child_mutex of the leader we effectively
* lock the child list of all siblings.. XXX explain how.
*/
mutex_lock(&leader->child_mutex);
ret = __perf_read_group_add(leader, read_format, values);
@ -13278,6 +13304,7 @@ static int inherit_group(struct perf_event *parent_event,
!perf_get_aux_event(child_ctr, leader))
return -EINVAL;
}
leader->group_generation = parent_event->group_generation;
return 0;
}
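/*
 * Editorial sketch, not part of the merge diff: a user-space reader of a
 * perf event group opened with read_format = PERF_FORMAT_GROUP. With the
 * check added above, read() on an inherited group can now fail with ECHILD
 * when the child group no longer matches its parent, so callers should
 * treat that as "no data" rather than a fatal error. The field layout below
 * assumes no PERF_FORMAT_ID or timing flags were requested.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct group_read {
	uint64_t nr;		/* number of counters in the group */
	uint64_t values[16];	/* one value per counter, leader first */
};

static void example_read_group(int group_fd)
{
	struct group_read buf;

	if (read(group_fd, &buf, sizeof(buf)) < 0) {
		if (errno == ECHILD)
			fprintf(stderr, "group layout changed; skipping sample\n");
		return;
	}

	printf("group of %llu counters, leader value %llu\n",
	       (unsigned long long)buf.nr,
	       (unsigned long long)buf.values[0]);
}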


@ -311,4 +311,45 @@ config TASKS_TRACE_RCU_READ_MB
Say N here if you hate read-side memory barriers.
Take the default if you are unsure.
config RCU_LAZY
bool "RCU callback lazy invocation functionality"
depends on RCU_NOCB_CPU
default n
help
To save power, batch RCU callbacks and flush after delay, memory
pressure, or callback list growing too big.
Requires rcu_nocbs=all to be set.
Use rcutree.enable_rcu_lazy=0 to turn it off at boot time.
config RCU_LAZY_DEFAULT_OFF
bool "Turn RCU lazy invocation off by default"
depends on RCU_LAZY
default n
help
Allows building the kernel with CONFIG_RCU_LAZY=y yet keep it default
off. Boot time param rcutree.enable_rcu_lazy=1 can be used to switch
it back on.
config RCU_BOOT_END_DELAY
int "Minimum time before RCU may consider in-kernel boot as completed"
range 0 120000
default 20000
help
Default value of the minimum time in milliseconds from the start of boot
that must elapse before the boot sequence can be marked complete from RCU's
perspective, after which RCU's behavior becomes more relaxed.
Userspace can also mark the boot as completed sooner than this default
by writing the time in milliseconds, say once userspace considers
the system as booted, to: /sys/module/rcupdate/parameters/rcu_boot_end_delay.
Or even just writing a value of 0 to this sysfs node. The sysfs node can
also be used to extend the delay to be larger than the default, assuming
the marking of boot completion has not yet occurred.
The actual delay for RCU's view of the system to be marked as booted can be
higher than this value if the kernel takes a long time to initialize but it
will never be smaller than this value.
Accept the default if unsure.
endmenu # "RCU Subsystem"


@ -443,14 +443,20 @@ do { \
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline bool rcu_async_should_hurry(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_async_hurry(void) { }
static inline void rcu_async_relax(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void); /* Internal RCU use. */
bool rcu_gp_is_expedited(void); /* Internal RCU use. */
bool rcu_async_should_hurry(void); /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcu_async_hurry(void);
void rcu_async_relax(void);
void rcupdate_announce_bootup_oddness(void);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
@ -474,6 +480,14 @@ enum rcutorture_type {
INVALID_RCU_FLAVOR
};
#if defined(CONFIG_RCU_LAZY)
unsigned long rcu_lazy_get_jiffies_till_flush(void);
void rcu_lazy_set_jiffies_till_flush(unsigned long j);
#else
static inline unsigned long rcu_lazy_get_jiffies_till_flush(void) { return 0; }
static inline void rcu_lazy_set_jiffies_till_flush(unsigned long j) { }
#endif
#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
unsigned long *gp_seq);


@ -175,7 +175,7 @@ static struct rcu_scale_ops rcu_ops = {
.get_gp_seq = rcu_get_gp_seq,
.gp_diff = rcu_seq_diff,
.exp_completed = rcu_exp_batches_completed,
.async = call_rcu,
.async = call_rcu_hurry,
.gp_barrier = rcu_barrier,
.sync = synchronize_rcu,
.exp_sync = synchronize_rcu_expedited,


@ -510,7 +510,7 @@ static unsigned long rcu_no_completed(void)
static void rcu_torture_deferred_free(struct rcu_torture *p)
{
call_rcu(&p->rtort_rcu, rcu_torture_cb);
call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}
static void rcu_sync_torture_init(void)
@ -551,7 +551,7 @@ static struct rcu_torture_ops rcu_ops = {
.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
.poll_gp_state_exp = poll_state_synchronize_rcu,
.cond_sync_exp = cond_synchronize_rcu_expedited,
.call = call_rcu,
.call = call_rcu_hurry,
.cb_barrier = rcu_barrier,
.fqs = rcu_force_quiescent_state,
.stats = NULL,
@ -848,7 +848,7 @@ static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
static void synchronize_rcu_mult_test(void)
{
synchronize_rcu_mult(call_rcu_tasks, call_rcu);
synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
}
static struct rcu_torture_ops tasks_ops = {
@ -3388,13 +3388,13 @@ static void rcu_test_debug_objects(void)
/* Try to queue the rh2 pair of callbacks for the same grace period. */
preempt_disable(); /* Prevent preemption from interrupting test. */
rcu_read_lock(); /* Make it impossible to finish a grace period. */
call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */
local_irq_disable(); /* Make it harder to start a new grace period. */
call_rcu(&rh2, rcu_torture_leak_cb);
call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
call_rcu_hurry(&rh2, rcu_torture_leak_cb);
call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
if (rhp) {
call_rcu(rhp, rcu_torture_leak_cb);
call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
call_rcu_hurry(rhp, rcu_torture_leak_cb);
call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
}
local_irq_enable();
rcu_read_unlock();


@ -44,7 +44,7 @@ static void rcu_sync_func(struct rcu_head *rhp);
static void rcu_sync_call(struct rcu_sync *rsp)
{
call_rcu(&rsp->cb_head, rcu_sync_func);
call_rcu_hurry(&rsp->cb_head, rcu_sync_func);
}
/**


@ -44,7 +44,7 @@ static struct rcu_ctrlblk rcu_ctrlblk = {
void rcu_barrier(void)
{
wait_rcu_gp(call_rcu);
wait_rcu_gp(call_rcu_hurry);
}
EXPORT_SYMBOL(rcu_barrier);


@ -2744,8 +2744,112 @@ static void check_cb_ovld(struct rcu_data *rdp)
raw_spin_unlock_rcu_node(rnp);
}
static void
__call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
{
static atomic_t doublefrees;
unsigned long flags;
bool lazy;
struct rcu_data *rdp;
bool was_alldone;
/* Misaligned rcu_head! */
WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
if (debug_rcu_head_queue(head)) {
/*
* Probable double call_rcu(), so leak the callback.
* Use rcu:rcu_callback trace event to find the previous
* time callback was passed to call_rcu().
*/
if (atomic_inc_return(&doublefrees) < 4) {
pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
mem_dump_obj(head);
}
WRITE_ONCE(head->func, rcu_leak_callback);
return;
}
head->func = func;
head->next = NULL;
kasan_record_aux_stack_noalloc(head);
local_irq_save(flags);
rdp = this_cpu_ptr(&rcu_data);
lazy = lazy_in && !rcu_async_should_hurry();
/* Add the callback to our list. */
if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
// This can trigger due to call_rcu() from offline CPU:
WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
WARN_ON_ONCE(!rcu_is_watching());
// Very early boot, before rcu_init(). Initialize if needed
// and then drop through to queue the callback.
if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist);
}
check_cb_ovld(rdp);
if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
return; // Enqueued onto ->nocb_bypass, so just leave.
// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
rcu_segcblist_enqueue(&rdp->cblist, head);
if (__is_kvfree_rcu_offset((unsigned long)func))
trace_rcu_kvfree_callback(rcu_state.name, head,
(unsigned long)func,
rcu_segcblist_n_cbs(&rdp->cblist));
else
trace_rcu_callback(rcu_state.name, head,
rcu_segcblist_n_cbs(&rdp->cblist));
trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
/* Go handle any RCU core processing required. */
if (unlikely(rcu_rdp_is_offloaded(rdp))) {
__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
} else {
__call_rcu_core(rdp, head, flags);
local_irq_restore(flags);
}
}
#ifdef CONFIG_RCU_LAZY
static bool enable_rcu_lazy __read_mostly = !IS_ENABLED(CONFIG_RCU_LAZY_DEFAULT_OFF);
module_param(enable_rcu_lazy, bool, 0444);
/**
* call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
* flush all lazy callbacks (including the new one) to the main ->cblist while
* doing so.
*
* @head: structure to be used for queueing the RCU updates.
* @func: actual callback function to be invoked after the grace period
*
* The callback function will be invoked some time after a full grace
* period elapses, in other words after all pre-existing RCU read-side
* critical sections have completed.
*
* Use this API instead of call_rcu() if you don't want the callback to be
* invoked after very long periods of time, which can happen on systems without
* memory pressure and on systems which are lightly loaded or mostly idle.
* This function will cause callbacks to be invoked sooner than later at the
* expense of extra power. Other than that, this function is identical to, and
* reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
* ordering and other functionality.
*/
void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
{
return __call_rcu_common(head, func, false);
}
EXPORT_SYMBOL_GPL(call_rcu_hurry);
#else
#define enable_rcu_lazy false
#endif
/**
* call_rcu() - Queue an RCU callback for invocation after a grace period.
* By default the callbacks are 'lazy' and are kept hidden from the main
* ->cblist to prevent starting of grace periods too soon.
* If you desire grace periods to start very soon, use call_rcu_hurry().
*
* @head: structure to be used for queueing the RCU updates.
* @func: actual callback function to be invoked after the grace period
*
@ -2786,70 +2890,10 @@ static void check_cb_ovld(struct rcu_data *rdp)
*/
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
static atomic_t doublefrees;
unsigned long flags;
struct rcu_data *rdp;
bool was_alldone;
/* Misaligned rcu_head! */
WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
if (debug_rcu_head_queue(head)) {
/*
* Probable double call_rcu(), so leak the callback.
* Use rcu:rcu_callback trace event to find the previous
* time callback was passed to call_rcu().
*/
if (atomic_inc_return(&doublefrees) < 4) {
pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
mem_dump_obj(head);
}
WRITE_ONCE(head->func, rcu_leak_callback);
return;
}
head->func = func;
head->next = NULL;
kasan_record_aux_stack_noalloc(head);
local_irq_save(flags);
rdp = this_cpu_ptr(&rcu_data);
/* Add the callback to our list. */
if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
// This can trigger due to call_rcu() from offline CPU:
WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
WARN_ON_ONCE(!rcu_is_watching());
// Very early boot, before rcu_init(). Initialize if needed
// and then drop through to queue the callback.
if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist);
}
check_cb_ovld(rdp);
if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
return; // Enqueued onto ->nocb_bypass, so just leave.
// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
rcu_segcblist_enqueue(&rdp->cblist, head);
if (__is_kvfree_rcu_offset((unsigned long)func))
trace_rcu_kvfree_callback(rcu_state.name, head,
(unsigned long)func,
rcu_segcblist_n_cbs(&rdp->cblist));
else
trace_rcu_callback(rcu_state.name, head,
rcu_segcblist_n_cbs(&rdp->cblist));
trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
/* Go handle any RCU core processing required. */
if (unlikely(rcu_rdp_is_offloaded(rdp))) {
__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
} else {
__call_rcu_core(rdp, head, flags);
local_irq_restore(flags);
}
__call_rcu_common(head, func, enable_rcu_lazy);
}
EXPORT_SYMBOL_GPL(call_rcu);
/* Maximum number of jiffies to wait before draining a batch. */
#define KFREE_DRAIN_JIFFIES (5 * HZ)
#define KFREE_N_BATCHES 2
@ -3542,7 +3586,7 @@ void synchronize_rcu(void)
if (rcu_gp_is_expedited())
synchronize_rcu_expedited();
else
wait_rcu_gp(call_rcu);
wait_rcu_gp(call_rcu_hurry);
return;
}
@ -3929,6 +3973,8 @@ static void rcu_barrier_entrain(struct rcu_data *rdp)
{
unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
bool wake_nocb = false;
bool was_alldone = false;
lockdep_assert_held(&rcu_state.barrier_lock);
if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
@ -3937,7 +3983,14 @@ static void rcu_barrier_entrain(struct rcu_data *rdp)
rdp->barrier_head.func = rcu_barrier_callback;
debug_rcu_head_queue(&rdp->barrier_head);
rcu_nocb_lock(rdp);
WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
/*
* Flush bypass and wakeup rcuog if we add callbacks to an empty regular
* queue. This way we don't wait for bypass timer that can reach seconds
* if it's fully lazy.
*/
was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
atomic_inc(&rcu_state.barrier_cpu_count);
} else {
@ -3945,6 +3998,8 @@ static void rcu_barrier_entrain(struct rcu_data *rdp)
rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
}
rcu_nocb_unlock(rdp);
if (wake_nocb)
wake_nocb_gp(rdp, false);
smp_store_release(&rdp->barrier_seq_snap, gseq);
}
@ -4369,7 +4424,7 @@ void rcutree_migrate_callbacks(int cpu)
my_rdp = this_cpu_ptr(&rcu_data);
my_rnp = my_rdp->mynode;
rcu_nocb_lock(my_rdp); /* irqs already disabled. */
WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
/* Leverage recent GPs and set GP for new callbacks. */
needwake = rcu_advance_cbs(my_rnp, rdp) ||
@ -4408,11 +4463,13 @@ static int rcu_pm_notify(struct notifier_block *self,
switch (action) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
rcu_async_hurry();
rcu_expedite_gp();
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
rcu_unexpedite_gp();
rcu_async_relax();
break;
default:
break;


@ -263,14 +263,16 @@ struct rcu_data {
unsigned long last_fqs_resched; /* Time of last rcu_resched(). */
unsigned long last_sched_clock; /* Jiffies of last rcu_sched_clock_irq(). */
long lazy_len; /* Length of buffered lazy callbacks. */
int cpu;
};
/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOCB_WAKE_NOT 0
#define RCU_NOCB_WAKE_BYPASS 1
#define RCU_NOCB_WAKE 2
#define RCU_NOCB_WAKE_FORCE 3
#define RCU_NOCB_WAKE_LAZY 2
#define RCU_NOCB_WAKE 3
#define RCU_NOCB_WAKE_FORCE 4
#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
/* For jiffies_till_first_fqs and */
@ -443,10 +445,12 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
unsigned long j);
unsigned long j, bool lazy);
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
bool *was_alldone, unsigned long flags);
bool *was_alldone, unsigned long flags,
bool lazy);
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);


@ -941,7 +941,7 @@ void synchronize_rcu_expedited(void)
/* If expedited grace periods are prohibited, fall back to normal. */
if (rcu_gp_is_normal()) {
wait_rcu_gp(call_rcu);
wait_rcu_gp(call_rcu_hurry);
return;
}


@ -256,6 +256,31 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
return __wake_nocb_gp(rdp_gp, rdp, force, flags);
}
/*
* LAZY_FLUSH_JIFFIES decides the maximum amount of time that
* can elapse before lazy callbacks are flushed. Lazy callbacks
* could be flushed much earlier for a number of other reasons
* however, LAZY_FLUSH_JIFFIES will ensure no lazy callbacks are
* left unsubmitted to RCU after those many jiffies.
*/
#define LAZY_FLUSH_JIFFIES (10 * HZ)
static unsigned long jiffies_till_flush = LAZY_FLUSH_JIFFIES;
#ifdef CONFIG_RCU_LAZY
// To be called only from test code.
void rcu_lazy_set_jiffies_till_flush(unsigned long jif)
{
jiffies_till_flush = jif;
}
EXPORT_SYMBOL(rcu_lazy_set_jiffies_till_flush);
unsigned long rcu_lazy_get_jiffies_till_flush(void)
{
return jiffies_till_flush;
}
EXPORT_SYMBOL(rcu_lazy_get_jiffies_till_flush);
#endif
/*
* Arrange to wake the GP kthread for this NOCB group at some future
* time when it is safe to do so.
@ -269,10 +294,14 @@ static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
/*
* Bypass wakeup overrides previous deferments. In case
* of callback storm, no need to wake up too early.
* Bypass wakeup overrides previous deferments. In case of
* callback storms, no need to wake up too early.
*/
if (waketype == RCU_NOCB_WAKE_BYPASS) {
if (waketype == RCU_NOCB_WAKE_LAZY &&
rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) {
mod_timer(&rdp_gp->nocb_timer, jiffies + jiffies_till_flush);
WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
} else if (waketype == RCU_NOCB_WAKE_BYPASS) {
mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
} else {
@ -293,12 +322,16 @@ static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
* proves to be initially empty, just return false because the no-CB GP
* kthread may need to be awakened in this case.
*
* Return true if there was something to be flushed and it succeeded, otherwise
* false.
*
* Note that this function always returns true if rhp is NULL.
*/
static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
unsigned long j)
static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
unsigned long j, bool lazy)
{
struct rcu_cblist rcl;
struct rcu_head *rhp = rhp_in;
WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
rcu_lockdep_assert_cblist_protected(rdp);
@ -310,7 +343,20 @@ static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
/* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
if (rhp)
rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
/*
* If the new CB requested was a lazy one, queue it onto the main
* ->cblist so that we can take advantage of the grace-period that will
* happen regardless. But queue it onto the bypass list first so that
* the lazy CB is ordered with the existing CBs in the bypass list.
*/
if (lazy && rhp) {
rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
rhp = NULL;
}
rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
WRITE_ONCE(rdp->lazy_len, 0);
rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
WRITE_ONCE(rdp->nocb_bypass_first, j);
rcu_nocb_bypass_unlock(rdp);
@ -326,13 +372,13 @@ static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
* Note that this function always returns true if rhp is NULL.
*/
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
unsigned long j)
unsigned long j, bool lazy)
{
if (!rcu_rdp_is_offloaded(rdp))
return true;
rcu_lockdep_assert_cblist_protected(rdp);
rcu_nocb_bypass_lock(rdp);
return rcu_nocb_do_flush_bypass(rdp, rhp, j);
return rcu_nocb_do_flush_bypass(rdp, rhp, j, lazy);
}
/*
@ -345,7 +391,7 @@ static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
if (!rcu_rdp_is_offloaded(rdp) ||
!rcu_nocb_bypass_trylock(rdp))
return;
WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j, false));
}
/*
@ -367,12 +413,14 @@ static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
* there is only one CPU in operation.
*/
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
bool *was_alldone, unsigned long flags)
bool *was_alldone, unsigned long flags,
bool lazy)
{
unsigned long c;
unsigned long cur_gp_seq;
unsigned long j = jiffies;
long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
bool bypass_is_lazy = (ncbs == READ_ONCE(rdp->lazy_len));
lockdep_assert_irqs_disabled();
@ -417,24 +465,29 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
// If there hasn't yet been all that many ->cblist enqueues
// this jiffy, tell the caller to enqueue onto ->cblist. But flush
// ->nocb_bypass first.
if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
// Lazy CBs throttle this back and do immediate bypass queuing.
if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy && !lazy) {
rcu_nocb_lock(rdp);
*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
if (*was_alldone)
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("FirstQ"));
WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j, false));
WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
return false; // Caller must enqueue the callback.
}
// If ->nocb_bypass has been used too long or is too full,
// flush ->nocb_bypass to ->cblist.
if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
if ((ncbs && !bypass_is_lazy && j != READ_ONCE(rdp->nocb_bypass_first)) ||
(ncbs && bypass_is_lazy &&
(time_after(j, READ_ONCE(rdp->nocb_bypass_first) + jiffies_till_flush))) ||
ncbs >= qhimark) {
rcu_nocb_lock(rdp);
if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
if (!rcu_nocb_flush_bypass(rdp, rhp, j, lazy)) {
if (*was_alldone)
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("FirstQ"));
@ -447,7 +500,12 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
rcu_advance_cbs_nowake(rdp->mynode, rdp);
rdp->nocb_gp_adv_time = j;
}
rcu_nocb_unlock_irqrestore(rdp, flags);
// The flush succeeded and we moved CBs into the regular list.
// Don't wait for the wake up timer as it may be too far ahead.
// Wake up the GP thread now instead, if the cblist was empty.
__call_rcu_nocb_wake(rdp, *was_alldone, flags);
return true; // Callback already enqueued.
}
@ -457,13 +515,24 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
if (lazy)
WRITE_ONCE(rdp->lazy_len, rdp->lazy_len + 1);
if (!ncbs) {
WRITE_ONCE(rdp->nocb_bypass_first, j);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
}
rcu_nocb_bypass_unlock(rdp);
smp_mb(); /* Order enqueue before wake. */
if (ncbs) {
// A wake up of the grace period kthread or timer adjustment
// needs to be done only if:
// 1. Bypass list was fully empty before (this is the first
// bypass list entry), or:
// 2. Both of these conditions are met:
// a. The bypass list previously had only lazy CBs, and:
// b. The new CB is non-lazy.
if (ncbs && (!bypass_is_lazy || lazy)) {
local_irq_restore(flags);
} else {
// No-CBs GP kthread might be indefinitely asleep, if so, wake.
@ -491,8 +560,10 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
unsigned long flags)
__releases(rdp->nocb_lock)
{
long bypass_len;
unsigned long cur_gp_seq;
unsigned long j;
long lazy_len;
long len;
struct task_struct *t;
@ -506,9 +577,16 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
}
// Need to actually do a wakeup.
len = rcu_segcblist_n_cbs(&rdp->cblist);
bypass_len = rcu_cblist_n_cbs(&rdp->nocb_bypass);
lazy_len = READ_ONCE(rdp->lazy_len);
if (was_alldone) {
rdp->qlen_last_fqs_check = len;
if (!irqs_disabled_flags(flags)) {
// Only lazy CBs in bypass list
if (lazy_len && bypass_len == lazy_len) {
rcu_nocb_unlock_irqrestore(rdp, flags);
wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
TPS("WakeLazy"));
} else if (!irqs_disabled_flags(flags)) {
/* ... if queue was empty ... */
rcu_nocb_unlock_irqrestore(rdp, flags);
wake_nocb_gp(rdp, false);
@ -599,12 +677,12 @@ static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu)
static void nocb_gp_wait(struct rcu_data *my_rdp)
{
bool bypass = false;
long bypass_ncbs;
int __maybe_unused cpu = my_rdp->cpu;
unsigned long cur_gp_seq;
unsigned long flags;
bool gotcbs = false;
unsigned long j = jiffies;
bool lazy = false;
bool needwait_gp = false; // This prevents actual uninitialized use.
bool needwake;
bool needwake_gp;
@ -634,24 +712,43 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
* won't be ignored for long.
*/
list_for_each_entry(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp) {
long bypass_ncbs;
bool flush_bypass = false;
long lazy_ncbs;
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
rcu_nocb_lock_irqsave(rdp, flags);
lockdep_assert_held(&rdp->nocb_lock);
bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
if (bypass_ncbs &&
lazy_ncbs = READ_ONCE(rdp->lazy_len);
if (bypass_ncbs && (lazy_ncbs == bypass_ncbs) &&
(time_after(j, READ_ONCE(rdp->nocb_bypass_first) + jiffies_till_flush) ||
bypass_ncbs > 2 * qhimark)) {
flush_bypass = true;
} else if (bypass_ncbs && (lazy_ncbs != bypass_ncbs) &&
(time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
bypass_ncbs > 2 * qhimark)) {
// Bypass full or old, so flush it.
(void)rcu_nocb_try_flush_bypass(rdp, j);
bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
flush_bypass = true;
} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
rcu_nocb_unlock_irqrestore(rdp, flags);
continue; /* No callbacks here, try next. */
}
if (flush_bypass) {
// Bypass full or old, so flush it.
(void)rcu_nocb_try_flush_bypass(rdp, j);
bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
lazy_ncbs = READ_ONCE(rdp->lazy_len);
}
if (bypass_ncbs) {
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("Bypass"));
bypass = true;
bypass_ncbs == lazy_ncbs ? TPS("Lazy") : TPS("Bypass"));
if (bypass_ncbs == lazy_ncbs)
lazy = true;
else
bypass = true;
}
rnp = rdp->mynode;
@ -699,12 +796,20 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
my_rdp->nocb_gp_gp = needwait_gp;
my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
if (bypass && !rcu_nocb_poll) {
// At least one child with non-empty ->nocb_bypass, so set
// timer in order to avoid stranding its callbacks.
wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
TPS("WakeBypassIsDeferred"));
// At least one child with non-empty ->nocb_bypass, so set
// timer in order to avoid stranding its callbacks.
if (!rcu_nocb_poll) {
// If the bypass list has only lazy CBs, add a deferred lazy wake up.
if (lazy && !bypass) {
wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_LAZY,
TPS("WakeLazyIsDeferred"));
// Otherwise add a deferred bypass wake up.
} else if (bypass) {
wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
TPS("WakeBypassIsDeferred"));
}
}
if (rcu_nocb_poll) {
/* Polling, so trace if first poll in the series. */
if (gotcbs)
@ -1030,7 +1135,7 @@ static long rcu_nocb_rdp_deoffload(void *arg)
* return false, which means that future calls to rcu_nocb_try_bypass()
* will refuse to put anything into the bypass.
*/
WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
/*
* Start with invoking rcu_core() early. This way if the current thread
* happens to preempt an ongoing call to rcu_core() in the middle,
@ -1207,6 +1312,55 @@ int rcu_nocb_cpu_offload(int cpu)
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
static unsigned long
lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
int cpu;
unsigned long count = 0;
/* Snapshot count of all CPUs */
for_each_possible_cpu(cpu) {
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
count += READ_ONCE(rdp->lazy_len);
}
return count ? count : SHRINK_EMPTY;
}
static unsigned long
lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
int cpu;
unsigned long flags;
unsigned long count = 0;
/* Snapshot count of all CPUs */
for_each_possible_cpu(cpu) {
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
int _count = READ_ONCE(rdp->lazy_len);
if (_count == 0)
continue;
rcu_nocb_lock_irqsave(rdp, flags);
WRITE_ONCE(rdp->lazy_len, 0);
rcu_nocb_unlock_irqrestore(rdp, flags);
wake_nocb_gp(rdp, false);
sc->nr_to_scan -= _count;
count += _count;
if (sc->nr_to_scan <= 0)
break;
}
return count ? count : SHRINK_STOP;
}
static struct shrinker lazy_rcu_shrinker = {
.count_objects = lazy_rcu_shrink_count,
.scan_objects = lazy_rcu_shrink_scan,
.batch = 0,
.seeks = DEFAULT_SEEKS,
};
void __init rcu_init_nohz(void)
{
int cpu;
@ -1249,6 +1403,9 @@ void __init rcu_init_nohz(void)
if (offload_all)
cpumask_setall(rcu_nocb_mask);
if (register_shrinker(&lazy_rcu_shrinker, "rcu-lazy"))
pr_err("Failed to register lazy_rcu shrinker!\n");
if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
cpumask_and(rcu_nocb_mask, cpu_possible_mask,
@ -1284,6 +1441,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
raw_spin_lock_init(&rdp->nocb_gp_lock);
timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
rcu_cblist_init(&rdp->nocb_bypass);
WRITE_ONCE(rdp->lazy_len, 0);
mutex_init(&rdp->nocb_gp_kthread_mutex);
}
@ -1564,14 +1722,19 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}
static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
{
return false;
}
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
unsigned long j)
unsigned long j, bool lazy)
{
return true;
}
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
bool *was_alldone, unsigned long flags)
bool *was_alldone, unsigned long flags, bool lazy)
{
return false;
}


@ -43,6 +43,7 @@
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/rcupdate_trace.h>
#include <linux/jiffies.h>
#define CREATE_TRACE_POINTS
@ -144,8 +145,45 @@ bool rcu_gp_is_normal(void)
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
static atomic_t rcu_async_hurry_nesting = ATOMIC_INIT(1);
/*
* Should call_rcu() callbacks be processed with urgency or are
* they OK being executed with arbitrary delays?
*/
bool rcu_async_should_hurry(void)
{
return !IS_ENABLED(CONFIG_RCU_LAZY) ||
atomic_read(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_should_hurry);
/**
* rcu_async_hurry - Make future async RCU callbacks not lazy.
*
* After a call to this function, future calls to call_rcu()
* will be processed in a timely fashion.
*/
void rcu_async_hurry(void)
{
if (IS_ENABLED(CONFIG_RCU_LAZY))
atomic_inc(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_hurry);
/**
* rcu_async_relax - Make future async RCU callbacks lazy.
*
* After a call to this function, future calls to call_rcu()
* will be processed in a lazy fashion.
*/
void rcu_async_relax(void)
{
if (IS_ENABLED(CONFIG_RCU_LAZY))
atomic_dec(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_relax);
static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
/*
* Should normal grace-period primitives be expedited? Intended for
* use within RCU. Note that this function takes the rcu_expedited
@ -187,19 +225,90 @@ void rcu_unexpedite_gp(void)
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
/*
* Minimum time in milliseconds from the start of boot until RCU can consider
* in-kernel boot as completed. This can also be tuned at runtime to end the
* boot earlier, by userspace init code writing the time in milliseconds (even
* 0) to: /sys/module/rcupdate/parameters/rcu_boot_end_delay. The sysfs node
* can also be used to extend the delay to be larger than the default, assuming
* the marking of boot complete has not yet occurred.
*/
static int rcu_boot_end_delay = CONFIG_RCU_BOOT_END_DELAY;
static bool rcu_boot_ended __read_mostly;
static bool rcu_boot_end_called __read_mostly;
static DEFINE_MUTEX(rcu_boot_end_lock);
/*
* Inform RCU of the end of the in-kernel boot sequence.
* Inform RCU of the end of the in-kernel boot sequence. The boot sequence will
* not be marked ended until at least rcu_boot_end_delay milliseconds have passed.
*/
void rcu_end_inkernel_boot(void)
void rcu_end_inkernel_boot(void);
static void rcu_boot_end_work_fn(struct work_struct *work)
{
rcu_end_inkernel_boot();
}
static DECLARE_DELAYED_WORK(rcu_boot_end_work, rcu_boot_end_work_fn);
/* Must be called with rcu_boot_end_lock held. */
static void rcu_end_inkernel_boot_locked(void)
{
rcu_boot_end_called = true;
if (rcu_boot_ended)
return;
if (rcu_boot_end_delay) {
u64 boot_ms = div_u64(ktime_get_boot_fast_ns(), 1000000UL);
if (boot_ms < rcu_boot_end_delay) {
schedule_delayed_work(&rcu_boot_end_work,
msecs_to_jiffies(rcu_boot_end_delay - boot_ms));
return;
}
}
cancel_delayed_work(&rcu_boot_end_work);
rcu_unexpedite_gp();
rcu_async_relax();
if (rcu_normal_after_boot)
WRITE_ONCE(rcu_normal, 1);
rcu_boot_ended = true;
}
void rcu_end_inkernel_boot(void)
{
mutex_lock(&rcu_boot_end_lock);
rcu_end_inkernel_boot_locked();
mutex_unlock(&rcu_boot_end_lock);
}
static int param_set_rcu_boot_end(const char *val, const struct kernel_param *kp)
{
uint end_ms;
int ret = kstrtouint(val, 0, &end_ms);
if (ret)
return ret;
/*
* rcu_end_inkernel_boot() should be called at least once during init
* before we can allow param changes to end the boot.
*/
mutex_lock(&rcu_boot_end_lock);
rcu_boot_end_delay = end_ms;
if (!rcu_boot_ended && rcu_boot_end_called) {
rcu_end_inkernel_boot_locked();
}
mutex_unlock(&rcu_boot_end_lock);
return ret;
}
static const struct kernel_param_ops rcu_boot_end_ops = {
.set = param_set_rcu_boot_end,
.get = param_get_uint,
};
module_param_cb(rcu_boot_end_delay, &rcu_boot_end_ops, &rcu_boot_end_delay, 0644);
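/*
 * Editorial sketch, not part of the merge diff: user-space init code ending
 * the RCU boot phase early by writing 0 to the parameter described above.
 * The path comes from the comment block; error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void example_mark_rcu_boot_complete(void)
{
	int fd = open("/sys/module/rcupdate/parameters/rcu_boot_end_delay",
		      O_WRONLY);

	if (fd < 0)
		return;

	if (write(fd, "0", 1) != 1)	/* 0 ms: consider boot complete now */
		perror("rcu_boot_end_delay");
	close(fd);
}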
/*
* Let rcutorture know when it is OK to turn it up to eleven.
*/


@ -4574,7 +4574,7 @@ static inline int task_fits_cpu(struct task_struct *p, int cpu)
return (util_fits_cpu(util, uclamp_min, uclamp_max, cpu) > 0);
}
static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
inline void update_misfit_status(struct task_struct *p, struct rq *rq)
{
bool need_update = true;
@ -4598,6 +4598,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
*/
rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
}
EXPORT_SYMBOL_GPL(update_misfit_status);
#else /* CONFIG_SMP */
@ -4947,7 +4948,13 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
s64 delta;
bool skip_preempt = false;
ideal_runtime = sched_slice(cfs_rq, curr);
/*
* When many tasks blow up the sched_period, it is possible that
* sched_slice() reports unusually large results (when many tasks are
* very light for example). Therefore impose a maximum.
*/
ideal_runtime = min_t(u64, sched_slice(cfs_rq, curr), sysctl_sched_latency);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
trace_android_rvh_check_preempt_tick(current, &ideal_runtime, &skip_preempt,
delta_exec, cfs_rq, curr, sysctl_sched_min_granularity);


@ -1410,27 +1410,19 @@ static int psi_cpu_show(struct seq_file *m, void *v)
return psi_show(m, &psi_system, PSI_CPU);
}
static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
{
if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
return -EPERM;
return single_open(file, psi_show, NULL);
}
static int psi_io_open(struct inode *inode, struct file *file)
{
return psi_open(file, psi_io_show);
return single_open(file, psi_io_show, NULL);
}
static int psi_memory_open(struct inode *inode, struct file *file)
{
return psi_open(file, psi_memory_show);
return single_open(file, psi_memory_show, NULL);
}
static int psi_cpu_open(struct inode *inode, struct file *file)
{
return psi_open(file, psi_cpu_show);
return single_open(file, psi_cpu_show, NULL);
}
static ssize_t psi_write(struct file *file, const char __user *user_buf,
@ -1544,7 +1536,7 @@ static int psi_irq_show(struct seq_file *m, void *v)
static int psi_irq_open(struct inode *inode, struct file *file)
{
return psi_open(file, psi_irq_show);
return single_open(file, psi_irq_show, NULL);
}
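/*
 * Editorial sketch, not part of the merge diff: the unprivileged PSI trigger
 * registration that the removed psi_open() capability check used to reject.
 * The threshold and window values are arbitrary; see
 * Documentation/accounting/psi.rst for the trigger format.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int example_register_psi_trigger(void)
{
	const char trig[] = "some 150000 1000000";	/* 150 ms stall per 1 s window */
	struct pollfd pfd;
	int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return -1;

	if (write(fd, trig, strlen(trig) + 1) < 0) {
		perror("psi trigger");
		close(fd);
		return -1;
	}

	pfd.fd = fd;
	pfd.events = POLLPRI;
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
		printf("memory pressure event\n");

	close(fd);
	return 0;
}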
static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,


@ -1781,7 +1781,7 @@ bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
rwork->wq = wq;
call_rcu(&rwork->rcu, rcu_work_rcufn);
call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
return true;
}


@ -230,7 +230,8 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_noop_confirm_switch;
percpu_ref_get(ref); /* put after confirmation */
call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
call_rcu_hurry(&ref->data->rcu,
percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)


@ -5401,7 +5401,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
refcount_set(&memcg->id.ref, 1);
css_get(css);
if (unlikely(mem_cgroup_is_root(memcg)))
if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
2UL*HZ);
lru_gen_online_memcg(memcg);


@ -44,6 +44,7 @@
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>
#include <linux/cred.h>
#include <asm/tlb.h>
#include "internal.h"
@ -771,6 +772,8 @@ static void __mark_oom_victim(struct task_struct *tsk)
*/
static void mark_oom_victim(struct task_struct *tsk)
{
const struct cred *cred;
WARN_ON(oom_killer_disabled);
/* OOM killer might race with memcg OOM */
if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
@ -787,7 +790,9 @@ static void mark_oom_victim(struct task_struct *tsk)
*/
__thaw_task(tsk);
atomic_inc(&oom_victims);
trace_mark_victim(tsk->pid);
cred = get_task_cred(tsk);
trace_mark_victim(tsk, cred->uid.val);
put_cred(cred);
}
/**


@ -174,7 +174,7 @@ void dst_release(struct dst_entry *dst)
net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
__func__, dst, newrefcnt);
if (!newrefcnt)
call_rcu(&dst->rcu_head, dst_destroy_rcu);
call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu);
}
}
EXPORT_SYMBOL(dst_release);


@ -255,7 +255,7 @@ static int bpf_test_partial(const char *mount_dir)
TEST(src_fd = open(ft_src, O_DIRECTORY | O_RDONLY | O_CLOEXEC),
src_fd != -1);
TESTEQUAL(create_file(src_fd, s(test_name), 1, 2), 0);
TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_trace",
TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_partial",
&bpf_fd, NULL, NULL), 0);
TESTEQUAL(mount_fuse(mount_dir, bpf_fd, src_fd, &fuse_dev), 0);
@ -363,7 +363,7 @@ static int bpf_test_readdir(const char *mount_dir)
src_fd != -1);
TESTEQUAL(create_file(src_fd, s(names[0]), 1, 2), 0);
TESTEQUAL(create_file(src_fd, s(names[1]), 1, 2), 0);
TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_trace",
TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_partial",
&bpf_fd, NULL, NULL), 0);
TESTEQUAL(mount_fuse(mount_dir, bpf_fd, src_fd, &fuse_dev), 0);
@ -1490,6 +1490,8 @@ static int bpf_test_statfs(const char *mount_dir)
static int bpf_test_lseek(const char *mount_dir)
{
const char *file = "real";
const char *sparse_file = "sparse";
const off_t sparse_length = 0x100000000u;
const char *test_data = "data";
int result = TEST_FAILURE;
int src_fd = -1;
@ -1504,6 +1506,12 @@ static int bpf_test_lseek(const char *mount_dir)
TESTEQUAL(write(fd, test_data, strlen(test_data)), strlen(test_data));
TESTSYSCALL(close(fd));
fd = -1;
TEST(fd = openat(src_fd, sparse_file, O_CREAT | O_RDWR | O_CLOEXEC,
0777),
fd != -1);
TESTSYSCALL(ftruncate(fd, sparse_length));
TESTSYSCALL(close(fd));
fd = -1;
TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_trace",
&bpf_fd, NULL, NULL), 0);
TESTEQUAL(mount_fuse(mount_dir, bpf_fd, src_fd, &fuse_dev), 0);
@ -1518,6 +1526,18 @@ static int bpf_test_lseek(const char *mount_dir)
TESTEQUAL(bpf_test_trace("lseek"), 0);
TESTEQUAL(lseek(fd, 1, SEEK_DATA), 1);
TESTEQUAL(bpf_test_trace("lseek"), 0);
TESTSYSCALL(close(fd));
fd = -1;
TEST(fd = s_open(s_path(s(mount_dir), s(sparse_file)),
O_RDONLY | O_CLOEXEC),
fd != -1);
TESTEQUAL(lseek(fd, -256, SEEK_END), sparse_length - 256);
TESTEQUAL(lseek(fd, 0, SEEK_CUR), sparse_length - 256);
TESTSYSCALL(close(fd));
fd = -1;
result = TEST_SUCCESS;
out:
close(fd);


@ -28,9 +28,9 @@ int readdir_test(struct fuse_bpf_args *fa)
}
}
SEC("test_trace")
SEC("test_partial")
/* return FUSE_BPF_BACKING to use backing fs, 0 to pass to usermode */
int trace_test(struct fuse_bpf_args *fa)
int partial_test(struct fuse_bpf_args *fa)
{
switch (fa->opcode) {
case FUSE_LOOKUP | FUSE_PREFILTER: {
@ -329,6 +329,195 @@ int trace_test(struct fuse_bpf_args *fa)
}
}
SEC("test_trace")
/* return FUSE_BPF_BACKING to use backing fs, 0 to pass to usermode */
int trace_test(struct fuse_bpf_args *fa)
{
switch (fa->opcode) {
case FUSE_LOOKUP | FUSE_PREFILTER: {
/* real and partial use backing file */
const char *name = fa->in_args[0].value;
bpf_printk("lookup %s", name);
return FUSE_BPF_BACKING;
}
case FUSE_ACCESS | FUSE_PREFILTER: {
bpf_printk("Access: %d", fa->nodeid);
return FUSE_BPF_BACKING;
}
case FUSE_CREATE | FUSE_PREFILTER:
bpf_printk("Create: %d", fa->nodeid);
return FUSE_BPF_BACKING;
case FUSE_MKNOD | FUSE_PREFILTER: {
const struct fuse_mknod_in *fmi = fa->in_args[0].value;
const char *name = fa->in_args[1].value;
bpf_printk("mknod %s %x %x", name, fmi->rdev | fmi->mode, fmi->umask);
return FUSE_BPF_BACKING;
}
case FUSE_MKDIR | FUSE_PREFILTER: {
const struct fuse_mkdir_in *fmi = fa->in_args[0].value;
const char *name = fa->in_args[1].value;
bpf_printk("mkdir %s %x %x", name, fmi->mode, fmi->umask);
return FUSE_BPF_BACKING;
}
case FUSE_RMDIR | FUSE_PREFILTER: {
const char *name = fa->in_args[0].value;
bpf_printk("rmdir %s", name);
return FUSE_BPF_BACKING;
}
case FUSE_RENAME | FUSE_PREFILTER: {
const char *oldname = fa->in_args[1].value;
const char *newname = fa->in_args[2].value;
bpf_printk("rename from %s", oldname);
bpf_printk("rename to %s", newname);
return FUSE_BPF_BACKING;
}
case FUSE_RENAME2 | FUSE_PREFILTER: {
const struct fuse_rename2_in *fri = fa->in_args[0].value;
uint32_t flags = fri->flags;
const char *oldname = fa->in_args[1].value;
const char *newname = fa->in_args[2].value;
bpf_printk("rename(%x) from %s", flags, oldname);
bpf_printk("rename to %s", newname);
return FUSE_BPF_BACKING;
}
case FUSE_UNLINK | FUSE_PREFILTER: {
const char *name = fa->in_args[0].value;
bpf_printk("unlink %s", name);
return FUSE_BPF_BACKING;
}
case FUSE_LINK | FUSE_PREFILTER: {
const struct fuse_link_in *fli = fa->in_args[0].value;
const char *link_name = fa->in_args[1].value;
bpf_printk("link %d %s", fli->oldnodeid, link_name);
return FUSE_BPF_BACKING;
}
case FUSE_SYMLINK | FUSE_PREFILTER: {
const char *link_name = fa->in_args[0].value;
const char *link_dest = fa->in_args[1].value;
bpf_printk("symlink from %s", link_name);
bpf_printk("symlink to %s", link_dest);
return FUSE_BPF_BACKING;
}
case FUSE_READLINK | FUSE_PREFILTER: {
const char *link_name = fa->in_args[0].value;
bpf_printk("readlink from", link_name);
return FUSE_BPF_BACKING;
}
case FUSE_OPEN | FUSE_PREFILTER: {
bpf_printk("open");
return FUSE_BPF_BACKING;
}
case FUSE_OPEN | FUSE_POSTFILTER:
bpf_printk("open postfilter");
return FUSE_BPF_USER_FILTER;
case FUSE_READ | FUSE_PREFILTER: {
const struct fuse_read_in *fri = fa->in_args[0].value;
bpf_printk("read %llu", fri->offset);
return FUSE_BPF_BACKING;
}
case FUSE_GETATTR | FUSE_PREFILTER: {
bpf_printk("getattr");
return FUSE_BPF_BACKING;
}
case FUSE_SETATTR | FUSE_PREFILTER: {
bpf_printk("setattr");
return FUSE_BPF_BACKING;
}
case FUSE_OPENDIR | FUSE_PREFILTER: {
bpf_printk("opendir");
return FUSE_BPF_BACKING;
}
case FUSE_READDIR | FUSE_PREFILTER: {
bpf_printk("readdir");
return FUSE_BPF_BACKING;
}
case FUSE_FLUSH | FUSE_PREFILTER: {
bpf_printk("Flush");
return FUSE_BPF_BACKING;
}
case FUSE_GETXATTR | FUSE_PREFILTER: {
const char *name = fa->in_args[1].value;
bpf_printk("getxattr %s", name);
return FUSE_BPF_BACKING;
}
case FUSE_LISTXATTR | FUSE_PREFILTER: {
const char *name = fa->in_args[1].value;
bpf_printk("listxattr %s", name);
return FUSE_BPF_BACKING;
}
case FUSE_SETXATTR | FUSE_PREFILTER: {
const char *name = fa->in_args[1].value;
unsigned int size = fa->in_args[2].size;
bpf_printk("setxattr %s %u", name, size);
return FUSE_BPF_BACKING;
}
case FUSE_REMOVEXATTR | FUSE_PREFILTER: {
const char *name = fa->in_args[0].value;
bpf_printk("removexattr %s", name);
return FUSE_BPF_BACKING;
}
case FUSE_CANONICAL_PATH | FUSE_PREFILTER: {
bpf_printk("canonical_path");
return FUSE_BPF_BACKING;
}
case FUSE_STATFS | FUSE_PREFILTER: {
bpf_printk("statfs");
return FUSE_BPF_BACKING;
}
case FUSE_LSEEK | FUSE_PREFILTER: {
const struct fuse_lseek_in *fli = fa->in_args[0].value;
bpf_printk("lseek type:%d, offset:%lld", fli->whence, fli->offset);
return FUSE_BPF_BACKING;
}
default:
bpf_printk("Unknown opcode %d", fa->opcode);
return FUSE_BPF_BACKING;
}
}
SEC("test_hidden")
int trace_hidden(struct fuse_bpf_args *fa)
{