Merge branch 'android12-5.10' into android12-5.10-lts

Sync up with android12-5.10 for the following commits:

c82b315c4b ANDROID: GKI: rockchip: Enable symbols for phy
c10e63b794 ANDROID: GKI: rockchip: Enable symbols for rockchip-opp
54a33334de ANDROID: GKI: rockchip: Enable symbols for sdhci-arasan
6276cc3982 ANDROID: GKI: rockchip: Enable symbols for devfreq
a195017de8 ANDROID: GKI: rockchip: Enable symbols for gpio-rockchip
068f20142a ANDROID: GKI: rockchip: Enable symbol for pm-domain
6cef9b2952 ANDROID: GKI: rockchip: Enable symbols for common clk
fd41ec3b94 ANDROID: GKI: rockchip: Enable symbols for rtc-rk808
7827c05b00 ANDROID: GKI: rockchip: Enable symbol for panel-simple
229f9c2faf ANDROID: GKI: rockchip: Enable symbol for ov5695
94f76a7779 ANDROID: GKI: rockchip: Enable symbol for nvme
4275c37d4e ANDROID: GKI: rockchip: Enable symbols for iio
be153f6250 ANDROID: GKI: rockchip: Enable symbol for cw2015_battery
a90c09dd41 ANDROID: GKI: rockchip: Enable symbols for mmc driver
f1e23eee85 ANDROID: GKI: rockchip: Enable symbols for cpufreq governor
5f020167d0 ANDROID: GKI: rockchip: Enable symbols for scsi ch
64665afcb3 ANDROID: GKI: rockchip: Enable symbols for adc-keys
b37b3c9eaa ANDROID: GKI: rockchip: Enable symbol for act8865
21034d71fc ANDROID: GKI: rockchip: Enable symbols for pwm_bl
576c7a6297 ANDROID: GKI: rockchip: Enable symbols for phy
fc1e452fa3 ANDROID: GKI: rockchip: Enable symbols for LED hardbeat
f4b3d35dfa FROMLIST: scsi: ufs: Fix task management completion
7b6860d2a4 ANDROID: scsi: ufs: Rename struct ufs_hba_with_hpb into ufs_hba_add_info
5adc3c4124 ANDROID: Update the ABI representation
e774e4eca6 ANDROID: scsi: ufs: add complete init vendor hook
7050ead570 ANDROID: qcom: Add qdisc related symbols
d788d16fed FROMGIT: scs: Release kasan vmalloc poison in scs_free process
2659f14d93 UPSTREAM: arm64: Kconfig: select KASAN_VMALLOC if KANSAN_GENERIC is enabled
23232f84c8 UPSTREAM: arm64: kaslr: support randomized module area with KASAN_VMALLOC
ef61240f62 UPSTREAM: arm64: Kconfig: support CONFIG_KASAN_VMALLOC
d0f4b61ae6 UPSTREAM: arm64: kasan: abstract _text and _end to KERNEL_START/END
4d91b1f6ee UPSTREAM: arm64: kasan: don't populate vmalloc area for CONFIG_KASAN_VMALLOC
935b5c3bdd ANDROID: GKI: rockchip: Enable symbols for rk808-regulator
9a8a15b8bd ANDROID: GKI: rockchip: Enable symbols for hid
55b0b34791 FROMGIT: arm64: kasan: mte: move GCR_EL1 switch to task switch when KASAN disabled
6c6d1d7e42 UPSTREAM: arm64: add MTE supported check to thread switching and syscall entry/exit
f746714fe4 UPSTREAM: arm64: kasan: mte: use a constant kernel GCR_EL1 value
eb02ea0e35 ANDROID: GKI: fix mode of android/abi_gki_aarch64.xml file
62ad82b86b ANDROID: ABI: update allowed list for galaxy
f1a5448fa7 ANDROID: GKI: rockchip: Convert symbol to order by module
18d90d0300 ANDROID: GKI: Add a symbol to symbol list
cb7a5d58a9 FROMLIST: arm64: mm: update max_pfn after memory hotplug
477cd8fd78 ANDROID: GKI: Update symbols to symbol list
dcd77f0b74 UPSTREAM: erofs: fix 1 lcluster-sized pcluster for big pcluster
e085d3f0d0 UPSTREAM: erofs: enable big pcluster feature
ed0607cc52 UPSTREAM: erofs: support decompress big pcluster for lz4 backend
d34cb6cdc0 UPSTREAM: erofs: support parsing big pcluster compact indexes
051d76b899 UPSTREAM: erofs: support parsing big pcluster compress indexes
d149931601 UPSTREAM: erofs: adjust per-CPU buffers according to max_pclusterblks
95a1d5df84 UPSTREAM: erofs: add big physical cluster definition
8043aaed1d UPSTREAM: erofs: fix up inplace I/O pointer for big pcluster
6ad2f8f169 UPSTREAM: erofs: introduce physical cluster slab pools
432f58b100 UPSTREAM: erofs: introduce multipage per-CPU buffers
571c9a0bd3 UPSTREAM: erofs: remove a void EROFS_VERSION macro set in Makefile
431d73396d UPSTREAM: erofs: reserve physical_clusterbits[]
89dbc6246a UPSTREAM: erofs: Clean up spelling mistakes found in fs/erofs
ac1f14e9d5 UPSTREAM: erofs: add on-disk compression configurations
cd21e62366 UPSTREAM: erofs: introduce on-disk lz4 fs configurations
e17fd2ac9d UPSTREAM: erofs: introduce erofs_sb_has_xxx() helpers
ba1a3d1fb2 UPSTREAM: erofs: don't use erofs_map_blocks() any more
384b2cdaf8 UPSTREAM: erofs: complete a missing case for inplace I/O
a9ac6ae90e BACKPORT: UPSTREAM: mm: fs: invalidate bh_lrus for only cold path
49af2e35d5 FROMLIST: dma-buf: support users to change dma_buf.name
3b1f439841 ANDROID: GKI: Update symbol list for vivo
c82dbcbec1 BACKPORT: ASoC: soc-pcm: Get all BEs along DAPM path
de7ca5e752 ANDROID: Disable CFI on trace hooks
38532a9f24 ANDROID: GKI: Update symbol list for new modules
099e8c7741 ANDROID: Update symbol list for mtk
cfc0a49c73 ANDROID: fs/fuse: Keep FUSE file times consistent with lower file
cdbeb135e5 ANDROID: GKI: Update symbols to symbol list
66e24eb093 Revert "ANDROID: mm: page_pinner: use EXPORT_SYMBOL_GPL"
5f10883630 Revert "FROMLIST: USB: gadget: f_fs: add SuperSpeed Plus support"
4e6242598d UPSTREAM: drm/dp_mst: Fix return code on sideband message failure
bb13ff0598 BACKPORT: FROMGIT: usb: dwc3: gadget: Avoid starting DWC3 gadget during UDC unbind
0671bafa24 UPSTREAM: tracing: Fix NULL pointer dereference in start_creating
aae44f81e3 ANDROID: enable MTK RNDIS
f278b215d4 ANDROID: abi_gki_aarch64_qcom: Add 2 new symbols for gsi
e21fe3ef80 ANDROID: Update the ABI representation
b74189ec8b ANDROID: GKI: Update abi_gki_aarch64_qcom for rtc_tm_to_ktime and rtc_ktime_to_tm
4652709913 ANDROID: fuse: Allocate zeroed memory for canonical path
96db9b84a6 FROMGIT: f2fs: should use GFP_NOFS for directory inodes
96beb15eb2 UPSTREAM: mm, slub: move slub_debug static key enabling outside slab_mutex
9f821f9789 UPSTREAM: mm, slub: enable slub_debug static key when creating cache with explicit debug flags
bcbaadf442 UPSTREAM: PM: sleep: core: Avoid setting power.must_resume to false
ba98a3a1bb BACKPORT: FROMGIT: usb: gadget: f_uac2: Populate SS descriptors' wBytesPerInterval
d9e738916e BACKPORT: FROMGIT: usb: gadget: f_uac2: Add missing companion descriptor for feedback EP
6773d5cd77 Revert "FROMLIST: usb: gadget: f_uac2: Add missing companion descriptor for feedback EP"
e6e66cb3dd ANDROID: Update the ABI representation
5a4ed990f2 FROMGIT: binder: make sure fd closes complete
b55536ba69 ANDROID: abi_gki_aarch64_qcom: Add vsock functions
4d9d866fe5 ANDROID: mm: unlock the page on speculative fault retry
fd2214199a FROMGIT: binder: fix freeze race
fca745e32d FROMLIST: dm-verity: skip verity_handle_error on I/O errors
640610dbc4 UPSTREAM: thermal: cpufreq_cooling: Update also offline CPUs per-cpu thermal_pressure

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I2534965b25926060feaac05e1ae2821e6a12cc45

@@ -94,8 +94,11 @@ __ethtool_get_link_ksettings
 __fdget
 __flush_icache_range
 __free_pages
+__genphy_config_aneg
 __get_free_pages
 __get_task_comm
+__hid_register_driver
+__hid_request
 __hrtimer_get_remaining
 __hvc_resize
 __hwspin_lock_timeout
@@ -343,6 +346,8 @@ __traceiter_gpu_mem_total
 __traceiter_sched_util_est_se_tp
 __traceiter_xdp_exception
 __tracepoint_android_rvh_account_irq
+__tracepoint_android_rvh_arm64_serror_panic
+__tracepoint_android_rvh_bad_mode
 __tracepoint_android_rvh_build_perf_domains
 __tracepoint_android_rvh_can_migrate_task
 __tracepoint_android_rvh_check_preempt_wakeup
@@ -352,9 +357,12 @@ __tracepoint_android_rvh_cpu_cgroup_online
 __tracepoint_android_rvh_cpu_overutilized
 __tracepoint_android_rvh_cpufreq_transition
 __tracepoint_android_rvh_dequeue_task
+__tracepoint_android_rvh_dequeue_task_idle
 __tracepoint_android_rvh_die_kernel_fault
 __tracepoint_android_rvh_do_mem_abort
+__tracepoint_android_rvh_do_sea
 __tracepoint_android_rvh_do_sp_pc_abort
+__tracepoint_android_rvh_do_undefinstr
 __tracepoint_android_rvh_enqueue_task
 __tracepoint_android_rvh_find_busiest_queue
 __tracepoint_android_rvh_find_energy_efficient_cpu
@@ -408,6 +416,7 @@ __tracepoint_android_vh_binder_restore_priority
 __tracepoint_android_vh_binder_set_priority
 __tracepoint_android_vh_binder_transaction_init
 __tracepoint_android_vh_binder_wakeup_ilocked
+__tracepoint_android_vh_cgroup_attach
 __tracepoint_android_vh_cma_alloc_finish
 __tracepoint_android_vh_cma_alloc_start
 __tracepoint_android_vh_cpu_idle_enter
@@ -429,10 +438,19 @@ __tracepoint_android_vh_kfree_skb
 __tracepoint_android_vh_logbuf
 __tracepoint_android_vh_logbuf_pr_cont
 __tracepoint_android_vh_meminfo_proc_show
+__tracepoint_android_vh_mutex_wait_finish
+__tracepoint_android_vh_mutex_wait_start
 __tracepoint_android_vh_pagecache_get_page
 __tracepoint_android_vh_printk_hotplug
 __tracepoint_android_vh_ptype_head
 __tracepoint_android_vh_rmqueue
+__tracepoint_android_vh_rtmutex_wait_finish
+__tracepoint_android_vh_rtmutex_wait_start
+__tracepoint_android_vh_rwsem_read_wait_finish
+__tracepoint_android_vh_rwsem_read_wait_start
+__tracepoint_android_vh_rwsem_write_wait_finish
+__tracepoint_android_vh_rwsem_write_wait_start
+__tracepoint_android_vh_sched_show_task
 __tracepoint_android_vh_scheduler_tick
 __tracepoint_android_vh_show_max_freq
 __tracepoint_android_vh_show_mem
@@ -440,6 +458,8 @@ __tracepoint_android_vh_show_resume_epoch_val
 __tracepoint_android_vh_show_suspend_epoch_val
 __tracepoint_android_vh_timer_calc_index
 __tracepoint_android_vh_timerfd_create
+__tracepoint_android_vh_try_to_freeze_todo
+__tracepoint_android_vh_try_to_freeze_todo_unfrozen
 __tracepoint_android_vh_typec_store_partner_src_caps
 __tracepoint_android_vh_typec_tcpci_override_toggling
 __tracepoint_android_vh_typec_tcpm_adj_current_limit
@@ -453,6 +473,8 @@ __tracepoint_android_vh_ufs_send_tm_command
 __tracepoint_android_vh_ufs_send_uic_command
 __tracepoint_android_vh_ufs_update_sdev
 __tracepoint_android_vh_ufs_update_sysfs
+__tracepoint_android_vh_watchdog_timer_softlockup
+__tracepoint_android_vh_wq_lockup_pool
 __tracepoint_binder_transaction_received
 __tracepoint_clock_set_rate
 __tracepoint_cpu_frequency
@@ -489,12 +511,14 @@ __tracepoint_suspend_resume
 __tracepoint_workqueue_execute_end
 __tracepoint_workqueue_execute_start
 __tracepoint_xdp_exception
+__tty_alloc_driver
 __tty_insert_flip_char
 __udelay
 __uio_register_device
 __unregister_chrdev
 __update_load_avg_blocked_se
 __usb_create_hcd
+__usb_get_extra_descriptor
 __usecs_to_jiffies
 __v4l2_device_register_subdev_nodes
 __video_register_device
@@ -774,6 +798,14 @@ cancel_delayed_work
 cancel_delayed_work_sync
 cancel_work_sync
 capable
+cdc_ncm_bind_common
+cdc_ncm_change_mtu
+cdc_ncm_fill_tx_frame
+cdc_ncm_rx_verify_ndp16
+cdc_ncm_rx_verify_nth16
+cdc_ncm_select_altsetting
+cdc_ncm_unbind
+cdc_parse_cdc_header
 cdev_add
 cdev_alloc
 cdev_del
@@ -946,6 +978,7 @@ cpumask_next_wrap
 cpupri_find_fitness
 cpus_read_lock
 cpus_read_unlock
+crc16
 crc32_le
 crc8
 crc8_populate_msb
@@ -1051,6 +1084,7 @@ dev_pm_opp_adjust_voltage
 dev_pm_opp_disable
 dev_pm_opp_enable
 dev_pm_opp_find_freq_ceil
+dev_pm_opp_find_freq_ceil_by_volt
 dev_pm_opp_find_freq_exact
 dev_pm_opp_find_freq_floor
 dev_pm_opp_free_cpufreq_table
@@ -1389,6 +1423,7 @@ down_write
 downgrade_write
 dput
 drain_workqueue
+driver_attach
 driver_create_file
 driver_find_device
 driver_register
@@ -1691,6 +1726,7 @@ drm_mode_object_put
 drm_mode_probed_add
 drm_mode_set_crtcinfo
 drm_mode_set_name
+drm_mode_sort
 drm_mode_vrefresh
 drm_modeset_acquire_fini
 drm_modeset_acquire_init
@@ -1919,7 +1955,10 @@ genl_notify
 genl_register_family
 genl_unregister_family
 genlmsg_put
+genphy_read_status
 genphy_resume
+genphy_soft_reset
+genphy_suspend
 get_cpu_device
 get_cpu_idle_time
 get_cpu_idle_time_us
@@ -2028,6 +2067,13 @@ hdmi_infoframe_pack
 hex2bin
 hex_dump_to_buffer
 hex_to_bin
+hid_hw_close
+hid_hw_open
+hid_hw_start
+hid_hw_stop
+hid_open_report
+hid_report_raw_event
+hid_unregister_driver
 hmm_range_fault
 hrtimer_active
 hrtimer_cancel
@@ -2135,6 +2181,7 @@ iio_read_channel_processed
 iio_read_channel_raw
 import_iovec
 in4_pton
+in6_dev_finish_destroy
 in6_pton
 in_aton
 in_egroup_p
@@ -2156,6 +2203,7 @@ input_allocate_device
 input_close_device
 input_event
 input_ff_create
+input_ff_create_memless
 input_ff_destroy
 input_free_device
 input_mt_assign_slots
@@ -2315,6 +2363,7 @@ kernel_sendmsg
 kernel_sigaction
 kernfs_find_and_get_ns
 kernfs_notify
+kernfs_path_from_node
 kernfs_put
 kfree
 kfree_const
@@ -2353,6 +2402,7 @@ kobject_uevent
 kobject_uevent_env
 krealloc
 kset_create_and_add
+kset_unregister
 ksize
 ksoftirqd
 kstat
@@ -2448,7 +2498,9 @@ mbox_request_channel
 mbox_send_message
 mdiobus_alloc_size
 mdiobus_free
+mdiobus_read
 mdiobus_unregister
+mdiobus_write
 media_device_cleanup
 media_device_init
 media_device_unregister
@@ -2930,16 +2982,24 @@ perf_pmu_unregister
 perf_trace_buf_alloc
 perf_trace_run_bpf_submit
 pfn_valid
+phy_attached_info
 phy_calibrate
 phy_configure
 phy_connect
+phy_connect_direct
 phy_disconnect
 phy_do_ioctl_running
+phy_drivers_register
+phy_drivers_unregister
 phy_ethtool_get_link_ksettings
 phy_ethtool_nway_reset
 phy_ethtool_set_link_ksettings
 phy_exit
+phy_find_first
+phy_get_pause
 phy_init
+phy_init_hw
+phy_mii_ioctl
 phy_pm_runtime_get_sync
 phy_pm_runtime_put_sync
 phy_power_off
@@ -3078,11 +3138,13 @@ ps2_sliced_command
 pskb_expand_head
 pstore_register
 pstore_unregister
+public_key_verify_signature
 put_device
 put_disk
 put_iova_domain
 put_pid
 put_sg_io_hdr
+put_tty_driver
 put_unused_fd
 put_vaddr_frames
 pwm_apply_state
@@ -3539,6 +3601,7 @@ smp_call_function
 smp_call_function_any
 smp_call_function_many
 smp_call_function_single
+smp_call_function_single_async
 smp_call_on_cpu
 smpboot_register_percpu_thread
 smpboot_unregister_percpu_thread
@@ -3755,6 +3818,7 @@ spmi_register_read
 spmi_register_write
 spmi_register_zero_write
 sprint_symbol
+sprint_symbol_no_offset
 sprintf
 srcu_barrier
 srcu_batches_completed
@@ -3767,6 +3831,7 @@ srcutorture_get_gp_data
 sscanf
 stack_trace_print
 stack_trace_save
+stack_trace_save_regs
 stack_trace_save_tsk
 static_key_disable
 static_key_disable_cpuslocked
@@ -3864,6 +3929,12 @@ tasklet_init
 tasklet_kill
 tasklet_setup
 tasklist_lock
+tcp_register_congestion_control
+tcp_reno_cong_avoid
+tcp_reno_ssthresh
+tcp_reno_undo_cwnd
+tcp_slow_start
+tcp_unregister_congestion_control
 tcpci_get_tcpm_port
 tcpci_irq
 tcpci_register_port
@@ -3967,9 +4038,26 @@ ttm_unmap_and_unpopulate_pages
 tty_flip_buffer_push
 tty_insert_flip_string_fixed_flag
 tty_kref_put
+tty_ldisc_deref
+tty_ldisc_ref
+tty_port_close
+tty_port_destroy
+tty_port_hangup
+tty_port_init
+tty_port_open
+tty_port_register_device
 tty_port_tty_get
+tty_port_tty_wakeup
+tty_register_driver
+tty_set_operations
+tty_standard_install
+tty_std_termios
 tty_termios_baud_rate
+tty_termios_copy_hw
 tty_termios_encode_baud_rate
+tty_unregister_device
+tty_unregister_driver
+tty_vhangup
 typec_altmode_get_partner
 typec_altmode_update_active
 typec_get_drvdata
@@ -4095,6 +4183,7 @@ usb_amd_quirk_pll_enable
 usb_asmedia_modifyflowcontrol
 usb_assign_descriptors
 usb_autopm_get_interface
+usb_autopm_get_interface_no_resume
 usb_autopm_put_interface
 usb_bulk_msg
 usb_calc_bus_time
@@ -4107,6 +4196,7 @@ usb_debug_root
 usb_decode_ctrl
 usb_del_gadget_udc
 usb_deregister
+usb_deregister_dev
 usb_disable_xhci_ports
 usb_disabled
 usb_enable_autosuspend
@@ -4124,6 +4214,7 @@ usb_ep_queue
 usb_ep_set_halt
 usb_ep_set_maxpacket_limit
 usb_find_common_endpoints
+usb_find_interface
 usb_free_all_descriptors
 usb_free_coherent
 usb_free_urb
@@ -4140,8 +4231,10 @@ usb_gadget_vbus_connect
 usb_gadget_vbus_disconnect
 usb_gadget_vbus_draw
 usb_gadget_wakeup
+usb_get_dev
 usb_get_dr_mode
 usb_get_gadget_udc_name
+usb_get_intf
 usb_get_maximum_speed
 usb_get_urb
 usb_gstrings_attach
@@ -4169,12 +4262,17 @@ usb_ifnum_to_if
 usb_initialize_gadget
 usb_interface_id
 usb_kill_urb
+usb_match_id
+usb_match_one_id
 usb_otg_state_string
 usb_phy_set_charger_current
+usb_poison_anchored_urbs
 usb_poison_urb
 usb_put_dev
 usb_put_function_instance
 usb_put_hcd
+usb_put_intf
+usb_register_dev
 usb_register_driver
 usb_register_notify
 usb_remove_hcd
@@ -4188,12 +4286,17 @@ usb_role_switch_set_role
 usb_role_switch_unregister
 usb_root_hub_lost_power
 usb_set_device_state
+usb_set_interface
+usb_show_dynids
 usb_speed_string
+usb_store_new_id
 usb_string_id
 usb_submit_urb
+usb_unpoison_urb
 usb_unregister_notify
 usb_wakeup_notification
 usbnet_change_mtu
+usbnet_defer_kevent
 usbnet_disconnect
 usbnet_get_drvinfo
 usbnet_get_endpoints


@@ -398,6 +398,7 @@
 __devm_regmap_init_spi
 devm_regulator_bulk_get
 devm_regulator_get
+devm_regulator_get_exclusive
 devm_regulator_get_optional
 devm_regulator_put
 devm_regulator_register
@@ -967,6 +968,7 @@
 __ioremap
 io_schedule_timeout
 iounmap
+iov_iter_bvec
 ip_send_check
 iput
 __irq_alloc_descs
@@ -1052,6 +1054,7 @@
 kstrtouint
 kstrtouint_from_user
 kstrtoull
+kstrtoull_from_user
 kthread_bind
 kthread_bind_mask
 kthread_cancel_delayed_work_sync
@@ -1246,6 +1249,7 @@
 of_get_regulator_init_data
 of_iomap
 of_irq_find_parent
+of_irq_get
 of_irq_get_byname
 of_irq_parse_one
 of_machine_is_compatible
@@ -1280,6 +1284,7 @@
 of_root
 of_thermal_get_ntrips
 of_thermal_get_trip_points
+of_thermal_is_trip_valid
 of_translate_address
 of_usb_host_tpl_support
 page_endio
@@ -1355,6 +1360,7 @@
 pinctrl_remove_gpio_range
 pinctrl_select_state
 pin_get_name
+pin_user_pages
 pin_user_pages_fast
 pin_user_pages_remote
 pktgen_xfrm_outer_mode_output
@@ -1582,6 +1588,9 @@
 rtc_tm_to_time64
 rtc_update_irq
 rtc_valid_tm
+__rt_mutex_init
+rt_mutex_lock
+rt_mutex_unlock
 rtnl_is_locked
 rtnl_lock
 rtnl_unlock
@@ -1923,6 +1932,7 @@
 __traceiter_android_rvh_typec_tcpci_chk_contaminant
 __traceiter_android_rvh_typec_tcpci_get_vbus
 __traceiter_android_rvh_uclamp_eff_get
+__traceiter_android_rvh_ufs_complete_init
 __traceiter_android_rvh_ufs_reprogram_all_keys
 __traceiter_android_rvh_util_est_update
 __traceiter_android_vh_arch_set_freq_scale
@@ -1956,6 +1966,8 @@
 __traceiter_android_vh_ufs_send_tm_command
 __traceiter_android_vh_ufs_send_uic_command
 __traceiter_android_vh_ufs_update_sysfs
+__traceiter_android_vh_usb_dev_resume
+__traceiter_android_vh_usb_dev_suspend
 __traceiter_clock_set_rate
 __traceiter_cpu_frequency
 __traceiter_device_pm_callback_end
@@ -1998,6 +2010,7 @@
 __tracepoint_android_rvh_typec_tcpci_chk_contaminant
 __tracepoint_android_rvh_typec_tcpci_get_vbus
 __tracepoint_android_rvh_uclamp_eff_get
+__tracepoint_android_rvh_ufs_complete_init
 __tracepoint_android_rvh_ufs_reprogram_all_keys
 __tracepoint_android_rvh_util_est_update
 __tracepoint_android_vh_arch_set_freq_scale
@@ -2031,6 +2044,8 @@
 __tracepoint_android_vh_ufs_send_tm_command
 __tracepoint_android_vh_ufs_send_uic_command
 __tracepoint_android_vh_ufs_update_sysfs
+__tracepoint_android_vh_usb_dev_resume
+__tracepoint_android_vh_usb_dev_suspend
 __tracepoint_clock_set_rate
 __tracepoint_cpu_frequency
 __tracepoint_device_pm_callback_end


@@ -452,6 +452,7 @@
 devres_alloc_node
 devres_free
 devres_release
+dev_set_mac_address
 dev_set_name
 _dev_warn
 disable_irq
@@ -2195,6 +2196,7 @@
 usbnet_write_cmd
 usbnet_write_cmd_async
 usbnet_write_cmd_nopm
+usb_os_desc_prepare_interf_dir
 usb_put_function
 usb_put_function_instance
 usb_put_hcd
@@ -2235,6 +2237,7 @@
 v4l2_ctrl_request_complete
 v4l2_ctrl_request_setup
 __v4l2_ctrl_s_ctrl
+__v4l2_ctrl_s_ctrl_compound
 v4l2_ctrl_subdev_subscribe_event
 v4l2_ctrl_subscribe_event
 v4l2_device_register

[file diff suppressed because it is too large]


@@ -1784,6 +1784,8 @@
 platform_get_resource
 platform_get_resource_byname
 platform_irq_count
+platform_msi_domain_alloc_irqs
+platform_msi_domain_free_irqs
 pm_clk_add
 pm_clk_create
 pm_clk_destroy
@@ -1958,6 +1960,7 @@
 register_kretprobe
 register_memory_notifier
 register_module_notifier
+register_qdisc
 register_netdev
 register_netdevice
 register_netdevice_notifier
@@ -2086,8 +2089,10 @@
 rproc_remove_subdev
 rproc_report_crash
 rproc_shutdown
+rtc_ktime_to_tm
 __rtc_register_device
 rtc_time64_to_tm
+rtc_tm_to_ktime
 rtc_tm_to_time64
 rtc_update_irq
 rt_mutex_lock
@@ -2810,6 +2815,7 @@
 unregister_oom_notifier
 unregister_pernet_device
 unregister_pm_notifier
+unregister_qdisc
 unregister_reboot_notifier
 unregister_restart_handler
 unregister_rpmsg_driver
@@ -2983,6 +2989,8 @@
 vprintk
 vscnprintf
 vsnprintf
+vsock_addr_init
+vsock_remove_sock
 vunmap
 vzalloc
 wait_for_completion

[file diff suppressed because it is too large]


@@ -869,6 +869,7 @@
 kstrtou8_from_user
 kstrtouint
 kstrtouint_from_user
+_kstrtoul
 kstrtoul_from_user
 kstrtoull
 kstrtoull_from_user
@@ -1226,6 +1227,8 @@
 proc_dointvec_minmax
 proc_dostring
 proc_douintvec_minmax
+profile_event_register
+profile_event_unregister
 proto_register
 proto_unregister
 __pskb_pull_tail
@@ -1761,6 +1764,7 @@
 __tracepoint_android_rvh_flush_task
 __tracepoint_android_rvh_migrate_queued_task
 __tracepoint_android_rvh_new_task_stats
+__tracepoint_android_rvh_refrigerator
 __tracepoint_android_rvh_replace_next_task_fair
 __tracepoint_android_rvh_resume_cpus
 __tracepoint_android_rvh_sched_cpu_dying
@@ -1786,12 +1790,15 @@
 __tracepoint_android_rvh_update_cpus_allowed
 __tracepoint_android_rvh_update_misfit_status
 __tracepoint_android_rvh_wake_up_new_task
+__tracepoint_android_vh_account_task_time
 __tracepoint_android_vh_allow_domain_state
 __tracepoint_android_vh_binder_restore_priority
 __tracepoint_android_vh_binder_set_priority
+__tracepoint_android_vh_binder_trans
 __tracepoint_android_vh_binder_wakeup_ilocked
 __tracepoint_android_vh_cpu_idle_enter
 __tracepoint_android_vh_cpu_idle_exit
+__tracepoint_android_vh_dup_task_struct
 __tracepoint_android_vh_ftrace_dump_buffer
 __tracepoint_android_vh_ftrace_format_check
 __tracepoint_android_vh_ftrace_oops_enter
@@ -1799,6 +1806,7 @@
 __tracepoint_android_vh_ftrace_size_check
 __tracepoint_android_vh_iommu_setup_dma_ops
 __tracepoint_android_vh_ipi_stop
+__tracepoint_android_vh_irqtime_account_process_tick
 __tracepoint_android_vh_jiffies_update
 __tracepoint_android_vh_mmc_attach_sd
 __tracepoint_android_vh_mmc_blk_mq_rw_recovery
@@ -1812,10 +1820,13 @@
 __tracepoint_android_vh_show_resume_epoch_val
 __tracepoint_android_vh_show_suspend_epoch_val
 __tracepoint_android_vh_timer_calc_index
+__tracepoint_android_vh_tune_scan_type
+__tracepoint_android_vh_tune_swappiness
 __tracepoint_android_vh_ufs_check_int_errors
 __tracepoint_android_vh_ufs_compl_command
 __tracepoint_android_vh_ufs_send_command
 __tracepoint_android_vh_ufs_update_sdev
+__tracepoint_android_vh_vmpressure
 __tracepoint_binder_transaction_received
 __tracepoint_cpu_frequency_limits
 __tracepoint_cpu_idle
@@ -1865,6 +1876,7 @@
 ucsi_destroy
 ucsi_get_drvdata
 ucsi_register
+ucsi_send_command
 ucsi_set_drvdata
 ucsi_unregister
 __udelay


@@ -139,6 +139,7 @@ config ARM64
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+	select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
 	select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
 	select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
 	select HAVE_ARCH_KFENCE
@@ -194,6 +195,7 @@ config ARM64
 	select IOMMU_DMA if IOMMU_SUPPORT
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
+	select KASAN_VMALLOC if KASAN_GENERIC
 	select MODULES_USE_ELF_RELA
 	select NEED_DMA_MAP_STATE
 	select NEED_SG_DMA_LENGTH


@@ -236,7 +236,6 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 #define arch_enable_tagging_async()		mte_enable_kernel_async()
 #define arch_set_tagging_report_once(state)	mte_set_report_once(state)
 #define arch_force_async_tag_fault()		mte_check_tfsr_exit()
-#define arch_init_tags(max_tag)			mte_init_tags(max_tag)
 #define arch_get_random_tag()			mte_get_random_tag()
 #define arch_get_mem_tag(addr)			mte_get_mem_tag(addr)
 #define arch_set_mem_tag_range(addr, size, tag, init)	\


@@ -130,7 +130,6 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
 
 void mte_enable_kernel_sync(void);
 void mte_enable_kernel_async(void);
-void mte_init_tags(u64 max_tag);
 
 void mte_set_report_once(bool state);
 bool mte_report_once(void);
@@ -165,10 +164,6 @@ static inline void mte_enable_kernel_async(void)
 {
 }
 
-static inline void mte_init_tags(u64 max_tag)
-{
-}
-
 static inline void mte_set_report_once(bool state)
 {
 }


@@ -16,8 +16,6 @@
 #include <asm/pgtable-types.h>
 
-extern u64 gcr_kernel_excl;
-
 void mte_clear_page_tags(void *addr);
 unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
 				      unsigned long n);
@@ -43,7 +41,6 @@ void mte_copy_page_tags(void *kto, const void *kfrom);
 void mte_thread_init_user(void);
 void mte_thread_switch(struct task_struct *next);
 void mte_suspend_enter(void);
-void mte_suspend_exit(void);
 long set_mte_ctrl(struct task_struct *task, unsigned long arg);
 long get_mte_ctrl(struct task_struct *task);
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
@@ -72,9 +69,6 @@ static inline void mte_thread_switch(struct task_struct *next)
 static inline void mte_suspend_enter(void)
 {
 }
-static inline void mte_suspend_exit(void)
-{
-}
 static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
 	return 0;
@@ -105,11 +99,17 @@ void mte_check_tfsr_el1(void);
 
 static inline void mte_check_tfsr_entry(void)
 {
+	if (!system_supports_mte())
+		return;
+
 	mte_check_tfsr_el1();
 }
 
 static inline void mte_check_tfsr_exit(void)
 {
+	if (!system_supports_mte())
+		return;
+
 	/*
 	 * The asynchronous faults are sync'ed automatically with
 	 * TFSR_EL1 on kernel entry but for exit an explicit dsb()


@@ -11,6 +11,7 @@
 
 #include <linux/bits.h>
 #include <linux/stringify.h>
+#include <linux/kasan-tags.h>
 
 /*
  * ARMv8 ARM reserves the following encoding for system registers:
@@ -1044,6 +1045,21 @@
 #define SYS_GCR_EL1_RRND	(BIT(16))
 #define SYS_GCR_EL1_EXCL_MASK	0xffffUL
 
+#ifdef CONFIG_KASAN_HW_TAGS
+/*
+ * KASAN always uses a whole byte for its tags. With CONFIG_KASAN_HW_TAGS it
+ * only uses tags in the range 0xF0-0xFF, which we map to MTE tags 0x0-0xF.
+ */
+#define __MTE_TAG_MIN		(KASAN_TAG_MIN & 0xf)
+#define __MTE_TAG_MAX		(KASAN_TAG_MAX & 0xf)
+#define __MTE_TAG_INCL		GENMASK(__MTE_TAG_MAX, __MTE_TAG_MIN)
+#define KERNEL_GCR_EL1_EXCL	(SYS_GCR_EL1_EXCL_MASK & ~__MTE_TAG_INCL)
+#else
+#define KERNEL_GCR_EL1_EXCL	SYS_GCR_EL1_EXCL_MASK
+#endif
+
+#define KERNEL_GCR_EL1		(SYS_GCR_EL1_RRND | KERNEL_GCR_EL1_EXCL)
+
 /* RGSR_EL1 Definitions */
 #define SYS_RGSR_EL1_TAG_MASK	0xfUL
 #define SYS_RGSR_EL1_SEED_SHIFT	8


@@ -193,21 +193,20 @@ alternative_else_nop_endif
 	.macro mte_set_kernel_gcr, tmp, tmp2
 #ifdef CONFIG_KASAN_HW_TAGS
-alternative_if_not ARM64_MTE
+alternative_cb	kasan_hw_tags_enable
 	b	1f
-alternative_else_nop_endif
-	ldr_l	\tmp, gcr_kernel_excl
-
-	mte_set_gcr \tmp, \tmp2
+alternative_cb_end
+	mov	\tmp, KERNEL_GCR_EL1
+	msr_s	SYS_GCR_EL1, \tmp
 1:
 #endif
 	.endm
 
 	.macro mte_set_user_gcr, tsk, tmp, tmp2
-#ifdef CONFIG_ARM64_MTE
-alternative_if_not ARM64_MTE
+#ifdef CONFIG_KASAN_HW_TAGS
+alternative_cb	kasan_hw_tags_enable
 	b	1f
-alternative_else_nop_endif
+alternative_cb_end
 	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]
 
 	mte_set_gcr \tmp, \tmp2


@@ -128,15 +128,17 @@ u64 __init kaslr_early_init(void)
 	/* use the top 16 bits to randomize the linear region */
 	memstart_offset_seed = seed >> 48;
 
-	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
-	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) &&
+	    (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+	     IS_ENABLED(CONFIG_KASAN_SW_TAGS)))
 		/*
-		 * KASAN does not expect the module region to intersect the
-		 * vmalloc region, since shadow memory is allocated for each
-		 * module at load time, whereas the vmalloc region is shadowed
-		 * by KASAN zero pages. So keep modules out of the vmalloc
-		 * region if KASAN is enabled, and put the kernel well within
-		 * 4 GB of the module region.
+		 * KASAN without KASAN_VMALLOC does not expect the module region
+		 * to intersect the vmalloc region, since shadow memory is
+		 * allocated for each module at load time, whereas the vmalloc
+		 * region is shadowed by KASAN zero pages. So keep modules
+		 * out of the vmalloc region if KASAN is enabled without
+		 * KASAN_VMALLOC, and put the kernel well within 4 GB of the
+		 * module region.
 		 */
 		return offset % SZ_2G;


@@ -40,14 +40,16 @@ void *module_alloc(unsigned long size)
 				NUMA_NO_NODE, __builtin_return_address(0));
 
 	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
-	    !IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-	    !IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+	    (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
+	     (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+	      !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
 		/*
-		 * KASAN can only deal with module allocations being served
-		 * from the reserved module region, since the remainder of
-		 * the vmalloc region is already backed by zero shadow pages,
-		 * and punching holes into it is non-trivial. Since the module
-		 * region is not randomized when KASAN is enabled, it is even
+		 * KASAN without KASAN_VMALLOC can only deal with module
+		 * allocations being served from the reserved module region,
+		 * since the remainder of the vmalloc region is already
+		 * backed by zero shadow pages, and punching holes into it
+		 * is non-trivial. Since the module region is not randomized
+		 * when KASAN is enabled without KASAN_VMALLOC, it is even
 		 * less likely that the module region gets exhausted, so we
 		 * can simply omit this fallback in that case.
 		 */


@@ -23,8 +23,6 @@
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 
-u64 gcr_kernel_excl __ro_after_init;
-
 static bool report_fault_once = true;
 
 static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);
@@ -96,26 +94,6 @@ int memcmp_pages(struct page *page1, struct page *page2)
 	return ret;
 }
 
-void mte_init_tags(u64 max_tag)
-{
-	static bool gcr_kernel_excl_initialized;
-
-	if (!gcr_kernel_excl_initialized) {
-		/*
-		 * The format of the tags in KASAN is 0xFF and in MTE is 0xF.
-		 * This conversion extracts an MTE tag from a KASAN tag.
-		 */
-		u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
-					     max_tag), 0);
-
-		gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
-		gcr_kernel_excl_initialized = true;
-	}
-
-	/* Enable the kernel exclude mask for random tags generation. */
-	write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
-}
-
 static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
 {
 	/* Enable MTE Sync Mode for EL1. */
@@ -168,12 +146,7 @@ bool mte_report_once(void)
 #ifdef CONFIG_KASAN_HW_TAGS
 void mte_check_tfsr_el1(void)
 {
-	u64 tfsr_el1;
-
-	if (!system_supports_mte())
-		return;
-
-	tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+	u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
 
 	if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
 		/*
@@ -210,6 +183,30 @@ static void mte_update_sctlr_user(struct task_struct *task)
 	task->thread.sctlr_user = sctlr;
 }
 
+static void mte_update_gcr_excl(struct task_struct *task)
+{
+	/*
+	 * SYS_GCR_EL1 will be set to current->thread.mte_ctrl value by
+	 * mte_set_user_gcr() in kernel_exit, but only if KASAN is enabled.
+	 */
+	if (kasan_hw_tags_enabled())
+		return;
+
+	write_sysreg_s(
+		((task->thread.mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
+		 SYS_GCR_EL1_EXCL_MASK) | SYS_GCR_EL1_RRND,
+		SYS_GCR_EL1);
+}
+
+void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
+				 __le32 *updptr, int nr_inst)
+{
+	BUG_ON(nr_inst != 1); /* Branch -> NOP */
+
+	if (kasan_hw_tags_enabled())
+		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
 void mte_thread_init_user(void)
 {
 	if (!system_supports_mte())
@@ -225,7 +222,11 @@ void mte_thread_init_user(void)
 
 void mte_thread_switch(struct task_struct *next)
 {
+	if (!system_supports_mte())
+		return;
+
 	mte_update_sctlr_user(next);
+	mte_update_gcr_excl(next);
 
 	/*
 	 * Check if an async tag exception occurred at EL1.
@@ -254,15 +255,6 @@ void mte_suspend_enter(void)
 	mte_check_tfsr_el1();
 }
 
-void mte_suspend_exit(void)
-{
-	if (!system_supports_mte())
-		return;
-
-	sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, gcr_kernel_excl);
-	isb();
-}
-
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
 	u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
@@ -280,6 +272,7 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 	if (task == current) {
 		preempt_disable();
 		mte_update_sctlr_user(task);
+		mte_update_gcr_excl(task);
 		update_sctlr_el1(task->thread.sctlr_user);
 		preempt_enable();
 	}


@@ -76,7 +76,6 @@ void notrace __cpu_suspend_exit(void)
 	spectre_v4_enable_mitigation(NULL);
 
 	/* Restore additional feature-specific configuration */
-	mte_suspend_exit();
 	ptrauth_suspend_exit();
 }


@@ -214,15 +214,18 @@ static void __init kasan_init_shadow(void)
 {
 	u64 kimg_shadow_start, kimg_shadow_end;
 	u64 mod_shadow_start, mod_shadow_end;
+	u64 vmalloc_shadow_end;
 	phys_addr_t pa_start, pa_end;
 	u64 i;
 
-	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
-	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));
+	kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
+	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));
 
 	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
 	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
 
+	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
+
 	/*
 	 * We are going to perform proper setup of shadow memory.
 	 * At first we should unmap early shadow (clear_pgds() call below).
@@ -237,16 +240,22 @@ static void __init kasan_init_shadow(void)
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
 	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
-			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
+			   early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));
 
 	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
 				    (void *)mod_shadow_start);
-	kasan_populate_early_shadow((void *)kimg_shadow_end,
-				    (void *)KASAN_SHADOW_END);
 
-	if (kimg_shadow_start > mod_shadow_end)
-		kasan_populate_early_shadow((void *)mod_shadow_end,
-					    (void *)kimg_shadow_start);
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+		BUILD_BUG_ON(VMALLOC_START != MODULES_END);
+		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
+					    (void *)KASAN_SHADOW_END);
+	} else {
+		kasan_populate_early_shadow((void *)kimg_shadow_end,
+					    (void *)KASAN_SHADOW_END);
+		if (kimg_shadow_start > mod_shadow_end)
+			kasan_populate_early_shadow((void *)mod_shadow_end,
+						    (void *)kimg_shadow_start);
+	}
 
 	for_each_mem_range(i, &pa_start, &pa_end) {
 		void *start = (void *)__phys_to_virt(pa_start);
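
The branch added above changes which part of the shadow is pre-populated with zero pages; a sketch of the resulting early-shadow layout (simplified; with KASAN_VMALLOC the vmalloc shadow itself is installed on demand at allocation time, not here):

/*
 * With CONFIG_KASAN_VMALLOC:
 *   - modules and vmalloc share one region (the BUILD_BUG_ON checks
 *     VMALLOC_START == MODULES_END) whose shadow is installed lazily,
 *   - so zero shadow is only pre-populated past vmalloc_shadow_end.
 *
 * Without CONFIG_KASAN_VMALLOC:
 *   - everything past kimg_shadow_end keeps zero shadow, and
 *   - the gap between the module region and the kernel image is
 *     patched separately, as before.
 */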


@@ -1501,6 +1501,11 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	if (ret)
 		__remove_pgd_mapping(swapper_pg_dir,
 				     __phys_to_virt(start), size);
+	else {
+		max_pfn = PFN_UP(start + size);
+		max_low_pfn = max_pfn;
+	}
+
 	return ret;
 }


@@ -445,8 +445,7 @@ SYM_FUNC_START(__cpu_setup)
 	mov	x10, #MAIR_ATTR_NORMAL_TAGGED
 	bfi	x5, x10, #(8 * MT_NORMAL_TAGGED), #8
 
-	/* initialize GCR_EL1: all non-zero tags excluded by default */
-	mov	x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
+	mov	x10, #KERNEL_GCR_EL1
 	msr_s	SYS_GCR_EL1, x10
 
 	/*


@@ -3196,9 +3196,8 @@ static void binder_transaction(struct binder_proc *proc,
 	if (reply) {
 		binder_enqueue_thread_work(thread, tcomplete);
 		binder_inner_proc_lock(target_proc);
-		if (target_thread->is_dead || target_proc->is_frozen) {
-			return_error = target_thread->is_dead ?
-				BR_DEAD_REPLY : BR_FROZEN_REPLY;
+		if (target_thread->is_dead) {
+			return_error = BR_DEAD_REPLY;
 			binder_inner_proc_unlock(target_proc);
 			goto err_dead_proc_or_thread;
 		}
@@ -4806,6 +4805,22 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
 	return 0;
 }
 
+static bool binder_txns_pending_ilocked(struct binder_proc *proc)
+{
+	struct rb_node *n;
+	struct binder_thread *thread;
+
+	if (proc->outstanding_txns > 0)
+		return true;
+
+	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
+		thread = rb_entry(n, struct binder_thread, rb_node);
+		if (thread->transaction_stack)
+			return true;
+	}
+	return false;
+}
+
 static int binder_ioctl_freeze(struct binder_freeze_info *info,
 			       struct binder_proc *target_proc)
 {
@@ -4837,8 +4852,13 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
 			(!target_proc->outstanding_txns),
 			msecs_to_jiffies(info->timeout_ms));
 
-	if (!ret && target_proc->outstanding_txns)
-		ret = -EAGAIN;
+	/* Check pending transactions that wait for reply */
+	if (ret >= 0) {
+		binder_inner_proc_lock(target_proc);
+		if (binder_txns_pending_ilocked(target_proc))
+			ret = -EAGAIN;
+		binder_inner_proc_unlock(target_proc);
+	}
 
 	if (ret < 0) {
 		binder_inner_proc_lock(target_proc);
@@ -4854,6 +4874,7 @@ static int binder_ioctl_get_freezer_info(
 {
 	struct binder_proc *target_proc;
 	bool found = false;
+	__u32 txns_pending;
 
 	info->sync_recv = 0;
 	info->async_recv = 0;
@@ -4863,7 +4884,9 @@ static int binder_ioctl_get_freezer_info(
 		if (target_proc->pid == info->pid) {
 			found = true;
 			binder_inner_proc_lock(target_proc);
-			info->sync_recv |= target_proc->sync_recv;
+			txns_pending = binder_txns_pending_ilocked(target_proc);
+			info->sync_recv |= target_proc->sync_recv |
+					(txns_pending << 1);
 			info->async_recv |= target_proc->async_recv;
 			binder_inner_proc_unlock(target_proc);
 		}


@@ -399,6 +399,8 @@ struct binder_priority {
  *                        binder transactions
  *                        (protected by @inner_lock)
  * @sync_recv:            process received sync transactions since last frozen
+ *                        bit 0: received sync transaction after being frozen
+ *                        bit 1: new pending sync transaction during freezing
  *                        (protected by @inner_lock)
  * @async_recv:           process received async transactions since last frozen
  *                        (protected by @inner_lock)
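
Read back through BINDER_GET_FROZEN_INFO, both bits land in the sync_recv field of the UAPI struct; a minimal userspace sketch (the helper below is hypothetical, while the struct and ioctl are the existing binder UAPI):

#include <stdbool.h>
#include <linux/android/binder.h>	/* struct binder_frozen_status_info */

static bool txn_pending_during_freeze(const struct binder_frozen_status_info *info)
{
	/* bit 0: a sync transaction was received after the process froze;
	 * bit 1: a sync transaction was still pending when BINDER_FREEZE
	 *        completed (set via binder_txns_pending_ilocked() above). */
	return info->sync_recv & (1u << 1);
}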


@@ -214,6 +214,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_filemap_fault_cache_page);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_enable_thermal_genl_check);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_thermal_pm_notify_suspend);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_fill_prdt);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ufs_complete_init);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ufs_reprogram_all_keys);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_prepare_command);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_update_sysfs);


@@ -348,6 +348,25 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	return events;
 }
 
+static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
+{
+	long ret = 0;
+
+	dma_resv_lock(dmabuf->resv, NULL);
+	if (!list_empty(&dmabuf->attachments)) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+	spin_lock(&dmabuf->name_lock);
+	kfree(dmabuf->name);
+	dmabuf->name = name;
+	spin_unlock(&dmabuf->name_lock);
+
+out_unlock:
+	dma_resv_unlock(dmabuf->resv);
+	return ret;
+}
+
 /**
  * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
  * The name of the dma-buf buffer can only be set when the dma-buf is not
@@ -363,7 +382,23 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
  *   devices, return -EBUSY.
  *
  */
-static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
+long dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
+{
+	long ret = 0;
+	char *buf = kstrndup(name, DMA_BUF_NAME_LEN, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	ret = _dma_buf_set_name(dmabuf, buf);
+	if (ret)
+		kfree(buf);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_set_name);
+
+static long dma_buf_set_name_user(struct dma_buf *dmabuf, const char __user *buf)
 {
 	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
 	long ret = 0;
@@ -371,19 +406,10 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
 	if (IS_ERR(name))
 		return PTR_ERR(name);
 
-	dma_resv_lock(dmabuf->resv, NULL);
-	if (!list_empty(&dmabuf->attachments)) {
-		ret = -EBUSY;
-		kfree(name);
-		goto out_unlock;
-	}
-	spin_lock(&dmabuf->name_lock);
-	kfree(dmabuf->name);
-	dmabuf->name = name;
-	spin_unlock(&dmabuf->name_lock);
-out_unlock:
-	dma_resv_unlock(dmabuf->resv);
+	ret = _dma_buf_set_name(dmabuf, name);
+	if (ret)
+		kfree(name);
 
 	return ret;
 }
@@ -428,7 +454,7 @@ static long dma_buf_ioctl(struct file *file,
 
 	case DMA_BUF_SET_NAME_A:
 	case DMA_BUF_SET_NAME_B:
-		return dma_buf_set_name(dmabuf, (const char __user *)arg);
+		return dma_buf_set_name_user(dmabuf, (const char __user *)arg);
 
 	default:
 		return -ENOTTY;
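
The refactor above splits user copy-in (dma_buf_set_name_user()) from the actual rename (_dma_buf_set_name()) so the newly exported dma_buf_set_name() can be called from kernel code; the ioctl behaviour is unchanged. A minimal userspace sketch (DMA_BUF_SET_NAME is the existing UAPI ioctl; the helper is hypothetical):

#include <linux/dma-buf.h>	/* DMA_BUF_SET_NAME */
#include <sys/ioctl.h>

static int name_dmabuf(int dmabuf_fd, const char *name)
{
	/* Still fails with -EBUSY once the buffer has attachments,
	 * as enforced under dma_resv_lock() in _dma_buf_set_name(). */
	return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
}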


@@ -475,6 +475,7 @@ static int verity_verify_io(struct dm_verity_io *io)
 	struct bvec_iter start;
 	unsigned b;
 	struct crypto_wait wait;
+	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
 	for (b = 0; b < io->n_blocks; b++) {
 		int r;
@@ -529,9 +530,17 @@ static int verity_verify_io(struct dm_verity_io *io)
 		else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
 					   cur_block, NULL, &start) == 0)
 			continue;
-		else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
-					   cur_block))
-			return -EIO;
+		else {
+			if (bio->bi_status) {
+				/*
+				 * Error correction failed; Just return error
+				 */
+				return -EIO;
+			}
+			if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+					      cur_block))
+				return -EIO;
+		}
 	}
 
 	return 0;
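
The bi_status check above distinguishes a transient read error from real on-disk corruption; a flow sketch of the rewritten else branch (assumed, simplified):

/*
 * read failed  -> bio->bi_status != BLK_STS_OK
 *              -> the hash mismatch is expected and FEC cannot help,
 *                 so return -EIO without verity_handle_err(); an I/O
 *                 error is not recorded as corruption.
 *
 * read OK, hash mismatch, FEC failed
 *              -> verity_handle_err() decides what happens next
 *                 (log / restart / panic, per the configured mode).
 */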


@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _UFSHCD_ADD_INFO_H_
+#define _UFSHCD_ADD_INFO_H_
+
+/*
+ * Compared to the upstream equivalent, @hpb_dev has been moved from struct
+ * ufs_hba into struct ufs_hba_add_info to satisfy the Android ABI checks.
+ * Do NOT use this data structure in any out-of-tree driver since it is not
+ * covered by the GKI.
+ */
+struct ufs_hba_add_info {
+	struct ufs_hba hba;
+	struct request **tmf_rqs;
+#ifdef CONFIG_SCSI_UFS_HPB
+	struct ufshpb_dev_info hpb_dev;
+#endif
+};
+
+static inline struct ufs_hba_add_info *ufs_hba_add_info(struct ufs_hba *hba)
+{
+	return container_of(hba, struct ufs_hba_add_info, hba);
+}
+
+#endif /* _UFSHCD_ADD_INFO_H_ */
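
The wrapper preserves the GKI ABI because the embedded struct ufs_hba is the first member, so every existing ufs_hba pointer is also a pointer to the allocation; a sketch of the access pattern (assuming the core allocates sizeof(struct ufs_hba_add_info) for the host, which this patch must arrange):

static void ufs_hba_add_info_example(struct ufs_hba *hba)
{
	struct ufs_hba_add_info *info = ufs_hba_add_info(hba);

	/* Offset 0 keeps old struct ufs_hba users working unchanged. */
	BUILD_BUG_ON(offsetof(struct ufs_hba_add_info, hba) != 0);

	info->tmf_rqs = NULL;	/* private field, invisible to GKI users */
}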


@@ -17,6 +17,7 @@
 #include <linux/blk-pm.h>
 #include <linux/blkdev.h>
 #include "ufshcd.h"
+#include "ufshcd-add-info.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
 #include "ufs-sysfs.h"
@@ -6363,27 +6364,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
 	return retval;
 }
 
-struct ctm_info {
-	struct ufs_hba	*hba;
-	unsigned long	pending;
-	unsigned int	ncpl;
-};
-
-static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
-{
-	struct ctm_info *const ci = priv;
-	struct completion *c;
-
-	WARN_ON_ONCE(reserved);
-	if (test_bit(req->tag, &ci->pending))
-		return true;
-	ci->ncpl++;
-	c = req->end_io_data;
-	if (c)
-		complete(c);
-	return true;
-}
-
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
@@ -6394,18 +6374,25 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
  */
 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
-	unsigned long flags;
-	struct request_queue *q = hba->tmf_queue;
-	struct ctm_info ci = {
-		.hba	 = hba,
-	};
+	struct request **tmf_rqs = ufs_hba_add_info(hba)->tmf_rqs;
+	unsigned long flags, pending, issued;
+	irqreturn_t ret = IRQ_NONE;
+	int tag;
 
+	pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+	issued = hba->outstanding_tasks & ~pending;
+	for_each_set_bit(tag, &issued, hba->nutmrs) {
+		struct request *req = tmf_rqs[tag];
+		struct completion *c = req->end_io_data;
+
+		complete(c);
+		ret = IRQ_HANDLED;
+	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
+	return ret;
 }
 
 /**
@@ -6510,6 +6497,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 		struct utp_task_req_desc *treq, u8 tm_function)
 {
+	struct request **tmf_rqs = ufs_hba_add_info(hba)->tmf_rqs;
 	struct request_queue *q = hba->tmf_queue;
 	struct Scsi_Host *host = hba->host;
 	DECLARE_COMPLETION_ONSTACK(wait);
@@ -6528,9 +6516,9 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(host->host_lock, flags);
-	blk_mq_start_request(req);
 
 	task_tag = req->tag;
+	tmf_rqs[req->tag] = req;
 	treq->req_header.dword_0 |= cpu_to_be32(task_tag);
 
 	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
@@ -6574,6 +6562,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	}
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	tmf_rqs[req->tag] = NULL;
 	__clear_bit(task_tag, &hba->outstanding_tasks);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
@@ -8042,6 +8031,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
 	ufshcd_auto_hibern8_enable(hba);
 
 	ufshpb_reset(hba);
+
+	trace_android_rvh_ufs_complete_init(hba);
 out:
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (ret)
@@ -9323,7 +9314,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
 	}
 
 	host = scsi_host_alloc(&ufshcd_driver_template,
-				sizeof(struct ufs_hba_with_hpb));
+				sizeof(struct ufs_hba_add_info));
 	if (!host) {
 		dev_err(dev, "scsi_host_alloc failed\n");
 		err = -ENOMEM;
@@ -9364,6 +9355,7 @@ static const struct blk_mq_ops ufshcd_tmf_ops = {
  */
 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 {
+	struct request ***tmf_rqs = &ufs_hba_add_info(hba)->tmf_rqs;
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;
@@ -9501,6 +9493,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 		err = PTR_ERR(hba->tmf_queue);
 		goto free_tmf_tag_set;
 	}
+	*tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, sizeof(**tmf_rqs),
+				GFP_KERNEL);
+	if (!*tmf_rqs) {
+		err = -ENOMEM;
+		goto free_tmf_queue;
+	}
 
 	/* Reset the attached device */
 	ufshcd_vops_device_reset(hba);
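The reworked ufshcd_tmc_handler() above is plain bitmask arithmetic: a task management request has completed exactly when its doorbell bit has cleared while the task is still marked outstanding. A worked example with made-up register values:

	/* illustrative values, not from the patch:
	 * outstanding_tasks = 0x6 -> tags 1 and 2 were issued
	 * doorbell (pending) = 0x4 -> tag 2 is still in flight
	 */
	unsigned long outstanding = 0x6, pending = 0x4;
	unsigned long issued = outstanding & ~pending;	/* 0x2: complete tag 1 only */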

@@ -843,6 +843,12 @@ struct ufs_hba {
 	struct blk_mq_tag_set	tmf_tag_set;
 	struct request_queue	*tmf_queue;
+#if 0
+	/*
+	 * This has been moved into struct ufs_hba_add_info because of the GKI.
+	 */
+	struct request		**tmf_rqs;
+#endif
 
 	struct uic_command *active_uic_cmd;
 	struct mutex uic_cmd_mutex;
@@ -913,7 +919,7 @@ struct ufs_hba {
 	struct delayed_work rpm_dev_flush_recheck_work;
 
 #if 0
-	/* This has been moved into struct ufs_hba_with_hpb. */
+	/* This has been moved into struct ufs_hba_add_info. */
 	struct ufshpb_dev_info ufshpb_dev;
 #endif
 
@@ -935,17 +941,6 @@ struct ufs_hba {
 	ANDROID_KABI_RESERVE(4);
 };
 
-/*
- * Compared to the upstream equivalent, @hpb_dev has been moved from struct
- * ufs_hba into struct ufs_hba_with_hpb to satisfy the Android ABI checks.
- */
-struct ufs_hba_with_hpb {
-	struct ufs_hba hba;
-#ifdef CONFIG_SCSI_UFS_HPB
-	struct ufshpb_dev_info hpb_dev;
-#endif
-};
-
 /* Returns true if clocks can be gated. Otherwise false */
 static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
 {

@@ -13,6 +13,7 @@
 #include <linux/async.h>
 
 #include "ufshcd.h"
+#include "ufshcd-add-info.h"
 #include "ufshpb.h"
 #include "../sd.h"
 
@@ -37,7 +38,7 @@ static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
 
 static inline struct ufshpb_dev_info *ufs_hba_to_hpb(struct ufs_hba *hba)
 {
-	return &container_of(hba, struct ufs_hba_with_hpb, hba)->hpb_dev;
+	return &ufs_hba_add_info(hba)->hpb_dev;
 }
 
 bool ufshpb_is_allowed(struct ufs_hba *hba)

@@ -1526,15 +1526,17 @@ static int dwc3_probe(struct platform_device *pdev)
 {
 	struct device		*dev = &pdev->dev;
 	struct resource		*res, dwc_res;
+	struct dwc3_vendor	*vdwc;
 	struct dwc3		*dwc;
 
 	int			ret;
 
 	void __iomem		*regs;
 
-	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
-	if (!dwc)
+	vdwc = devm_kzalloc(dev, sizeof(*vdwc), GFP_KERNEL);
+	if (!vdwc)
 		return -ENOMEM;
+	dwc = &vdwc->dwc;
 
 	dwc->dev = dev;

@@ -1319,6 +1319,16 @@ struct dwc3 {
 	ANDROID_KABI_RESERVE(4);
 };
 
+/**
+ * struct dwc3_vendor - contains parameters without modifying the format of DWC3 core
+ * @dwc: contains dwc3 core reference
+ * @softconnect: true when gadget connect is called, false when disconnect runs
+ */
+struct dwc3_vendor {
+	struct dwc3	dwc;
+	unsigned	softconnect:1;
+};
+
 #define INCRX_BURST_MODE 0
 #define INCRX_UNDEF_LENGTH_BURST_MODE 1
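struct dwc3_vendor follows the same wrapper pattern as ufs_hba_add_info above: the ABI-frozen core struct is embedded first, so vendor state can be recovered from any struct dwc3 pointer that dwc3_probe() handed out. A hedged sketch (the helper name is invented for illustration):

	static bool dwc3_is_softconnected(struct dwc3 *dwc)
	{
		struct dwc3_vendor *vdwc =
			container_of(dwc, struct dwc3_vendor, dwc);

		return vdwc->softconnect;
	}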

@@ -2411,10 +2411,12 @@ static int __dwc3_gadget_start(struct dwc3 *dwc);
 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 {
 	struct dwc3		*dwc = gadget_to_dwc(g);
+	struct dwc3_vendor	*vdwc = container_of(dwc, struct dwc3_vendor, dwc);
 	unsigned long		flags;
 	int			ret;
 
 	is_on = !!is_on;
+	vdwc->softconnect = is_on;
 
 	/*
 	 * Per databook, when we want to stop the gadget, if a control transfer
@@ -4366,9 +4368,10 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
 
 int dwc3_gadget_resume(struct dwc3 *dwc)
 {
+	struct dwc3_vendor *vdwc = container_of(dwc, struct dwc3_vendor, dwc);
 	int			ret;
 
-	if (!dwc->gadget_driver)
+	if (!dwc->gadget_driver || !vdwc->softconnect)
 		return 0;
 
 	ret = __dwc3_gadget_start(dwc);

@@ -3226,10 +3226,6 @@ static int _ffs_func_bind(struct usb_configuration *c,
 	func->function.os_desc_n =
 		c->cdev->use_os_string ? ffs->interfaces_count : 0;
 
-	if (likely(super)) {
-		func->function.ssp_descriptors =
-			usb_copy_descriptors(func->function.ss_descriptors);
-	}
 	/* And we're done */
 	ffs_event_add(ffs, FUNCTIONFS_BIND);
 	return 0;

@@ -348,6 +348,14 @@ static struct usb_endpoint_descriptor ss_epin_fback_desc = {
 	.bInterval = 4,
 };
 
+static struct usb_ss_ep_comp_descriptor ss_epin_fback_desc_comp = {
+	.bLength		= sizeof(ss_epin_fback_desc_comp),
+	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst		= 0,
+	.bmAttributes		= 0,
+	.wBytesPerInterval	= cpu_to_le16(4),
+};
+
 /* Audio Streaming IN Interface - Alt0 */
 static struct usb_interface_descriptor std_as_in_if0_desc = {
@@ -527,7 +535,7 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
 	(struct usb_descriptor_header *)&ss_epout_desc_comp,
 	(struct usb_descriptor_header *)&as_iso_out_desc,
 	(struct usb_descriptor_header *)&ss_epin_fback_desc,
-	(struct usb_descriptor_header *)&ss_epin_desc_comp,
+	(struct usb_descriptor_header *)&ss_epin_fback_desc_comp,
 
 	(struct usb_descriptor_header *)&std_as_in_if0_desc,
 	(struct usb_descriptor_header *)&std_as_in_if1_desc,
@@ -611,6 +619,7 @@ static void setup_headers(struct f_uac2_opts *opts,
 {
 	struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
 	struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
+	struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
 	struct usb_endpoint_descriptor *epout_desc;
 	struct usb_endpoint_descriptor *epin_desc;
 	struct usb_endpoint_descriptor *epin_fback_desc;
@@ -633,6 +642,7 @@ static void setup_headers(struct f_uac2_opts *opts,
 		epout_desc_comp = &ss_epout_desc_comp;
 		epin_desc_comp = &ss_epin_desc_comp;
 		epin_fback_desc = &ss_epin_fback_desc;
+		epin_fback_desc_comp = &ss_epin_fback_desc_comp;
 	}
 
 	i = 0;
@@ -663,8 +673,8 @@ static void setup_headers(struct f_uac2_opts *opts,
 
 		if (EPOUT_FBACK_IN_EN(opts)) {
 			headers[i++] = USBDHDR(epin_fback_desc);
-			if (epin_desc_comp)
-				headers[i++] = USBDHDR(epin_desc_comp);
+			if (epin_fback_desc_comp)
+				headers[i++] = USBDHDR(epin_fback_desc_comp);
 		}
 	}
 	if (EPIN_EN(opts)) {
@@ -947,6 +957,9 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 	agdev->out_ep_maxpsize = max_t(u16, agdev->out_ep_maxpsize,
 					le16_to_cpu(ss_epout_desc.wMaxPacketSize));
 
+	ss_epin_desc_comp.wBytesPerInterval = ss_epin_desc.wMaxPacketSize;
+	ss_epout_desc_comp.wBytesPerInterval = ss_epout_desc.wMaxPacketSize;
+
 	hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
 	hs_epin_fback_desc.bEndpointAddress = fs_epin_fback_desc.bEndpointAddress;
 	hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;

@@ -1454,12 +1454,16 @@ void invalidate_bh_lrus(void)
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
-void invalidate_bh_lrus_cpu(int cpu)
+/*
+ * It's called from workqueue context so we need a bh_lru_lock to close
+ * the race with preemption/irq.
+ */
+void invalidate_bh_lrus_cpu(void)
 {
 	struct bh_lru *b;
 
 	bh_lru_lock();
-	b = per_cpu_ptr(&bh_lrus, cpu);
+	b = this_cpu_ptr(&bh_lrus);
 	__invalidate_bh_lrus(b);
 	bh_lru_unlock();
 }
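Dropping the cpu argument means a caller that wants every CPU drained must run the function on each CPU instead of poking remote per_cpu_ptr() state. A hedged sketch of that calling pattern (the wrapper names are invented; schedule_on_each_cpu() is the stock kernel helper):

	static void drain_bh_lru_fn(struct work_struct *unused)
	{
		invalidate_bh_lrus_cpu();	/* per-CPU by construction now */
	}

	static int drain_all_bh_lrus(void)
	{
		return schedule_on_each_cpu(drain_bh_lru_fn);
	}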

@@ -76,17 +76,3 @@ config EROFS_FS_ZIP
 
 	  If you don't want to enable compression feature, say N.
 
-config EROFS_FS_CLUSTER_PAGE_LIMIT
-	int "EROFS Cluster Pages Hard Limit"
-	depends on EROFS_FS_ZIP
-	range 1 256
-	default "1"
-	help
-	  Indicates maximum # of pages of a compressed
-	  physical cluster.
-
-	  For example, if files in a image were compressed
-	  into 8k-unit, hard limit should not be configured
-	  less than 2. Otherwise, the image will be refused
-	  to mount on this kernel.

@@ -1,11 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-EROFS_VERSION = "1.0"
-
-ccflags-y += -DEROFS_VERSION=\"$(EROFS_VERSION)\"
-
 obj-$(CONFIG_EROFS_FS) += erofs.o
-erofs-objs := super.o inode.o data.o namei.o dir.o utils.o
+erofs-objs := super.o inode.o data.o namei.o dir.o utils.o pcpubuf.o
 erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
 erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o

@@ -109,21 +109,6 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
 	return err;
 }
 
-int erofs_map_blocks(struct inode *inode,
-		     struct erofs_map_blocks *map, int flags)
-{
-	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
-		int err = z_erofs_map_blocks_iter(inode, map, flags);
-
-		if (map->mpage) {
-			put_page(map->mpage);
-			map->mpage = NULL;
-		}
-		return err;
-	}
-	return erofs_map_blocks_flatmode(inode, map, flags);
-}
-
 static inline struct bio *erofs_read_raw_page(struct bio *bio,
 					      struct address_space *mapping,
 					      struct page *page,
@@ -159,7 +144,7 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
 		erofs_blk_t blknr;
 		unsigned int blkoff;
 
-		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+		err = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW);
 		if (err)
 			goto err_out;
 
@@ -326,7 +311,7 @@ static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
 			return 0;
 	}
 
-	if (!erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW))
+	if (!erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW))
 		return erofs_blknr(map.m_pa);
 
 	return 0;

@@ -29,14 +29,39 @@ struct z_erofs_decompressor {
 };
 
 int z_erofs_load_lz4_config(struct super_block *sb,
-			    struct erofs_super_block *dsb)
+			    struct erofs_super_block *dsb,
+			    struct z_erofs_lz4_cfgs *lz4, int size)
 {
-	u16 distance = le16_to_cpu(dsb->lz4_max_distance);
+	struct erofs_sb_info *sbi = EROFS_SB(sb);
+	u16 distance;
 
-	EROFS_SB(sb)->lz4.max_distance_pages = distance ?
+	if (lz4) {
+		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
+			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
+			return -EINVAL;
+		}
+		distance = le16_to_cpu(lz4->max_distance);
+
+		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
+		if (!sbi->lz4.max_pclusterblks) {
+			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
+		} else if (sbi->lz4.max_pclusterblks >
+			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
+			erofs_err(sb, "too large lz4 pclusterblks %u",
+				  sbi->lz4.max_pclusterblks);
+			return -EINVAL;
+		} else if (sbi->lz4.max_pclusterblks >= 2) {
+			erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
+		}
+	} else {
+		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
+		sbi->lz4.max_pclusterblks = 1;
+	}
+
+	sbi->lz4.max_distance_pages = distance ?
 					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
 					LZ4_MAX_DISTANCE_PAGES;
-	return 0;
+	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
 }
 
 static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
@@ -95,96 +120,123 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
 	return kaddr ? 1 : 0;
 }
 
-static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
-				       u8 *src, unsigned int pageofs_in)
+static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq,
+			void *inpage, unsigned int *inputmargin, int *maptype,
+			bool support_0padding)
 {
-	/*
-	 * if in-place decompression is ongoing, those decompressed
-	 * pages should be copied in order to avoid being overlapped.
-	 */
-	struct page **in = rq->in;
-	u8 *const tmp = erofs_get_pcpubuf(0);
-	u8 *tmpp = tmp;
-	unsigned int inlen = rq->inputsize - pageofs_in;
-	unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);
+	unsigned int nrpages_in, nrpages_out;
+	unsigned int ofull, oend, inputsize, total, i, j;
+	struct page **in;
+	void *src, *tmp;
 
-	while (tmpp < tmp + inlen) {
-		if (!src)
-			src = kmap_atomic(*in);
-		memcpy(tmpp, src + pageofs_in, count);
-		kunmap_atomic(src);
-		src = NULL;
-		tmpp += count;
-		pageofs_in = 0;
-		count = PAGE_SIZE;
-		++in;
+	inputsize = rq->inputsize;
+	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
+	oend = rq->pageofs_out + rq->outputsize;
+	ofull = PAGE_ALIGN(oend);
+	nrpages_out = ofull >> PAGE_SHIFT;
+
+	if (rq->inplace_io) {
+		if (rq->partial_decoding || !support_0padding ||
+		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
+			goto docopy;
+
+		for (i = 0; i < nrpages_in; ++i) {
+			DBG_BUGON(rq->in[i] == NULL);
+			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
+				if (rq->out[j] == rq->in[i])
+					goto docopy;
+		}
 	}
-	return tmp;
+
+	if (nrpages_in <= 1) {
+		*maptype = 0;
+		return inpage;
+	}
+	kunmap_atomic(inpage);
+	might_sleep();
+	src = erofs_vm_map_ram(rq->in, nrpages_in);
+	if (!src)
+		return ERR_PTR(-ENOMEM);
+	*maptype = 1;
+	return src;
+
+docopy:
+	/* Or copy compressed data which can be overlapped to per-CPU buffer */
+	in = rq->in;
+	src = erofs_get_pcpubuf(nrpages_in);
+	if (!src) {
+		DBG_BUGON(1);
+		kunmap_atomic(inpage);
+		return ERR_PTR(-EFAULT);
+	}
+
+	tmp = src;
+	total = rq->inputsize;
+	while (total) {
+		unsigned int page_copycnt =
+			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
+
+		if (!inpage)
+			inpage = kmap_atomic(*in);
+		memcpy(tmp, inpage + *inputmargin, page_copycnt);
+		kunmap_atomic(inpage);
+		inpage = NULL;
+		tmp += page_copycnt;
+		total -= page_copycnt;
+		++in;
+		*inputmargin = 0;
+	}
+	*maptype = 2;
+	return src;
 }
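The maptype out-parameter records which of three mapping strategies was picked for the compressed input, and therefore which teardown z_erofs_lz4_decompress() must run afterwards. Summarized as a sketch (the enum names are illustrative; the code uses bare 0/1/2):

	enum {	/* illustrative names only */
		MAP_KMAP    = 0, /* one page: release via kunmap_atomic() */
		MAP_VMAP    = 1, /* many pages: release via vm_unmap_ram() */
		MAP_PCPUBUF = 2, /* copied out: release via erofs_put_pcpubuf() */
	};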
 static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
 {
-	unsigned int inputmargin, inlen;
-	u8 *src;
-	bool copied, support_0padding;
-	int ret;
+	unsigned int inputmargin;
+	u8 *headpage, *src;
+	bool support_0padding;
+	int ret, maptype;
 
-	if (rq->inputsize > PAGE_SIZE)
-		return -EOPNOTSUPP;
+	DBG_BUGON(*rq->in == NULL);
+	headpage = kmap_atomic(*rq->in);
 
-	src = kmap_atomic(*rq->in);
 	inputmargin = 0;
 	support_0padding = false;
 
 	/* decompression inplace is only safe when 0padding is enabled */
-	if (EROFS_SB(rq->sb)->feature_incompat &
-	    EROFS_FEATURE_INCOMPAT_LZ4_0PADDING) {
+	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
 		support_0padding = true;
 
-		while (!src[inputmargin & ~PAGE_MASK])
+		while (!headpage[inputmargin & ~PAGE_MASK])
 			if (!(++inputmargin & ~PAGE_MASK))
 				break;
 
 		if (inputmargin >= rq->inputsize) {
-			kunmap_atomic(src);
+			kunmap_atomic(headpage);
 			return -EIO;
 		}
 	}
 
-	copied = false;
-	inlen = rq->inputsize - inputmargin;
-	if (rq->inplace_io) {
-		const uint oend = (rq->pageofs_out +
-				   rq->outputsize) & ~PAGE_MASK;
-		const uint nr = PAGE_ALIGN(rq->pageofs_out +
-					   rq->outputsize) >> PAGE_SHIFT;
-
-		if (rq->partial_decoding || !support_0padding ||
-		    rq->out[nr - 1] != rq->in[0] ||
-		    rq->inputsize - oend <
-		    LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
-			src = generic_copy_inplace_data(rq, src, inputmargin);
-			inputmargin = 0;
-			copied = true;
-		}
-	}
+	rq->inputsize -= inputmargin;
+	src = z_erofs_handle_inplace_io(rq, headpage, &inputmargin, &maptype,
+					support_0padding);
+	if (IS_ERR(src))
+		return PTR_ERR(src);
 
 	/* legacy format could compress extra data in a pcluster. */
 	if (rq->partial_decoding || !support_0padding)
 		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
-						  inlen, rq->outputsize,
-						  rq->outputsize);
+				rq->inputsize, rq->outputsize, rq->outputsize);
 	else
 		ret = LZ4_decompress_safe(src + inputmargin, out,
-					  inlen, rq->outputsize);
+					  rq->inputsize, rq->outputsize);
 
 	if (ret != rq->outputsize) {
 		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
-			  ret, inlen, inputmargin, rq->outputsize);
+			  ret, rq->inputsize, inputmargin, rq->outputsize);
 
 		WARN_ON(1);
 		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
-			       16, 1, src + inputmargin, inlen, true);
+			       16, 1, src + inputmargin, rq->inputsize, true);
 		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);
@@ -193,10 +245,16 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
 		ret = -EIO;
 	}
 
-	if (copied)
-		erofs_put_pcpubuf(src);
-	else
+	if (maptype == 0) {
 		kunmap_atomic(src);
+	} else if (maptype == 1) {
+		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
+	} else if (maptype == 2) {
+		erofs_put_pcpubuf(src);
+	} else {
+		DBG_BUGON(1);
+		return -EFAULT;
+	}
 	return ret;
 }
 
@@ -246,57 +304,51 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
 	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
 	unsigned int dst_maptype;
 	void *dst;
-	int ret, i;
+	int ret;
 
-	if (nrpages_out == 1 && !rq->inplace_io) {
-		DBG_BUGON(!*rq->out);
-		dst = kmap_atomic(*rq->out);
-		dst_maptype = 0;
-		goto dstmap_out;
-	}
+	/* two optimized fast paths only for non bigpcluster cases yet */
+	if (rq->inputsize <= PAGE_SIZE) {
+		if (nrpages_out == 1 && !rq->inplace_io) {
+			DBG_BUGON(!*rq->out);
+			dst = kmap_atomic(*rq->out);
+			dst_maptype = 0;
+			goto dstmap_out;
+		}
 
-	/*
-	 * For the case of small output size (especially much less
-	 * than PAGE_SIZE), memcpy the decompressed data rather than
-	 * compressed data is preferred.
-	 */
-	if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
-		dst = erofs_get_pcpubuf(0);
-		if (IS_ERR(dst))
-			return PTR_ERR(dst);
-
-		rq->inplace_io = false;
-		ret = alg->decompress(rq, dst);
-		if (!ret)
-			copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
-					  rq->outputsize);
-
-		erofs_put_pcpubuf(dst);
-		return ret;
-	}
+		/*
+		 * For the case of small output size (especially much less
		 * than PAGE_SIZE), memcpy the decompressed data rather than
+		 * compressed data is preferred.
+		 */
+		if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
+			dst = erofs_get_pcpubuf(1);
+			if (IS_ERR(dst))
+				return PTR_ERR(dst);
+
+			rq->inplace_io = false;
+			ret = alg->decompress(rq, dst);
+			if (!ret)
+				copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
+						  rq->outputsize);
+
+			erofs_put_pcpubuf(dst);
+			return ret;
+		}
+	}
 
+	/* general decoding path which can be used for all cases */
 	ret = alg->prepare_destpages(rq, pagepool);
-	if (ret < 0) {
+	if (ret < 0)
 		return ret;
-	} else if (ret) {
+	if (ret) {
 		dst = page_address(*rq->out);
 		dst_maptype = 1;
 		goto dstmap_out;
 	}
 
-	i = 0;
-	while (1) {
-		dst = vm_map_ram(rq->out, nrpages_out, -1);
-
-		/* retry two more times (totally 3 times) */
-		if (dst || ++i >= 3)
-			break;
-		vm_unmap_aliases();
-	}
+	dst = erofs_vm_map_ram(rq->out, nrpages_out);
 	if (!dst)
 		return -ENOMEM;
 
 	dst_maptype = 2;
 
 dstmap_out:

@@ -18,15 +18,22 @@
  * be incompatible with this kernel version.
  */
 #define EROFS_FEATURE_INCOMPAT_LZ4_0PADDING	0x00000001
-#define EROFS_ALL_FEATURE_INCOMPAT	EROFS_FEATURE_INCOMPAT_LZ4_0PADDING
+#define EROFS_FEATURE_INCOMPAT_COMPR_CFGS	0x00000002
+#define EROFS_FEATURE_INCOMPAT_BIG_PCLUSTER	0x00000002
+#define EROFS_ALL_FEATURE_INCOMPAT		\
+	(EROFS_FEATURE_INCOMPAT_LZ4_0PADDING | \
+	 EROFS_FEATURE_INCOMPAT_COMPR_CFGS | \
+	 EROFS_FEATURE_INCOMPAT_BIG_PCLUSTER)
 
-/* 128-byte erofs on-disk super block */
+#define EROFS_SB_EXTSLOT_SIZE	16
+
+/* erofs on-disk super block (currently 128 bytes) */
 struct erofs_super_block {
 	__le32 magic;           /* file system magic number */
 	__le32 checksum;        /* crc32c(super_block) */
 	__le32 feature_compat;
 	__u8 blkszbits;         /* support block_size == PAGE_SIZE only */
-	__u8 reserved;
+	__u8 sb_extslots;	/* superblock size = 128 + sb_extslots * 16 */
 
 	__le16 root_nid;	/* nid of root directory */
 	__le64 inos;            /* total valid ino # (== f_files - f_favail) */
@@ -39,8 +46,12 @@ struct erofs_super_block {
 	__u8 uuid[16];          /* 128-bit uuid for volume */
 	__u8 volume_name[16];   /* volume name */
 	__le32 feature_incompat;
-	/* customized lz4 sliding window size instead of 64k by default */
-	__le16 lz4_max_distance;
+	union {
+		/* bitmap for available compression algorithms */
+		__le16 available_compr_algs;
+		/* customized sliding window size instead of 64k by default */
+		__le16 lz4_max_distance;
+	} __packed u1;
 
 	__u8 reserved2[42];
 };
@@ -194,20 +205,33 @@ static inline unsigned int erofs_xattr_entry_size(struct erofs_xattr_entry *e)
 				 e->e_name_len + le16_to_cpu(e->e_value_size));
 }
 
+/* maximum supported size of a physical compression cluster */
+#define Z_EROFS_PCLUSTER_MAX_SIZE	(1024 * 1024)
+
 /* available compression algorithm types (for h_algorithmtype) */
 enum {
 	Z_EROFS_COMPRESSION_LZ4	= 0,
 	Z_EROFS_COMPRESSION_MAX
 };
+#define Z_EROFS_ALL_COMPR_ALGS		(1 << (Z_EROFS_COMPRESSION_MAX - 1))
+
+/* 14 bytes (+ length field = 16 bytes) */
+struct z_erofs_lz4_cfgs {
+	__le16 max_distance;
+	__le16 max_pclusterblks;
+	u8 reserved[10];
+} __packed;
 
 /*
  * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on)
  * e.g. for 4k logical cluster size, 4B if compacted 2B is off;
  *      (4B) + 2B + (4B) if compacted 2B is on.
+ * bit 1 : HEAD1 big pcluster (0 - off; 1 - on)
+ * bit 2 : HEAD2 big pcluster (0 - off; 1 - on)
  */
-#define Z_EROFS_ADVISE_COMPACTED_2B_BIT         0
-
-#define Z_EROFS_ADVISE_COMPACTED_2B     (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT)
+#define Z_EROFS_ADVISE_COMPACTED_2B		0x0001
+#define Z_EROFS_ADVISE_BIG_PCLUSTER_1		0x0002
+#define Z_EROFS_ADVISE_BIG_PCLUSTER_2		0x0004
 
 struct z_erofs_map_header {
 	__le32	h_reserved1;
@@ -219,9 +243,7 @@ struct z_erofs_map_header {
 	__u8	h_algorithmtype;
 	/*
 	 * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096;
-	 * bit 3-4 : (physical - logical) cluster bits of head 1:
-	 *           For example, if logical clustersize = 4096, 1 for 8192.
-	 * bit 5-7 : (physical - logical) cluster bits of head 2.
+	 * bit 3-7 : reserved.
	 */
 	__u8	h_clusterbits;
 };
@@ -264,6 +286,13 @@ enum {
 #define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS        2
 #define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT         0
 
+/*
+ * D0_CBLKCNT will be marked _only_ at the 1st non-head lcluster to store the
+ * compressed block count of a compressed extent (in logical clusters, aka.
+ * block count of a pcluster).
+ */
+#define Z_EROFS_VLE_DI_D0_CBLKCNT		(1 << 11)
+
 struct z_erofs_vle_decompressed_index {
 	__le16 di_advise;
 	/* where to decompress in the head cluster */

@@ -63,6 +63,8 @@ struct erofs_fs_context {
 struct erofs_sb_lz4_info {
 	/* # of pages needed for EROFS lz4 rolling decompression */
 	u16 max_distance_pages;
+	/* maximum possible blocks for pclusters in the filesystem */
+	u16 max_pclusterblks;
 };
 
 struct erofs_sb_info {
@@ -75,6 +77,7 @@ struct erofs_sb_info {
 	struct xarray managed_pslots;
 
 	unsigned int shrinker_run_no;
+	u16 available_compr_algs;
 
 	/* pseudo inode to manage cached pages */
 	struct inode *managed_cache;
@@ -90,6 +93,7 @@ struct erofs_sb_info {
 	/* inode slot unit size in bit shift */
 	unsigned char islotbits;
 
+	u32 sb_size;			/* total superblock size */
 	u32 build_time_nsec;
 	u64 build_time;
 
@@ -192,12 +196,6 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
 	return v;
 }
 #endif	/* !CONFIG_SMP */
-
-/* hard limit of pages per compressed cluster */
-#define Z_EROFS_CLUSTER_MAX_PAGES	(CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
-#define EROFS_PCPUBUF_NR_PAGES		Z_EROFS_CLUSTER_MAX_PAGES
-#else
-#define EROFS_PCPUBUF_NR_PAGES		0
 #endif	/* !CONFIG_EROFS_FS_ZIP */
 
 /* we strictly follow PAGE_SIZE and no buffer head yet */
@@ -226,6 +224,17 @@ static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
 	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
 }
 
+#define EROFS_FEATURE_FUNCS(name, compat, feature) \
+static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
+{ \
+	return sbi->feature_##compat & EROFS_FEATURE_##feature; \
+}
+
+EROFS_FEATURE_FUNCS(lz4_0padding, incompat, INCOMPAT_LZ4_0PADDING)
+EROFS_FEATURE_FUNCS(compr_cfgs, incompat, INCOMPAT_COMPR_CFGS)
+EROFS_FEATURE_FUNCS(big_pcluster, incompat, INCOMPAT_BIG_PCLUSTER)
+EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)
+
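EROFS_FEATURE_FUNCS() stamps out one predicate per feature bit; expanding the first invocation by hand shows exactly what gets generated:

	/* hand expansion of
	 * EROFS_FEATURE_FUNCS(lz4_0padding, incompat, INCOMPAT_LZ4_0PADDING) */
	static inline bool erofs_sb_has_lz4_0padding(struct erofs_sb_info *sbi)
	{
		return sbi->feature_incompat & EROFS_FEATURE_INCOMPAT_LZ4_0PADDING;
	}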
 /* atomic flag definitions */
 #define EROFS_I_EA_INITED_BIT	0
 #define EROFS_I_Z_INITED_BIT	1
@@ -254,7 +263,6 @@ struct erofs_inode {
 			unsigned short z_advise;
 			unsigned char  z_algorithmtype[2];
 			unsigned char  z_logical_clusterbits;
-			unsigned char  z_physical_clusterbits[2];
 		};
 #endif	/* CONFIG_EROFS_FS_ZIP */
 	};
@@ -297,7 +305,7 @@ extern const struct address_space_operations erofs_raw_access_aops;
 extern const struct address_space_operations z_erofs_aops;
 
 /*
- * Logical to physical block mapping, used by erofs_map_blocks()
+ * Logical to physical block mapping
  *
 * Different with other file systems, it is used for 2 access modes:
 *
@@ -344,7 +352,7 @@ struct erofs_map_blocks {
 	struct page *mpage;
 };
 
-/* Flags used by erofs_map_blocks() */
+/* Flags used by erofs_map_blocks_flatmode() */
 #define EROFS_GET_BLOCKS_RAW    0x0001
 
 /* zmap.c */
@@ -366,8 +374,6 @@ static inline int z_erofs_map_blocks_iter(struct inode *inode,
 
 /* data.c */
 struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr);
-
-int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
 
 /* inode.c */
 static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
 {
@@ -395,23 +401,30 @@ int erofs_namei(struct inode *dir, struct qstr *name,
 /* dir.c */
 extern const struct file_operations erofs_dir_fops;
 
-/* utils.c / zdata.c */
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
-
-#if (EROFS_PCPUBUF_NR_PAGES > 0)
-void *erofs_get_pcpubuf(unsigned int pagenr);
-#define erofs_put_pcpubuf(buf) do { \
-	(void)&(buf);	\
-	preempt_enable();	\
-} while (0)
-#else
-static inline void *erofs_get_pcpubuf(unsigned int pagenr)
+static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
 {
-	return ERR_PTR(-EOPNOTSUPP);
+	int retried = 0;
+
+	while (1) {
+		void *p = vm_map_ram(pages, count, -1);
+
+		/* retry two more times (totally 3 times) */
+		if (p || ++retried >= 3)
+			return p;
+		vm_unmap_aliases();
+	}
+	return NULL;
 }
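The helper hides the vm_map_ram() retry dance behind one call; its contract is simply "NULL after three attempts". A hedged usage sketch mirroring the decompressor call sites elsewhere in this merge:

	void *va = erofs_vm_map_ram(rq->in, nrpages_in);

	if (!va)
		return ERR_PTR(-ENOMEM);
	/* ... decompress from va ... */
	vm_unmap_ram(va, nrpages_in);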
-#define erofs_put_pcpubuf(buf) do {} while (0)
-#endif
+
+/* pcpubuf.c */
+void *erofs_get_pcpubuf(unsigned int requiredpages);
+void erofs_put_pcpubuf(void *ptr);
+int erofs_pcpubuf_growsize(unsigned int nrpages);
+void erofs_pcpubuf_init(void);
+void erofs_pcpubuf_exit(void);
+
+/* utils.c / zdata.c */
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
 
 #ifdef CONFIG_EROFS_FS_ZIP
 int erofs_workgroup_put(struct erofs_workgroup *grp);
@@ -431,7 +444,8 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 int erofs_try_to_free_cached_page(struct address_space *mapping,
 				  struct page *page);
 int z_erofs_load_lz4_config(struct super_block *sb,
-			    struct erofs_super_block *dsb);
+			    struct erofs_super_block *dsb,
+			    struct z_erofs_lz4_cfgs *lz4, int len);
 #else
 static inline void erofs_shrinker_register(struct super_block *sb) {}
 static inline void erofs_shrinker_unregister(struct super_block *sb) {}
@@ -440,9 +454,10 @@ static inline void erofs_exit_shrinker(void) {}
 static inline int z_erofs_init_zip_subsystem(void) { return 0; }
 static inline void z_erofs_exit_zip_subsystem(void) {}
 static inline int z_erofs_load_lz4_config(struct super_block *sb,
-					  struct erofs_super_block *dsb)
+					  struct erofs_super_block *dsb,
+					  struct z_erofs_lz4_cfgs *lz4, int len)
 {
-	if (dsb->lz4_max_distance) {
+	if (lz4 || dsb->u1.lz4_max_distance) {
 		erofs_err(sb, "lz4 algorithm isn't enabled");
 		return -EINVAL;
 	}

fs/erofs/pcpubuf.c (new file, 148 lines)

@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) Gao Xiang <xiang@kernel.org>
+ *
+ * For low-latency decompression algorithms (e.g. lz4), reserve consecutive
+ * per-CPU virtual memory (in pages) in advance to store such inplace I/O
+ * data if inplace decompression is failed (due to unmet inplace margin for
+ * example).
+ */
+#include "internal.h"
+
+struct erofs_pcpubuf {
+	raw_spinlock_t lock;
+	void *ptr;
+	struct page **pages;
+	unsigned int nrpages;
+};
+
+static DEFINE_PER_CPU(struct erofs_pcpubuf, erofs_pcb);
+
+void *erofs_get_pcpubuf(unsigned int requiredpages)
+	__acquires(pcb->lock)
+{
+	struct erofs_pcpubuf *pcb = &get_cpu_var(erofs_pcb);
+
+	raw_spin_lock(&pcb->lock);
+	/* check if the per-CPU buffer is too small */
+	if (requiredpages > pcb->nrpages) {
+		raw_spin_unlock(&pcb->lock);
+		put_cpu_var(erofs_pcb);
+		/* (for sparse checker) pretend pcb->lock is still taken */
+		__acquire(pcb->lock);
+		return NULL;
+	}
+	return pcb->ptr;
+}
+
+void erofs_put_pcpubuf(void *ptr) __releases(pcb->lock)
+{
+	struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, smp_processor_id());
+
+	DBG_BUGON(pcb->ptr != ptr);
+	raw_spin_unlock(&pcb->lock);
+	put_cpu_var(erofs_pcb);
+}
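The get/put pair behaves like a lock acquisition: a successful erofs_get_pcpubuf() returns with the per-CPU raw spinlock held and preemption disabled, so the caller must not sleep and must pair every successful get with a put on the same CPU. A hedged usage sketch (buffer and length names invented):

	void *buf = erofs_get_pcpubuf(nrpages_needed);

	if (buf) {
		memcpy(buf, src, len);	/* no sleeping while the buffer is held */
		erofs_put_pcpubuf(buf);
	}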
+
+/* the next step: support per-CPU page buffers hotplug */
+int erofs_pcpubuf_growsize(unsigned int nrpages)
+{
+	static DEFINE_MUTEX(pcb_resize_mutex);
+	static unsigned int pcb_nrpages;
+	LIST_HEAD(pagepool);
+	int delta, cpu, ret, i;
+
+	mutex_lock(&pcb_resize_mutex);
+	delta = nrpages - pcb_nrpages;
+	ret = 0;
+	/* avoid shrinking pcpubuf, since no idea how many fses rely on */
+	if (delta <= 0)
+		goto out;
+
+	for_each_possible_cpu(cpu) {
+		struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
+		struct page **pages, **oldpages;
+		void *ptr, *old_ptr;
+
+		pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL);
+		if (!pages) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		for (i = 0; i < nrpages; ++i) {
+			pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL);
+			if (!pages[i]) {
+				ret = -ENOMEM;
+				oldpages = pages;
+				goto free_pagearray;
+			}
+		}
+		ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL);
+		if (!ptr) {
+			ret = -ENOMEM;
+			oldpages = pages;
+			goto free_pagearray;
+		}
+		raw_spin_lock(&pcb->lock);
+		old_ptr = pcb->ptr;
+		pcb->ptr = ptr;
+		oldpages = pcb->pages;
+		pcb->pages = pages;
+		i = pcb->nrpages;
+		pcb->nrpages = nrpages;
+		raw_spin_unlock(&pcb->lock);
+
+		if (!oldpages) {
+			DBG_BUGON(old_ptr);
+			continue;
+		}
+
+		if (old_ptr)
+			vunmap(old_ptr);
+free_pagearray:
+		while (i)
+			list_add(&oldpages[--i]->lru, &pagepool);
+		kfree(oldpages);
+		if (ret)
+			break;
+	}
+	pcb_nrpages = nrpages;
+	put_pages_list(&pagepool);
+out:
+	mutex_unlock(&pcb_resize_mutex);
+	return ret;
+}
+
+void erofs_pcpubuf_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
+
+		raw_spin_lock_init(&pcb->lock);
+	}
+}
+
+void erofs_pcpubuf_exit(void)
+{
+	int cpu, i;
+
+	for_each_possible_cpu(cpu) {
+		struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
+
+		if (pcb->ptr) {
+			vunmap(pcb->ptr);
+			pcb->ptr = NULL;
+		}
+		if (!pcb->pages)
+			continue;
+
+		for (i = 0; i < pcb->nrpages; ++i)
+			if (pcb->pages[i])
+				put_page(pcb->pages[i]);
+		kfree(pcb->pages);
+		pcb->pages = NULL;
+	}
+}

@@ -122,6 +122,136 @@ static bool check_layout_compatibility(struct super_block *sb,
 	return true;
 }
 
+#ifdef CONFIG_EROFS_FS_ZIP
+/* read variable-sized metadata, offset will be aligned by 4-byte */
+static void *erofs_read_metadata(struct super_block *sb, struct page **pagep,
+				 erofs_off_t *offset, int *lengthp)
+{
+	struct page *page = *pagep;
+	u8 *buffer, *ptr;
+	int len, i, cnt;
+	erofs_blk_t blk;
+
+	*offset = round_up(*offset, 4);
+	blk = erofs_blknr(*offset);
+
+	if (!page || page->index != blk) {
+		if (page) {
+			unlock_page(page);
+			put_page(page);
+		}
+		page = erofs_get_meta_page(sb, blk);
+		if (IS_ERR(page))
+			goto err_nullpage;
+	}
+
+	ptr = kmap(page);
+	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]);
+	if (!len)
+		len = U16_MAX + 1;
+	buffer = kmalloc(len, GFP_KERNEL);
+	if (!buffer) {
+		buffer = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+	*offset += sizeof(__le16);
+	*lengthp = len;
+
+	for (i = 0; i < len; i += cnt) {
+		cnt = min(EROFS_BLKSIZ - (int)erofs_blkoff(*offset), len - i);
+		blk = erofs_blknr(*offset);
+
+		if (!page || page->index != blk) {
+			if (page) {
+				kunmap(page);
+				unlock_page(page);
+				put_page(page);
+			}
+			page = erofs_get_meta_page(sb, blk);
+			if (IS_ERR(page)) {
+				kfree(buffer);
+				goto err_nullpage;
+			}
+			ptr = kmap(page);
+		}
+		memcpy(buffer + i, ptr + erofs_blkoff(*offset), cnt);
+		*offset += cnt;
+	}
+out:
+	kunmap(page);
+	*pagep = page;
+	return buffer;
+err_nullpage:
+	*pagep = NULL;
+	return page;
+}
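On disk, each record that erofs_read_metadata() walks is a 2-byte little-endian length followed by the payload, with the whole record aligned to a 4-byte boundary. The layout below is inferred from the function above, not quoted from a separate spec:

	/*
	 *   round_up(offset, 4)
	 *   +--------+----------------------------------+
	 *   | __le16 | payload: length bytes, which may |
	 *   | length | cross fs block boundaries        |
	 *   +--------+----------------------------------+
	 *
	 *   length == 0 is read back as U16_MAX + 1 (65536)
	 */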
+
+static int erofs_load_compr_cfgs(struct super_block *sb,
+				 struct erofs_super_block *dsb)
+{
+	struct erofs_sb_info *sbi;
+	struct page *page;
+	unsigned int algs, alg;
+	erofs_off_t offset;
+	int size, ret;
+
+	sbi = EROFS_SB(sb);
+	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
+	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
+		erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
+			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
+		return -EINVAL;
+	}
+
+	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
+	page = NULL;
+	alg = 0;
+	ret = 0;
+
+	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
+		void *data;
+
+		if (!(algs & 1))
+			continue;
+
+		data = erofs_read_metadata(sb, &page, &offset, &size);
+		if (IS_ERR(data)) {
+			ret = PTR_ERR(data);
+			goto err;
+		}
+
+		switch (alg) {
+		case Z_EROFS_COMPRESSION_LZ4:
+			ret = z_erofs_load_lz4_config(sb, dsb, data, size);
+			break;
+		default:
+			DBG_BUGON(1);
+			ret = -EFAULT;
+		}
+
+		kfree(data);
+		if (ret)
+			goto err;
+	}
+err:
+	if (page) {
+		unlock_page(page);
+		put_page(page);
+	}
+	return ret;
+}
+#else
+static int erofs_load_compr_cfgs(struct super_block *sb,
+				 struct erofs_super_block *dsb)
+{
+	if (dsb->u1.available_compr_algs) {
+		erofs_err(sb, "try to load compressed fs when compression is disabled");
+		return -EINVAL;
+	}
+	return 0;
+}
+#endif
+
 static int erofs_read_superblock(struct super_block *sb)
 {
 	struct erofs_sb_info *sbi;
@@ -149,7 +279,7 @@ static int erofs_read_superblock(struct super_block *sb)
 	}
 
 	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
-	if (sbi->feature_compat & EROFS_FEATURE_COMPAT_SB_CHKSUM) {
+	if (erofs_sb_has_sb_chksum(sbi)) {
 		ret = erofs_superblock_csum_verify(sb, data);
 		if (ret)
 			goto out;
@@ -167,6 +297,12 @@ static int erofs_read_superblock(struct super_block *sb)
 	if (!check_layout_compatibility(sb, dsb))
 		goto out;
 
+	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
+	if (sbi->sb_size > EROFS_BLKSIZ) {
+		erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
+			  sbi->sb_size);
+		goto out;
+	}
 	sbi->blocks = le32_to_cpu(dsb->blocks);
 	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
 #ifdef CONFIG_EROFS_FS_XATTR
@@ -190,7 +326,10 @@ static int erofs_read_superblock(struct super_block *sb)
 	}
 
 	/* parse on-disk compression configurations */
-	ret = z_erofs_load_lz4_config(sb, dsb);
+	if (erofs_sb_has_compr_cfgs(sbi))
+		ret = erofs_load_compr_cfgs(sb, dsb);
+	else
+		ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
 out:
 	kunmap(page);
 	put_page(page);
@@ -517,6 +656,7 @@ static int __init erofs_module_init(void)
 	if (err)
 		goto shrinker_err;
 
+	erofs_pcpubuf_init();
 	err = z_erofs_init_zip_subsystem();
 	if (err)
 		goto zip_err;
@@ -546,6 +686,7 @@ static void __exit erofs_module_exit(void)
 	/* Ensure all RCU free inodes are safe before cache is destroyed. */
 	rcu_barrier();
 	kmem_cache_destroy(erofs_inode_cachep);
+	erofs_pcpubuf_exit();
 }
 
 /* get filesystem statistics */

@@ -21,18 +21,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
 	return page;
 }
 
-#if (EROFS_PCPUBUF_NR_PAGES > 0)
-static struct {
-	u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
-} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];
-
-void *erofs_get_pcpubuf(unsigned int pagenr)
-{
-	preempt_disable();
-	return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
-}
-#endif
-
 #ifdef CONFIG_EROFS_FS_ZIP
 /* global shrink count (for all mounted EROFS instances) */
 static atomic_long_t erofs_global_shrink_cnt;

@@ -10,6 +10,93 @@
 
 #include <trace/events/erofs.h>
 
+/*
+ * since pclustersize is variable for big pcluster feature, introduce slab
+ * pools implementation for different pcluster sizes.
+ */
+struct z_erofs_pcluster_slab {
+	struct kmem_cache *slab;
+	unsigned int maxpages;
+	char name[48];
+};
+
+#define _PCLP(n) { .maxpages = n }
+
+static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
+	_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
+	_PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
+};
+
+static void z_erofs_destroy_pcluster_pool(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
+		if (!pcluster_pool[i].slab)
+			continue;
+		kmem_cache_destroy(pcluster_pool[i].slab);
+		pcluster_pool[i].slab = NULL;
+	}
+}
+
+static int z_erofs_create_pcluster_pool(void)
+{
+	struct z_erofs_pcluster_slab *pcs;
+	struct z_erofs_pcluster *a;
+	unsigned int size;
+
+	for (pcs = pcluster_pool;
+	     pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
+		size = struct_size(a, compressed_pages, pcs->maxpages);
+
+		sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
+		pcs->slab = kmem_cache_create(pcs->name, size, 0,
+					      SLAB_RECLAIM_ACCOUNT, NULL);
+		if (pcs->slab)
+			continue;
+
+		z_erofs_destroy_pcluster_pool();
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
+		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
+		struct z_erofs_pcluster *pcl;
+
+		if (nrpages > pcs->maxpages)
+			continue;
+
+		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
+		if (!pcl)
+			return ERR_PTR(-ENOMEM);
+		pcl->pclusterpages = nrpages;
+		return pcl;
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
+		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
+
+		if (pcl->pclusterpages > pcs->maxpages)
+			continue;
+
+		kmem_cache_free(pcs->slab, pcl);
+		return;
+	}
+	DBG_BUGON(1);
+}
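z_erofs_alloc_pcluster() walks the size classes in ascending order and takes the first slab whose maxpages fits the request. A worked example of the mapping (values follow directly from pcluster_pool[] above):

	/* nrpages = 1  -> "erofs_pcluster-1"
	 * nrpages = 3  -> "erofs_pcluster-4"   (first class >= 3)
	 * nrpages = 90 -> "erofs_pcluster-128"
	 * anything above Z_EROFS_PCLUSTER_MAX_PAGES -> ERR_PTR(-EINVAL)
	 */
	struct z_erofs_pcluster *pcl = z_erofs_alloc_pcluster(3);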
+
 /*
  * a compressed_pages[] placeholder in order to avoid
  * being filled with file pages for in-place decompression.
@@ -37,12 +124,11 @@ typedef tagptr1_t compressed_page_t;
 	tagptr_fold(compressed_page_t, page, 1)
 
 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
-static struct kmem_cache *pcluster_cachep __read_mostly;
 
 void z_erofs_exit_zip_subsystem(void)
 {
 	destroy_workqueue(z_erofs_workqueue);
-	kmem_cache_destroy(pcluster_cachep);
+	z_erofs_destroy_pcluster_pool();
 }
 
 static inline int z_erofs_init_workqueue(void)
@@ -59,32 +145,16 @@ static inline int z_erofs_init_workqueue(void)
 	return z_erofs_workqueue ? 0 : -ENOMEM;
 }
 
-static void z_erofs_pcluster_init_once(void *ptr)
-{
-	struct z_erofs_pcluster *pcl = ptr;
-	struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
-	unsigned int i;
-
-	mutex_init(&cl->lock);
-	cl->nr_pages = 0;
-	cl->vcnt = 0;
-	for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
-		pcl->compressed_pages[i] = NULL;
-}
-
 int __init z_erofs_init_zip_subsystem(void)
 {
-	pcluster_cachep = kmem_cache_create("erofs_compress",
-					    Z_EROFS_WORKGROUP_SIZE, 0,
-					    SLAB_RECLAIM_ACCOUNT,
-					    z_erofs_pcluster_init_once);
-	if (pcluster_cachep) {
-		if (!z_erofs_init_workqueue())
-			return 0;
-
-		kmem_cache_destroy(pcluster_cachep);
-	}
-	return -ENOMEM;
+	int err = z_erofs_create_pcluster_pool();
+
+	if (err)
+		return err;
+	err = z_erofs_init_workqueue();
+	if (err)
+		z_erofs_destroy_pcluster_pool();
+	return err;
 }
 
 enum z_erofs_collectmode {
@@ -104,6 +174,12 @@ enum z_erofs_collectmode {
 	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
 	 */
 	COLLECT_PRIMARY_HOOKED,
+	/*
+	 * a weak form of COLLECT_PRIMARY_FOLLOWED, the difference is that it
+	 * could be dispatched into bypass queue later due to uptodated managed
+	 * pages. All related online pages cannot be reused for inplace I/O (or
+	 * pagevec) since it can be directly decoded without I/O submission.
+	 */
 	COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
 	/*
 	 * The current collection has been linked with the owned chain, and
@@ -128,7 +204,8 @@ struct z_erofs_collector {
 	struct z_erofs_pcluster *pcl, *tailpcl;
 	struct z_erofs_collection *cl;
 
-	struct page **compressedpages;
+	/* a pointer used to pick up inplace I/O pages */
+	struct page **icpage_ptr;
 	z_erofs_next_pcluster_t owned_head;
 
 	enum z_erofs_collectmode mode;
@@ -162,18 +239,19 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
 				     enum z_erofs_cache_alloctype type,
 				     struct list_head *pagepool)
 {
-	const struct z_erofs_pcluster *pcl = clt->pcl;
-	const unsigned int clusterpages = BIT(pcl->clusterbits);
-	struct page **pages = clt->compressedpages;
-	pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages);
+	struct z_erofs_pcluster *pcl = clt->pcl;
 	bool standalone = true;
 	gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
 			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
+	struct page **pages;
+	pgoff_t index;
 
 	if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
 		return;
 
-	for (; pages < pcl->compressed_pages + clusterpages; ++pages) {
+	pages = pcl->compressed_pages;
+	index = pcl->obj.index;
+	for (; index < pcl->obj.index + pcl->pclusterpages; ++index, ++pages) {
 		struct page *page;
 		compressed_page_t t;
 		struct page *newpage = NULL;
@@ -186,21 +264,25 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
 
 		if (page) {
 			t = tag_compressed_page_justfound(page);
-		} else if (type == DELAYEDALLOC) {
-			t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
-		} else if (type == TRYALLOC) {
-			newpage = erofs_allocpage(pagepool, gfp);
-			if (!newpage)
-				goto dontalloc;
-			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
-			t = tag_compressed_page_justfound(newpage);
-		} else {	/* DONTALLOC */
-dontalloc:
-			if (standalone)
-				clt->compressedpages = pages;
+		} else {
+			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
-			continue;
+			switch (type) {
+			case DELAYEDALLOC:
+				t = tagptr_init(compressed_page_t,
+						PAGE_UNALLOCATED);
+				break;
+			case TRYALLOC:
+				newpage = erofs_allocpage(pagepool, gfp);
+				if (!newpage)
+					continue;
+				set_page_private(newpage,
+						 Z_EROFS_PREALLOCATED_PAGE);
+				t = tag_compressed_page_justfound(newpage);
+				break;
+			default:        /* DONTALLOC */
+				continue;
+			}
 		}
 
 		if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
@@ -214,7 +296,11 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
 		}
 	}
 
-	if (standalone)		/* downgrade to PRIMARY_FOLLOWED_NOINPLACE */
+	/*
+	 * don't do inplace I/O if all compressed pages are available in
+	 * managed cache since it can be moved to the bypass queue instead.
+	 */
+	if (standalone)
 		clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
 }
 
@@ -225,14 +311,13 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 	struct z_erofs_pcluster *const pcl =
 		container_of(grp, struct z_erofs_pcluster, obj);
 	struct address_space *const mapping = MNGD_MAPPING(sbi);
-	const unsigned int clusterpages = BIT(pcl->clusterbits);
 	int i;
 
 	/*
 	 * refcount of workgroup is now freezed as 1,
 	 * therefore no need to worry about available decompression users.
 	 */
-	for (i = 0; i < clusterpages; ++i) {
+	for (i = 0; i < pcl->pclusterpages; ++i) {
 		struct page *page = pcl->compressed_pages[i];
 
 		if (!page)
@@ -257,13 +342,12 @@ int erofs_try_to_free_cached_page(struct address_space *mapping,
 				  struct page *page)
 {
 	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
-	const unsigned int clusterpages = BIT(pcl->clusterbits);
 	int ret = 0;	/* 0 - busy */
 
 	if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
 		unsigned int i;
 
-		for (i = 0; i < clusterpages; ++i) {
+		for (i = 0; i < pcl->pclusterpages; ++i) {
 			if (pcl->compressed_pages[i] == page) {
 				WRITE_ONCE(pcl->compressed_pages[i], NULL);
 				ret = 1;
@@ -279,16 +363,14 @@ int erofs_try_to_free_cached_page(struct address_space *mapping,
 }
 
 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
-static inline bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
-					  struct page *page)
+static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
+				   struct page *page)
 {
 	struct z_erofs_pcluster *const pcl = clt->pcl;
-	const unsigned int clusterpages = BIT(pcl->clusterbits);
 
-	while (clt->compressedpages < pcl->compressed_pages + clusterpages) {
-		if (!cmpxchg(clt->compressedpages++, NULL, page))
+	while (clt->icpage_ptr > pcl->compressed_pages)
+		if (!cmpxchg(--clt->icpage_ptr, NULL, page))
 			return true;
-	}
 	return false;
 }
 
@@ -402,10 +484,10 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
 	struct erofs_workgroup *grp;
 	int err;
 
-	/* no available workgroup, let's allocate one */
-	pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
-	if (!pcl)
-		return -ENOMEM;
+	/* no available pcluster, let's allocate one */
+	pcl = z_erofs_alloc_pcluster(map->m_plen >> PAGE_SHIFT);
+	if (IS_ERR(pcl))
+		return PTR_ERR(pcl);
 
 	atomic_set(&pcl->obj.refcount, 1);
 	pcl->obj.index = map->m_pa >> PAGE_SHIFT;
@@ -419,25 +501,18 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
 	else
 		pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
 
-	pcl->clusterbits = EROFS_I(inode)->z_physical_clusterbits[0];
-	pcl->clusterbits -= PAGE_SHIFT;
-
 	/* new pclusters should be claimed as type 1, primary and followed */
 	pcl->next = clt->owned_head;
 	clt->mode = COLLECT_PRIMARY_FOLLOWED;
 
 	cl = z_erofs_primarycollection(pcl);
-
-	/* must be cleaned before freeing to slab */
-	DBG_BUGON(cl->nr_pages);
-	DBG_BUGON(cl->vcnt);
-
 	cl->pageofs = map->m_la & ~PAGE_MASK;
 
 	/*
 	 * lock all primary followed works before visible to others
 	 * and mutex_trylock *never* fails for a new pcluster.
 	 */
+	mutex_init(&cl->lock);
 	DBG_BUGON(!mutex_trylock(&cl->lock));
 
 	grp = erofs_insert_workgroup(inode->i_sb, &pcl->obj);
@@ -461,7 +536,7 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
 
 err_out:
 	mutex_unlock(&cl->lock);
-	kmem_cache_free(pcluster_cachep, pcl);
+	z_erofs_free_pcluster(pcl);
 	return err;
 }
 
@@ -505,9 +580,8 @@ static int z_erofs_collector_begin(struct z_erofs_collector *clt,
 	z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
 				  clt->cl->pagevec, clt->cl->vcnt);
 
-	clt->compressedpages = clt->pcl->compressed_pages;
-	if (clt->mode <= COLLECT_PRIMARY)	/* cannot do in-place I/O */
-		clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES;
+	/* since file-backed online pages are traversed in reverse order */
+	clt->icpage_ptr = clt->pcl->compressed_pages + clt->pcl->pclusterpages;
 	return 0;
 }
 
@@ -520,9 +594,8 @@ static void z_erofs_rcu_callback(struct rcu_head *head)
 	struct z_erofs_collection *const cl =
 		container_of(head, struct z_erofs_collection, rcu);
 
-	kmem_cache_free(pcluster_cachep,
-			container_of(cl, struct z_erofs_pcluster,
-				     primary_collection));
+	z_erofs_free_pcluster(container_of(cl, struct z_erofs_pcluster,
+					   primary_collection));
 }
 
 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
@@ -774,9 +847,8 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 				       struct list_head *pagepool)
 {
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
-	const unsigned int clusterpages = BIT(pcl->clusterbits);
 	struct z_erofs_pagevec_ctor ctor;
-	unsigned int i, outputsize, llen, nr_pages;
+	unsigned int i, inputsize, outputsize, llen, nr_pages;
 	struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
 	struct page **pages, **compressed_pages, *page;
@@ -856,7 +928,7 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 	overlapped = false;
 	compressed_pages = pcl->compressed_pages;
 
-	for (i = 0; i < clusterpages; ++i) {
+	for (i = 0; i < pcl->pclusterpages; ++i) {
 		unsigned int pagenr;
page = compressed_pages[i]; page = compressed_pages[i];
@ -909,12 +981,13 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
partial = true; partial = true;
} }
inputsize = pcl->pclusterpages * PAGE_SIZE;
err = z_erofs_decompress(&(struct z_erofs_decompress_req) { err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
.sb = sb, .sb = sb,
.in = compressed_pages, .in = compressed_pages,
.out = pages, .out = pages,
.pageofs_out = cl->pageofs, .pageofs_out = cl->pageofs,
.inputsize = PAGE_SIZE, .inputsize = inputsize,
.outputsize = outputsize, .outputsize = outputsize,
.alg = pcl->algorithmformat, .alg = pcl->algorithmformat,
.inplace_io = overlapped, .inplace_io = overlapped,
@ -922,8 +995,8 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
}, pagepool); }, pagepool);
out: out:
/* must handle all compressed pages before endding pages */ /* must handle all compressed pages before ending pages */
for (i = 0; i < clusterpages; ++i) { for (i = 0; i < pcl->pclusterpages; ++i) {
page = compressed_pages[i]; page = compressed_pages[i];
if (erofs_page_is_managed(sbi, page)) if (erofs_page_is_managed(sbi, page))
@ -1226,7 +1299,7 @@ static void z_erofs_submit_queue(struct super_block *sb,
pcl = container_of(owned_head, struct z_erofs_pcluster, next); pcl = container_of(owned_head, struct z_erofs_pcluster, next);
cur = pcl->obj.index; cur = pcl->obj.index;
end = cur + BIT(pcl->clusterbits); end = cur + pcl->pclusterpages;
/* close the main owned chain at first */ /* close the main owned chain at first */
owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,

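The rewritten z_erofs_try_inplace_io() above claims free slots from the tail of compressed_pages[] backwards, matching the reverse order in which file-backed online pages are fed in. Below is a minimal userspace sketch of that lock-free claim pattern, with C11 atomics standing in for the kernel's cmpxchg(); the names are illustrative, and unlike the kernel's persistent icpage_ptr cursor this version simply rescans from the tail on every call:

#include <stdatomic.h>
#include <stdio.h>

#define NSLOTS 4

/* one slot per compressed page; NULL means "free" */
static _Atomic(void *) slots[NSLOTS];

/*
 * Claim the highest free slot, scanning from the tail the way the
 * patched loop walks --clt->icpage_ptr.  Returns the slot index,
 * or -1 if every slot is already taken.
 */
static int try_claim_from_tail(void *page)
{
	for (int i = NSLOTS - 1; i >= 0; i--) {
		void *expected = NULL;

		/* cmpxchg(slot, NULL, page): succeeds only if still free */
		if (atomic_compare_exchange_strong(&slots[i], &expected, page))
			return i;
	}
	return -1;
}

int main(void)
{
	int dummy[6];

	for (int i = 0; i < 6; i++)
		printf("page %d -> slot %d\n", i,
		       try_claim_from_tail(&dummy[i]));
	return 0;
}

The compare-and-swap makes the claim safe against concurrent callers: at most one of them can flip a given slot from NULL to a page pointer.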

@@ -10,6 +10,7 @@
 #include "internal.h"
 #include "zpvec.h"
 
+#define Z_EROFS_PCLUSTER_MAX_PAGES  (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
 #define Z_EROFS_NR_INLINE_PAGEVECS  3
 
 /*
@@ -59,16 +60,17 @@ struct z_erofs_pcluster {
     /* A: point to next chained pcluster or TAILs */
     z_erofs_next_pcluster_t next;
 
-    /* A: compressed pages (including multi-usage pages) */
-    struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
-
     /* A: lower limit of decompressed length and if full length or not */
     unsigned int length;
 
+    /* I: physical cluster size in pages */
+    unsigned short pclusterpages;
+
     /* I: compression algorithm format */
     unsigned char algorithmformat;
-    /* I: bit shift of physical cluster size */
-    unsigned char clusterbits;
+
+    /* A: compressed pages (can be cached or inplaced pages) */
+    struct page *compressed_pages[];
 };
 
 #define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
@@ -82,8 +84,6 @@ struct z_erofs_pcluster {
 #define Z_EROFS_PCLUSTER_NIL            (NULL)
 
-#define Z_EROFS_WORKGROUP_SIZE  sizeof(struct z_erofs_pcluster)
-
 struct z_erofs_decompressqueue {
     struct super_block *sb;
     atomic_t pending_bios;

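struct z_erofs_pcluster now ends in a flexible array member sized by pclusterpages, which is what lets the z_erofs_alloc_pcluster()/z_erofs_free_pcluster() pair above replace the old fixed-size kmem_cache allocation. A userspace sketch of the same layout trick, with calloc standing in for the kernel's slab pools and the struct heavily simplified:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct page;                       /* opaque here */

struct pcluster {
	unsigned int length;
	unsigned short pclusterpages;    /* physical cluster size in pages */
	unsigned char algorithmformat;
	struct page *compressed_pages[]; /* flexible array, sized at alloc */
};

static struct pcluster *alloc_pcluster(unsigned short nrpages)
{
	/* the equivalent of struct_size(pcl, compressed_pages, nrpages) */
	size_t sz = sizeof(struct pcluster) +
		    nrpages * sizeof(struct page *);
	struct pcluster *pcl = calloc(1, sz);

	if (!pcl)
		return NULL;
	pcl->pclusterpages = nrpages;
	return pcl;
}

int main(void)
{
	struct pcluster *pcl = alloc_pcluster(4);

	if (!pcl)
		return 1;
	printf("pcluster with %u page slots (%zu bytes)\n",
	       (unsigned)pcl->pclusterpages,
	       sizeof(*pcl) + pcl->pclusterpages * sizeof(struct page *));
	free(pcl);
	return 0;
}

The real allocator keeps per-size pools rather than a bare malloc; this sketch only shows why the array no longer needs a compile-time Z_EROFS_CLUSTER_MAX_PAGES bound.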

@@ -11,17 +11,16 @@
 int z_erofs_fill_inode(struct inode *inode)
 {
     struct erofs_inode *const vi = EROFS_I(inode);
+    struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
 
-    if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
+    if (!erofs_sb_has_big_pcluster(sbi) &&
+        vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
         vi->z_advise = 0;
         vi->z_algorithmtype[0] = 0;
         vi->z_algorithmtype[1] = 0;
         vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
-        vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits;
-        vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits;
         set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
     }
 
     inode->i_mapping->a_ops = &z_erofs_aops;
     return 0;
 }
@@ -52,7 +51,8 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
     if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
         goto out_unlock;
 
-    DBG_BUGON(vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);
+    DBG_BUGON(!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
+              vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);
 
     pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
                 vi->xattr_isize, 8);
@@ -77,18 +77,22 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
     }
 
     vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
-    vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits +
-                                    ((h->h_clusterbits >> 3) & 3);
-
-    if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) {
-        erofs_err(sb, "unsupported physical clusterbits %u for nid %llu, please upgrade kernel",
-                  vi->z_physical_clusterbits[0], vi->nid);
-        err = -EOPNOTSUPP;
+    if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
+        vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
+                        Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
+        erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
+                  vi->nid);
+        err = -EFSCORRUPTED;
+        goto unmap_done;
+    }
+    if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
+        !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
+        !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
+        erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
+                  vi->nid);
+        err = -EFSCORRUPTED;
         goto unmap_done;
     }
-
-    vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
-                                    ((h->h_clusterbits >> 5) & 7);
     /* paired with smp_mb() at the beginning of the function */
     smp_mb();
     set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
@@ -111,7 +115,7 @@ struct z_erofs_maprecorder {
     u8  type;
     u16 clusterofs;
     u16 delta[2];
-    erofs_blk_t pblk;
+    erofs_blk_t pblk, compressedlcs;
 };
 
 static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
@@ -174,6 +178,15 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
     case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
         m->clusterofs = 1 << vi->z_logical_clusterbits;
         m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
+        if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
+            if (!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
+                DBG_BUGON(1);
+                return -EFSCORRUPTED;
+            }
+            m->compressedlcs = m->delta[0] &
+                ~Z_EROFS_VLE_DI_D0_CBLKCNT;
+            m->delta[0] = 1;
+        }
         m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
         break;
     case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
@@ -210,6 +223,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
     unsigned int vcnt, base, lo, encodebits, nblk;
     int i;
     u8 *in, type;
+    bool big_pcluster;
 
     if (1 << amortizedshift == 4)
         vcnt = 2;
@@ -218,6 +232,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
     else
         return -EOPNOTSUPP;
 
+    big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
     encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
     base = round_down(eofs, vcnt << amortizedshift);
     in = m->kaddr + base;
@@ -229,7 +244,15 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
     m->type = type;
     if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
         m->clusterofs = 1 << lclusterbits;
-        if (i + 1 != vcnt) {
+        if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
+            if (!big_pcluster) {
+                DBG_BUGON(1);
+                return -EFSCORRUPTED;
+            }
+            m->compressedlcs = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
+            m->delta[0] = 1;
+            return 0;
+        } else if (i + 1 != (int)vcnt) {
             m->delta[0] = lo;
             return 0;
         }
@@ -242,22 +265,48 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
                                   in, encodebits * (i - 1), &type);
         if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
             lo = 0;
+        else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
+            lo = 1;
         m->delta[0] = lo + 1;
         return 0;
     }
     m->clusterofs = lo;
     m->delta[0] = 0;
     /* figout out blkaddr (pblk) for HEAD lclusters */
-    nblk = 1;
-    while (i > 0) {
-        --i;
-        lo = decode_compactedbits(lclusterbits, lomask,
-                                  in, encodebits * i, &type);
-        if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
-            i -= lo;
-
-        if (i >= 0)
-            ++nblk;
-    }
+    if (!big_pcluster) {
+        nblk = 1;
+        while (i > 0) {
+            --i;
+            lo = decode_compactedbits(lclusterbits, lomask,
+                                      in, encodebits * i, &type);
+            if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
+                i -= lo;
+
+            if (i >= 0)
+                ++nblk;
+        }
+    } else {
+        nblk = 0;
+        while (i > 0) {
+            --i;
+            lo = decode_compactedbits(lclusterbits, lomask,
+                                      in, encodebits * i, &type);
+            if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
+                if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
+                    --i;
+                    nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
+                    continue;
+                }
+                /* bigpcluster shouldn't have plain d0 == 1 */
+                if (lo <= 1) {
+                    DBG_BUGON(1);
+                    return -EFSCORRUPTED;
+                }
+                i -= lo - 2;
+                continue;
+            }
+            ++nblk;
+        }
+    }
     in += (vcnt << amortizedshift) - sizeof(__le32);
     m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
@@ -381,6 +430,75 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
     return 0;
 }
 
+static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
+                                            unsigned int initial_lcn)
+{
+    struct erofs_inode *const vi = EROFS_I(m->inode);
+    struct erofs_map_blocks *const map = m->map;
+    const unsigned int lclusterbits = vi->z_logical_clusterbits;
+    unsigned long lcn;
+    int err;
+
+    DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
+              m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD);
+    if (!(map->m_flags & EROFS_MAP_ZIPPED) ||
+        !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
+        map->m_plen = 1 << lclusterbits;
+        return 0;
+    }
+
+    lcn = m->lcn + 1;
+    if (m->compressedlcs)
+        goto out;
+
+    err = z_erofs_load_cluster_from_disk(m, lcn);
+    if (err)
+        return err;
+
+    /*
+     * If the 1st NONHEAD lcluster has already been handled initially w/o
+     * valid compressedlcs, which means at least it mustn't be CBLKCNT, or
+     * an internal implemenatation error is detected.
+     *
+     * The following code can also handle it properly anyway, but let's
+     * BUG_ON in the debugging mode only for developers to notice that.
+     */
+    DBG_BUGON(lcn == initial_lcn &&
+              m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);
+
+    switch (m->type) {
+    case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+    case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+        /*
+         * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
+         * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
+         */
+        m->compressedlcs = 1;
+        break;
+    case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+        if (m->delta[0] != 1)
+            goto err_bonus_cblkcnt;
+        if (m->compressedlcs)
+            break;
+        fallthrough;
+    default:
+        erofs_err(m->inode->i_sb,
+                  "cannot found CBLKCNT @ lcn %lu of nid %llu",
+                  lcn, vi->nid);
+        DBG_BUGON(1);
+        return -EFSCORRUPTED;
+    }
+out:
+    map->m_plen = m->compressedlcs << lclusterbits;
+    return 0;
+err_bonus_cblkcnt:
+    erofs_err(m->inode->i_sb,
+              "bogus CBLKCNT @ lcn %lu of nid %llu",
+              lcn, vi->nid);
+    DBG_BUGON(1);
+    return -EFSCORRUPTED;
+}
+
 int z_erofs_map_blocks_iter(struct inode *inode,
                             struct erofs_map_blocks *map,
                             int flags)
@@ -392,6 +510,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
     };
     int err = 0;
     unsigned int lclusterbits, endoff;
+    unsigned long initial_lcn;
     unsigned long long ofs, end;
 
     trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
@@ -410,10 +529,10 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 
     lclusterbits = vi->z_logical_clusterbits;
     ofs = map->m_la;
-    m.lcn = ofs >> lclusterbits;
+    initial_lcn = ofs >> lclusterbits;
     endoff = ofs & ((1 << lclusterbits) - 1);
 
-    err = z_erofs_load_cluster_from_disk(&m, m.lcn);
+    err = z_erofs_load_cluster_from_disk(&m, initial_lcn);
     if (err)
         goto unmap_out;
 
@@ -443,7 +562,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
         m.delta[0] = 1;
         fallthrough;
     case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
-        /* get the correspoinding first chunk */
+        /* get the corresponding first chunk */
         err = z_erofs_extent_lookback(&m, m.delta[0]);
         if (err)
             goto unmap_out;
@@ -457,10 +576,12 @@ int z_erofs_map_blocks_iter(struct inode *inode,
     }
 
     map->m_llen = end - map->m_la;
-    map->m_plen = 1 << lclusterbits;
     map->m_pa = blknr_to_addr(m.pblk);
     map->m_flags |= EROFS_MAP_MAPPED;
 
+    err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
+    if (err)
+        goto out;
 unmap_out:
     if (m.kaddr)
         kunmap_atomic(m.kaddr);

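Both index formats above smuggle the compressed block count through delta[0] of the first NONHEAD lcluster: when the CBLKCNT flag bit is set, the remaining bits carry the pcluster size in lclusters, and z_erofs_get_extent_compressedlen() turns that into map->m_plen = compressedlcs << lclusterbits. A standalone sketch of that decode; the flag value is taken here as (1 << 11), which should be treated as an assumption about this kernel's on-disk format rather than something shown in the hunks:

#include <stdio.h>
#include <stdint.h>

#define VLE_DI_D0_CBLKCNT (1u << 11) /* assumed flag bit in delta[0] */

/* decode the 1st NONHEAD delta[0]: either a plain distance or CBLKCNT */
static unsigned int decode_compressedlcs(uint16_t delta0)
{
	if (delta0 & VLE_DI_D0_CBLKCNT)
		return delta0 & ~VLE_DI_D0_CBLKCNT; /* size in lclusters */
	return 1; /* no CBLKCNT: a 1 lcluster-sized pcluster */
}

int main(void)
{
	const unsigned int lclusterbits = 12; /* 4KiB logical clusters */
	uint16_t samples[] = { 0x0001, VLE_DI_D0_CBLKCNT | 4 };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int lcs = decode_compressedlcs(samples[i]);

		/* m_plen = compressedlcs << lclusterbits */
		printf("delta0=%#06x -> %u lcluster(s), m_plen=%u bytes\n",
		       (unsigned int)samples[i], lcs, lcs << lclusterbits);
	}
	return 0;
}

With 4KiB lclusters, a CBLKCNT of 4 yields a 16KiB pcluster, which is exactly the case the old fixed "m_plen = 1 << lclusterbits" assignment could not express.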

@@ -527,7 +527,7 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
         inode->i_op = &f2fs_dir_inode_operations;
         inode->i_fop = &f2fs_dir_operations;
         inode->i_mapping->a_ops = &f2fs_dblock_aops;
-        inode_nohighmem(inode);
+        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
     } else if (S_ISLNK(inode->i_mode)) {
         if (file_is_encrypt(inode))
             inode->i_op = &f2fs_encrypted_symlink_inode_operations;


@@ -747,7 +747,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
     inode->i_op = &f2fs_dir_inode_operations;
     inode->i_fop = &f2fs_dir_operations;
     inode->i_mapping->a_ops = &f2fs_dblock_aops;
-    inode_nohighmem(inode);
+    mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 
     set_inode_flag(inode, FI_INC_LINK);
     f2fs_lock_op(sbi);


@@ -405,7 +405,7 @@ static void fuse_dentry_canonical_path(const struct path *path,
     char *path_name;
     int err;
 
-    path_name = (char *)__get_free_page(GFP_KERNEL);
+    path_name = (char *)get_zeroed_page(GFP_KERNEL);
     if (!path_name)
         goto default_path;

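The one-line change above likely matters because the page is used to build a NUL-terminated path string: get_zeroed_page() hands back an all-zero buffer, while __get_free_page() returns uninitialized memory that could leak stale kernel data or leave the string unterminated if the fill is partial. In userspace terms this is the calloc-vs-malloc distinction:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* __get_free_page() analogue: contents indeterminate */
	char *dirty = malloc(4096);
	/* get_zeroed_page() analogue: guaranteed all-zero */
	char *clean = calloc(1, 4096);

	if (!dirty || !clean)
		return 1;

	/*
	 * A partial copy into the zeroed buffer still yields a valid
	 * NUL-terminated string; the malloc'd one may not.
	 */
	memcpy(clean, "/mnt", 4);
	printf("clean buffer reads back as: \"%s\"\n", clean);

	free(dirty);
	free(clean);
	return 0;
}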

@@ -14,11 +14,34 @@ struct fuse_aio_req {
     struct kiocb *iocb_fuse;
 };
 
+static void fuse_file_accessed(struct file *dst_file, struct file *src_file)
+{
+    struct inode *dst_inode;
+    struct inode *src_inode;
+
+    if (dst_file->f_flags & O_NOATIME)
+        return;
+
+    dst_inode = file_inode(dst_file);
+    src_inode = file_inode(src_file);
+
+    if ((!timespec64_equal(&dst_inode->i_mtime, &src_inode->i_mtime) ||
+         !timespec64_equal(&dst_inode->i_ctime, &src_inode->i_ctime))) {
+        dst_inode->i_mtime = src_inode->i_mtime;
+        dst_inode->i_ctime = src_inode->i_ctime;
+    }
+
+    touch_atime(&dst_file->f_path);
+}
+
 static void fuse_copyattr(struct file *dst_file, struct file *src_file)
 {
     struct inode *dst = file_inode(dst_file);
     struct inode *src = file_inode(src_file);
 
+    dst->i_atime = src->i_atime;
+    dst->i_mtime = src->i_mtime;
+    dst->i_ctime = src->i_ctime;
     i_size_write(dst, i_size_read(src));
 }
 
@@ -84,6 +107,8 @@ ssize_t fuse_passthrough_read_iter(struct kiocb *iocb_fuse,
 out:
     revert_creds(old_cred);
 
+    fuse_file_accessed(fuse_filp, passthrough_filp);
+
     return ret;
 }
 
@@ -103,6 +128,8 @@ ssize_t fuse_passthrough_write_iter(struct kiocb *iocb_fuse,
 
     inode_lock(fuse_inode);
 
+    fuse_copyattr(fuse_filp, passthrough_filp);
+
     old_cred = override_creds(ff->passthrough.cred);
     if (is_sync_kiocb(iocb_fuse)) {
         file_start_write(passthrough_filp);
@@ -143,9 +170,7 @@ ssize_t fuse_passthrough_mmap(struct file *file, struct vm_area_struct *vma)
     int ret;
     const struct cred *old_cred;
     struct fuse_file *ff = file->private_data;
-    struct inode *fuse_inode = file_inode(file);
     struct file *passthrough_filp = ff->passthrough.filp;
-    struct inode *passthrough_inode = file_inode(passthrough_filp);
 
     if (!passthrough_filp->f_op->mmap)
         return -ENODEV;
@@ -164,17 +189,7 @@ ssize_t fuse_passthrough_mmap(struct file *file, struct vm_area_struct *vma)
     else
         fput(file);
 
-    if (file->f_flags & O_NOATIME)
-        return ret;
-
-    if ((!timespec64_equal(&fuse_inode->i_mtime,
-                           &passthrough_inode->i_mtime) ||
-         !timespec64_equal(&fuse_inode->i_ctime,
-                           &passthrough_inode->i_ctime))) {
-        fuse_inode->i_mtime = passthrough_inode->i_mtime;
-        fuse_inode->i_ctime = passthrough_inode->i_ctime;
-    }
-    touch_atime(&file->f_path);
+    fuse_file_accessed(file, passthrough_filp);
 
     return ret;
 }

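The refactor above hoists the O_NOATIME check and mtime/ctime propagation that used to live inline in the mmap path into a shared fuse_file_accessed() helper, so the read path can reuse it. A userspace sketch of the same shape, with plain structs in place of inodes and the touch_atime() call reduced to a flag:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define FAKE_O_NOATIME 0x1 /* stand-in for O_NOATIME */

struct fakefile {
	int flags;
	struct timespec mtime, ctime;
	bool atime_touched;
};

static bool ts_equal(const struct timespec *a, const struct timespec *b)
{
	return a->tv_sec == b->tv_sec && a->tv_nsec == b->tv_nsec;
}

/* mirrors fuse_file_accessed(): sync times from src into dst */
static void file_accessed(struct fakefile *dst, const struct fakefile *src)
{
	if (dst->flags & FAKE_O_NOATIME)
		return;

	if (!ts_equal(&dst->mtime, &src->mtime) ||
	    !ts_equal(&dst->ctime, &src->ctime)) {
		dst->mtime = src->mtime;
		dst->ctime = src->ctime;
	}
	dst->atime_touched = true; /* touch_atime() equivalent */
}

int main(void)
{
	struct fakefile lower = {
		.mtime = { .tv_sec = 100 },
		.ctime = { .tv_sec = 100 },
	};
	struct fakefile upper = { 0 };

	file_accessed(&upper, &lower);
	printf("mtime synced to %ld, atime touched: %d\n",
	       (long)upper.mtime.tv_sec, upper.atime_touched);
	return 0;
}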

@@ -194,7 +194,7 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
 struct buffer_head *__bread_gfp(struct block_device *,
                                 sector_t block, unsigned size, gfp_t gfp);
 void invalidate_bh_lrus(void);
-void invalidate_bh_lrus_cpu(int cpu);
+void invalidate_bh_lrus_cpu(void);
 bool has_bh_in_lru(int cpu, void *dummy);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
@@ -408,7 +408,7 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bh_lrus_cpu(int cpu) {}
+static inline void invalidate_bh_lrus_cpu(void) {}
 static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; }
 #define buffer_heads_over_limit 0


@@ -623,6 +623,7 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
                  unsigned long);
 void *dma_buf_vmap(struct dma_buf *);
 void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+long dma_buf_set_name(struct dma_buf *dmabuf, const char *name);
 int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags);
 int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid);


@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_TAGS_H
+#define _LINUX_KASAN_TAGS_H
+
+#define KASAN_TAG_KERNEL    0xFF /* native kernel pointers tag */
+#define KASAN_TAG_INVALID   0xFE /* inaccessible memory tag */
+#define KASAN_TAG_MAX       0xFD /* maximum value for random tags */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define KASAN_TAG_MIN       0xF0 /* minimum value for random tags */
+#else
+#define KASAN_TAG_MIN       0x00 /* minimum value for random tags */
+#endif
+
+#endif /* LINUX_KASAN_TAGS_H */

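With the tag constants now in a standalone header, other code (such as the arm64 GCR_EL1 patches in this merge) can reason about the tag space without pulling in mm/kasan internals. A quick self-contained check of what the ranges mean, assuming the hardware-tagging values (the CONFIG_KASAN_HW_TAGS branch above):

#include <stdbool.h>
#include <stdio.h>

/* values from the new kasan-tags.h, CONFIG_KASAN_HW_TAGS case */
#define KASAN_TAG_KERNEL  0xFF /* native kernel pointers tag */
#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
#define KASAN_TAG_MAX     0xFD /* maximum value for random tags */
#define KASAN_TAG_MIN     0xF0 /* minimum value for random tags */

/* true if a tag could have been produced by the random-tag generator */
static bool is_random_tag(unsigned char tag)
{
	return tag >= KASAN_TAG_MIN && tag <= KASAN_TAG_MAX;
}

int main(void)
{
	unsigned char probes[] = { 0xF0, 0xFD, 0xFE, 0xFF };

	for (unsigned int i = 0; i < sizeof(probes); i++)
		printf("tag 0x%02X: %s\n", probes[i],
		       is_random_tag(probes[i]) ? "random range"
						: "reserved (kernel/invalid)");
	return 0;
}

0xFF and 0xFE stay reserved for untagged kernel pointers and poisoned memory, which is why the random range tops out at KASAN_TAG_MAX.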

@@ -89,7 +89,7 @@ static __always_inline bool kasan_enabled(void)
     return static_branch_likely(&kasan_flag_enabled);
 }
 
-static inline bool kasan_has_integrated_init(void)
+static inline bool kasan_hw_tags_enabled(void)
 {
     return kasan_enabled();
 }
@@ -104,7 +104,7 @@ static inline bool kasan_enabled(void)
     return IS_ENABLED(CONFIG_KASAN);
 }
 
-static inline bool kasan_has_integrated_init(void)
+static inline bool kasan_hw_tags_enabled(void)
 {
     return false;
 }
@@ -125,6 +125,11 @@ static __always_inline void kasan_free_pages(struct page *page,
 
 #endif /* CONFIG_KASAN_HW_TAGS */
 
+static inline bool kasan_has_integrated_init(void)
+{
+    return kasan_hw_tags_enabled();
+}
+
 #ifdef CONFIG_KASAN
 
 struct kasan_cache {


@@ -245,7 +245,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
     extern int __traceiter_##name(data_proto);                  \
     DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name);    \
     extern struct tracepoint __tracepoint_##name;               \
-    static inline void trace_##name(proto)                      \
+    static inline void __nocfi trace_##name(proto)              \
     {                                                           \
         if (static_key_false(&__tracepoint_##name.key))         \
             __DO_TRACE(name,                                    \
@@ -310,7 +310,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
         .unregfunc = _unreg,                                    \
         .funcs = NULL };                                        \
     __TRACEPOINT_ENTRY(_name);                                  \
-    int __traceiter_##_name(void *__data, proto)                \
+    int __nocfi __traceiter_##_name(void *__data, proto)        \
     {                                                           \
         struct tracepoint_func *it_func_ptr;                    \
         void *it_func;                                          \


@@ -1105,6 +1105,12 @@ struct snd_soc_card {
     ANDROID_KABI_RESERVE(3);
     ANDROID_KABI_RESERVE(4);
 };
+
+struct snd_soc_card_ext {
+    struct snd_soc_card card;
+
+    unsigned int component_chaining:1;
+};
 
 #define for_each_card_prelinks(card, i, link)                           \
     for ((i) = 0;                                                       \
          ((i) < (card)->num_links) && ((link) = &(card)->dai_link[i]);  \

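snd_soc_card_ext is the usual KABI-safe extension trick: rather than growing the frozen struct snd_soc_card (which would shift member offsets for GKI modules), the new bit lives in a wrapper that embeds the original struct, and container_of() recovers the wrapper from a plain card pointer, as the dpcm_path_get() hunk further down does. A userspace reduction of the pattern:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct card {            /* frozen-ABI struct: layout must not change */
	const char *name;
};

struct card_ext {        /* private extension wrapping it */
	struct card card;
	unsigned int component_chaining:1;
};

static void use_card(struct card *c) /* old-ABI entry point */
{
	/* recover the wrapper from the embedded member */
	struct card_ext *ext = container_of(c, struct card_ext, card);

	printf("%s: component_chaining=%u\n",
	       c->name, ext->component_chaining);
}

int main(void)
{
	struct card_ext ext = {
		.card = { .name = "demo" },
		.component_chaining = 1,
	};

	use_card(&ext.card);
	return 0;
}

The trick is only sound if every card pointer reaching such code really lives inside a card_ext, which is exactly why snd_soc_register_card() below re-allocates the caller's card into a wrapper before anything else sees it.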

@@ -19,6 +19,10 @@ DECLARE_HOOK(android_vh_ufs_fill_prdt,
              unsigned int segments, int *err),
     TP_ARGS(hba, lrbp, segments, err));
 
+DECLARE_RESTRICTED_HOOK(android_rvh_ufs_complete_init,
+    TP_PROTO(struct ufs_hba *hba),
+    TP_ARGS(hba), 1);
+
 DECLARE_RESTRICTED_HOOK(android_rvh_ufs_reprogram_all_keys,
     TP_PROTO(struct ufs_hba *hba, int *err),
     TP_ARGS(hba, err), 1);


@@ -273,7 +273,14 @@ struct binder_freeze_info {
 
 struct binder_frozen_status_info {
     __u32            pid;
+
+    /* process received sync transactions since last frozen
+     * bit 0: received sync transaction after being frozen
+     * bit 1: new pending sync transaction during freezing
+     */
     __u32            sync_recv;
+
+    /* process received async transactions since last frozen */
     __u32            async_recv;
 };

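Since sync_recv is now a small bitfield rather than a plain boolean, userspace that consumes this struct (via the BINDER_GET_FROZEN_INFO ioctl) should mask it instead of truth-testing it. A sketch of the decoding; the struct mirrors the uapi header above, and the ioctl call itself is omitted:

#include <stdint.h>
#include <stdio.h>

struct binder_frozen_status_info {
	uint32_t pid;
	uint32_t sync_recv;  /* bit 0: sync txn received after freeze;
			      * bit 1: txn was pending while freezing */
	uint32_t async_recv; /* async txns received since last frozen */
};

static void report(const struct binder_frozen_status_info *info)
{
	int received_sync = info->sync_recv & 1;             /* bit 0 */
	int pending_during_freeze = !!(info->sync_recv & 2); /* bit 1 */

	printf("pid %u: sync=%d pending-at-freeze=%d async=%u\n",
	       info->pid, received_sync, pending_during_freeze,
	       info->async_recv);
}

int main(void)
{
	struct binder_frozen_status_info sample = {
		.pid = 1234, .sync_recv = 0x3, .async_recv = 1,
	};

	report(&sample);
	return 0;
}

Keeping bit 0 as the old boolean meaning is what preserves compatibility for callers that never learned about bit 1.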

@@ -78,6 +78,7 @@ void scs_free(void *s)
         if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
             return;
 
+    kasan_unpoison_vmalloc(s, SCS_SIZE);
     vfree_atomic(s);
 }


@@ -2996,6 +2996,8 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
     addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
     if (!pte_map_lock_addr(vmf, addr)) {
+        unlock_page(head);
+        put_page(head);
         ret = VM_FAULT_RETRY;
         goto out;
     }


@@ -142,8 +142,6 @@ void kasan_init_hw_tags_cpu(void)
     if (kasan_arg == KASAN_ARG_OFF)
         return;
 
-    hw_init_tags(KASAN_TAG_MAX);
-
     /*
     * Enable async mode only when explicitly requested through
     * the command line.


@@ -3,6 +3,7 @@
 #define __MM_KASAN_KASAN_H
 
 #include <linux/kasan.h>
+#include <linux/kasan-tags.h>
 #include <linux/kfence.h>
 #include <linux/stackdepot.h>
 
@@ -51,16 +52,6 @@ extern bool kasan_flag_async __ro_after_init;
 
 #define KASAN_MEMORY_PER_SHADOW_PAGE    (KASAN_GRANULE_SIZE << PAGE_SHIFT)
 
-#define KASAN_TAG_KERNEL    0xFF /* native kernel pointers tag */
-#define KASAN_TAG_INVALID   0xFE /* inaccessible memory tag */
-#define KASAN_TAG_MAX       0xFD /* maximum value for random tags */
-
-#ifdef CONFIG_KASAN_HW_TAGS
-#define KASAN_TAG_MIN       0xF0 /* mimimum value for random tags */
-#else
-#define KASAN_TAG_MIN       0x00 /* mimimum value for random tags */
-#endif
-
 #ifdef CONFIG_KASAN_GENERIC
 #define KASAN_FREE_PAGE         0xFF  /* page was freed */
 #define KASAN_PAGE_REDZONE      0xFE  /* redzone for kmalloc_large allocations */
@@ -299,9 +290,6 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 #ifndef arch_enable_tagging_async
 #define arch_enable_tagging_async()
 #endif
-#ifndef arch_init_tags
-#define arch_init_tags(max_tag)
-#endif
 #ifndef arch_set_tagging_report_once
 #define arch_set_tagging_report_once(state)
 #endif
@@ -320,7 +308,6 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 
 #define hw_enable_tagging_sync()            arch_enable_tagging_sync()
 #define hw_enable_tagging_async()           arch_enable_tagging_async()
-#define hw_init_tags(max_tag)               arch_init_tags(max_tag)
 #define hw_set_tagging_report_once(state)   arch_set_tagging_report_once(state)
 #define hw_force_async_tag_fault()          arch_force_async_tag_fault()
 #define hw_get_random_tag()                 arch_get_random_tag()


@@ -59,7 +59,7 @@ static bool page_pinner_enabled;
 DEFINE_STATIC_KEY_FALSE(page_pinner_inited);
 
 DEFINE_STATIC_KEY_TRUE(failure_tracking);
-EXPORT_SYMBOL_GPL(failure_tracking);
+EXPORT_SYMBOL(failure_tracking);
 
 static depot_stack_handle_t failure_handle;
 
@@ -350,7 +350,7 @@ void __page_pinner_migration_failed(struct page *page)
     acf_pinner.pinner[idx] = record;
     spin_unlock_irqrestore(&acf_pinner.lock, flags);
 }
-EXPORT_SYMBOL_GPL(__page_pinner_migration_failed);
+EXPORT_SYMBOL(__page_pinner_migration_failed);
 
 void __page_pinner_mark_migration_failed_pages(struct list_head *page_list)
 {


@@ -313,6 +313,16 @@ kmem_cache_create_usercopy(const char *name,
     get_online_cpus();
     get_online_mems();
 
+#ifdef CONFIG_SLUB_DEBUG
+    /*
+     * If no slub_debug was enabled globally, the static key is not yet
+     * enabled by setup_slub_debug(). Enable it if the cache is being
+     * created with any of the debugging flags passed explicitly.
+     */
+    if (flags & SLAB_DEBUG_FLAGS)
+        static_branch_enable(&slub_debug_enabled);
+#endif
+
     mutex_lock(&slab_mutex);
 
     err = kmem_cache_sanity_check(name, size);

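The hunk above closes a gap where a cache created with explicit SLAB debug flags would have its flags ignored because the global slub_debug static key was never flipped. Stripped of the static-branch machinery, the pattern is simply "turn on the global fast-path switch the first time anyone asks for the feature"; a userspace sketch with an atomic flag standing in for the static key, and an illustrative flag mask:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SLAB_DEBUG_FLAGS 0x7 /* stand-in for the kernel's debug flag mask */

/* stand-in for DEFINE_STATIC_KEY / static_branch_enable() */
static atomic_bool debug_enabled;

static void cache_create(const char *name, unsigned int flags)
{
	/*
	 * Mirror of the slab_common.c hunk: if this cache asks for any
	 * debug flag, make sure the global switch is on before use.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		atomic_store(&debug_enabled, true);

	printf("created %s (debug globally %s)\n", name,
	       atomic_load(&debug_enabled) ? "on" : "off");
}

int main(void)
{
	cache_create("plain-cache", 0);
	cache_create("poisoned-cache", 0x1);
	cache_create("another-cache", 0);
	return 0;
}

In the kernel the real switch is a static key so the hot path costs a patched no-op rather than an atomic load; the once-enabled semantics are the same.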

@@ -691,7 +691,6 @@ void lru_add_drain_cpu(int cpu)
         pagevec_lru_move_fn(pvec, lru_lazyfree_movetail_fn, NULL);
 
     activate_page_drain(cpu);
-    invalidate_bh_lrus_cpu(cpu);
 }
 
 /**
@@ -797,6 +796,20 @@ void lru_add_drain(void)
     local_unlock(&lru_pvecs.lock);
 }
 
+/*
+ * It's called from per-cpu workqueue context in SMP case so
+ * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on
+ * the same cpu. It shouldn't be a problem in !SMP case since
+ * the core is only one and the locks will disable preemption.
+ */
+static void lru_add_and_bh_lrus_drain(void)
+{
+    local_lock(&lru_pvecs.lock);
+    lru_add_drain_cpu(smp_processor_id());
+    local_unlock(&lru_pvecs.lock);
+    invalidate_bh_lrus_cpu();
+}
+
 void lru_add_drain_cpu_zone(struct zone *zone)
 {
     local_lock(&lru_pvecs.lock);
@@ -811,7 +824,7 @@ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
-    lru_add_drain();
+    lru_add_and_bh_lrus_drain();
 }
 
 /*
@@ -969,7 +982,7 @@ void lru_cache_disable(void)
     */
     __lru_add_drain_all(true);
 #else
-    lru_add_drain();
+    lru_add_and_bh_lrus_drain();
 #endif
     atomic_inc(&lru_disable_count);
 }


@@ -2175,9 +2175,17 @@ EXPORT_SYMBOL_GPL(snd_soc_add_dai_controls);
  */
 int snd_soc_register_card(struct snd_soc_card *card)
 {
+    struct snd_soc_card_ext *card_ext;
+
     if (!card->name || !card->dev)
         return -EINVAL;
 
+    card_ext = devm_kzalloc(card->dev,
+                            sizeof(struct snd_soc_card_ext), GFP_KERNEL);
+    memcpy(&card_ext->card, card, sizeof(struct snd_soc_card));
+    card = &card_ext->card;
+
     dev_set_drvdata(card->dev, card);
 
     INIT_LIST_HEAD(&card->widgets);


@@ -1274,6 +1274,7 @@ int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
                   int stream, struct snd_soc_dapm_widget_list **list)
 {
     struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(fe, 0);
+    struct snd_soc_card_ext *card_ext;
     int paths;
 
     if (fe->num_cpus > 1) {
@@ -1282,9 +1283,12 @@ int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
         return -EINVAL;
     }
 
+    card_ext = container_of(fe->card, struct snd_soc_card_ext, card);
+
     /* get number of valid DAI paths and their widgets */
     paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, list,
-                                                   dpcm_end_walk_at_be);
+            card_ext->component_chaining ?
+                NULL : dpcm_end_walk_at_be);
 
     dev_dbg(fe->dev, "ASoC: found %d audio %s paths\n", paths,
             stream ? "capture" : "playback");