Merge branch 'android14-6.1' into branch 'android14-6.1-lts'

We need the updates in the normal branch due to the symbol changes happening. This consists of the following changes:

* 92432f07d6 ANDROID: GKI: Update the ABI symbol list
* b86713e341 UPSTREAM: virtio: Add support for no-reset virtio PCI PM
* 7dc3b2e49e UPSTREAM: netfilter: nf_tables: check if catch-all set element is active in next generation
* 8d4d76a2bd UPSTREAM: net: tls, update curr on splice as well
* 9e2b775333 ANDROID: Update the ABI symbol list
* 5a1f8b6b46 ANDROID: Add pci_read_config_word to virtual device symbol list
* e356cae969 ANDROID: Export kthread_set_per_cpu
* 81c922621d ANDROID: GKI: fix ABI breakage in struct ipv6_devconf
* 20131b787f Reapply "net: release reference to inet6_dev pointer"
* c2c0273029 Reapply "net: change accept_ra_min_rtr_lft to affect all RA lifetimes"
* a8053aadca Reapply "net: add sysctl accept_ra_min_rtr_lft"
* fc6c1b3acb ANDROID: GKI: explicit include of stringify.h
* 8481b97df5 BACKPORT: erofs: fix infinite loop due to a race of filling compressed_bvecs
* 886c9d1fc2 ANDROID: arm64: virt: Invalidate tlb once the balloon before reporting/inflating
* 4aedc102c3 ANDROID: arm64: virt: Make the page_relinquish call generic
* d3f73f0452 UPSTREAM: drm/msm/dsi: Use pm_runtime_resume_and_get to prevent refcnt leaks
* df2fe1add7 UPSTREAM: drm/msm/dsi: Enable runtime PM
* 6053d8ffd4 UPSTREAM: arm64: scs: Disable LTO for SCS patching code
* 696293ef72 ANDROID: GKI: Update symbol list for mtk
* 6338e41509 BACKPORT: f2fs: Restrict max filesize for 16K f2fs
* 8c8bcbdf0b BACKPORT: f2fs: Support Block Size == Page Size
* eb20497d6d ANDROID: abi_gki_aarch64_qcom: Update symbol list
* 1e6c1ca9a2 UPSTREAM: usb: typec: tcpm: fix the PD disabled case
* ba97ad7b91 UPSTREAM: netfilter: nf_tables: reject QUEUE/DROP verdict parameters
* 76f0396b57 ANDROID: ABI: Update oplus symbol list
* 016b640235 ANDROID: fuse: Fix the issue of fuse_dentry_canonical_path
* 5eb1cbb384 ANDROID: Update the ABI symbol list
* b53c3cb351 ANDROID: Export cpufreq_driver_test_flags()
* 9b476ebe99 ANDROID: Build null_blk and scsi_debug as kernel modules
* 01472f3af1 ANDROID: GKI: Update the pixel symbol list
* fbcd29ac45 ANDROID: usb: dwc3: export tracepoint for dwc3 read/write
* 0123832f68 ANDROID: scsi: ufs: add vendor hook to override key reprogramming
* 7ce117301e ANDROID: vendor_hooks: Add hook for binder_detect_low_async_space_locked
* 7b6a6228df ANDROID: Update the ABI symbol list
* db94de2a17 ANDROID: Update the ABI representation
* e08371bcf5 UPSTREAM: usb: typec: tcpm: Support multiple capabilities
* 4025820c61 UPSTREAM: usb: typec: tcpm: Parse Accessory Mode information

Change-Id: Ibe422d476544081f14095e025b2a77ab70c577df
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 6e04c1ccf7
@@ -2148,6 +2148,14 @@ accept_ra_min_hop_limit - INTEGER

	Default: 1

accept_ra_min_lft - INTEGER
	Minimum acceptable lifetime value in Router Advertisement.

	RA sections with a lifetime less than this value shall be
	ignored. Zero lifetimes stay unaffected.

	Default: 0

accept_ra_pinfo - BOOLEAN
	Learn Prefix Information in Router Advertisement.
File diff suppressed because it is too large
@@ -3048,6 +3048,7 @@
  usb_autopm_get_interface_no_resume
  usb_autopm_put_interface
  usb_autopm_put_interface_async
  usb_check_bulk_endpoints
  usb_clear_halt
  usb_composite_probe
  usb_composite_unregister
@@ -142,6 +142,7 @@
  __traceiter_android_vh_binder_thread_read
  __traceiter_android_vh_binder_thread_release
  __traceiter_android_vh_binder_wait_for_work
  __traceiter_android_vh_binder_detect_low_async_space_locked
  __traceiter_android_vh_cgroup_set_task
  __traceiter_android_vh_check_folio_look_around_ref
  __traceiter_android_vh_dup_task_struct
@@ -233,6 +234,7 @@
  __tracepoint_android_vh_binder_thread_read
  __tracepoint_android_vh_binder_thread_release
  __tracepoint_android_vh_binder_wait_for_work
  __tracepoint_android_vh_binder_detect_low_async_space_locked
  __tracepoint_android_vh_cgroup_set_task
  __tracepoint_android_vh_check_folio_look_around_ref
  __tracepoint_android_vh_do_futex
@ -238,6 +238,7 @@
|
||||
cpufreq_driver_resolve_freq
|
||||
__cpufreq_driver_target
|
||||
cpufreq_driver_target
|
||||
cpufreq_driver_test_flags
|
||||
cpufreq_enable_fast_switch
|
||||
cpufreq_freq_transition_begin
|
||||
cpufreq_freq_transition_end
|
||||
@ -369,6 +370,7 @@
|
||||
devfreq_recommended_opp
|
||||
devfreq_register_opp_notifier
|
||||
devfreq_remove_device
|
||||
devfreq_remove_governor
|
||||
devfreq_unregister_opp_notifier
|
||||
devfreq_update_interval
|
||||
dev_fwnode
|
||||
@ -400,6 +402,7 @@
|
||||
device_property_present
|
||||
device_property_read_string
|
||||
device_property_read_u32_array
|
||||
device_property_read_u8_array
|
||||
device_register
|
||||
device_remove_file
|
||||
device_remove_groups
|
||||
@ -614,6 +617,7 @@
|
||||
down_read_trylock
|
||||
down_trylock
|
||||
down_write
|
||||
d_path
|
||||
dput
|
||||
drain_workqueue
|
||||
driver_register
|
||||
@ -825,6 +829,7 @@
|
||||
drm_self_refresh_helper_alter_state
|
||||
drm_send_event
|
||||
drm_send_event_locked
|
||||
drm_sysfs_connector_status_event
|
||||
drm_universal_plane_init
|
||||
drm_vblank_init
|
||||
drm_writeback_connector_init
|
||||
@ -883,6 +888,7 @@
|
||||
find_pid_ns
|
||||
find_task_by_vpid
|
||||
find_vma_intersection
|
||||
find_vpid
|
||||
finish_wait
|
||||
firmware_request_nowarn
|
||||
flush_dcache_page
|
||||
@ -972,6 +978,7 @@
|
||||
get_sg_io_hdr
|
||||
__get_task_comm
|
||||
get_task_cred
|
||||
get_task_mm
|
||||
get_thermal_instance
|
||||
get_unused_fd_flags
|
||||
get_user_pages
|
||||
@ -1283,6 +1290,7 @@
|
||||
kthread_park
|
||||
kthread_parkme
|
||||
kthread_queue_work
|
||||
kthread_set_per_cpu
|
||||
kthread_should_park
|
||||
kthread_should_stop
|
||||
kthread_stop
|
||||
@ -1323,6 +1331,7 @@
|
||||
loops_per_jiffy
|
||||
mac_pton
|
||||
mas_empty_area_rev
|
||||
mas_find
|
||||
max_load_balance_interval
|
||||
mbox_chan_received_data
|
||||
mbox_controller_register
|
||||
@ -1377,6 +1386,7 @@
|
||||
__mmap_lock_do_trace_released
|
||||
__mmap_lock_do_trace_start_locking
|
||||
__mmdrop
|
||||
mmput
|
||||
mod_delayed_work_on
|
||||
mod_node_page_state
|
||||
mod_timer
|
||||
@ -1859,6 +1869,7 @@
|
||||
remove_proc_entry
|
||||
remove_wait_queue
|
||||
request_firmware
|
||||
request_firmware_direct
|
||||
request_firmware_nowait
|
||||
__request_percpu_irq
|
||||
__request_region
|
||||
@ -2303,6 +2314,7 @@
|
||||
__traceiter_android_rvh_remove_entity_load_avg
|
||||
__traceiter_android_rvh_rtmutex_prepare_setprio
|
||||
__traceiter_android_rvh_sched_newidle_balance
|
||||
__traceiter_android_rvh_sched_setaffinity
|
||||
__traceiter_android_rvh_select_task_rq_fair
|
||||
__traceiter_android_rvh_select_task_rq_rt
|
||||
__traceiter_android_rvh_set_cpus_allowed_by_task
|
||||
@ -2313,6 +2325,7 @@
|
||||
__traceiter_android_rvh_set_user_nice_locked
|
||||
__traceiter_android_rvh_typec_tcpci_get_vbus
|
||||
__traceiter_android_rvh_uclamp_eff_get
|
||||
__traceiter_android_rvh_ufs_reprogram_all_keys
|
||||
__traceiter_android_rvh_update_blocked_fair
|
||||
__traceiter_android_rvh_update_load_avg
|
||||
__traceiter_android_rvh_update_load_sum
|
||||
@ -2364,6 +2377,7 @@
|
||||
__traceiter_cpu_frequency
|
||||
__traceiter_device_pm_callback_end
|
||||
__traceiter_device_pm_callback_start
|
||||
__traceiter_dwc3_readl
|
||||
__traceiter_gpu_mem_total
|
||||
__traceiter_hrtimer_expire_entry
|
||||
__traceiter_hrtimer_expire_exit
|
||||
@ -2415,6 +2429,7 @@
|
||||
__tracepoint_android_rvh_remove_entity_load_avg
|
||||
__tracepoint_android_rvh_rtmutex_prepare_setprio
|
||||
__tracepoint_android_rvh_sched_newidle_balance
|
||||
__tracepoint_android_rvh_sched_setaffinity
|
||||
__tracepoint_android_rvh_select_task_rq_fair
|
||||
__tracepoint_android_rvh_select_task_rq_rt
|
||||
__tracepoint_android_rvh_set_cpus_allowed_by_task
|
||||
@ -2425,6 +2440,7 @@
|
||||
__tracepoint_android_rvh_set_user_nice_locked
|
||||
__tracepoint_android_rvh_typec_tcpci_get_vbus
|
||||
__tracepoint_android_rvh_uclamp_eff_get
|
||||
__tracepoint_android_rvh_ufs_reprogram_all_keys
|
||||
__tracepoint_android_rvh_update_blocked_fair
|
||||
__tracepoint_android_rvh_update_load_avg
|
||||
__tracepoint_android_rvh_update_load_sum
|
||||
@ -2476,6 +2492,7 @@
|
||||
__tracepoint_cpu_frequency
|
||||
__tracepoint_device_pm_callback_end
|
||||
__tracepoint_device_pm_callback_start
|
||||
__tracepoint_dwc3_readl
|
||||
__tracepoint_gpu_mem_total
|
||||
__tracepoint_hrtimer_expire_entry
|
||||
__tracepoint_hrtimer_expire_exit
|
||||
|
@ -1207,6 +1207,10 @@
|
||||
genlmsg_put
|
||||
genl_register_family
|
||||
genl_unregister_family
|
||||
genphy_c45_an_config_aneg
|
||||
genphy_c45_check_and_restart_aneg
|
||||
genphy_c45_pma_setup_forced
|
||||
genphy_c45_read_status
|
||||
__genphy_config_aneg
|
||||
genphy_read_abilities
|
||||
genphy_read_mmd_unsupported
|
||||
@ -2370,9 +2374,12 @@
|
||||
phy_mac_interrupt
|
||||
phy_modify
|
||||
phy_modify_mmd
|
||||
phy_modify_mmd_changed
|
||||
phy_power_off
|
||||
phy_power_on
|
||||
phy_read_mmd
|
||||
phy_resolve_aneg_linkmode
|
||||
phy_set_max_speed
|
||||
phy_set_mode_ext
|
||||
phy_suspend
|
||||
phy_trigger_machine
|
||||
|
@ -314,6 +314,7 @@
|
||||
pci_iounmap
|
||||
pci_read_config_byte
|
||||
pci_read_config_dword
|
||||
pci_read_config_word
|
||||
__pci_register_driver
|
||||
pci_release_region
|
||||
pci_request_region
|
||||
@ -1143,6 +1144,7 @@
|
||||
page_relinquish
|
||||
page_reporting_register
|
||||
page_reporting_unregister
|
||||
post_page_relinquish_tlb_inv
|
||||
register_oom_notifier
|
||||
register_shrinker
|
||||
__SetPageMovable
|
||||
|
@@ -309,6 +309,7 @@ CONFIG_ARM_SCPI_PROTOCOL=y
# CONFIG_ARM_SCPI_POWER_DOMAIN is not set
# CONFIG_EFI_ARMSTUB_DTB_LOADER is not set
CONFIG_GNSS=y
CONFIG_BLK_DEV_NULL_BLK=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
@@ -322,6 +323,7 @@ CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_DEBUG=m
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
@@ -2,6 +2,9 @@
#ifndef _ASM_ARM64_HYPERVISOR_H
#define _ASM_ARM64_HYPERVISOR_H

#include <linux/memory.h>
#include <linux/mm.h>

#include <asm/xen/hypervisor.h>

void kvm_init_hyp_services(void);
@@ -10,6 +13,15 @@ void kvm_arm_init_hyp_services(void);
void kvm_init_memshare_services(void);
void kvm_init_ioremap_services(void);

struct hypervisor_ops {
#ifdef CONFIG_MEMORY_RELINQUISH
        void (*page_relinquish)(struct page *page);
        void (*post_page_relinquish_tlb_inv)(void);
#endif
};

extern struct hypervisor_ops hyp_ops;

#ifdef CONFIG_MEMORY_RELINQUISH
void kvm_init_memrelinquish_services(void);
#else
@@ -11,5 +11,6 @@ struct page;

bool kvm_has_memrelinquish_services(void);
void page_relinquish(struct page *page);
void post_page_relinquish_tlb_inv(void);

#endif /* __ASM_MEM_RELINQUISH_H */
@@ -81,7 +81,13 @@ obj-$(CONFIG_ARM64_MTE) += mte.o
obj-y += vdso-wrap.o
obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o

# We need to prevent the SCS patching code from patching itself. Using
# -mbranch-protection=none here to avoid the patchable PAC opcodes from being
# generated triggers an issue with full LTO on Clang, which stops emitting PAC
# instructions altogether. So disable LTO as well for the compilation unit.
CFLAGS_patch-scs.o += -mbranch-protection=none
CFLAGS_REMOVE_patch-scs.o += $(CC_FLAGS_LTO)

# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
@@ -58,6 +58,7 @@

static int num_standard_resources;
static struct resource *standard_resources;
struct hypervisor_ops hyp_ops;

phys_addr_t __fdt_pointer __initdata;
@ -14,6 +14,24 @@
|
||||
|
||||
static unsigned long memshare_granule_sz;
|
||||
|
||||
static void kvm_page_relinquish(struct page *page)
|
||||
{
|
||||
phys_addr_t phys, end;
|
||||
u32 func_id = ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID;
|
||||
|
||||
phys = page_to_phys(page);
|
||||
end = phys + PAGE_SIZE;
|
||||
|
||||
while (phys < end) {
|
||||
struct arm_smccc_res res;
|
||||
|
||||
arm_smccc_1_1_invoke(func_id, phys, 0, 0, &res);
|
||||
BUG_ON(res.a0 != SMCCC_RET_SUCCESS);
|
||||
|
||||
phys += memshare_granule_sz;
|
||||
}
|
||||
}
|
||||
|
||||
void kvm_init_memrelinquish_services(void)
|
||||
{
|
||||
int i;
|
||||
@ -34,6 +52,9 @@ void kvm_init_memrelinquish_services(void)
|
||||
return;
|
||||
|
||||
memshare_granule_sz = res.a0;
|
||||
|
||||
if (memshare_granule_sz)
|
||||
hyp_ops.page_relinquish = kvm_page_relinquish;
|
||||
}
|
||||
|
||||
bool kvm_has_memrelinquish_services(void)
|
||||
@ -44,22 +65,14 @@ EXPORT_SYMBOL_GPL(kvm_has_memrelinquish_services);
|
||||
|
||||
void page_relinquish(struct page *page)
|
||||
{
|
||||
phys_addr_t phys, end;
|
||||
u32 func_id = ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID;
|
||||
|
||||
if (!memshare_granule_sz)
|
||||
return;
|
||||
|
||||
phys = page_to_phys(page);
|
||||
end = phys + PAGE_SIZE;
|
||||
|
||||
while (phys < end) {
|
||||
struct arm_smccc_res res;
|
||||
|
||||
arm_smccc_1_1_invoke(func_id, phys, 0, 0, &res);
|
||||
BUG_ON(res.a0 != SMCCC_RET_SUCCESS);
|
||||
|
||||
phys += memshare_granule_sz;
|
||||
}
|
||||
if (hyp_ops.page_relinquish)
|
||||
hyp_ops.page_relinquish(page);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(page_relinquish);
|
||||
|
||||
void post_page_relinquish_tlb_inv(void)
|
||||
{
|
||||
if (hyp_ops.post_page_relinquish_tlb_inv)
|
||||
hyp_ops.post_page_relinquish_tlb_inv();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(post_page_relinquish_tlb_inv);
|
||||
|
@@ -295,6 +295,7 @@ CONFIG_FW_LOADER_USER_HELPER=y
# CONFIG_FW_CACHE is not set
CONFIG_GNSS=y
CONFIG_OF=y
CONFIG_BLK_DEV_NULL_BLK=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
@@ -308,6 +309,7 @@ CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_DEBUG=m
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
@@ -383,6 +383,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
        void __user *end_page_addr;
        size_t size, data_offsets_size;
        int ret;
        bool should_fail = false;

        /* Check binder_alloc is fully initialized */
        if (!binder_alloc_get_vma(alloc)) {
@@ -409,6 +410,13 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
                return ERR_PTR(-EINVAL);
        }
        trace_android_vh_binder_alloc_new_buf_locked(size, &alloc->free_async_space, is_async);
        trace_android_vh_binder_detect_low_async_space_locked(is_async, &alloc->free_async_space, pid, &should_fail);
        if (should_fail) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: binder_alloc_buf size %zd failed, not allowed to alloc more async space\n",
                                   alloc->pid, size);
                return ERR_PTR(-EPERM);
        }
        if (is_async &&
            alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -135,6 +135,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_size_check);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_format_check);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_dump_buffer);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_fill_prdt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ufs_reprogram_all_keys);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_prepare_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_update_sysfs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command);
@@ -375,3 +376,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_ioctl_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_looper_exited);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_spawn_new_thread);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_has_special_work_ilocked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_detect_low_async_space_locked);
@@ -1983,6 +1983,7 @@ bool cpufreq_driver_test_flags(u16 flags)
{
        return !!(cpufreq_driver->flags & flags);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_test_flags);

/**
 * cpufreq_get_current_driver - Return the current driver's name.
@@ -6,6 +6,9 @@
#include <ufs/ufshcd.h>
#include "ufshcd-crypto.h"

#undef CREATE_TRACE_POINTS
#include <trace/hooks/ufshcd.h>

/* Blk-crypto modes supported by UFS crypto */
static const struct ufs_crypto_alg_entry {
        enum ufs_crypto_alg ufs_alg;
@@ -122,7 +125,13 @@ bool ufshcd_crypto_enable(struct ufs_hba *hba)
                return false;

        /* Reset might clear all keys, so reprogram all the keys. */
        blk_crypto_reprogram_all_keys(&hba->crypto_profile);
        if (hba->crypto_profile.num_slots) {
                int err = -EOPNOTSUPP;

                trace_android_rvh_ufs_reprogram_all_keys(hba, &err);
                if (err == -EOPNOTSUPP)
                        blk_crypto_reprogram_all_keys(&hba->crypto_profile);
        }

        if (hba->android_quirks & UFSHCD_ANDROID_QUIRK_BROKEN_CRYPTO_ENABLE)
                return false;
@@ -9,3 +9,6 @@

#define CREATE_TRACE_POINTS
#include "trace.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(dwc3_readl);
EXPORT_TRACEPOINT_SYMBOL_GPL(dwc3_writel);
@ -297,6 +297,15 @@ struct pd_pps_data {
|
||||
bool active;
|
||||
};
|
||||
|
||||
struct pd_data {
|
||||
struct usb_power_delivery *pd;
|
||||
struct usb_power_delivery_capabilities *source_cap;
|
||||
struct usb_power_delivery_capabilities_desc source_desc;
|
||||
struct usb_power_delivery_capabilities *sink_cap;
|
||||
struct usb_power_delivery_capabilities_desc sink_desc;
|
||||
unsigned int operating_snk_mw;
|
||||
};
|
||||
|
||||
struct tcpm_port {
|
||||
struct device *dev;
|
||||
|
||||
@ -398,12 +407,14 @@ struct tcpm_port {
|
||||
unsigned int rx_msgid;
|
||||
|
||||
/* USB PD objects */
|
||||
struct usb_power_delivery *pd;
|
||||
struct usb_power_delivery **pds;
|
||||
struct pd_data **pd_list;
|
||||
struct usb_power_delivery_capabilities *port_source_caps;
|
||||
struct usb_power_delivery_capabilities *port_sink_caps;
|
||||
struct usb_power_delivery *partner_pd;
|
||||
struct usb_power_delivery_capabilities *partner_source_caps;
|
||||
struct usb_power_delivery_capabilities *partner_sink_caps;
|
||||
struct usb_power_delivery *selected_pd;
|
||||
|
||||
/* Partner capabilities/requests */
|
||||
u32 sink_request;
|
||||
@ -413,6 +424,7 @@ struct tcpm_port {
|
||||
unsigned int nr_sink_caps;
|
||||
|
||||
/* Local capabilities */
|
||||
unsigned int pd_count;
|
||||
u32 src_pdo[PDO_MAX_OBJECTS];
|
||||
unsigned int nr_src_pdo;
|
||||
u32 snk_pdo[PDO_MAX_OBJECTS];
|
||||
@ -6116,12 +6128,114 @@ static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; port->pd_list[i]; i++) {
|
||||
if (port->pd_list[i]->pd == pd)
|
||||
return port->pd_list[i];
|
||||
}
|
||||
|
||||
return ERR_PTR(-ENODATA);
|
||||
}
|
||||
|
||||
static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p)
|
||||
{
|
||||
struct tcpm_port *port = typec_get_drvdata(p);
|
||||
|
||||
return port->pds;
|
||||
}
|
||||
|
||||
static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
|
||||
{
|
||||
struct tcpm_port *port = typec_get_drvdata(p);
|
||||
struct pd_data *data;
|
||||
int i, ret = 0;
|
||||
|
||||
mutex_lock(&port->lock);
|
||||
|
||||
if (port->selected_pd == pd)
|
||||
goto unlock;
|
||||
|
||||
data = tcpm_find_pd_data(port, pd);
|
||||
if (IS_ERR(data)) {
|
||||
ret = PTR_ERR(data);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (data->sink_desc.pdo[0]) {
|
||||
for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
|
||||
port->snk_pdo[i] = data->sink_desc.pdo[i];
|
||||
port->nr_snk_pdo = i + 1;
|
||||
port->operating_snk_mw = data->operating_snk_mw;
|
||||
}
|
||||
|
||||
if (data->source_desc.pdo[0]) {
|
||||
for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
|
||||
port->snk_pdo[i] = data->source_desc.pdo[i];
|
||||
port->nr_src_pdo = i + 1;
|
||||
}
|
||||
|
||||
switch (port->state) {
|
||||
case SRC_UNATTACHED:
|
||||
case SRC_ATTACH_WAIT:
|
||||
case SRC_TRYWAIT:
|
||||
tcpm_set_cc(port, tcpm_rp_cc(port));
|
||||
break;
|
||||
case SRC_SEND_CAPABILITIES:
|
||||
case SRC_SEND_CAPABILITIES_TIMEOUT:
|
||||
case SRC_NEGOTIATE_CAPABILITIES:
|
||||
case SRC_READY:
|
||||
case SRC_WAIT_NEW_CAPABILITIES:
|
||||
port->caps_count = 0;
|
||||
port->upcoming_state = SRC_SEND_CAPABILITIES;
|
||||
ret = tcpm_ams_start(port, POWER_NEGOTIATION);
|
||||
if (ret == -EAGAIN) {
|
||||
port->upcoming_state = INVALID_STATE;
|
||||
goto unlock;
|
||||
}
|
||||
break;
|
||||
case SNK_NEGOTIATE_CAPABILITIES:
|
||||
case SNK_NEGOTIATE_PPS_CAPABILITIES:
|
||||
case SNK_READY:
|
||||
case SNK_TRANSITION_SINK:
|
||||
case SNK_TRANSITION_SINK_VBUS:
|
||||
if (port->pps_data.active)
|
||||
port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
|
||||
else if (port->pd_capable)
|
||||
port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
|
||||
else
|
||||
break;
|
||||
|
||||
port->update_sink_caps = true;
|
||||
|
||||
ret = tcpm_ams_start(port, POWER_NEGOTIATION);
|
||||
if (ret == -EAGAIN) {
|
||||
port->upcoming_state = INVALID_STATE;
|
||||
goto unlock;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
port->port_source_caps = data->source_cap;
|
||||
port->port_sink_caps = data->sink_cap;
|
||||
port->selected_pd = pd;
|
||||
unlock:
|
||||
mutex_unlock(&port->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct typec_operations tcpm_ops = {
|
||||
.try_role = tcpm_try_role,
|
||||
.dr_set = tcpm_dr_set,
|
||||
.pr_set = tcpm_pr_set,
|
||||
.vconn_set = tcpm_vconn_set,
|
||||
.port_type_set = tcpm_port_type_set
|
||||
.port_type_set = tcpm_port_type_set,
|
||||
.pd_get = tcpm_pd_get,
|
||||
.pd_set = tcpm_pd_set
|
||||
};
|
||||
|
||||
void tcpm_tcpc_reset(struct tcpm_port *port)
|
||||
@ -6135,58 +6249,63 @@ EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
|
||||
|
||||
static void tcpm_port_unregister_pd(struct tcpm_port *port)
|
||||
{
|
||||
usb_power_delivery_unregister_capabilities(port->port_sink_caps);
|
||||
int i;
|
||||
|
||||
port->port_sink_caps = NULL;
|
||||
usb_power_delivery_unregister_capabilities(port->port_source_caps);
|
||||
port->port_source_caps = NULL;
|
||||
usb_power_delivery_unregister(port->pd);
|
||||
port->pd = NULL;
|
||||
for (i = 0; i < port->pd_count; i++) {
|
||||
usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
|
||||
kfree(port->pd_list[i]->sink_cap);
|
||||
usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
|
||||
kfree(port->pd_list[i]->source_cap);
|
||||
devm_kfree(port->dev, port->pd_list[i]);
|
||||
port->pd_list[i] = NULL;
|
||||
usb_power_delivery_unregister(port->pds[i]);
|
||||
port->pds[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int tcpm_port_register_pd(struct tcpm_port *port)
|
||||
{
|
||||
struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
|
||||
struct usb_power_delivery_capabilities_desc caps = { };
|
||||
struct usb_power_delivery_capabilities *cap;
|
||||
int ret;
|
||||
int ret, i;
|
||||
|
||||
if (!port->nr_src_pdo && !port->nr_snk_pdo)
|
||||
return 0;
|
||||
|
||||
port->pd = usb_power_delivery_register(port->dev, &desc);
|
||||
if (IS_ERR(port->pd)) {
|
||||
ret = PTR_ERR(port->pd);
|
||||
goto err_unregister;
|
||||
}
|
||||
|
||||
if (port->nr_src_pdo) {
|
||||
memcpy_and_pad(caps.pdo, sizeof(caps.pdo), port->src_pdo,
|
||||
port->nr_src_pdo * sizeof(u32), 0);
|
||||
caps.role = TYPEC_SOURCE;
|
||||
|
||||
cap = usb_power_delivery_register_capabilities(port->pd, &caps);
|
||||
if (IS_ERR(cap)) {
|
||||
ret = PTR_ERR(cap);
|
||||
for (i = 0; i < port->pd_count; i++) {
|
||||
port->pds[i] = usb_power_delivery_register(port->dev, &desc);
|
||||
if (IS_ERR(port->pds[i])) {
|
||||
ret = PTR_ERR(port->pds[i]);
|
||||
goto err_unregister;
|
||||
}
|
||||
port->pd_list[i]->pd = port->pds[i];
|
||||
|
||||
port->port_source_caps = cap;
|
||||
}
|
||||
|
||||
if (port->nr_snk_pdo) {
|
||||
memcpy_and_pad(caps.pdo, sizeof(caps.pdo), port->snk_pdo,
|
||||
port->nr_snk_pdo * sizeof(u32), 0);
|
||||
caps.role = TYPEC_SINK;
|
||||
|
||||
cap = usb_power_delivery_register_capabilities(port->pd, &caps);
|
||||
if (IS_ERR(cap)) {
|
||||
ret = PTR_ERR(cap);
|
||||
goto err_unregister;
|
||||
if (port->pd_list[i]->source_desc.pdo[0]) {
|
||||
cap = usb_power_delivery_register_capabilities(port->pds[i],
|
||||
&port->pd_list[i]->source_desc);
|
||||
if (IS_ERR(cap)) {
|
||||
ret = PTR_ERR(cap);
|
||||
goto err_unregister;
|
||||
}
|
||||
port->pd_list[i]->source_cap = cap;
|
||||
}
|
||||
|
||||
port->port_sink_caps = cap;
|
||||
if (port->pd_list[i]->sink_desc.pdo[0]) {
|
||||
cap = usb_power_delivery_register_capabilities(port->pds[i],
|
||||
&port->pd_list[i]->sink_desc);
|
||||
if (IS_ERR(cap)) {
|
||||
ret = PTR_ERR(cap);
|
||||
goto err_unregister;
|
||||
}
|
||||
port->pd_list[i]->sink_cap = cap;
|
||||
}
|
||||
}
|
||||
|
||||
port->port_source_caps = port->pd_list[0]->source_cap;
|
||||
port->port_sink_caps = port->pd_list[0]->sink_cap;
|
||||
port->selected_pd = port->pds[0];
|
||||
return 0;
|
||||
|
||||
err_unregister:
|
||||
@ -6195,12 +6314,15 @@ static int tcpm_port_register_pd(struct tcpm_port *port)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int tcpm_fw_get_caps(struct tcpm_port *port,
|
||||
struct fwnode_handle *fwnode)
|
||||
static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
|
||||
{
|
||||
struct fwnode_handle *capabilities, *child, *caps = NULL;
|
||||
unsigned int nr_src_pdo, nr_snk_pdo;
|
||||
const char *opmode_str;
|
||||
int ret;
|
||||
u32 mw, frs_current;
|
||||
u32 *src_pdo, *snk_pdo;
|
||||
u32 uw, frs_current;
|
||||
int ret = 0, i;
|
||||
int mode;
|
||||
|
||||
if (!fwnode)
|
||||
return -EINVAL;
|
||||
@ -6218,30 +6340,20 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
mode = 0;
|
||||
|
||||
if (fwnode_property_read_bool(fwnode, "accessory-mode-audio"))
|
||||
port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;
|
||||
|
||||
if (fwnode_property_read_bool(fwnode, "accessory-mode-debug"))
|
||||
port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;
|
||||
|
||||
port->port_type = port->typec_caps.type;
|
||||
port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
|
||||
|
||||
port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
|
||||
if (port->port_type == TYPEC_PORT_SNK)
|
||||
goto sink;
|
||||
port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
|
||||
|
||||
/* Get Source PDOs for the PD port or Source Rp value for the non-PD port */
|
||||
if (port->pd_supported) {
|
||||
ret = fwnode_property_count_u32(fwnode, "source-pdos");
|
||||
if (ret == 0)
|
||||
return -EINVAL;
|
||||
else if (ret < 0)
|
||||
return ret;
|
||||
|
||||
port->nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
|
||||
ret = fwnode_property_read_u32_array(fwnode, "source-pdos",
|
||||
port->src_pdo, port->nr_src_pdo);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = tcpm_validate_caps(port, port->src_pdo, port->nr_src_pdo);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
if (!port->pd_supported) {
|
||||
ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -6249,45 +6361,150 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
port->src_rp = tcpm_pwr_opmode_to_rp(ret);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (port->port_type == TYPEC_PORT_SRC)
|
||||
return 0;
|
||||
|
||||
sink:
|
||||
port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
|
||||
|
||||
if (!port->pd_supported)
|
||||
return 0;
|
||||
|
||||
/* Get sink pdos */
|
||||
ret = fwnode_property_count_u32(fwnode, "sink-pdos");
|
||||
if (ret <= 0)
|
||||
return -EINVAL;
|
||||
|
||||
port->nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
|
||||
ret = fwnode_property_read_u32_array(fwnode, "sink-pdos",
|
||||
port->snk_pdo, port->nr_snk_pdo);
|
||||
if ((ret < 0) || tcpm_validate_caps(port, port->snk_pdo,
|
||||
port->nr_snk_pdo))
|
||||
return -EINVAL;
|
||||
|
||||
if (fwnode_property_read_u32(fwnode, "op-sink-microwatt", &mw) < 0)
|
||||
return -EINVAL;
|
||||
port->operating_snk_mw = mw / 1000;
|
||||
/* The following code are applicable to pd-capable ports, i.e. pd_supported is true. */
|
||||
|
||||
/* FRS can only be supported by DRP ports */
|
||||
if (port->port_type == TYPEC_PORT_DRP) {
|
||||
ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
|
||||
&frs_current);
|
||||
if (ret >= 0 && frs_current <= FRS_5V_3A)
|
||||
if (!ret && frs_current <= FRS_5V_3A)
|
||||
port->new_source_frs_current = frs_current;
|
||||
|
||||
if (ret)
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
/* For the backward compatibility, "capabilities" node is optional. */
|
||||
capabilities = fwnode_get_named_child_node(fwnode, "capabilities");
|
||||
if (!capabilities) {
|
||||
port->pd_count = 1;
|
||||
} else {
|
||||
fwnode_for_each_child_node(capabilities, child)
|
||||
port->pd_count++;
|
||||
|
||||
if (!port->pd_count) {
|
||||
ret = -ENODATA;
|
||||
goto put_capabilities;
|
||||
}
|
||||
}
|
||||
|
||||
port->pds = devm_kcalloc(port->dev, port->pd_count, sizeof(struct usb_power_delivery *),
|
||||
GFP_KERNEL);
|
||||
if (!port->pds) {
|
||||
ret = -ENOMEM;
|
||||
goto put_capabilities;
|
||||
}
|
||||
|
||||
port->pd_list = devm_kcalloc(port->dev, port->pd_count, sizeof(struct pd_data *),
|
||||
GFP_KERNEL);
|
||||
if (!port->pd_list) {
|
||||
ret = -ENOMEM;
|
||||
goto put_capabilities;
|
||||
}
|
||||
|
||||
for (i = 0; i < port->pd_count; i++) {
|
||||
port->pd_list[i] = devm_kzalloc(port->dev, sizeof(struct pd_data), GFP_KERNEL);
|
||||
if (!port->pd_list[i]) {
|
||||
ret = -ENOMEM;
|
||||
goto put_capabilities;
|
||||
}
|
||||
|
||||
src_pdo = port->pd_list[i]->source_desc.pdo;
|
||||
port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
|
||||
snk_pdo = port->pd_list[i]->sink_desc.pdo;
|
||||
port->pd_list[i]->sink_desc.role = TYPEC_SINK;
|
||||
|
||||
/* If "capabilities" is NULL, fall back to single pd cap population. */
|
||||
if (!capabilities)
|
||||
caps = fwnode;
|
||||
else
|
||||
caps = fwnode_get_next_child_node(capabilities, caps);
|
||||
|
||||
if (port->port_type != TYPEC_PORT_SNK) {
|
||||
ret = fwnode_property_count_u32(caps, "source-pdos");
|
||||
if (ret == 0) {
|
||||
ret = -EINVAL;
|
||||
goto put_caps;
|
||||
}
|
||||
if (ret < 0)
|
||||
goto put_caps;
|
||||
|
||||
nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
|
||||
ret = fwnode_property_read_u32_array(caps, "source-pdos", src_pdo,
|
||||
nr_src_pdo);
|
||||
if (ret)
|
||||
goto put_caps;
|
||||
|
||||
ret = tcpm_validate_caps(port, src_pdo, nr_src_pdo);
|
||||
if (ret)
|
||||
goto put_caps;
|
||||
|
||||
if (i == 0) {
|
||||
port->nr_src_pdo = nr_src_pdo;
|
||||
memcpy_and_pad(port->src_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
|
||||
port->pd_list[0]->source_desc.pdo,
|
||||
sizeof(u32) * nr_src_pdo,
|
||||
0);
|
||||
}
|
||||
}
|
||||
|
||||
if (port->port_type != TYPEC_PORT_SRC) {
|
||||
ret = fwnode_property_count_u32(caps, "sink-pdos");
|
||||
if (ret == 0) {
|
||||
ret = -EINVAL;
|
||||
goto put_caps;
|
||||
}
|
||||
|
||||
if (ret < 0)
|
||||
goto put_caps;
|
||||
|
||||
nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
|
||||
ret = fwnode_property_read_u32_array(caps, "sink-pdos", snk_pdo,
|
||||
nr_snk_pdo);
|
||||
if (ret)
|
||||
goto put_caps;
|
||||
|
||||
ret = tcpm_validate_caps(port, snk_pdo, nr_snk_pdo);
|
||||
if (ret)
|
||||
goto put_caps;
|
||||
|
||||
if (fwnode_property_read_u32(caps, "op-sink-microwatt", &uw) < 0) {
|
||||
ret = -EINVAL;
|
||||
goto put_caps;
|
||||
}
|
||||
|
||||
port->pd_list[i]->operating_snk_mw = uw / 1000;
|
||||
|
||||
if (i == 0) {
|
||||
port->nr_snk_pdo = nr_snk_pdo;
|
||||
memcpy_and_pad(port->snk_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
|
||||
port->pd_list[0]->sink_desc.pdo,
|
||||
sizeof(u32) * nr_snk_pdo,
|
||||
0);
|
||||
port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
put_caps:
|
||||
if (caps != fwnode)
|
||||
fwnode_handle_put(caps);
|
||||
put_capabilities:
|
||||
fwnode_handle_put(capabilities);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* sink-vdos is optional */
|
||||
ret = fwnode_property_count_u32(fwnode, "sink-vdos");
|
||||
if (ret < 0)
|
||||
ret = 0;
|
||||
return 0;
|
||||
|
||||
port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
|
||||
if (port->nr_snk_vdo) {
|
||||
@ -6629,12 +6846,14 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
|
||||
tcpm_debugfs_init(port);
|
||||
|
||||
err = tcpm_fw_get_caps(port, tcpc->fwnode);
|
||||
if (err < 0)
|
||||
goto out_destroy_wq;
|
||||
err = tcpm_fw_get_snk_vdos(port, tcpc->fwnode);
|
||||
if (err < 0)
|
||||
goto out_destroy_wq;
|
||||
|
||||
port->try_role = port->typec_caps.prefer_role;
|
||||
|
||||
port->typec_caps.fwnode = tcpc->fwnode;
|
||||
port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
|
||||
port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
|
||||
port->typec_caps.svdm_version = SVDM_VER_2_0;
|
||||
@ -6643,7 +6862,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
|
||||
port->typec_caps.orientation_aware = 1;
|
||||
|
||||
port->partner_desc.identity = &port->partner_ident;
|
||||
port->port_type = port->typec_caps.type;
|
||||
|
||||
port->role_sw = usb_role_switch_get(port->dev);
|
||||
if (IS_ERR(port->role_sw)) {
|
||||
@ -6660,7 +6878,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
|
||||
if (err)
|
||||
goto out_role_sw_put;
|
||||
|
||||
port->typec_caps.pd = port->pd;
|
||||
if (port->pds)
|
||||
port->typec_caps.pd = port->pds[0];
|
||||
|
||||
port->typec_port = typec_register_port(port->dev, &port->typec_caps);
|
||||
if (IS_ERR(port->typec_port)) {
|
||||
|
@@ -149,6 +149,8 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)

        sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

        post_page_relinquish_tlb_inv();

        /* We should always be able to add one buffer to an empty queue. */
        virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
        virtqueue_kick(vq);
@@ -177,6 +179,8 @@ static int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_i
        if (WARN_ON_ONCE(err))
                return err;

        post_page_relinquish_tlb_inv();

        virtqueue_kick(vq);

        /* When host has read buffer, this completes via balloon_ack */
@@ -492,8 +492,40 @@ static int virtio_pci_restore(struct device *dev)
        return virtio_device_restore(&vp_dev->vdev);
}

static bool vp_supports_pm_no_reset(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        u16 pmcsr;

        if (!pci_dev->pm_cap)
                return false;

        pci_read_config_word(pci_dev, pci_dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        if (PCI_POSSIBLE_ERROR(pmcsr)) {
                dev_err(dev, "Unable to query pmcsr");
                return false;
        }

        return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET;
}

static int virtio_pci_suspend(struct device *dev)
{
        return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_freeze(dev);
}

static int virtio_pci_resume(struct device *dev)
{
        return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_restore(dev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
        .suspend = virtio_pci_suspend,
        .resume = virtio_pci_resume,
        .freeze = virtio_pci_freeze,
        .thaw = virtio_pci_restore,
        .poweroff = virtio_pci_freeze,
        .restore = virtio_pci_restore,
};
#endif
@@ -208,6 +208,7 @@ struct erofs_workgroup {

        /* overall workgroup reference count */
        atomic_t refcount;
        spinlock_t lock;
};

static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
@ -575,21 +575,19 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
|
||||
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
|
||||
unsigned int i;
|
||||
|
||||
if (i_blocksize(fe->inode) != PAGE_SIZE)
|
||||
return;
|
||||
if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
|
||||
if (i_blocksize(fe->inode) != PAGE_SIZE ||
|
||||
fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
|
||||
return;
|
||||
|
||||
for (i = 0; i < pclusterpages; ++i) {
|
||||
struct page *page, *newpage;
|
||||
void *t; /* mark pages just found for debugging */
|
||||
|
||||
/* the compressed page was loaded before */
|
||||
/* Inaccurate check w/o locking to avoid unneeded lookups */
|
||||
if (READ_ONCE(pcl->compressed_bvecs[i].page))
|
||||
continue;
|
||||
|
||||
page = find_get_page(mc, pcl->obj.index + i);
|
||||
|
||||
if (page) {
|
||||
t = (void *)((unsigned long)page | 1);
|
||||
newpage = NULL;
|
||||
@ -609,9 +607,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
|
||||
set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
|
||||
t = (void *)((unsigned long)newpage | 1);
|
||||
}
|
||||
|
||||
if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t))
|
||||
spin_lock(&pcl->obj.lock);
|
||||
if (!pcl->compressed_bvecs[i].page) {
|
||||
pcl->compressed_bvecs[i].page = t;
|
||||
spin_unlock(&pcl->obj.lock);
|
||||
continue;
|
||||
}
|
||||
spin_unlock(&pcl->obj.lock);
|
||||
|
||||
if (page)
|
||||
put_page(page);
|
||||
@ -729,31 +731,25 @@ int erofs_init_managed_cache(struct super_block *sb)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
|
||||
struct z_erofs_bvec *bvec)
|
||||
{
|
||||
struct z_erofs_pcluster *const pcl = fe->pcl;
|
||||
|
||||
while (fe->icur > 0) {
|
||||
if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
|
||||
NULL, bvec->page)) {
|
||||
pcl->compressed_bvecs[fe->icur] = *bvec;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/* callers must be with pcluster lock held */
|
||||
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
|
||||
struct z_erofs_bvec *bvec, bool exclusive)
|
||||
{
|
||||
struct z_erofs_pcluster *pcl = fe->pcl;
|
||||
int ret;
|
||||
|
||||
if (exclusive) {
|
||||
/* give priority for inplaceio to use file pages first */
|
||||
if (z_erofs_try_inplace_io(fe, bvec))
|
||||
spin_lock(&pcl->obj.lock);
|
||||
while (fe->icur > 0) {
|
||||
if (pcl->compressed_bvecs[--fe->icur].page)
|
||||
continue;
|
||||
pcl->compressed_bvecs[fe->icur] = *bvec;
|
||||
spin_unlock(&pcl->obj.lock);
|
||||
return 0;
|
||||
}
|
||||
spin_unlock(&pcl->obj.lock);
|
||||
|
||||
/* otherwise, check if it can be used as a bvpage */
|
||||
if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
|
||||
!fe->candidate_bvpage)
|
||||
@ -803,6 +799,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
|
||||
if (IS_ERR(pcl))
|
||||
return PTR_ERR(pcl);
|
||||
|
||||
spin_lock_init(&pcl->obj.lock);
|
||||
atomic_set(&pcl->obj.refcount, 1);
|
||||
pcl->algorithmformat = map->m_algorithmformat;
|
||||
pcl->length = 0;
|
||||
@ -1450,23 +1447,26 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
|
||||
{
|
||||
gfp_t gfp = mapping_gfp_mask(mc);
|
||||
bool tocache = false;
|
||||
struct z_erofs_bvec *zbv = pcl->compressed_bvecs + nr;
|
||||
struct z_erofs_bvec zbv;
|
||||
struct address_space *mapping;
|
||||
struct page *page, *oldpage;
|
||||
struct page *page;
|
||||
int justfound, bs = i_blocksize(f->inode);
|
||||
|
||||
/* Except for inplace pages, the entire page can be used for I/Os */
|
||||
bvec->bv_offset = 0;
|
||||
bvec->bv_len = PAGE_SIZE;
|
||||
repeat:
|
||||
oldpage = READ_ONCE(zbv->page);
|
||||
if (!oldpage)
|
||||
spin_lock(&pcl->obj.lock);
|
||||
zbv = pcl->compressed_bvecs[nr];
|
||||
page = zbv.page;
|
||||
justfound = (unsigned long)page & 1UL;
|
||||
page = (struct page *)((unsigned long)page & ~1UL);
|
||||
pcl->compressed_bvecs[nr].page = page;
|
||||
spin_unlock(&pcl->obj.lock);
|
||||
if (!page)
|
||||
goto out_allocpage;
|
||||
|
||||
justfound = (unsigned long)oldpage & 1UL;
|
||||
page = (struct page *)((unsigned long)oldpage & ~1UL);
|
||||
bvec->bv_page = page;
|
||||
|
||||
DBG_BUGON(z_erofs_is_shortlived_page(page));
|
||||
/*
|
||||
* Handle preallocated cached pages. We tried to allocate such pages
|
||||
@ -1475,7 +1475,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
|
||||
*/
|
||||
if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
|
||||
set_page_private(page, 0);
|
||||
WRITE_ONCE(zbv->page, page);
|
||||
tocache = true;
|
||||
goto out_tocache;
|
||||
}
|
||||
@ -1486,9 +1485,9 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
|
||||
* therefore it is impossible for `mapping` to be NULL.
|
||||
*/
|
||||
if (mapping && mapping != mc) {
|
||||
if (zbv->offset < 0)
|
||||
bvec->bv_offset = round_up(-zbv->offset, bs);
|
||||
bvec->bv_len = round_up(zbv->end, bs) - bvec->bv_offset;
|
||||
if (zbv.offset < 0)
|
||||
bvec->bv_offset = round_up(-zbv.offset, bs);
|
||||
bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1498,7 +1497,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
|
||||
|
||||
/* the cached page is still in managed cache */
|
||||
if (page->mapping == mc) {
|
||||
WRITE_ONCE(zbv->page, page);
|
||||
/*
|
||||
* The cached page is still available but without a valid
|
||||
* `->private` pcluster hint. Let's reconnect them.
|
||||
@ -1530,11 +1528,15 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
|
||||
put_page(page);
|
||||
out_allocpage:
|
||||
page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
|
||||
if (oldpage != cmpxchg(&zbv->page, oldpage, page)) {
|
||||
spin_lock(&pcl->obj.lock);
|
||||
if (pcl->compressed_bvecs[nr].page) {
|
||||
erofs_pagepool_add(&f->pagepool, page);
|
||||
spin_unlock(&pcl->obj.lock);
|
||||
cond_resched();
|
||||
goto repeat;
|
||||
}
|
||||
pcl->compressed_bvecs[nr].page = page;
|
||||
spin_unlock(&pcl->obj.lock);
|
||||
bvec->bv_page = page;
|
||||
out_tocache:
|
||||
if (!tocache || bs != PAGE_SIZE ||
|
||||
@ -1712,6 +1714,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
|
||||
|
||||
if (cur + bvec.bv_len > end)
|
||||
bvec.bv_len = end - cur;
|
||||
DBG_BUGON(bvec.bv_len < sb->s_blocksize);
|
||||
if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
|
||||
bvec.bv_offset))
|
||||
goto submit_bio_retry;
|
||||
|
@@ -4036,7 +4036,7 @@ static int check_swap_activate(struct swap_info_struct *sis,
        sis->highest_bit = cur_lblock - 1;
out:
        if (not_aligned)
                f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
                f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)",
                          not_aligned, blks_per_sec * F2FS_BLKSIZE);
        return ret;
}
@@ -256,7 +256,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
            (!fi->i_inline_xattr_size ||
             fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
                f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %lu",
                          __func__, inode->i_ino, fi->i_inline_xattr_size,
                          MAX_INLINE_XATTR_SIZE);
                return false;
@@ -633,7 +633,7 @@ static void f2fs_ra_node_pages(struct page *parent, int start, int n)

        /* Then, try readahead for siblings of the desired node */
        end = start + n;
        end = min(end, NIDS_PER_BLOCK);
        end = min(end, (int)NIDS_PER_BLOCK);
        for (i = start; i < end; i++) {
                nid = get_nid(parent, i, false);
                f2fs_ra_node_page(sbi, nid);
@ -3268,6 +3268,14 @@ loff_t max_file_blocks(struct inode *inode)
|
||||
leaf_count *= NIDS_PER_BLOCK;
|
||||
result += leaf_count;
|
||||
|
||||
/*
|
||||
* For compatibility with FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{64,32} with
|
||||
* a 4K crypto data unit, we must restrict the max filesize to what can
|
||||
* fit within U32_MAX + 1 data units.
|
||||
*/
|
||||
|
||||
result = min(result, (((loff_t)U32_MAX + 1) * 4096) >> F2FS_BLKSIZE_BITS);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -3422,7 +3430,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
/* Currently, support 512/1024/2048/4096 bytes sector size */
|
||||
/* Currently, support 512/1024/2048/4096/16K bytes sector size */
|
||||
if (le32_to_cpu(raw_super->log_sectorsize) >
|
||||
F2FS_MAX_LOG_SECTOR_SIZE ||
|
||||
le32_to_cpu(raw_super->log_sectorsize) <
|
||||
@ -4773,7 +4781,7 @@ static int __init init_f2fs_fs(void)
|
||||
int err;
|
||||
|
||||
if (PAGE_SIZE != F2FS_BLKSIZE) {
|
||||
printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
|
||||
printk("F2FS not supported on PAGE_SIZE(%lu) != BLOCK_SIZE(%lu)\n",
|
||||
PAGE_SIZE, F2FS_BLKSIZE);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@@ -426,7 +426,12 @@ static int fuse_dentry_canonical_path(const struct path *path,

#ifdef CONFIG_FUSE_BPF
        struct fuse_err_ret fer;
#endif

        if (fm->fc->no_dentry_canonical_path)
                goto out;

#ifdef CONFIG_FUSE_BPF
        fer = fuse_bpf_backing(inode, struct fuse_dummy_io,
                               fuse_canonical_path_initialize,
                               fuse_canonical_path_backing,
@@ -453,9 +458,13 @@ static int fuse_dentry_canonical_path(const struct path *path,
        free_page((unsigned long)path_name);
        if (err > 0)
                return 0;
        if (err < 0)
        if (err < 0 && err != -ENOSYS)
                return err;

        if (err == -ENOSYS)
                fm->fc->no_dentry_canonical_path = 1;

out:
        canonical_path->dentry = path->dentry;
        canonical_path->mnt = path->mnt;
        path_get(canonical_path);
@@ -812,6 +812,9 @@ struct fuse_conn {
        /** Is bmap not implemented by fs? */
        unsigned no_bmap:1;

        /** Is dentry_canonical_path not implemented by fs? */
        unsigned no_dentry_canonical_path:1;

        /** Is poll not implemented by fs? */
        unsigned no_poll:1;
@@ -33,6 +33,7 @@
#define _ANDROID_KABI_H

#include <linux/compiler.h>
#include <linux/stringify.h>

/*
 * Worker macros, don't use these, use the ones without a leading '_'
@ -13,10 +13,10 @@
|
||||
|
||||
#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
|
||||
#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
|
||||
#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
|
||||
#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
|
||||
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
|
||||
#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
|
||||
#define F2FS_MAX_LOG_SECTOR_SIZE PAGE_SHIFT /* Max is Block Size */
|
||||
#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */
|
||||
#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
|
||||
#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
|
||||
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
|
||||
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
|
||||
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
|
||||
@ -209,14 +209,14 @@ struct f2fs_checkpoint {
|
||||
unsigned char sit_nat_version_bitmap[];
|
||||
} __packed;
|
||||
|
||||
#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */
|
||||
#define CP_CHKSUM_OFFSET (F2FS_BLKSIZE - sizeof(__le32)) /* default chksum offset in checkpoint */
|
||||
#define CP_MIN_CHKSUM_OFFSET \
|
||||
(offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
|
||||
|
||||
/*
|
||||
* For orphan inode management
|
||||
*/
|
||||
#define F2FS_ORPHANS_PER_BLOCK 1020
|
||||
#define F2FS_ORPHANS_PER_BLOCK ((F2FS_BLKSIZE - 4 * sizeof(__le32)) / sizeof(__le32))
|
||||
|
||||
#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
|
||||
F2FS_ORPHANS_PER_BLOCK)
|
||||
@ -242,14 +242,31 @@ struct f2fs_extent {
|
||||
#define F2FS_NAME_LEN 255
|
||||
/* 200 bytes for inline xattrs by default */
|
||||
#define DEFAULT_INLINE_XATTR_ADDRS 50
|
||||
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
|
||||
|
||||
#define OFFSET_OF_END_OF_I_EXT 360
|
||||
#define SIZE_OF_I_NID 20
|
||||
|
||||
struct node_footer {
|
||||
__le32 nid; /* node id */
|
||||
__le32 ino; /* inode number */
|
||||
__le32 flag; /* include cold/fsync/dentry marks and offset */
|
||||
__le64 cp_ver; /* checkpoint version */
|
||||
__le32 next_blkaddr; /* next node page block address */
|
||||
} __packed;
|
||||
|
||||
/* Address Pointers in an Inode */
|
||||
#define DEF_ADDRS_PER_INODE ((F2FS_BLKSIZE - OFFSET_OF_END_OF_I_EXT \
|
||||
- SIZE_OF_I_NID \
|
||||
- sizeof(struct node_footer)) / sizeof(__le32))
|
||||
#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
|
||||
get_extra_isize(inode))
|
||||
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
|
||||
#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
|
||||
#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
|
||||
/* Address Pointers in a Direct Block */
|
||||
#define DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
|
||||
#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode)
|
||||
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
|
||||
/* Node IDs in an Indirect Block */
|
||||
#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
|
||||
|
||||
#define ADDRS_PER_PAGE(page, inode) \
|
||||
(IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
|
||||
@ -341,14 +358,6 @@ enum {
|
||||
|
||||
#define OFFSET_BIT_MASK GENMASK(OFFSET_BIT_SHIFT - 1, 0)
|
||||
|
||||
struct node_footer {
|
||||
__le32 nid; /* node id */
|
||||
__le32 ino; /* inode number */
|
||||
__le32 flag; /* include cold/fsync/dentry marks and offset */
|
||||
__le64 cp_ver; /* checkpoint version */
|
||||
__le32 next_blkaddr; /* next node page block address */
|
||||
} __packed;
|
||||
|
||||
struct f2fs_node {
|
||||
/* can be one of three types: inode, direct, and indirect types */
|
||||
union {
|
||||
@ -362,7 +371,7 @@ struct f2fs_node {
|
||||
/*
|
||||
* For NAT entries
|
||||
*/
|
||||
#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
|
||||
#define NAT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_nat_entry))
|
||||
|
||||
struct f2fs_nat_entry {
|
||||
__u8 version; /* latest version of cached nat entry */
|
||||
@ -377,12 +386,13 @@ struct f2fs_nat_block {
|
||||
/*
|
||||
* For SIT entries
|
||||
*
|
||||
* Each segment is 2MB in size by default so that a bitmap for validity of
|
||||
* there-in blocks should occupy 64 bytes, 512 bits.
|
||||
* A validity bitmap of 64 bytes covers 512 blocks of area. For a 4K page size,
|
||||
* this results in a segment size of 2MB. For 16k pages, the default segment size
|
||||
* is 8MB.
|
||||
* Not allow to change this.
|
||||
*/
|
||||
#define SIT_VBLOCK_MAP_SIZE 64
|
||||
#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
|
||||
#define SIT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_sit_entry))
|
||||
|
||||
/*
|
||||
* F2FS uses 4 bytes to represent block address. As a result, supported size of
|
||||
@ -417,7 +427,7 @@ struct f2fs_sit_block {
|
||||
* For segment summary
|
||||
*
|
||||
* One summary block contains exactly 512 summary entries, which represents
|
||||
* exactly 2MB segment by default. Not allow to change the basic units.
|
||||
* exactly one segment by default. Not allow to change the basic units.
|
||||
*
|
||||
* NOTE: For initializing fields, you must use set_summary
|
||||
*
|
||||
@ -428,12 +438,12 @@ struct f2fs_sit_block {
|
||||
* from node's page's beginning to get a data block address.
|
||||
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
|
||||
*/
|
||||
#define ENTRIES_IN_SUM 512
|
||||
#define ENTRIES_IN_SUM (F2FS_BLKSIZE / 8)
|
||||
#define SUMMARY_SIZE (7) /* sizeof(struct summary) */
|
||||
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
|
||||
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
|
||||
|
||||
/* a summary entry for a 4KB-sized block in a segment */
|
||||
/* a summary entry for a block in a segment */
|
||||
struct f2fs_summary {
|
||||
__le32 nid; /* parent node id */
|
||||
union {
|
||||
@ -517,7 +527,7 @@ struct f2fs_journal {
|
||||
};
|
||||
} __packed;
|
||||
|
||||
/* 4KB-sized summary block structure */
|
||||
/* Block-sized summary block structure */
|
||||
struct f2fs_summary_block {
|
||||
struct f2fs_summary entries[ENTRIES_IN_SUM];
|
||||
struct f2fs_journal journal;
|
||||
@ -558,11 +568,14 @@ typedef __le32 f2fs_hash_t;
|
||||
* Note: there are more reserved space in inline dentry than in regular
|
||||
* dentry, when converting inline dentry we should handle this carefully.
|
||||
*/
|
||||
#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
|
||||
|
||||
/* the number of dentry in a block */
|
||||
#define NR_DENTRY_IN_BLOCK ((BITS_PER_BYTE * F2FS_BLKSIZE) / \
|
||||
((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * BITS_PER_BYTE + 1))
|
||||
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
|
||||
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
|
||||
BITS_PER_BYTE)
|
||||
#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
|
||||
#define SIZE_OF_RESERVED (F2FS_BLKSIZE - ((SIZE_OF_DIR_ENTRY + \
|
||||
F2FS_SLOT_LEN) * \
|
||||
NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
|
||||
#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
|
||||
@ -575,7 +588,7 @@ struct f2fs_dir_entry {
|
||||
__u8 file_type; /* file type */
|
||||
} __packed;
|
||||
|
||||
/* 4KB-sized directory entry block */
|
||||
/* Block-sized directory entry block */
|
||||
struct f2fs_dentry_block {
|
||||
/* validity bitmap for directory entries in each block */
|
||||
__u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];
|
||||
|
@@ -86,7 +86,8 @@ struct ipv6_devconf {

        struct ctl_table_header *sysctl_header;

        ANDROID_KABI_RESERVE(1);
        ANDROID_KABI_USE(1, struct { __s32 accept_ra_min_lft; u32 padding; });

        ANDROID_KABI_RESERVE(2);
        ANDROID_KABI_RESERVE(3);
        ANDROID_KABI_RESERVE(4);
@@ -15,6 +15,7 @@

static inline bool kvm_has_memrelinquish_services(void) { return false; }
static inline void page_relinquish(struct page *page) { }
static inline void post_page_relinquish_tlb_inv(void) { }

#endif /* CONFIG_MEMORY_RELINQUISH */
@@ -82,6 +82,10 @@ DECLARE_HOOK(android_vh_binder_select_special_worklist,
DECLARE_HOOK(android_vh_binder_alloc_new_buf_locked,
        TP_PROTO(size_t size, size_t *free_async_space, int is_async),
        TP_ARGS(size, free_async_space, is_async));

DECLARE_HOOK(android_vh_binder_detect_low_async_space_locked,
        TP_PROTO(int is_async, size_t *free_async_space, int pid, bool *should_fail),
        TP_ARGS(is_async, free_async_space, pid, should_fail));
struct binder_transaction_data;
DECLARE_HOOK(android_vh_binder_reply,
        TP_PROTO(struct binder_proc *target_proc, struct binder_proc *proc,
@@ -19,6 +19,10 @@ DECLARE_HOOK(android_vh_ufs_fill_prdt,
        unsigned int segments, int *err),
        TP_ARGS(hba, lrbp, segments, err));

DECLARE_RESTRICTED_HOOK(android_rvh_ufs_reprogram_all_keys,
        TP_PROTO(struct ufs_hba *hba, int *err),
        TP_ARGS(hba, err), 1);

DECLARE_HOOK(android_vh_ufs_prepare_command,
        TP_PROTO(struct ufs_hba *hba, struct request *rq,
                 struct ufshcd_lrb *lrbp, int *err),
@@ -198,6 +198,7 @@ enum {
        DEVCONF_IOAM6_ID_WIDE,
        DEVCONF_NDISC_EVICT_NOCARRIER,
        DEVCONF_ACCEPT_UNTRACKED_NA,
        DEVCONF_ACCEPT_RA_MIN_LFT,
        DEVCONF_MAX
};
@@ -601,6 +601,7 @@ void kthread_set_per_cpu(struct task_struct *k, int cpu)
        kthread->cpu = cpu;
        set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}
EXPORT_SYMBOL_GPL(kthread_set_per_cpu);

bool kthread_is_per_cpu(struct task_struct *p)
{
@@ -8,6 +8,9 @@ This module contains a full list of kernel modules

_COMMON_GKI_MODULES_LIST = [
    # keep sorted
    "crypto/crct10dif_common.ko",
    "crypto/crct10dif_generic.ko",
    "drivers/block/null_blk/null_blk.ko",
    "drivers/block/zram/zram.ko",
    "drivers/bluetooth/btbcm.ko",
    "drivers/bluetooth/btqca.ko",
@@ -35,10 +38,12 @@ _COMMON_GKI_MODULES_LIST = [
    "drivers/net/usb/rtl8150.ko",
    "drivers/net/usb/usbnet.ko",
    "drivers/net/wwan/wwan.ko",
    "drivers/scsi/scsi_debug.ko",
    "drivers/usb/class/cdc-acm.ko",
    "drivers/usb/serial/ftdi_sio.ko",
    "drivers/usb/serial/usbserial.ko",
    "kernel/kheaders.ko",
    "lib/crc-t10dif.ko",
    "lib/crypto/libarc4.ko",
    "mm/zsmalloc.ko",
    "net/6lowpan/6lowpan.ko",
@ -202,6 +202,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
|
||||
.ra_defrtr_metric = IP6_RT_PRIO_USER,
|
||||
.accept_ra_from_local = 0,
|
||||
.accept_ra_min_hop_limit= 1,
|
||||
.accept_ra_min_lft = 0,
|
||||
.accept_ra_pinfo = 1,
|
||||
#ifdef CONFIG_IPV6_ROUTER_PREF
|
||||
.accept_ra_rtr_pref = 1,
|
||||
@ -263,6 +264,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
|
||||
.ra_defrtr_metric = IP6_RT_PRIO_USER,
|
||||
.accept_ra_from_local = 0,
|
||||
.accept_ra_min_hop_limit= 1,
|
||||
.accept_ra_min_lft = 0,
|
||||
.accept_ra_pinfo = 1,
|
||||
#ifdef CONFIG_IPV6_ROUTER_PREF
|
||||
.accept_ra_rtr_pref = 1,
|
||||
@ -2753,6 +2755,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
|
||||
return;
|
||||
}
|
||||
|
||||
if (valid_lft != 0 && valid_lft < in6_dev->cnf.accept_ra_min_lft)
|
||||
goto put;
|
||||
|
||||
/*
|
||||
* Two things going on here:
|
||||
* 1) Add routes for on-link prefixes
|
||||
@ -5623,6 +5628,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
|
||||
array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
|
||||
array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
|
||||
array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
|
||||
array[DEVCONF_ACCEPT_RA_MIN_LFT] = cnf->accept_ra_min_lft;
|
||||
}
|
||||
|
||||
static inline size_t inet6_ifla6_size(void)
|
||||
@ -6812,6 +6818,13 @@ static const struct ctl_table addrconf_sysctl[] = {
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec,
|
||||
},
|
||||
{
|
||||
.procname = "accept_ra_min_lft",
|
||||
.data = &ipv6_devconf.accept_ra_min_lft,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec,
|
||||
},
|
||||
{
|
||||
.procname = "accept_ra_pinfo",
|
||||
.data = &ipv6_devconf.accept_ra_pinfo,
|
||||
|
@ -1331,6 +1331,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
|
||||
goto skip_defrtr;
|
||||
}
|
||||
|
||||
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
|
||||
if (lifetime != 0 && lifetime < in6_dev->cnf.accept_ra_min_lft) {
|
||||
ND_PRINTK(2, info,
|
||||
"RA: router lifetime (%ds) is too short: %s\n",
|
||||
lifetime, skb->dev->name);
|
||||
goto skip_defrtr;
|
||||
}
|
||||
|
||||
/* Do not accept RA with source-addr found on local machine unless
|
||||
* accept_ra_from_local is set to true.
|
||||
*/
|
||||
@ -1343,8 +1351,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
|
||||
goto skip_defrtr;
|
||||
}
|
||||
|
||||
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
|
||||
|
||||
#ifdef CONFIG_IPV6_ROUTER_PREF
|
||||
pref = ra_msg->icmph.icmp6_router_pref;
|
||||
/* 10b is handled as if it were 00b (medium) */
|
||||
@ -1519,6 +1525,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
|
||||
if (ri->prefix_len == 0 &&
|
||||
!in6_dev->cnf.accept_ra_defrtr)
|
||||
continue;
|
||||
if (ri->lifetime != 0 &&
|
||||
ntohl(ri->lifetime) < in6_dev->cnf.accept_ra_min_lft)
|
||||
continue;
|
||||
if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
|
||||
continue;
|
||||
if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
|
||||
|
@@ -6152,7 +6152,7 @@ static int nft_setelem_catchall_deactivate(const struct net *net,

        list_for_each_entry(catchall, &set->catchall_list, list) {
                ext = nft_set_elem_ext(set, catchall->elem);
                if (!nft_is_active(net, ext))
                if (!nft_is_active_next(net, ext))
                        continue;

                kfree(elem->priv);
@@ -10427,16 +10427,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
        data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));

        switch (data->verdict.code) {
        default:
        switch (data->verdict.code & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_DROP:
        case NF_QUEUE:
                break;
        default:
                return -EINVAL;
        }
        fallthrough;
        case NF_ACCEPT:
        case NF_DROP:
        case NF_QUEUE:
                break;
        case NFT_CONTINUE:
        case NFT_BREAK:
        case NFT_RETURN:
@@ -10471,6 +10465,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,

                data->verdict.chain = chain;
                break;
        default:
                return -EINVAL;
        }

        desc->len = sizeof(data->verdict);