Merge branch 'android14-6.1' into 'android14-6.1-lts'

Catches the android14-6.1-lts branch up with the android14-6.1 branch
which has had a lot of changes that are needed here to resolve future
LTS merges and to ensure that the ABI is kept stable.

It contains the following commits:

abb897fe2f8e Merge branch 'android14-6.1' into 'android14-6.1-lts'
a5e46b0f3c UPSTREAM: io_uring/poll: serialize poll linked timer start with poll removal
6c695fad68 ANDROID: fuse-bpf: Add partial flock support
9b655e9328 ANDROID: Incremental fs: Allocate data buffer based on input request size
facf08fa5f UPSTREAM: gfs2: Don't deref jdesc in evict
a16d62a296 ANDROID: KVM: arm64: Fix MMU context save/restore over TLB invalidation
7f0f58f97b ANDROID: Update symbol list for VIVO
1b7f110278 ANDROID: add initial symbol list file for ExynosAuto SoCs
f6707f352b ANDROID: sched: Export sched_domains_mutex for lockdep
a24911abfd ANDROID: Update symbol for Exynos SoC
5e7421101f ANDROID: ABI: Update symbol for Exynos SoC
270ca05882 ANDROID: Update symbol list for mtk
47e02fe1ef UPSTREAM: dma-remap: use kvmalloc_array/kvfree for larger dma memory remap
22e008d6d5 ANDROID: vendor_hooks: Supplement the missing hook call point.
214e6f268b ANDROID: GKI: Add WWAN as GKI protected module
8726a2d930 ANDROID: GKI: regmap: Add regmap vendor hook for of_syscon_register
7c2b6c7b56 UPSTREAM: kasan: suppress recursive reports for HW_TAGS
c0226bf0c7 UPSTREAM: kasan, arm64: add arch_suppress_tag_checks_start/stop
da926e6077 UPSTREAM: arm64: mte: rename TCO routines
553be6e70d BACKPORT: kasan, arm64: rename tagging-related routines
b39a3be50a UPSTREAM: kasan: drop empty tagging-related defines
44ee9eef21 ANDROID: usb: xhci-plat: Fix double-free in xhci_plat_remove
55679fd0a8 ANDROID: ABI: update symbol list for galaxy
30807bebbf ANDROID: GKI: update the ABI symbol list
f3c6324daa ANDROID: ABI: Update symbol for Exynos SoC
c75c8311c8 ANDROID: GKI: ABI: update whitelist for the kmsg_dump and native_hang symbols used by unisoc for kernel6.1
0a2e9dd65c ANDROID: ABI: Update symbols to unisoc whitelist for ims_bridge module
fc9c1ccbbf ANDROID: abi_gki_aarch64_qcom: Add drm_plane_from_index and drm_gem_prime_export
c480e4e576 ANDROID: abi_gki_aarch64_qcom: Update symbol list
8ecaef4d4b UPSTREAM: fsverity: reject FS_IOC_ENABLE_VERITY on mode 3 fds
d5feaf8163 UPSTREAM: fsverity: explicitly check for buffer overflow in build_merkle_tree()
711f5d5bfe ANDROID: update unisoc symbol list
dde9b1794c ANDROID: update symbol for unisoc whitelist
dfd6ca2517 UPSTREAM: f2fs: fix deadlock in i_xattr_sem and inode page lock
a3d8701485 ANDROID: GKI: update xiaomi symbol list
dfc69fd81c Revert "FROMLIST: f2fs: remove i_xattr_sem to avoid deadlock and fix the original issue"
2e2b1f4982 ANDROID: ABI: Update pixel symbol list
b57cdabd55 ANDROID: Set arch attribute for allmodconfig builds
f63b2625af UPSTREAM: usb: gadget: udc: renesas_usb3: Fix use after free bug in renesas_usb3_remove due to race condition
dc8c661b99 ANDROID: ABI: Add to QCOM symbols list
dd451f19f0 UPSTREAM: arm64: mm: pass original fault address to handle_mm_fault() in PER_VMA_LOCK block
39385f7568 UPSTREAM: media: rkvdec: fix use after free bug in rkvdec_remove
35a9539d66 ANDROID: GKI: Update symbol list for MediatTek
fcbb015efd UPSTREAM: scsi: ufs: core: Remove dedicated hwq for dev command
2eb4158749 BACKPORT: scsi: ufs: mcq: Fix the incorrect OCS value for the device command
dc64f5f480 FROMLIST: scsi: ufs: ufs-mediatek: Add MCQ support for MTK platform
8740a92b2e FROMLIST: scsi: ufs: core: Export symbols for MTK driver module
c9814a3af5 UPSTREAM: blk-mq: check on cpu id when there is only one ctx mapping
c413cf731a UPSTREAM: relayfs: fix out-of-bounds access in relay_file_read
e84e043a3c UPSTREAM: net/sched: flower: fix possible OOB write in fl_set_geneve_opt()
d2dfb4ee11 UPSTREAM: x86/mm: Avoid using set_pgd() outside of real PGD pages
3c60e58d7a UPSTREAM: iommu/amd: Add missing domain type checks
820f96cba5 UPSTREAM: tty: serial: qcom_geni: avoid duplicate struct member init
cbea99e1de UPSTREAM: scsi: ufs: core: bsg: Fix cast to restricted __be16 warning
c779836709 UPSTREAM: netfilter: nf_tables: incorrect error path handling with NFT_MSG_NEWRULE
ed2a228522 ANDROID: fix build error when use cpu_cgroup_online vh
8cd2dc493a ANDROID: ABI: add android_debug_symbol to whitelist
1047d4a5df ANDROID: defconfig: Enable debug_symbol driver
dfabd2e38b ANDROID: android: Create debug_symbols driver
f54778f021 ANDROID: ABI: update symbol list for exynos
58004e1d0e ANDROID: KVM: arm64: Remove 'struct kvm_vcpu' from the KMI
8a717a85c5 UPSTREAM: KVM: arm64: Restore GICv2-on-GICv3 functionality
b9d7d47d4a UPSTREAM: KVM: arm64: vgic: Wrap vgic_its_create() with config_lock
486a8ab3ad UPSTREAM: KVM: arm64: vgic: Fix a circular locking issue
b5e26cd12f UPSTREAM: KVM: arm64: vgic: Don't acquire its_lock before config_lock
b1bb8a0bc4 BACKPORT: KVM: arm64: Avoid lock inversion when setting the VM register width
b39849bde6 UPSTREAM: KVM: arm64: Avoid vcpu->mutex v. kvm->lock inversion in CPU_ON
04b12278ee BACKPORT: KVM: arm64: Use config_lock to protect data ordered against KVM_RUN
de6bb81c8b UPSTREAM: KVM: arm64: Use config_lock to protect vgic state
cf0e6c7e09 BACKPORT: KVM: arm64: Add helper vgic_write_guest_lock()
4bbcece823 ANDROID: sound: usb: Fix wrong behavior of vendor hooking
55f146682b ANDROID: GKI: USB: XHCI: add Android ABI padding to struct xhci_vendor_ops
e27c6490ba Revert "ANDROID: android: Create debug_symbols driver"
bb732365f7 ANDROID: android: Create debug_symbols driver
80ac923694 UPSTREAM: ipvlan:Fix out-of-bounds caused by unclear skb->cb
9a9c876461 ANDROID: update symbol list for unisoc vendor hook
e3a72785da ANDROID: thermal: Add hook to enable/disable thermal power throttle
05ba0cb850 ANDROID: ABI: Update symbol for Exynos SoC
251aa28d16 BACKPORT: FROMGIT: usb: gadget: udc: Handle gadget_connect failure during bind operation
5af5006061 FROMGIT: usb: dwc3: gadget: Bail out in pullup if soft reset timeout happens
79b7e0db16 ANDROID: GKI: Update symbol list for xiaomi
ff8496749d ANDROID: vendor_hooks: vendor hook for MM
43d7226c5f ANDROID: add a symbol to unisoc symbol list
51cb1e1cfd ANDROID: GKI: update symbol list file for xiaomi
1499ddcb78 UPSTREAM: net/sched: cls_u32: Fix reference counter leak leading to overflow
054ab3ab00 ANDROID: db845c: Fix build when using --kgdb
a39af6210e FROMGIT: usb: host: xhci-plat: Set XHCI_STATE_REMOVING before resuming XHCI HC
50c99c83e2 FROMGIT: usb: host: xhci: Do not re-initialize the XHCI HC if being removed
fa9645687e FROMLIST: kheaders: dereferences the source tree
21061b7d0f FROMLIST: f2fs: remove i_xattr_sem to avoid deadlock and fix the original issue
ec0fc55aa4 ANDROID: db845c: Local define for db845c targets
947e7c1d72 ANDROID: GKI: Update symbols to symbol list
9afd7b261a ANDROID: Export memcg functions to allow module to add new files
32c2d42ee1 ANDROID: rockpi4: Fix build when using --kgdb
275048c878 ANDROID: GKI: update symbol list file for xiaomi
64e4b4d31b ANDROID: kleaf: android/gki_system_dlkm_modules is generated.
734b06dabf ANDROID: ABI: Update pixel symbol list
9ea87136d1 ANDROID: fuse-bpf: Move FUSE_RELEASE to correct place
b8ef5bfbee ANDROID: fuse-bpf: Ensure bpf field can never be nulled
a97d54b54d ANDROID: GKI: Increase CMA areas to 32
d28f02c47b ANDROID: Delete MODULES_LIST from build configs.
97a56a07e9 ANDROID: ABI: Update symbols to unisoc whitelist
7668cef283 ANDROID: HID: Only utilise UHID provided exports if UHID is enabled
1c4d2aa0c7 UPSTREAM: memstick: r592: Fix UAF bug in r592_remove due to race condition
8aea35f109 UPSTREAM: xfs: verify buffer contents when we skip log replay
04b6079eae UPSTREAM: bluetooth: Perform careful capability checks in hci_sock_ioctl()
8f5a220975 FROMLIST: maple_tree: Adjust node allocation on mas_rebalance()
e835ffdfbc FROMLIST: maple_tree: Reduce resets during store setup
708234485a FROMLIST: BACKPORT: maple_tree: Refine mas_preallocate() node calculations
d766c8399b Revert "FROMLIST: BACKPORT: maple_tree: Refine mas_preallocate() node calculations"

Change-Id: I0c77dd36d8336542cbb66edceec28f36ce3d798f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman 2023-07-19 18:09:34 +00:00
commit 524f946fbc
97 changed files with 4345 additions and 6959 deletions

View File

@ -1,9 +1,18 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2021 The Android Open Source Project
load("@bazel_skylib//rules:write_file.bzl", "write_file")
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
load("//build/kernel/kleaf:common_kernels.bzl", "define_common_kernels", "define_db845c")
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers", "kernel_build", "kernel_images", "kernel_modules_install")
load("//build/kernel/kleaf:common_kernels.bzl", "define_common_kernels")
load(
"//build/kernel/kleaf:kernel.bzl",
"ddk_headers",
"kernel_abi",
"kernel_build",
"kernel_images",
"kernel_modules_install",
"merged_kernel_uapi_headers",
)
load(":modules.bzl", "COMMON_GKI_MODULES_LIST")
package(
@ -31,12 +40,22 @@ _GKI_X86_64_MAKE_GOALS = [
"modules",
]
write_file(
name = "gki_system_dlkm_modules",
out = "android/gki_system_dlkm_modules",
content = COMMON_GKI_MODULES_LIST + [
# Ensure new line at the end.
"",
],
)
filegroup(
name = "aarch64_additional_kmi_symbol_lists",
srcs = [
# keep sorted
"android/abi_gki_aarch64_db845c",
"android/abi_gki_aarch64_exynos",
"android/abi_gki_aarch64_exynosauto",
"android/abi_gki_aarch64_galaxy",
"android/abi_gki_aarch64_honor",
"android/abi_gki_aarch64_imx",
@ -97,22 +116,7 @@ define_common_kernels(target_configs = {
},
})
define_db845c(
name = "db845c",
outs = [
"arch/arm64/boot/dts/qcom/qrb5165-rb5.dtb",
"arch/arm64/boot/dts/qcom/sdm845-db845c.dtb",
"arch/arm64/boot/dts/qcom/sm8450-qrd.dtb",
],
define_abi_targets = True,
kmi_symbol_list = "//common:android/abi_gki_aarch64_db845c",
make_goals = [
"modules",
"qcom/sdm845-db845c.dtb",
"qcom/qrb5165-rb5.dtb",
"qcom/sm8450-qrd.dtb",
],
module_outs = [
_DB845C_MODULE_OUTS = [
# keep sorted
"crypto/michael_mic.ko",
"drivers/base/regmap/regmap-sdw.ko",
@ -260,8 +264,6 @@ define_db845c(
"drivers/usb/host/ohci-pci.ko",
"drivers/usb/host/ohci-platform.ko",
"drivers/usb/typec/qcom-pmic-typec.ko",
"drivers/watchdog/pm8916_wdt.ko",
"drivers/watchdog/qcom-wdt.ko",
"net/mac80211/mac80211.ko",
"net/qrtr/qrtr.ko",
"net/qrtr/qrtr-mhi.ko",
@ -298,29 +300,104 @@ define_db845c(
"sound/soc/qcom/snd-soc-qcom-sdw.ko",
"sound/soc/qcom/snd-soc-sdm845.ko",
"sound/soc/qcom/snd-soc-sm8250.ko",
]
_DB845C_WATCHDOG_MODULE_OUTS = [
"drivers/watchdog/pm8916_wdt.ko",
"drivers/watchdog/qcom-wdt.ko",
]
kernel_build(
name = "db845c_no_kgdb",
outs = [
"arch/arm64/boot/dts/qcom/qrb5165-rb5.dtb",
"arch/arm64/boot/dts/qcom/sdm845-db845c.dtb",
"arch/arm64/boot/dts/qcom/sm8450-qrd.dtb",
],
# Enable mixed build.
base_kernel = ":kernel_aarch64",
build_config = "build.config.db845c",
collect_unstripped_modules = True,
kmi_symbol_list = "android/abi_gki_aarch64_db845c",
make_goals = [
"modules",
"qcom/sdm845-db845c.dtb",
"qcom/qrb5165-rb5.dtb",
"qcom/sm8450-qrd.dtb",
],
module_outs = _DB845C_MODULE_OUTS + _DB845C_WATCHDOG_MODULE_OUTS,
strip_modules = True,
)
# TODO(b/258259749): Convert rockpi4 to mixed build
kernel_build(
name = "rockpi4",
name = "db845c_with_kgdb",
outs = [
"Image",
"System.map",
"modules.builtin",
"modules.builtin.modinfo",
"rk3399-rock-pi-4b.dtb",
"vmlinux",
"vmlinux.symvers",
"arch/arm64/boot/dts/qcom/qrb5165-rb5.dtb",
"arch/arm64/boot/dts/qcom/sdm845-db845c.dtb",
"arch/arm64/boot/dts/qcom/sm8450-qrd.dtb",
],
build_config = "build.config.rockpi4",
dtstree = "//common-modules/virtual-device:rockpi4_dts",
# Enable mixed build.
base_kernel = ":kernel_aarch64",
build_config = "build.config.db845c",
make_goals = [
"Image",
"modules",
"rk3399-rock-pi-4b.dtb",
"qcom/sdm845-db845c.dtb",
"qcom/qrb5165-rb5.dtb",
"qcom/sm8450-qrd.dtb",
],
module_outs = COMMON_GKI_MODULES_LIST + [
module_outs = _DB845C_MODULE_OUTS,
strip_modules = True,
)
alias(
name = "db845c",
actual = select({
"//build/kernel/kleaf:kgdb_is_true": "db845c_with_kgdb",
"//conditions:default": "db845c_no_kgdb",
}),
)
kernel_abi(
name = "db845c_abi",
kernel_build = ":db845c",
kmi_symbol_list_add_only = True,
)
kernel_modules_install(
name = "db845c_modules_install",
kernel_build = ":db845c",
)
merged_kernel_uapi_headers(
name = "db845c_merged_kernel_uapi_headers",
kernel_build = ":db845c",
)
kernel_images(
name = "db845c_images",
build_initramfs = True,
kernel_build = ":db845c",
kernel_modules_install = ":db845c_modules_install",
)
copy_to_dist_dir(
name = "db845c_dist",
data = [
":db845c",
":db845c_images",
":db845c_modules_install",
":db845c_merged_kernel_uapi_headers",
# Mixed build: Additional GKI artifacts.
":kernel_aarch64",
":kernel_aarch64_modules",
":kernel_aarch64_additional_artifacts",
],
dist_dir = "out/db845/dist",
flat = True,
log = "info",
)
_ROCKPI4_MODULE_OUTS = [
# keep sorted
"drivers/block/virtio_blk.ko",
"drivers/char/hw_random/virtio-rng.ko",
@ -367,21 +444,78 @@ kernel_build(
"drivers/virtio/virtio_pci.ko",
"drivers/virtio/virtio_pci_legacy_dev.ko",
"drivers/virtio/virtio_pci_modern_dev.ko",
"drivers/watchdog/dw_wdt.ko",
"net/core/failover.ko",
]
_ROCKPI4_WATCHDOG_MODULE_OUTS = [
# keep sorted
"drivers/watchdog/dw_wdt.ko",
]
# TODO(b/258259749): Convert rockpi4 to mixed build
kernel_build(
name = "rockpi4_no_kgdb",
outs = [
"Image",
"System.map",
"modules.builtin",
"modules.builtin.modinfo",
"rk3399-rock-pi-4b.dtb",
"vmlinux",
"vmlinux.symvers",
],
build_config = "build.config.rockpi4",
dtstree = "//common-modules/virtual-device:rockpi4_dts",
make_goals = [
"Image",
"modules",
"rk3399-rock-pi-4b.dtb",
],
module_outs = COMMON_GKI_MODULES_LIST + _ROCKPI4_MODULE_OUTS + _ROCKPI4_WATCHDOG_MODULE_OUTS,
visibility = ["//visibility:private"],
)
# TODO(b/258259749): Convert rockpi4 to mixed build
kernel_build(
name = "rockpi4_with_kgdb",
outs = [
"Image",
"System.map",
"modules.builtin",
"modules.builtin.modinfo",
"rk3399-rock-pi-4b.dtb",
"vmlinux",
"vmlinux.symvers",
],
build_config = "build.config.rockpi4",
dtstree = "//common-modules/virtual-device:rockpi4_dts",
make_goals = [
"Image",
"modules",
"rk3399-rock-pi-4b.dtb",
],
module_outs = COMMON_GKI_MODULES_LIST + _ROCKPI4_MODULE_OUTS,
visibility = ["//visibility:private"],
)
alias(
name = "rockpi4",
actual = select({
"//build/kernel/kleaf:kgdb_is_true": "rockpi4_with_kgdb",
"//conditions:default": "rockpi4_no_kgdb",
}),
)
kernel_modules_install(
name = "rockpi4_modules_install",
kernel_build = "//common:rockpi4",
kernel_build = ":rockpi4",
)
kernel_images(
name = "rockpi4_images",
build_initramfs = True,
kernel_build = "//common:rockpi4",
kernel_modules_install = "//common:rockpi4_modules_install",
kernel_build = ":rockpi4",
kernel_modules_install = ":rockpi4_modules_install",
)
copy_to_dist_dir(
@ -434,6 +568,7 @@ kernel_build(
# Hack to actually check the build.
# Otherwise, Bazel thinks that there are no output files, and skip building.
outs = [".config"],
arch = "x86_64",
build_config = "build.config.allmodconfig.x86_64",
visibility = ["//visibility:private"],
)
@ -444,6 +579,7 @@ kernel_build(
# Hack to actually check the build.
# Otherwise, Bazel thinks that there are no output files, and skip building.
outs = [".config"],
arch = "arm",
build_config = "build.config.allmodconfig.arm",
visibility = ["//visibility:private"],
)

File diff suppressed because it is too large Load Diff

View File

@ -220,6 +220,7 @@
dma_fence_release
dma_fence_remove_callback
dma_fence_signal
dma_fence_signal_locked
dma_fence_wait_timeout
dma_free_attrs
dma_heap_buffer_alloc
@ -322,6 +323,7 @@
enable_irq
eth_type_trans
fd_install
fget
_find_first_bit
_find_last_bit
_find_next_and_bit
@ -467,10 +469,12 @@
kmemdup
kobject_add
kobject_create_and_add
kobject_get
kobject_init
kobject_init_and_add
kobject_put
kobject_uevent_env
kobj_sysfs_ops
krealloc
kstrdup
kstrndup
@ -605,6 +609,7 @@
of_reserved_mem_lookup
of_root
of_thermal_get_ntrips
on_each_cpu_cond_mask
panic
panic_notifier_list
param_array_ops
@ -633,6 +638,7 @@
perf_trace_run_bpf_submit
phy_power_off
phy_power_on
phy_set_mode_ext
pinconf_generic_dt_node_to_map
pinctrl_dev_get_drvdata
pinctrl_lookup_state
@ -646,6 +652,7 @@
platform_driver_unregister
platform_get_irq
platform_get_irq_byname
platform_get_irq_byname_optional
platform_get_resource
platform_get_resource_byname
__platform_register_drivers
@ -655,6 +662,8 @@
__pm_runtime_disable
pm_runtime_enable
pm_runtime_forbid
pm_runtime_force_resume
pm_runtime_force_suspend
__pm_runtime_idle
pm_runtime_no_callbacks
__pm_runtime_resume
@ -934,9 +943,11 @@
up_write
usb_add_function
usb_copy_descriptors
usb_enable_autosuspend
usb_ep_autoconfig
usb_function_register
usb_function_unregister
usb_hcd_is_primary_hcd
usb_hub_find_child
usb_interface_id
usb_put_function_instance
@ -1149,8 +1160,32 @@
__tracepoint_workqueue_execute_end
__tracepoint_workqueue_execute_start
# required by dw_mmc-exynos.ko
mmc_wait_for_req
# required by dw_mmc.ko
debugfs_create_x64
device_property_read_string_array
dma_map_sg_attrs
mmc_add_host
mmc_alloc_host
mmc_can_gpio_cd
mmc_detect_change
mmc_free_host
mmc_gpio_get_cd
mmc_gpio_get_ro
mmc_of_parse
mmc_regulator_get_supply
mmc_regulator_set_ocr
mmc_regulator_set_vqmmc
mmc_remove_host
mmc_request_done
sdio_signal_irq
# required by dwc3-exynos-usb.ko
device_create_managed_software_node
platform_device_add
platform_device_add_resources
platform_device_alloc
platform_device_del
platform_device_put
@ -1377,7 +1412,6 @@
drm_crtc_enable_color_mgmt
drm_crtc_handle_vblank
drm_crtc_vblank_get
drm_dev_has_vblank
drm_display_mode_to_videomode
drm_edid_to_sad
drm_format_info
@ -1452,6 +1486,7 @@
# required by exynos-hypervisor.ko
__arm_smccc_hvc
kimage_vaddr
__pkvm_load_el2_module
__pkvm_register_el2_call
@ -1472,6 +1507,17 @@
# required by exynos-ssld.ko
driver_register
# required by exynos-usb-audio-offloading.ko
snd_ctl_add
snd_ctl_new1
snd_vendor_set_ops
__traceiter_android_rvh_audio_usb_offload_disconnect
__traceiter_android_vh_audio_usb_offload_connect
__tracepoint_android_rvh_audio_usb_offload_disconnect
__tracepoint_android_vh_audio_usb_offload_connect
usb_choose_configuration
usb_ifnum_to_if
# required by exynos9945_sound.ko
clk_bulk_disable
clk_bulk_enable
@ -1514,6 +1560,9 @@
__tracepoint_clock_set_rate
update_devfreq
# required by exynos_esca.ko
debugfs_lookup
# required by exynos_mct_v3.ko
clockevents_config_and_register
__clocksource_register_scale
@ -1704,6 +1753,10 @@
__traceiter_android_rvh_gic_v3_set_affinity
__tracepoint_android_rvh_gic_v3_set_affinity
# required by irq-gic-vh.ko
__traceiter_android_vh_gic_set_affinity
__tracepoint_android_vh_gic_set_affinity
# required by is-actuator-ak737x.ko
v4l2_i2c_subdev_init
@ -1742,9 +1795,7 @@
# required by mpam_arch.ko
bitmap_alloc
kobj_sysfs_ops
kstrtou16
on_each_cpu_cond_mask
# required by nanohub.ko
arch_timer_read_counter
@ -1767,8 +1818,6 @@
cpuidle_resume_and_unlock
dev_pm_opp_find_freq_floor
dma_fence_free
dma_fence_signal_locked
fget
vsprintf
# required by pablo-icpu.ko
@ -1829,7 +1878,6 @@
pci_generic_config_write
pci_get_device
pci_rescan_bus
platform_get_irq_byname_optional
# required by pinctrl-samsung-core.ko
fwnode_handle_put
@ -1981,7 +2029,6 @@
vprintk_emit
# required by scsc_mx.ko
kobject_get
kobject_uevent
# required by scsc_platform_mif.ko
@ -2065,6 +2112,7 @@
backlight_device_unregister
__bitmap_xor
capable
copy_highpage
devfreq_monitor_resume
devfreq_monitor_start
devfreq_monitor_stop
@ -2165,12 +2213,15 @@
idr_get_next
idr_replace
jiffies64_to_msecs
mark_page_accessed
memremap
memunmap
mmu_notifier_synchronize
page_pinner_inited
__page_pinner_put_page
param_get_charp
param_ops_hexint
param_set_charp
pci_assign_unassigned_bus_resources
pci_bus_resource_n
pcie_bandwidth_available
@ -2196,13 +2247,19 @@
rb_first_postorder
__rb_insert_augmented
rb_next_postorder
reclaim_shmem_address_space
request_firmware_into_buf
seq_putc
seq_write
set_page_dirty
sg_alloc_table_from_pages_segment
shmem_file_setup
shmem_read_mapping_page_gfp
si_meminfo
sysfs_remove_files
__traceiter_android_vh_meminfo_proc_show
__traceiter_gpu_mem_total
__tracepoint_android_vh_meminfo_proc_show
__tracepoint_gpu_mem_total
ttm_bo_eviction_valuable
ttm_bo_init_reserved
@ -2217,6 +2274,7 @@
ttm_bo_put
ttm_bo_set_bulk_move
ttm_bo_unlock_delayed_workqueue
ttm_bo_unmap_virtual
ttm_bo_unpin
ttm_bo_validate
ttm_bo_vm_access
@ -2225,9 +2283,11 @@
ttm_bo_vm_fault_reserved
ttm_bo_vm_open
ttm_bo_vm_reserve
ttm_bo_wait
ttm_device_fini
ttm_device_init
ttm_eu_backoff_reservation
ttm_eu_fence_buffer_objects
ttm_eu_reserve_buffers
ttm_glob
ttm_lru_bulk_move_tail
@ -2392,7 +2452,7 @@
ufshcd_dme_get_attr
ufshcd_dme_set_attr
ufshcd_hold
ufshcd_mcq_poll_cqe_nolock
ufshcd_mcq_poll_cqe_lock
ufshcd_mcq_write_cqis
ufshcd_pltfrm_init
ufshcd_release
@ -2416,6 +2476,25 @@
kthread_complete_and_exit
usb_speed_string
# required by xhci-exynos-audio.ko
dma_pool_alloc
dma_pool_free
__kmalloc_node
kmalloc_node_trace
xhci_add_endpoint
xhci_address_device
xhci_check_trb_in_td_math
xhci_get_endpoint_index
xhci_get_ep_ctx
xhci_get_slot_ctx
xhci_initialize_ring_info
xhci_link_segments
xhci_remove_stream_mapping
xhci_ring_alloc
xhci_segment_free
xhci_trb_virt_to_dma
xhci_vendor_get_ops
# required by xhci-exynos.ko
device_property_read_u32_array
device_set_wakeup_enable
@ -2426,10 +2505,10 @@
usb_add_hcd
__usb_create_hcd
usb_disabled
usb_enable_autosuspend
usb_hcd_is_primary_hcd
usb_put_hcd
usb_remove_hcd
xhci_bus_resume
xhci_bus_suspend
xhci_gen_setup
xhci_init_driver
xhci_resume
@ -2440,3 +2519,61 @@
cpufreq_quick_get_max
kthread_bind_mask
stpcpy
# preserved by --additions-only
__alloc_percpu_gfp
arc4_crypt
arc4_setkey
__cpu_present_mask
crc32_be
crypto_aead_decrypt
crypto_aead_encrypt
crypto_aead_setauthsize
crypto_aead_setkey
crypto_alloc_aead
crypto_alloc_skcipher
__crypto_memneq
crypto_shash_finup
crypto_shash_setkey
crypto_skcipher_decrypt
crypto_skcipher_encrypt
crypto_skcipher_setkey
__crypto_xor
dev_fetch_sw_netstats
drm_dev_has_vblank
eth_mac_addr
ethtool_op_get_link
get_random_u16
__hw_addr_init
__hw_addr_sync
__hw_addr_unsync
kernel_param_lock
kernel_param_unlock
kfree_skb_list_reason
ktime_get_seconds
memmove
netdev_info
netdev_set_default_ethtool_ops
netif_receive_skb_list
net_ratelimit
pskb_expand_head
___pskb_trim
rhashtable_free_and_destroy
rhashtable_insert_slow
rhltable_init
__rht_bucket_nested
rht_bucket_nested
rht_bucket_nested_insert
round_jiffies
round_jiffies_relative
round_jiffies_up
sg_init_one
skb_checksum_help
skb_clone_sk
skb_complete_wifi_ack
skb_ensure_writable
__skb_get_hash
__skb_gso_segment
tasklet_unlock_wait
ufshcd_mcq_poll_cqe_nolock
unregister_netdevice_many

View File

@ -0,0 +1,156 @@
[abi_symbol_list]
# commonly used symbols
clk_prepare
clk_unprepare
_dev_err
_dev_info
devm_kmalloc
devm_request_threaded_irq
_dev_warn
dma_set_coherent_mask
dma_set_mask
kfree
kmalloc_caches
kmalloc_trace
__list_add_valid
log_post_read_mmio
log_post_write_mmio
log_read_mmio
log_write_mmio
module_layout
of_find_property
of_property_read_variable_u32_array
__platform_driver_register
_printk
__put_task_struct
_raw_spin_lock_irqsave
_raw_spin_unlock_irqrestore
snprintf
__stack_chk_fail
# required by exynos-chipid_v2.ko
kasprintf
of_device_is_available
of_find_matching_node_and_match
of_find_node_opts_by_path
of_iomap
of_property_read_string
soc_device_register
subsys_system_register
# required by exynos_tty.ko
atomic_notifier_chain_register
clk_disable
clk_enable
clk_get_rate
clk_set_rate
__const_udelay
dev_driver_string
device_create_file
devm_clk_get
devm_ioremap
devm_kfree
devm_pinctrl_get
disable_irq_nosync
dma_get_slave_caps
dma_map_page_attrs
dma_release_channel
dma_request_chan
dma_sync_single_for_cpu
dma_sync_single_for_device
dma_unmap_page_attrs
do_SAK
enable_irq
free_irq
gic_nonsecure_priorities
handle_sysrq
iomem_resource
is_vmalloc_addr
jiffies
kmalloc_large
of_alias_get_id
of_get_property
of_match_node
oops_in_progress
panic_notifier_list
pinctrl_lookup_state
pinctrl_select_state
platform_driver_unregister
platform_get_irq
platform_get_resource
_raw_spin_trylock
register_console
regmap_read
regmap_update_bits_base
__release_region
__request_region
request_threaded_irq
sched_clock
sg_init_table
sscanf
syscon_regmap_lookup_by_phandle
sysrq_mask
tty_flip_buffer_push
tty_insert_flip_string_fixed_flag
tty_kref_put
tty_port_tty_get
uart_add_one_port
uart_console_write
uart_get_baud_rate
uart_parse_options
uart_register_driver
uart_remove_one_port
uart_resume_port
uart_set_options
uart_suspend_port
uart_try_toggle_sysrq
uart_unregister_driver
uart_update_timeout
uart_write_wakeup
__warn_printk
# required by pl330.ko
alt_cb_patch_nops
amba_driver_register
amba_driver_unregister
debugfs_create_file
dev_err_probe
devm_free_irq
devm_ioremap_resource
__devm_reset_control_get
dma_alloc_attrs
dma_async_device_register
dma_async_device_unregister
dma_async_tx_descriptor_init
dmaengine_unmap_put
dma_free_attrs
dma_get_slave_channel
dma_map_resource
dma_unmap_resource
__kmalloc
ktime_get_mono_fast_ns
__list_del_entry_valid
loops_per_jiffy
of_dma_controller_free
of_dma_controller_register
pm_runtime_force_resume
pm_runtime_force_suspend
pm_runtime_irq_safe
__pm_runtime_resume
pm_runtime_set_autosuspend_delay
__pm_runtime_suspend
__pm_runtime_use_autosuspend
_raw_spin_lock
_raw_spin_unlock
reset_control_assert
reset_control_deassert
seq_lseek
seq_printf
seq_puts
seq_read
sg_next
single_open
single_release
tasklet_kill
__tasklet_schedule
tasklet_setup

View File

@ -62,6 +62,7 @@
_dev_info
__devm_alloc_percpu
devm_bitmap_zalloc
devm_clk_get_optional_enabled
devm_gen_pool_create
devm_gpiod_get_index
devm_ioremap
@ -77,6 +78,7 @@
dma_alloc_attrs
dma_free_attrs
down_write
dummy_irq_chip
elevator_alloc
elv_bio_merge_ok
elv_rb_add
@ -129,6 +131,7 @@
I_BDEV
iio_channel_get
iio_channel_release
iio_read_channel_raw
init_task
init_timer_key
input_close_device
@ -140,6 +143,8 @@
ioremap_prot
iounmap
iov_iter_kvec
__irq_domain_alloc_fwnode
irq_domain_free_fwnode
jiffies
kasan_flag_enabled
kasprintf
@ -218,12 +223,16 @@
param_ops_ullong
param_set_uint
__per_cpu_offset
phy_connect_direct
phy_find_first
phy_get_pause
phy_resolve_aneg_linkmode
phy_start_aneg
pinctrl_get
pinctrl_put
__platform_driver_register
platform_driver_unregister
platform_get_ethdev_address
preempt_schedule
print_hex_dump
_printk
@ -311,8 +320,10 @@
__traceiter_android_vh_is_fpsimd_save
__traceiter_android_vh_madvise_pageout_swap_entry
__traceiter_android_vh_madvise_swapin_walk_pmd_entry
__traceiter_android_vh_meminfo_cache_adjust
__traceiter_android_vh_mutex_wait_finish
__traceiter_android_vh_mutex_wait_start
__traceiter_android_vh_process_madvise_end
__traceiter_android_vh_ptype_head
__traceiter_android_vh_rtmutex_wait_finish
__traceiter_android_vh_rtmutex_wait_start
@ -320,16 +331,14 @@
__traceiter_android_vh_rwsem_read_wait_start
__traceiter_android_vh_rwsem_write_wait_finish
__traceiter_android_vh_rwsem_write_wait_start
__traceiter_android_vh_process_madvise_end
__traceiter_android_vh_sched_show_task
__traceiter_android_vh_show_smap
__traceiter_android_vh_si_mem_available_adjust
__traceiter_android_vh_si_meminfo_adjust
__traceiter_android_vh_smaps_pte_entry
__traceiter_android_vh_try_to_freeze_todo
__traceiter_android_vh_try_to_freeze_todo_unfrozen
__traceiter_android_vh_watchdog_timer_softlockup
__traceiter_android_vh_meminfo_cache_adjust
__traceiter_android_vh_si_mem_available_adjust
__traceiter_android_vh_si_meminfo_adjust
__traceiter_block_rq_insert
__traceiter_console
__traceiter_hrtimer_expire_entry
@ -352,8 +361,10 @@
__tracepoint_android_vh_is_fpsimd_save
__tracepoint_android_vh_madvise_pageout_swap_entry
__tracepoint_android_vh_madvise_swapin_walk_pmd_entry
__tracepoint_android_vh_meminfo_cache_adjust
__tracepoint_android_vh_mutex_wait_finish
__tracepoint_android_vh_mutex_wait_start
__tracepoint_android_vh_process_madvise_end
__tracepoint_android_vh_ptype_head
__tracepoint_android_vh_rtmutex_wait_finish
__tracepoint_android_vh_rtmutex_wait_start
@ -361,16 +372,14 @@
__tracepoint_android_vh_rwsem_read_wait_start
__tracepoint_android_vh_rwsem_write_wait_finish
__tracepoint_android_vh_rwsem_write_wait_start
__tracepoint_android_vh_process_madvise_end
__tracepoint_android_vh_sched_show_task
__tracepoint_android_vh_show_smap
__tracepoint_android_vh_si_mem_available_adjust
__tracepoint_android_vh_si_meminfo_adjust
__tracepoint_android_vh_smaps_pte_entry
__tracepoint_android_vh_try_to_freeze_todo
__tracepoint_android_vh_try_to_freeze_todo_unfrozen
__tracepoint_android_vh_watchdog_timer_softlockup
__tracepoint_android_vh_meminfo_cache_adjust
__tracepoint_android_vh_si_mem_available_adjust
__tracepoint_android_vh_si_meminfo_adjust
__tracepoint_block_rq_insert
__tracepoint_console
__tracepoint_hrtimer_expire_entry

View File

@ -98,10 +98,10 @@
blocking_notifier_call_chain
blocking_notifier_chain_register
blocking_notifier_chain_unregister
bpf_trace_run1
bpf_trace_run10
bpf_trace_run11
bpf_trace_run12
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
@ -690,8 +690,11 @@
drm_atomic_helper_commit_modeset_disables
drm_atomic_helper_commit_modeset_enables
drm_atomic_helper_commit_planes
__drm_atomic_helper_connector_destroy_state
drm_atomic_helper_connector_destroy_state
__drm_atomic_helper_connector_duplicate_state
drm_atomic_helper_connector_duplicate_state
__drm_atomic_helper_connector_reset
drm_atomic_helper_connector_reset
__drm_atomic_helper_crtc_destroy_state
__drm_atomic_helper_crtc_duplicate_state
@ -1089,6 +1092,7 @@
ida_destroy
ida_free
idr_alloc
idr_alloc_cyclic
idr_alloc_u32
idr_destroy
idr_find
@ -1180,6 +1184,7 @@
iommu_report_device_fault
iommu_unmap
ioremap_prot
io_schedule_timeout
iounmap
iov_iter_init
iov_iter_kvec
@ -1235,9 +1240,9 @@
is_vmalloc_addr
iterate_dir
iterate_fd
jiffies
jiffies_64_to_clock_t
jiffies64_to_nsecs
jiffies
jiffies_to_msecs
jiffies_to_usecs
kasan_flag_enabled
@ -1430,8 +1435,8 @@
memremap
memscan
mem_section
memset64
memset
memset64
__memset_io
memstart_addr
memunmap
@ -1563,8 +1568,8 @@
nla_find
nla_memcpy
__nla_parse
nla_put_64bit
nla_put
nla_put_64bit
nla_put_nohdr
nla_reserve
nla_strscpy
@ -1687,6 +1692,8 @@
out_of_line_wait_on_bit_timeout
overflowuid
page_endio
page_pinner_inited
__page_pinner_put_page
page_pool_alloc_pages
page_pool_create
page_pool_destroy
@ -1856,6 +1863,7 @@
pm_runtime_force_suspend
pm_runtime_get_if_active
__pm_runtime_idle
pm_runtime_irq_safe
__pm_runtime_resume
pm_runtime_set_autosuspend_delay
__pm_runtime_set_status
@ -2061,6 +2069,7 @@
remove_proc_subtree
remove_wait_queue
request_firmware
request_firmware_direct
request_firmware_nowait
__request_module
__request_percpu_irq
@ -2123,6 +2132,7 @@
rtnl_unregister
rtnl_unregister_all
runqueues
sbitmap_weight
sched_clock
sched_clock_register
sched_feat_keys
@ -2145,11 +2155,13 @@
scnprintf
scsi_autopm_get_device
scsi_autopm_put_device
scsi_block_requests
scsi_device_get
scsi_device_put
scsi_execute_cmd
__scsi_iterate_devices
scsi_print_sense_hdr
scsi_unblock_requests
sdio_claim_host
sdio_claim_irq
sdio_disable_func
@ -2283,6 +2295,7 @@
skb_try_coalesce
skb_tstamp_tx
skb_unlink
sk_capable
sk_common_release
sk_error_report
sk_filter_trim_cap
@ -2501,6 +2514,7 @@
tasklist_lock
__task_pid_nr_ns
__task_rq_lock
task_rq_lock
thermal_cooling_device_unregister
thermal_of_cooling_device_register
thermal_pressure
@ -2576,6 +2590,7 @@
__traceiter_android_rvh_tick_entry
__traceiter_android_rvh_try_to_wake_up
__traceiter_android_rvh_try_to_wake_up_success
__traceiter_android_rvh_uclamp_eff_get
__traceiter_android_rvh_update_cpu_capacity
__traceiter_android_rvh_wake_up_new_task
__traceiter_android_vh_alter_futex_plist_add
@ -2670,6 +2685,7 @@
__tracepoint_android_rvh_tick_entry
__tracepoint_android_rvh_try_to_wake_up
__tracepoint_android_rvh_try_to_wake_up_success
__tracepoint_android_rvh_uclamp_eff_get
__tracepoint_android_rvh_update_cpu_capacity
__tracepoint_android_rvh_wake_up_new_task
__tracepoint_android_vh_alter_futex_plist_add
@ -2801,6 +2817,7 @@
udp_tunnel6_xmit_skb
udp_tunnel_sock_release
udp_tunnel_xmit_skb
ufshcd_config_pwr_mode
ufshcd_delay_us
ufshcd_dme_configure_adapt
ufshcd_dme_get_attr
@ -2810,14 +2827,19 @@
ufshcd_get_pwr_dev_param
ufshcd_hba_enable
ufshcd_hba_stop
ufshcd_hold
ufshcd_init_pwr_dev_param
ufshcd_link_recovery
ufshcd_make_hba_operational
ufshcd_mcq_config_mac
ufshcd_mcq_make_queues_operational
ufshcd_mcq_read_cqis
ufshcd_pltfrm_init
ufshcd_query_attr
ufshcd_query_descriptor_retry
ufshcd_query_flag
ufshcd_read_desc_param
ufshcd_release
ufshcd_remove
ufshcd_resume_complete
ufshcd_runtime_resume

View File

@ -8,6 +8,7 @@
blk_rq_map_user
blk_rq_map_user_iov
blk_start_plug
cgroup_add_dfl_cftypes
cgroup_add_legacy_cftypes
console_printk
cpufreq_frequency_table_get_index

View File

@ -11,6 +11,7 @@
alarm_try_to_cancel
alloc_chrdev_region
alloc_etherdev_mqs
alloc_iova_fast
alloc_netdev_mqs
__alloc_pages
alloc_pages_exact
@ -770,6 +771,7 @@
frame_vector_create
frame_vector_destroy
frame_vector_to_pages
free_iova_fast
free_irq
free_netdev
__free_pages
@ -800,11 +802,13 @@
gen_pool_add_owner
gen_pool_alloc_algo_owner
gen_pool_avail
gen_pool_best_fit
gen_pool_create
gen_pool_destroy
gen_pool_first_fit_align
gen_pool_free_owner
gen_pool_has_addr
gen_pool_set_algo
gen_pool_size
get_cpu_device
get_cpu_idle_time
@ -921,6 +925,7 @@
inc_zone_page_state
inet_csk_get_port
init_dummy_netdev
init_iova_domain
init_net
init_pid_ns
__init_rwsem
@ -990,6 +995,7 @@
io_schedule
io_schedule_timeout
iounmap
iova_domain_init_rcaches
ip_compute_csum
ip_send_check
__irq_alloc_descs
@ -1492,6 +1498,7 @@
___pskb_trim
put_device
put_disk
put_iova_domain
__put_net
put_pid
put_sg_io_hdr
@ -1959,6 +1966,8 @@
topology_update_thermal_pressure
_totalram_pages
touch_softlockup_watchdog
__trace_bprintk
__trace_bputs
trace_event_buffer_commit
trace_event_buffer_reserve
trace_event_printf

View File

@ -951,6 +951,7 @@
drm_gem_object_init
drm_gem_object_lookup
drm_gem_object_release
drm_gem_prime_export
drm_gem_prime_fd_to_handle
drm_gem_prime_handle_to_fd
drm_gem_private_object_init
@ -1018,6 +1019,7 @@
drm_panel_remove
drm_plane_cleanup
drm_plane_create_rotation_property
drm_plane_from_index
drm_poll
drm_prime_gem_destroy
drm_prime_pages_to_sg
@ -1147,6 +1149,7 @@
fwnode_get_name
fwnode_get_named_child_node
fwnode_get_next_child_node
fwnode_get_phy_node
fwnode_handle_get
fwnode_handle_put
fwnode_property_present
@ -1234,10 +1237,10 @@
gh_rm_notifier_unregister
gh_rm_register_platform_ops
gh_rm_unregister_platform_ops
gic_nonsecure_priorities
gic_v3_cpu_init
gic_v3_dist_init
gic_v3_dist_wait_for_rwp
gic_nonsecure_priorities
gov_attr_set_init
gov_attr_set_put
governor_sysfs_ops
@ -1541,6 +1544,7 @@
iommu_group_set_iommudata
iommu_iova_to_phys
iommu_map
iommu_map_atomic
iommu_map_sg
iommu_present
iommu_put_resv_regions
@ -2311,6 +2315,7 @@
phylink_ethtool_set_eee
phylink_ethtool_set_pauseparam
phylink_ethtool_set_wol
phylink_expects_phy
phylink_fwnode_phy_connect
phylink_generic_validate
phylink_get_eee_err
@ -3335,12 +3340,12 @@
__traceiter_android_vh_binder_set_priority
__traceiter_android_vh_binder_wakeup_ilocked
__traceiter_android_vh_build_sched_domains
__traceiter_android_vh_check_hibernation_swap
__traceiter_android_vh_check_uninterrupt_tasks
__traceiter_android_vh_check_uninterrupt_tasks_done
__traceiter_android_vh_cpufreq_fast_switch
__traceiter_android_vh_cpufreq_resolve_freq
__traceiter_android_vh_cpufreq_target
__traceiter_android_vh_check_hibernation_swap
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_cpuidle_psci_enter
@ -3348,20 +3353,20 @@
__traceiter_android_vh_disable_thermal_cooling_stats
__traceiter_android_vh_do_wake_up_sync
__traceiter_android_vh_dump_throttled_rt_tasks
__traceiter_android_vh_free_task
__traceiter_android_vh_encrypt_page
__traceiter_android_vh_free_task
__traceiter_android_vh_ftrace_dump_buffer
__traceiter_android_vh_ftrace_format_check
__traceiter_android_vh_ftrace_oops_enter
__traceiter_android_vh_ftrace_oops_exit
__traceiter_android_vh_ftrace_size_check
__traceiter_android_vh_ignore_dmabuf_vmap_bounds
__traceiter_android_vh_gic_v3_suspend
__traceiter_android_vh_ignore_dmabuf_vmap_bounds
__traceiter_android_vh_init_aes_encrypt
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_jiffies_update
__traceiter_android_vh_kswapd_per_node
__traceiter_android_vh_mpam_set
__traceiter_android_vh_init_aes_encrypt
__traceiter_android_vh_post_image_save
__traceiter_android_vh_printk_hotplug
__traceiter_android_vh_rproc_recovery
@ -3478,12 +3483,12 @@
__tracepoint_android_vh_binder_set_priority
__tracepoint_android_vh_binder_wakeup_ilocked
__tracepoint_android_vh_build_sched_domains
__tracepoint_android_vh_check_hibernation_swap
__tracepoint_android_vh_check_uninterrupt_tasks
__tracepoint_android_vh_check_uninterrupt_tasks_done
__tracepoint_android_vh_cpufreq_fast_switch
__tracepoint_android_vh_cpufreq_resolve_freq
__tracepoint_android_vh_cpufreq_target
__tracepoint_android_vh_check_hibernation_swap
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_cpuidle_psci_enter
@ -3491,20 +3496,20 @@
__tracepoint_android_vh_disable_thermal_cooling_stats
__tracepoint_android_vh_do_wake_up_sync
__tracepoint_android_vh_dump_throttled_rt_tasks
__tracepoint_android_vh_free_task
__tracepoint_android_vh_encrypt_page
__tracepoint_android_vh_free_task
__tracepoint_android_vh_ftrace_dump_buffer
__tracepoint_android_vh_ftrace_format_check
__tracepoint_android_vh_ftrace_oops_enter
__tracepoint_android_vh_ftrace_oops_exit
__tracepoint_android_vh_ftrace_size_check
__tracepoint_android_vh_ignore_dmabuf_vmap_bounds
__tracepoint_android_vh_gic_v3_suspend
__tracepoint_android_vh_ignore_dmabuf_vmap_bounds
__tracepoint_android_vh_init_aes_encrypt
__tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_jiffies_update
__tracepoint_android_vh_kswapd_per_node
__tracepoint_android_vh_mpam_set
__tracepoint_android_vh_init_aes_encrypt
__tracepoint_android_vh_post_image_save
__tracepoint_android_vh_printk_hotplug
__tracepoint_android_vh_rproc_recovery

View File

@ -1,5 +1,6 @@
[abi_symbol_list]
# commonly used symbols
access_process_vm
add_cpu
add_wait_queue
add_wait_queue_exclusive
@ -14,6 +15,8 @@
__alloc_skb
alloc_workqueue
alt_cb_patch_nops
android_debug_per_cpu_symbol
android_debug_symbol
arch_freq_scale
__arch_copy_from_user
__arch_copy_to_user
@ -171,6 +174,7 @@
dma_unmap_page_attrs
do_trace_netlink_extack
double_rq_lock
down
down_read
down_write
driver_unregister
@ -181,6 +185,7 @@
__dynamic_dev_dbg
__dynamic_netdev_dbg
__dynamic_pr_debug
inet_proto_csum_replace4
init_task
enable_irq
eth_mac_addr
@ -191,7 +196,10 @@
eth_type_trans
eth_validate_addr
extcon_get_state
filp_open_block
find_get_pid
_find_next_bit
find_task_by_vpid
finish_wait
flush_delayed_work
flush_work
@ -217,6 +225,7 @@
get_device
__get_free_pages
get_random_bytes
get_zeroed_page
gic_nonsecure_priorities
gpiochip_disable_irq
gpiochip_enable_irq
@ -286,6 +295,10 @@
kmem_cache_destroy
kmem_cache_free
kmemdup
kmsg_dump_get_buffer
kmsg_dump_register
kmsg_dump_rewind
kmsg_dump_unregister
kobject_uevent
kobject_uevent_env
ksoftirqd
@ -310,11 +323,13 @@
log_post_write_mmio
log_read_mmio
log_write_mmio
mas_find
mbox_chan_received_data
mbox_chan_txdone
memcpy
memmove
memset
mem_section
memstart_addr
migrate_swap
misc_deregister
@ -354,12 +369,15 @@
netlink_unicast
net_ratelimit
nf_conntrack_destroy
nf_conntrack_find_get
nf_ct_destroy
nla_memcpy
__nla_parse
nla_put
nla_put_64bit
nla_strscpy
__nlmsg_put
node_states
noop_llseek
nr_cpu_ids
ns_capable
@ -437,9 +455,11 @@
__pskb_copy_fclone
pskb_expand_head
__pskb_pull_tail
pskb_trim_rcsum_slow
put_cmsg
put_device
__put_net
put_pid
__put_task_struct
queue_delayed_work_on
queue_work_on
@ -498,6 +518,7 @@
request_threaded_irq
return_address
root_task_group
rt6_lookup
rtc_time64_to_tm
rtnl_is_locked
rtnl_link_register
@ -513,6 +534,7 @@
scnprintf
security_sk_clone
security_sock_graft
send_sig_info
seq_buf_printf
seq_lseek
seq_printf
@ -688,6 +710,7 @@
__traceiter_android_vh_cpufreq_target
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_enable_thermal_power_throttle
__traceiter_android_vh_get_thermal_zone_device
__traceiter_android_vh_modify_thermal_request_freq
__traceiter_android_vh_modify_thermal_target_freq
@ -765,6 +788,7 @@
__tracepoint_android_vh_cpufreq_target
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_enable_thermal_power_throttle
__tracepoint_android_vh_get_thermal_zone_device
__tracepoint_android_vh_modify_thermal_request_freq
__tracepoint_android_vh_modify_thermal_target_freq
@ -819,6 +843,7 @@
unregister_pernet_device
unregister_pernet_subsys
unregister_pm_notifier
up
up_read
up_write
update_rq_clock
@ -1204,6 +1229,17 @@
sock_common_recvmsg
sock_common_setsockopt
# required by ims_bridge.ko
icmp6_send
inet_select_addr
ip6_find_1stfragopt
ip6_route_output_flags
ipv6_dev_get_saddr
ipv6_select_ident
nf_ct_get_tuplepr
nf_ct_invert_tuple
xfrm_state_afinfo_get_rcu
# required by kfifo_buf.ko
devres_add
__devres_alloc_node
@ -1444,6 +1480,9 @@
power_supply_put
power_supply_temp2resist_simple
# required by sc27xx_pd.ko
devm_extcon_register_notifier_all
# required by sc27xx_typec.ko
devm_extcon_dev_allocate
devm_extcon_dev_register
@ -1543,6 +1582,9 @@
kthread_queue_work
kthread_worker_fn
# required by sprd-charger-manager.ko
orderly_poweroff
# required by sprd-dma.ko
dma_async_device_register
dma_async_device_unregister
@ -1651,6 +1693,30 @@
dev_pm_opp_add
update_devfreq
# required by sprd_tcpm.ko
typec_altmode_attention
typec_altmode_notify
typec_altmode_update_active
typec_altmode_vdm
typec_find_power_role
typec_match_altmode
typec_partner_register_altmode
typec_port_register_altmode
typec_set_mode
typec_unregister_altmode
# required by sprd_thermal_ctl.ko
cpufreq_quick_get_max
devfreq_cooling_em_register
devfreq_cooling_unregister
# required by sprd_soc_thm.ko
thermal_of_zone_unregister
thermal_zone_bind_cooling_device
thermal_zone_device_register_with_trips
thermal_zone_device_update
thermal_zone_unbind_cooling_device
# required by sprd_hwspinlock.ko
devm_hwspin_lock_register
@ -1796,6 +1862,7 @@
sk_stop_timer
sock_recvmsg
sock_rfree
strcasecmp
strchr
strrchr
sysctl_vals

View File

@ -164,6 +164,7 @@
__kfifo_to_user
__kfifo_to_user_r
__kmalloc
__kmalloc_node
__kmalloc_node_track_caller
__kthread_init_worker
__list_add_valid
@ -240,6 +241,7 @@
__reset_control_get
__rht_bucket_nested
__rt_mutex_init
__sbitmap_queue_get
__scsi_add_device
__scsi_iterate_devices
__scsi_print_sense
@ -430,6 +432,7 @@
__traceiter_android_vh_update_topology_flags_workfn
__traceiter_android_vh_watchdog_timer_softlockup
__traceiter_binder_transaction_received
__traceiter_block_rq_insert
__traceiter_clock_set_rate
__traceiter_console
__traceiter_cpu_frequency_limits
@ -598,6 +601,7 @@
__tracepoint_android_vh_update_topology_flags_workfn
__tracepoint_android_vh_watchdog_timer_softlockup
__tracepoint_binder_transaction_received
__tracepoint_block_rq_insert
__tracepoint_clock_set_rate
__tracepoint_console
__tracepoint_cpu_frequency_limits
@ -798,12 +802,14 @@
bitmap_release_region
bitmap_to_arr32
bitmap_zalloc
blk_bio_list_merge
blk_execute_rq
blk_execute_rq_nowait
blk_mq_alloc_request
blk_mq_alloc_sq_tag_set
blk_mq_alloc_tag_set
blk_mq_complete_request
blk_mq_debugfs_rq_show
blk_mq_end_request
blk_mq_end_request_batch
blk_mq_free_request
@ -814,6 +820,7 @@
blk_mq_quiesce_queue
blk_mq_requeue_request
blk_mq_rq_cpu
blk_mq_run_hw_queue
blk_mq_start_request
blk_mq_start_stopped_hw_queues
blk_mq_stop_hw_queue
@ -843,6 +850,8 @@
blk_rq_map_user_io
blk_rq_map_user_iov
blk_rq_unmap_user
blk_stat_disable_accounting
blk_stat_enable_accounting
blk_status_to_errno
blk_update_request
blkdev_get_by_dev
@ -1986,6 +1995,9 @@
edac_device_free_ctl_info
edac_device_handle_ce_count
edac_device_handle_ue_count
elv_register
elv_unregister
elevator_alloc
em_cpu_get
emergency_restart
enable_irq
@ -3832,6 +3844,15 @@
safe_candev_priv
sampling_rate_store
sb800_prefetch
sbitmap_add_wait_queue
sbitmap_any_bit_set
sbitmap_del_wait_queue
sbitmap_init_node
sbitmap_queue_clear
sbitmap_queue_init_node
sbitmap_queue_min_shallow_depth
sbitmap_queue_resize
sbitmap_queue_show
scatterwalk_ffwd
scatterwalk_map_and_copy
sched_clock
@ -3924,6 +3945,8 @@
seq_hex_dump
seq_hlist_next
seq_hlist_start_head
seq_list_next
seq_list_start
seq_lseek
seq_open
seq_printf
@ -4419,6 +4442,7 @@
timecounter_cyc2time
timecounter_init
timecounter_read
timer_reduce
timer_unstable_counter_workaround
timespec64_to_jiffies
tipc_dump_done

View File

@ -144,6 +144,9 @@
sbitmap_weight
scsi_done
scsi_remove_device
param_get_bool
blk_mq_unique_tag
param_set_uint_minmax
#required by mi_sched.ko
__traceiter_android_vh_scheduler_tick
@ -186,6 +189,34 @@
io_cgrp_subsys_on_dfl_key
ioc_lookup_icq
bdi_dev_name
blk_mq_run_hw_queues
blkcg_policy_register
elv_register
blkcg_policy_unregister
elv_rb_former_request
elv_rb_latter_request
elevator_alloc
blk_stat_enable_accounting
blkcg_deactivate_policy
blk_stat_disable_accounting
sbitmap_queue_min_shallow_depth
blk_mq_sched_try_merge
elv_bio_merge_ok
elv_rb_find
elv_rb_del
elv_rb_add
elv_rqhash_del
ioc_find_get_icq
put_io_context
blk_mq_sched_try_insert_merge
elv_unregister
__tracepoint_block_rq_insert
elv_rqhash_add
__traceiter_block_rq_insert
kmem_cache_alloc_node
bio_associate_blkg_from_css
kernfs_path_from_node
blkcg_activate_policy
#required by metis.ko module
__traceiter_android_vh_rwsem_read_wait_start
@ -205,6 +236,7 @@
__tracepoint_android_rvh_set_cpus_allowed_comm
__tracepoint_android_rvh_dequeue_task
cpuset_cpus_allowed
cpufreq_update_policy
#required by millet.ko
__traceiter_android_vh_binder_wait_for_work
@ -231,6 +263,9 @@
#required by touch module
power_supply_is_system_supplied
#required by mi-power.ko
class_create_file_ns
#required by mi_mempool.ko
__traceiter_android_vh_alloc_pages_reclaim_bypass
__traceiter_android_vh_alloc_pages_failure_bypass
@ -255,3 +290,19 @@
tty_port_close_start
tty_port_lower_dtr_rts
tty_port_close_end
#required by mtdoops.ko
of_node_name_prefix
bdi_unregister
#required by dispaly.ko
mipi_dsi_dcs_set_display_off
#required by debug_ext.ko
of_find_all_nodes
# required by mi_mem_center.ko
__traceiter_android_vh_rmqueue_smallest_bypass
__tracepoint_android_vh_rmqueue_smallest_bypass
__traceiter_android_vh_free_one_page_bypass
__tracepoint_android_vh_free_one_page_bypass

View File

@ -335,3 +335,13 @@ wpan_phy_free
wpan_phy_new
wpan_phy_register
wpan_phy_unregister
wwan_create_port
wwan_get_debugfs_dir
wwan_port_get_drvdata
wwan_port_rx
wwan_port_txoff
wwan_port_txon
wwan_put_debugfs_dir
wwan_register_ops
wwan_remove_port
wwan_unregister_ops

View File

@ -335,3 +335,13 @@ wpan_phy_free
wpan_phy_new
wpan_phy_register
wpan_phy_unregister
wwan_create_port
wwan_get_debugfs_dir
wwan_port_get_drvdata
wwan_port_rx
wwan_port_txoff
wwan_port_txon
wwan_put_debugfs_dir
wwan_register_ops
wwan_remove_port
wwan_unregister_ops

View File

@ -23,6 +23,7 @@ drivers/net/usb/r8152.ko
drivers/net/usb/r8153_ecm.ko
drivers/net/usb/rtl8150.ko
drivers/net/usb/usbnet.ko
drivers/net/wwan/wwan.ko
drivers/usb/class/cdc-acm.ko
drivers/usb/serial/ftdi_sio.ko
drivers/usb/serial/usbserial.ko

View File

@ -1,59 +0,0 @@
drivers/block/zram/zram.ko
drivers/bluetooth/btbcm.ko
drivers/bluetooth/btqca.ko
drivers/bluetooth/btsdio.ko
drivers/bluetooth/hci_uart.ko
drivers/net/can/dev/can-dev.ko
drivers/net/can/slcan/slcan.ko
drivers/net/can/vcan.ko
drivers/net/mii.ko
drivers/net/ppp/bsd_comp.ko
drivers/net/ppp/ppp_deflate.ko
drivers/net/ppp/ppp_generic.ko
drivers/net/ppp/ppp_mppe.ko
drivers/net/ppp/pppox.ko
drivers/net/ppp/pptp.ko
drivers/net/slip/slhc.ko
drivers/net/usb/aqc111.ko
drivers/net/usb/asix.ko
drivers/net/usb/ax88179_178a.ko
drivers/net/usb/cdc_eem.ko
drivers/net/usb/cdc_ether.ko
drivers/net/usb/cdc_ncm.ko
drivers/net/usb/r8152.ko
drivers/net/usb/r8153_ecm.ko
drivers/net/usb/rtl8150.ko
drivers/net/usb/usbnet.ko
drivers/usb/class/cdc-acm.ko
drivers/usb/serial/ftdi_sio.ko
drivers/usb/serial/usbserial.ko
kernel/kheaders.ko
lib/crypto/libarc4.ko
mm/zsmalloc.ko
net/6lowpan/6lowpan.ko
net/6lowpan/nhc_dest.ko
net/6lowpan/nhc_fragment.ko
net/6lowpan/nhc_hop.ko
net/6lowpan/nhc_ipv6.ko
net/6lowpan/nhc_mobility.ko
net/6lowpan/nhc_routing.ko
net/6lowpan/nhc_udp.ko
net/8021q/8021q.ko
net/bluetooth/bluetooth.ko
net/bluetooth/hidp/hidp.ko
net/bluetooth/rfcomm/rfcomm.ko
net/can/can.ko
net/can/can-bcm.ko
net/can/can-gw.ko
net/can/can-raw.ko
net/ieee802154/6lowpan/ieee802154_6lowpan.ko
net/ieee802154/ieee802154.ko
net/ieee802154/ieee802154_socket.ko
net/l2tp/l2tp_core.ko
net/l2tp/l2tp_ppp.ko
net/mac802154/mac802154.ko
net/nfc/nfc.ko
net/rfkill/rfkill.ko
net/tipc/diag.ko
net/tipc/tipc.ko

View File

@ -23,6 +23,7 @@ drivers/net/usb/r8152.ko
drivers/net/usb/r8153_ecm.ko
drivers/net/usb/rtl8150.ko
drivers/net/usb/usbnet.ko
drivers/net/wwan/wwan.ko
drivers/usb/class/cdc-acm.ko
drivers/usb/serial/ftdi_sio.ko
drivers/usb/serial/usbserial.ko

View File

@ -115,7 +115,7 @@ CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_AREAS=16
CONFIG_CMA_AREAS=32
# CONFIG_ZONE_DMA is not set
CONFIG_ZONE_DEVICE=y
CONFIG_ANON_VMA_NAME=y
@ -365,6 +365,8 @@ CONFIG_USB_NET_AQC111=m
# CONFIG_WLAN_VENDOR_TI is not set
# CONFIG_WLAN_VENDOR_ZYDAS is not set
# CONFIG_WLAN_VENDOR_QUANTENNA is not set
CONFIG_WWAN=m
# CONFIG_WWAN_DEBUGFS is not set
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_MOUSE_PS2 is not set
@ -575,6 +577,7 @@ CONFIG_GENERIC_PHY=y
CONFIG_POWERCAP=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_BINDERFS=y
CONFIG_ANDROID_DEBUG_SYMBOLS=y
CONFIG_ANDROID_VENDOR_HOOKS=y
CONFIG_ANDROID_DEBUG_KINFO=y
CONFIG_LIBNVDIMM=y

View File

@ -203,6 +203,11 @@ struct kvm_arch {
/* Mandated version of PSCI */
u32 psci_version;
#ifndef __GENKSYMS__
/* Protects VM-scoped configuration data */
struct mutex config_lock;
#endif
/*
* If we encounter a data abort without valid instruction syndrome
* information, report this to user space. User space can (and
@ -350,7 +355,11 @@ struct kvm_cpu_context {
u64 sys_regs[NR_SYS_REGS];
#ifdef __GENKSYMS__
struct kvm_vcpu *__hyp_running_vcpu;
#else
void *__hyp_running_vcpu;
#endif
};
struct kvm_host_data {
@ -509,6 +518,9 @@ struct kvm_vcpu_arch {
/* vcpu power state */
struct kvm_mp_state mp_state;
#ifndef __GENKSYMS__
spinlock_t mp_state_lock;
#endif
union {
/* Cache some mmu pages needed inside spinlock regions */

View File

@ -253,9 +253,11 @@ static inline const void *__tag_set(const void *addr, u8 tag)
}
#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging_sync() mte_enable_kernel_sync()
#define arch_enable_tagging_async() mte_enable_kernel_async()
#define arch_enable_tagging_asymm() mte_enable_kernel_asymm()
#define arch_enable_tag_checks_sync() mte_enable_kernel_sync()
#define arch_enable_tag_checks_async() mte_enable_kernel_async()
#define arch_enable_tag_checks_asymm() mte_enable_kernel_asymm()
#define arch_suppress_tag_checks_start() mte_enable_tco()
#define arch_suppress_tag_checks_stop() mte_disable_tco()
#define arch_force_async_tag_fault() mte_check_tfsr_exit()
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)

View File

@ -13,8 +13,73 @@
#include <linux/types.h>
#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
static inline bool system_uses_mte_async_or_asymm_mode(void)
{
return static_branch_unlikely(&mte_async_or_asymm_mode);
}
#else /* CONFIG_KASAN_HW_TAGS */
static inline bool system_uses_mte_async_or_asymm_mode(void)
{
return false;
}
#endif /* CONFIG_KASAN_HW_TAGS */
#ifdef CONFIG_ARM64_MTE
/*
* The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
* affects EL0 and TCF affects EL1 irrespective of which TTBR is
* used.
* The kernel accesses TTBR0 usually with LDTR/STTR instructions
* when UAO is available, so these would act as EL0 accesses using
* TCF0.
* However futex.h code uses exclusives which would be executed as
* EL1, this can potentially cause a tag check fault even if the
* user disables TCF0.
*
* To address the problem we set the PSTATE.TCO bit in uaccess_enable()
* and reset it in uaccess_disable().
*
* The Tag check override (TCO) bit disables temporarily the tag checking
* preventing the issue.
*/
static inline void mte_disable_tco(void)
{
asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}
static inline void mte_enable_tco(void)
{
asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}
/*
* These functions disable tag checking only if in MTE async mode
* since the sync mode generates exceptions synchronously and the
* nofault or load_unaligned_zeropad can handle them.
*/
static inline void __mte_disable_tco_async(void)
{
if (system_uses_mte_async_or_asymm_mode())
mte_disable_tco();
}
static inline void __mte_enable_tco_async(void)
{
if (system_uses_mte_async_or_asymm_mode())
mte_enable_tco();
}
/*
* These functions are meant to be only used from KASAN runtime through
* the arch_*() interface defined in asm/memory.h.
@ -138,6 +203,22 @@ void mte_enable_kernel_asymm(void);
#else /* CONFIG_ARM64_MTE */
static inline void mte_disable_tco(void)
{
}
static inline void mte_enable_tco(void)
{
}
static inline void __mte_disable_tco_async(void)
{
}
static inline void __mte_enable_tco_async(void)
{
}
static inline u8 mte_get_ptr_tag(void *ptr)
{
return 0xFF;

View File

@ -145,14 +145,6 @@ static inline void mte_disable_tco_entry(struct task_struct *task)
}
#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
static inline bool system_uses_mte_async_or_asymm_mode(void)
{
return static_branch_unlikely(&mte_async_or_asymm_mode);
}
void mte_check_tfsr_el1(void);
static inline void mte_check_tfsr_entry(void)
@ -179,10 +171,6 @@ static inline void mte_check_tfsr_exit(void)
mte_check_tfsr_el1();
}
#else
static inline bool system_uses_mte_async_or_asymm_mode(void)
{
return false;
}
static inline void mte_check_tfsr_el1(void)
{
}

View File

@ -136,55 +136,9 @@ static inline void __uaccess_enable_hw_pan(void)
CONFIG_ARM64_PAN));
}
/*
* The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
* affects EL0 and TCF affects EL1 irrespective of which TTBR is
* used.
* The kernel accesses TTBR0 usually with LDTR/STTR instructions
* when UAO is available, so these would act as EL0 accesses using
* TCF0.
* However futex.h code uses exclusives which would be executed as
* EL1, this can potentially cause a tag check fault even if the
* user disables TCF0.
*
* To address the problem we set the PSTATE.TCO bit in uaccess_enable()
* and reset it in uaccess_disable().
*
* The Tag check override (TCO) bit disables temporarily the tag checking
* preventing the issue.
*/
static inline void __uaccess_disable_tco(void)
{
asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}
static inline void __uaccess_enable_tco(void)
{
asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}
/*
* These functions disable tag checking only if in MTE async mode
* since the sync mode generates exceptions synchronously and the
* nofault or load_unaligned_zeropad can handle them.
*/
static inline void __uaccess_disable_tco_async(void)
{
if (system_uses_mte_async_or_asymm_mode())
__uaccess_disable_tco();
}
static inline void __uaccess_enable_tco_async(void)
{
if (system_uses_mte_async_or_asymm_mode())
__uaccess_enable_tco();
}
static inline void uaccess_disable_privileged(void)
{
__uaccess_disable_tco();
mte_disable_tco();
if (uaccess_ttbr0_disable())
return;
@ -194,7 +148,7 @@ static inline void uaccess_disable_privileged(void)
static inline void uaccess_enable_privileged(void)
{
__uaccess_enable_tco();
mte_enable_tco();
if (uaccess_ttbr0_enable())
return;
@ -302,8 +256,8 @@ do { \
#define get_user __get_user
/*
* We must not call into the scheduler between __uaccess_enable_tco_async() and
* __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
* We must not call into the scheduler between __mte_enable_tco_async() and
* __mte_disable_tco_async(). As `dst` and `src` may contain blocking
* functions, we must evaluate these outside of the critical section.
*/
#define __get_kernel_nofault(dst, src, type, err_label) \
@ -312,10 +266,10 @@ do { \
__typeof__(src) __gkn_src = (src); \
int __gkn_err = 0; \
\
__uaccess_enable_tco_async(); \
__mte_enable_tco_async(); \
__raw_get_mem("ldr", *((type *)(__gkn_dst)), \
(__force type *)(__gkn_src), __gkn_err, K); \
__uaccess_disable_tco_async(); \
__mte_disable_tco_async(); \
\
if (unlikely(__gkn_err)) \
goto err_label; \
@ -388,8 +342,8 @@ do { \
#define put_user __put_user
/*
* We must not call into the scheduler between __uaccess_enable_tco_async() and
* __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
* We must not call into the scheduler between __mte_enable_tco_async() and
* __mte_disable_tco_async(). As `dst` and `src` may contain blocking
* functions, we must evaluate these outside of the critical section.
*/
#define __put_kernel_nofault(dst, src, type, err_label) \
@ -398,10 +352,10 @@ do { \
__typeof__(src) __pkn_src = (src); \
int __pkn_err = 0; \
\
__uaccess_enable_tco_async(); \
__mte_enable_tco_async(); \
__raw_put_mem("str", *((type *)(__pkn_src)), \
(__force type *)(__pkn_dst), __pkn_err, K); \
__uaccess_disable_tco_async(); \
__mte_disable_tco_async(); \
\
if (unlikely(__pkn_err)) \
goto err_label; \

View File

@ -55,7 +55,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long ret;
__uaccess_enable_tco_async();
__mte_enable_tco_async();
/* Load word from unaligned pointer addr */
asm(
@ -65,7 +65,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
: "=&r" (ret)
: "r" (addr), "Q" (*(unsigned long *)addr));
__uaccess_disable_tco_async();
__mte_disable_tco_async();
return ret;
}

View File

@ -155,6 +155,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (type & ~KVM_VM_TYPE_MASK)
return -EINVAL;
mutex_init(&kvm->arch.config_lock);
#ifdef CONFIG_LOCKDEP
/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
mutex_unlock(&kvm->arch.config_lock);
mutex_unlock(&kvm->lock);
#endif
ret = kvm_share_hyp(kvm, kvm + 1);
if (ret)
return ret;
@ -423,6 +433,16 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
int err;
spin_lock_init(&vcpu->arch.mp_state_lock);
#ifdef CONFIG_LOCKDEP
/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
mutex_lock(&vcpu->mutex);
mutex_lock(&vcpu->kvm->arch.config_lock);
mutex_unlock(&vcpu->kvm->arch.config_lock);
mutex_unlock(&vcpu->mutex);
#endif
/* Force users to call KVM_ARM_VCPU_INIT */
vcpu->arch.target = -1;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
@ -566,34 +586,41 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
}
void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
vcpu->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
kvm_make_request(KVM_REQ_SLEEP, vcpu);
kvm_vcpu_kick(vcpu);
}
void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
spin_lock(&vcpu->arch.mp_state_lock);
__kvm_arm_vcpu_power_off(vcpu);
spin_unlock(&vcpu->arch.mp_state_lock);
}
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
{
return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED;
return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
}
static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED;
WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
kvm_make_request(KVM_REQ_SUSPEND, vcpu);
kvm_vcpu_kick(vcpu);
}
static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
{
return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED;
return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
*mp_state = vcpu->arch.mp_state;
*mp_state = READ_ONCE(vcpu->arch.mp_state);
return 0;
}
@ -603,12 +630,14 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
{
int ret = 0;
spin_lock(&vcpu->arch.mp_state_lock);
switch (mp_state->mp_state) {
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.mp_state = *mp_state;
WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
break;
case KVM_MP_STATE_STOPPED:
kvm_arm_vcpu_power_off(vcpu);
__kvm_arm_vcpu_power_off(vcpu);
break;
case KVM_MP_STATE_SUSPENDED:
kvm_arm_vcpu_suspend(vcpu);
@ -617,6 +646,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
ret = -EINVAL;
}
spin_unlock(&vcpu->arch.mp_state_lock);
return ret;
}
@ -711,9 +742,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
static_branch_inc(&userspace_irqchip_in_use);
}
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}
@ -1327,7 +1358,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
kvm_arm_vcpu_power_off(vcpu);
else
vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
return 0;
}

View File

@ -953,7 +953,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
switch (attr->group) {
case KVM_ARM_VCPU_PMU_V3_CTRL:
mutex_lock(&vcpu->kvm->arch.config_lock);
ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
mutex_unlock(&vcpu->kvm->arch.config_lock);
break;
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_set_attr(vcpu, attr);

View File

@ -32,13 +32,19 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu,
* to do.
*/
if (vcpu) {
/* We're in guest context */
if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
return;
} else if (mmu == host_s2_mmu) {
cxt->mmu = vcpu->arch.hw_mmu;
} else {
/* We're in host context */
if (mmu == host_s2_mmu)
return;
cxt->mmu = host_s2_mmu;
}
cxt->mmu = mmu;
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;

View File

@ -409,7 +409,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
if (val & ~fw_reg_features)
return -EINVAL;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
val != *fw_reg_bmap) {
@ -419,7 +419,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
WRITE_ONCE(*fw_reg_bmap, val);
out:
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}

View File

@ -850,7 +850,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
struct arm_pmu *arm_pmu;
int ret = -ENXIO;
mutex_lock(&kvm->lock);
lockdep_assert_held(&kvm->arch.config_lock);
mutex_lock(&arm_pmus_lock);
list_for_each_entry(entry, &arm_pmus, entry) {
@ -870,7 +870,6 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
}
mutex_unlock(&arm_pmus_lock);
mutex_unlock(&kvm->lock);
return ret;
}
@ -878,22 +877,20 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
struct kvm *kvm = vcpu->kvm;
lockdep_assert_held(&kvm->arch.config_lock);
if (!kvm_vcpu_has_pmu(vcpu))
return -ENODEV;
if (vcpu->arch.pmu.created)
return -EBUSY;
mutex_lock(&kvm->lock);
if (!kvm->arch.arm_pmu) {
/* No PMU set, get the default one */
kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
if (!kvm->arch.arm_pmu) {
mutex_unlock(&kvm->lock);
if (!kvm->arch.arm_pmu)
return -ENODEV;
}
}
mutex_unlock(&kvm->lock);
switch (attr->attr) {
case KVM_ARM_VCPU_PMU_V3_IRQ: {
@ -937,19 +934,13 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
filter.action != KVM_PMU_EVENT_DENY))
return -EINVAL;
mutex_lock(&kvm->lock);
if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
mutex_unlock(&kvm->lock);
if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags))
return -EBUSY;
}
if (!kvm->arch.pmu_filter) {
kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
if (!kvm->arch.pmu_filter) {
mutex_unlock(&kvm->lock);
if (!kvm->arch.pmu_filter)
return -ENOMEM;
}
/*
* The default depends on the first applied filter.
@ -968,8 +959,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
else
bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
mutex_unlock(&kvm->lock);
return 0;
}
case KVM_ARM_VCPU_PMU_V3_SET_PMU: {

View File

@ -46,6 +46,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
struct vcpu_reset_state *reset_state;
struct kvm *kvm = source_vcpu->kvm;
struct kvm_vcpu *vcpu = NULL;
int ret = PSCI_RET_SUCCESS;
unsigned long cpu_id;
cpu_id = smccc_get_arg1(source_vcpu);
@ -60,11 +61,15 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
spin_lock(&vcpu->arch.mp_state_lock);
if (!kvm_arm_vcpu_stopped(vcpu)) {
if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
ret = PSCI_RET_ALREADY_ON;
else
return PSCI_RET_INVALID_PARAMS;
ret = PSCI_RET_INVALID_PARAMS;
goto out_unlock;
}
reset_state = &vcpu->arch.reset_state;
@ -80,7 +85,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
reset_state->r0 = smccc_get_arg3(source_vcpu);
WRITE_ONCE(reset_state->reset, true);
reset_state->reset = true;
kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
/*
@ -92,7 +97,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
kvm_vcpu_wake_up(vcpu);
return PSCI_RET_SUCCESS;
out_unlock:
spin_unlock(&vcpu->arch.mp_state_lock);
return ret;
}
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
@ -152,8 +159,11 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
* after this call is handled and before the VCPUs have been
* re-initialized.
*/
kvm_for_each_vcpu(i, tmp, vcpu->kvm)
tmp->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
spin_lock(&tmp->arch.mp_state_lock);
WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
spin_unlock(&tmp->arch.mp_state_lock);
}
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
@ -201,7 +211,6 @@ static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
u32 psci_fn = smccc_get_function(vcpu);
unsigned long val;
int ret = 1;
@ -226,9 +235,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
kvm_psci_narrow_to_32bit(vcpu);
fallthrough;
case PSCI_0_2_FN64_CPU_ON:
mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
mutex_unlock(&kvm->lock);
break;
case PSCI_0_2_FN_AFFINITY_INFO:
kvm_psci_narrow_to_32bit(vcpu);
@ -367,7 +374,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
u32 psci_fn = smccc_get_function(vcpu);
unsigned long val;
@ -377,9 +383,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
val = PSCI_RET_SUCCESS;
break;
case KVM_PSCI_FN_CPU_ON:
mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
mutex_unlock(&kvm->lock);
break;
default:
val = PSCI_RET_NOT_SUPPORTED;

View File

@ -176,7 +176,7 @@ static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
lockdep_assert_held(&kvm->lock);
lockdep_assert_held(&kvm->arch.config_lock);
if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
/*
@ -228,17 +228,18 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
int ret;
bool loaded;
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.config_lock);
ret = kvm_set_vm_width(vcpu);
if (!ret) {
reset_state = vcpu->arch.reset_state;
WRITE_ONCE(vcpu->arch.reset_state.reset, false);
}
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.config_lock);
if (ret)
return ret;
spin_lock(&vcpu->arch.mp_state_lock);
reset_state = vcpu->arch.reset_state;
vcpu->arch.reset_state.reset = false;
spin_unlock(&vcpu->arch.mp_state_lock);
/* Reset PMU outside of the non-preemptible section */
kvm_pmu_vcpu_reset(vcpu);

View File

@ -85,7 +85,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
struct kvm *kvm = s->private;
struct vgic_state_iter *iter;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
iter = kvm->arch.vgic.iter;
if (iter) {
iter = ERR_PTR(-EBUSY);
@ -104,7 +104,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
if (end_of_vgic(iter))
iter = NULL;
out:
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
return iter;
}
@ -132,12 +132,12 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
if (IS_ERR(v))
return;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
iter = kvm->arch.vgic.iter;
kfree(iter->lpi_array);
kfree(iter);
kvm->arch.vgic.iter = NULL;
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
}
static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)

View File

@ -74,9 +74,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
unsigned long i;
int ret;
if (irqchip_in_kernel(kvm))
return -EEXIST;
/*
* This function is also called by the KVM_CREATE_IRQCHIP handler,
* which had no chance yet to check the availability of the GICv2
@ -87,10 +84,20 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
!kvm_vgic_global_state.can_emulate_gicv2)
return -ENODEV;
/* Must be held to avoid race with vCPU creation */
lockdep_assert_held(&kvm->lock);
ret = -EBUSY;
if (!lock_all_vcpus(kvm))
return ret;
mutex_lock(&kvm->arch.config_lock);
if (irqchip_in_kernel(kvm)) {
ret = -EEXIST;
goto out_unlock;
}
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu_has_run_once(vcpu))
goto out_unlock;
@ -118,6 +125,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
out_unlock:
mutex_unlock(&kvm->arch.config_lock);
unlock_all_vcpus(kvm);
return ret;
}
@ -227,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
* KVM io device for the redistributor that belongs to this VCPU.
*/
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->slots_lock);
ret = vgic_register_redist_iodev(vcpu);
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->slots_lock);
}
return ret;
}
@ -250,7 +258,6 @@ static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
* The function is generally called when nr_spis has been explicitly set
* by the guest through the KVM DEVICE API. If not nr_spis is set to 256.
* vgic_initialized() returns true when this function has succeeded.
* Must be called with kvm->lock held!
*/
int vgic_init(struct kvm *kvm)
{
@ -259,6 +266,8 @@ int vgic_init(struct kvm *kvm)
int ret = 0, i;
unsigned long idx;
lockdep_assert_held(&kvm->arch.config_lock);
if (vgic_initialized(kvm))
return 0;
@ -373,12 +382,13 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}
/* To be called with kvm->lock held */
static void __kvm_vgic_destroy(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;
lockdep_assert_held(&kvm->arch.config_lock);
vgic_debug_destroy(kvm);
kvm_for_each_vcpu(i, vcpu, kvm)
@ -389,9 +399,9 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
void kvm_vgic_destroy(struct kvm *kvm)
{
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
__kvm_vgic_destroy(kvm);
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
}
/**
@ -414,9 +424,9 @@ int vgic_lazy_init(struct kvm *kvm)
if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
return -EBUSY;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
ret = vgic_init(kvm);
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
}
return ret;
@ -436,30 +446,48 @@ int vgic_lazy_init(struct kvm *kvm)
int kvm_vgic_map_resources(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
enum vgic_type type;
gpa_t dist_base;
int ret = 0;
if (likely(vgic_ready(kvm)))
return 0;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->slots_lock);
mutex_lock(&kvm->arch.config_lock);
if (vgic_ready(kvm))
goto out;
if (!irqchip_in_kernel(kvm))
goto out;
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
ret = vgic_v2_map_resources(kvm);
else
type = VGIC_V2;
} else {
ret = vgic_v3_map_resources(kvm);
type = VGIC_V3;
}
if (ret)
if (ret) {
__kvm_vgic_destroy(kvm);
else
goto out;
}
dist->ready = true;
dist_base = dist->vgic_dist_base;
mutex_unlock(&kvm->arch.config_lock);
ret = vgic_register_dist_iodev(kvm, dist_base, type);
if (ret) {
kvm_err("Unable to register VGIC dist MMIO regions\n");
kvm_vgic_destroy(kvm);
}
mutex_unlock(&kvm->slots_lock);
return ret;
out:
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
mutex_unlock(&kvm->slots_lock);
return ret;
}

View File

@ -1936,6 +1936,7 @@ void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
int ret;
struct vgic_its *its;
if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
@ -1945,9 +1946,12 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
if (!its)
return -ENOMEM;
mutex_lock(&dev->kvm->arch.config_lock);
if (vgic_initialized(dev->kvm)) {
int ret = vgic_v4_init(dev->kvm);
ret = vgic_v4_init(dev->kvm);
if (ret < 0) {
mutex_unlock(&dev->kvm->arch.config_lock);
kfree(its);
return ret;
}
@ -1958,6 +1962,14 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
mutex_init(&its->its_lock);
mutex_init(&its->cmd_lock);
/* Yep, even more trickery for lock ordering... */
#ifdef CONFIG_LOCKDEP
mutex_lock(&its->cmd_lock);
mutex_lock(&its->its_lock);
mutex_unlock(&its->its_lock);
mutex_unlock(&its->cmd_lock);
#endif
its->vgic_its_base = VGIC_ADDR_UNDEF;
INIT_LIST_HEAD(&its->device_list);
@ -1976,7 +1988,11 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
dev->private = its;
return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);
mutex_unlock(&dev->kvm->arch.config_lock);
return ret;
}
static void vgic_its_destroy(struct kvm_device *kvm_dev)
@ -2045,6 +2061,13 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock);
if (!lock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
mutex_lock(&dev->kvm->arch.config_lock);
if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
ret = -ENXIO;
goto out;
@ -2058,11 +2081,6 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
goto out;
}
if (!lock_all_vcpus(dev->kvm)) {
ret = -EBUSY;
goto out;
}
addr = its->vgic_its_base + offset;
len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
@ -2076,8 +2094,9 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
} else {
*reg = region->its_read(dev->kvm, its, addr, len);
}
unlock_all_vcpus(dev->kvm);
out:
mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return ret;
}
@ -2187,7 +2206,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
ite->collection->collection_id;
val = cpu_to_le64(val);
return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
}
/**
@ -2339,7 +2358,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
(itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
(dev->num_eventid_bits - 1));
val = cpu_to_le64(val);
return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
}
/**
@ -2526,7 +2545,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
collection->collection_id);
val = cpu_to_le64(val);
return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
/*
@ -2607,7 +2626,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
*/
val = 0;
BUG_ON(cte_esz > sizeof(val));
ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
return ret;
}
@ -2749,14 +2768,15 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
return 0;
mutex_lock(&kvm->lock);
mutex_lock(&its->its_lock);
if (!lock_all_vcpus(kvm)) {
mutex_unlock(&its->its_lock);
mutex_unlock(&kvm->lock);
return -EBUSY;
}
mutex_lock(&kvm->arch.config_lock);
mutex_lock(&its->its_lock);
switch (attr) {
case KVM_DEV_ARM_ITS_CTRL_RESET:
vgic_its_reset(kvm, its);
@ -2769,8 +2789,9 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
break;
}
unlock_all_vcpus(kvm);
mutex_unlock(&its->its_lock);
mutex_unlock(&kvm->arch.config_lock);
unlock_all_vcpus(kvm);
mutex_unlock(&kvm->lock);
return ret;
}

View File

@ -46,7 +46,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
struct vgic_dist *vgic = &kvm->arch.vgic;
int r;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@ -68,7 +68,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
r = -ENODEV;
}
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
return r;
}
@ -102,7 +102,11 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
if (get_user(addr, uaddr))
return -EFAULT;
mutex_lock(&kvm->lock);
/*
* Since we can't hold config_lock while registering the redistributor
* iodevs, take the slots_lock immediately.
*/
mutex_lock(&kvm->slots_lock);
switch (attr->attr) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@ -182,6 +186,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
if (r)
goto out;
mutex_lock(&kvm->arch.config_lock);
if (write) {
r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
if (!r)
@ -189,9 +194,10 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
} else {
addr = *addr_ptr;
}
mutex_unlock(&kvm->arch.config_lock);
out:
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->slots_lock);
if (!r && !write)
r = put_user(addr, uaddr);
@ -227,7 +233,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
(val & 31))
return -EINVAL;
mutex_lock(&dev->kvm->lock);
mutex_lock(&dev->kvm->arch.config_lock);
if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
ret = -EBUSY;
@ -235,16 +241,16 @@ static int vgic_set_common_attr(struct kvm_device *dev,
dev->kvm->arch.vgic.nr_spis =
val - VGIC_NR_PRIVATE_IRQS;
mutex_unlock(&dev->kvm->lock);
mutex_unlock(&dev->kvm->arch.config_lock);
return ret;
}
case KVM_DEV_ARM_VGIC_GRP_CTRL: {
switch (attr->attr) {
case KVM_DEV_ARM_VGIC_CTRL_INIT:
mutex_lock(&dev->kvm->lock);
mutex_lock(&dev->kvm->arch.config_lock);
r = vgic_init(dev->kvm);
mutex_unlock(&dev->kvm->lock);
mutex_unlock(&dev->kvm->arch.config_lock);
return r;
case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
/*
@ -260,7 +266,10 @@ static int vgic_set_common_attr(struct kvm_device *dev,
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
mutex_lock(&dev->kvm->arch.config_lock);
r = vgic_v3_save_pending_tables(dev->kvm);
mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return r;
@ -411,15 +420,17 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock);
if (!lock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
mutex_lock(&dev->kvm->arch.config_lock);
ret = vgic_init(dev->kvm);
if (ret)
goto out;
if (!lock_all_vcpus(dev->kvm)) {
ret = -EBUSY;
goto out;
}
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
@ -432,8 +443,9 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
break;
}
unlock_all_vcpus(dev->kvm);
out:
mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
if (!ret && !is_write)
@ -569,12 +581,14 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock);
if (unlikely(!vgic_initialized(dev->kvm))) {
ret = -EBUSY;
goto out;
if (!lock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
if (!lock_all_vcpus(dev->kvm)) {
mutex_lock(&dev->kvm->arch.config_lock);
if (unlikely(!vgic_initialized(dev->kvm))) {
ret = -EBUSY;
goto out;
}
@ -609,8 +623,9 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
break;
}
unlock_all_vcpus(dev->kvm);
out:
mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
if (!ret && uaccess && !is_write) {

View File

@ -111,7 +111,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
case GICD_CTLR: {
bool was_enabled, is_hwsgi;
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.config_lock);
was_enabled = dist->enabled;
is_hwsgi = dist->nassgireq;
@ -139,7 +139,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
else if (!was_enabled && dist->enabled)
vgic_kick_vcpus(vcpu->kvm);
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.config_lock);
break;
}
case GICD_TYPER:
@ -769,10 +769,13 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
struct vgic_redist_region *rdreg;
gpa_t rd_base;
int ret;
int ret = 0;
lockdep_assert_held(&kvm->slots_lock);
mutex_lock(&kvm->arch.config_lock);
if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
return 0;
goto out_unlock;
/*
* We may be creating VCPUs before having set the base address for the
@ -782,10 +785,12 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
*/
rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
if (!rdreg)
return 0;
goto out_unlock;
if (!vgic_v3_check_base(kvm))
return -EINVAL;
if (!vgic_v3_check_base(kvm)) {
ret = -EINVAL;
goto out_unlock;
}
vgic_cpu->rdreg = rdreg;
vgic_cpu->rdreg_index = rdreg->free_index;
@ -799,16 +804,20 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
rd_dev->redist_vcpu = vcpu;
mutex_lock(&kvm->slots_lock);
mutex_unlock(&kvm->arch.config_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
2 * SZ_64K, &rd_dev->dev);
mutex_unlock(&kvm->slots_lock);
if (ret)
return ret;
/* Protected by slots_lock */
rdreg->free_index++;
return 0;
out_unlock:
mutex_unlock(&kvm->arch.config_lock);
return ret;
}
static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
@ -834,12 +843,10 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
/* The current c failed, so iterate over the previous ones. */
int i;
mutex_lock(&kvm->slots_lock);
for (i = 0; i < c; i++) {
vcpu = kvm_get_vcpu(kvm, i);
vgic_unregister_redist_iodev(vcpu);
}
mutex_unlock(&kvm->slots_lock);
}
return ret;
@ -938,7 +945,9 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
int ret;
mutex_lock(&kvm->arch.config_lock);
ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
mutex_unlock(&kvm->arch.config_lock);
if (ret)
return ret;
@ -950,8 +959,10 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
if (ret) {
struct vgic_redist_region *rdreg;
mutex_lock(&kvm->arch.config_lock);
rdreg = vgic_v3_rdist_region_from_index(kvm, index);
vgic_v3_free_redist_region(rdreg);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}

View File

@ -527,13 +527,13 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
u32 val;
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.config_lock);
vgic_access_active_prepare(vcpu, intid);
val = __vgic_mmio_read_active(vcpu, addr, len);
vgic_access_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.config_lock);
return val;
}
@ -622,13 +622,13 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.config_lock);
vgic_access_active_prepare(vcpu, intid);
__vgic_mmio_write_cactive(vcpu, addr, len, val);
vgic_access_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.config_lock);
}
int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
@ -659,13 +659,13 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.config_lock);
vgic_access_active_prepare(vcpu, intid);
__vgic_mmio_write_sactive(vcpu, addr, len, val);
vgic_access_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.config_lock);
}
int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
@ -1093,7 +1093,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
enum vgic_type type)
{
struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
int ret = 0;
unsigned int len;
switch (type) {
@ -1111,10 +1110,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
io_device->iodev_type = IODEV_DIST;
io_device->redist_vcpu = NULL;
mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
len, &io_device->dev);
mutex_unlock(&kvm->slots_lock);
return ret;
}

View File

@ -312,12 +312,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
return ret;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
if (ret) {
kvm_err("Unable to register VGIC MMIO regions\n");
return ret;
}
if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
kvm_vgic_global_state.vcpu_base,

View File

@ -538,7 +538,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
int ret = 0;
unsigned long c;
kvm_for_each_vcpu(c, vcpu, kvm) {
@ -568,12 +567,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
return -EBUSY;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
if (ret) {
kvm_err("Unable to register VGICv3 dist MMIO regions\n");
return ret;
}
if (kvm_vgic_global_state.has_gicv4_1)
vgic_v4_configure_vsgis(kvm);

View File

@ -232,9 +232,8 @@ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
* @kvm: Pointer to the VM being initialized
*
* We may be called each time a vITS is created, or when the
* vgic is initialized. This relies on kvm->lock to be
* held. In both cases, the number of vcpus should now be
* fixed.
* vgic is initialized. In both cases, the number of vcpus
* should now be fixed.
*/
int vgic_v4_init(struct kvm *kvm)
{
@ -243,6 +242,8 @@ int vgic_v4_init(struct kvm *kvm)
int nr_vcpus, ret;
unsigned long i;
lockdep_assert_held(&kvm->arch.config_lock);
if (!kvm_vgic_global_state.has_gicv4)
return 0; /* Nothing to see here... move along. */
@ -309,14 +310,14 @@ int vgic_v4_init(struct kvm *kvm)
/**
* vgic_v4_teardown - Free the GICv4 data structures
* @kvm: Pointer to the VM being destroyed
*
* Relies on kvm->lock to be held.
*/
void vgic_v4_teardown(struct kvm *kvm)
{
struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
int i;
lockdep_assert_held(&kvm->arch.config_lock);
if (!its_vm->vpes)
return;

View File

@ -24,6 +24,8 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
/*
* Locking order is always:
* kvm->lock (mutex)
* vcpu->mutex (mutex)
* kvm->arch.config_lock (mutex)
* its->cmd_lock (mutex)
* its->its_lock (mutex)
* vgic_cpu->ap_list_lock must be taken with IRQs disabled

View File

@ -6,6 +6,7 @@
#define __KVM_ARM_VGIC_NEW_H__
#include <linux/irqchip/arm-gic-common.h>
#include <asm/kvm_mmu.h>
#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
#define IMPLEMENTER_ARM 0x43b
@ -131,6 +132,16 @@ static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
return vgic_irq_get_lr_count(irq) > 1;
}
static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa,
const void *data, unsigned long len)
{
int ret;
ret = kvm_write_guest_lock(kvm, gpa, data, len);
return ret;
}
/*
* This struct provides an intermediate representation of the fields contained
* in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC

View File

@ -626,8 +626,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
vma_end_read(vma);
goto lock_mmap;
}
fault = handle_mm_fault(vma, addr & PAGE_MASK,
mm_flags | FAULT_FLAG_VMA_LOCK, regs);
fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
vma_end_read(vma);
if (!(fault & VM_FAULT_RETRY)) {

View File

@ -351,6 +351,8 @@ CONFIG_USB_NET_AQC111=m
# CONFIG_WLAN_VENDOR_TI is not set
# CONFIG_WLAN_VENDOR_ZYDAS is not set
# CONFIG_WLAN_VENDOR_QUANTENNA is not set
CONFIG_WWAN=m
# CONFIG_WWAN_DEBUGFS is not set
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_MOUSE_PS2 is not set
@ -519,6 +521,7 @@ CONFIG_IIO_TRIGGER=y
CONFIG_POWERCAP=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_BINDERFS=y
CONFIG_ANDROID_DEBUG_SYMBOLS=y
CONFIG_ANDROID_VENDOR_HOOKS=y
CONFIG_ANDROID_DEBUG_KINFO=y
CONFIG_LIBNVDIMM=y

View File

@ -1188,7 +1188,8 @@ bool blk_mq_complete_request_remote(struct request *rq)
* or a polled request, always complete locally,
* it's pointless to redirect the completion.
*/
if (rq->mq_hctx->nr_ctx == 1 ||
if ((rq->mq_hctx->nr_ctx == 1 &&
rq->mq_ctx->cpu == raw_smp_processor_id()) ||
rq->cmd_flags & REQ_POLLED)
return false;

View File

@ -8,7 +8,6 @@ arch/arm64/boot/Image.gz
"
BUILD_SYSTEM_DLKM=1
MODULES_LIST=${ROOT_DIR}/${KERNEL_DIR}/android/gki_system_dlkm_modules
BUILD_GKI_CERTIFICATION_TOOLS=1

View File

@ -8,7 +8,6 @@ arch/riscv/boot/Image.gz
"
BUILD_SYSTEM_DLKM=1
MODULES_LIST=${ROOT_DIR}/${KERNEL_DIR}/android/gki_system_dlkm_modules
BUILD_GKI_CERTIFICATION_TOOLS=1

View File

@ -3,7 +3,6 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki
BUILD_SYSTEM_DLKM=1
MODULES_LIST=${ROOT_DIR}/${KERNEL_DIR}/android/gki_system_dlkm_modules
BUILD_GKI_CERTIFICATION_TOOLS=1

View File

@ -47,6 +47,17 @@ config ANDROID_BINDER_IPC_SELFTEST
exhaustively with combinations of various buffer sizes and
alignments.
config ANDROID_DEBUG_SYMBOLS
bool "Android Debug Symbols"
help
Enables export of debug symbols that are useful for offline debugging
of a kernel. These symbols would be used in vendor modules to find
addresses of the core kernel symbols for vendor extensions.
This driver is statically compiled into kernel and maintains all the
required symbol addresses for vendor modules and provides necessary
interface vendor modules.
config ANDROID_VENDOR_HOOKS
bool "Android Vendor Hooks"
depends on TRACEPOINTS

View File

@ -4,5 +4,6 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
obj-$(CONFIG_ANDROID_DEBUG_SYMBOLS) += android_debug_symbols.o
obj-$(CONFIG_ANDROID_VENDOR_HOOKS) += vendor_hooks.o
obj-$(CONFIG_ANDROID_DEBUG_KINFO) += debug_kinfo.o

View File

@ -0,0 +1,96 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023, Unisoc (Shanghai) Technologies Co., Ltd
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/android_debug_symbols.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <linux/cma.h>
#include <linux/mm.h>
#include "../../mm/slab.h"
#include <linux/security.h>
struct ads_entry {
char *name;
void *addr;
};
#define _ADS_ENTRY(index, symbol) \
[index] = { .name = #symbol, .addr = (void *)symbol }
#define ADS_ENTRY(index, symbol) _ADS_ENTRY(index, symbol)
#define _ADS_PER_CPU_ENTRY(index, symbol) \
[index] = { .name = #symbol, .addr = (void *)&symbol }
#define ADS_PER_CPU_ENTRY(index, symbol) _ADS_PER_CPU_ENTRY(index, symbol)
/*
* This module maintains static array of symbol and address information.
* Add all required core kernel symbols and their addresses into ads_entries[] array,
* so that vendor modules can query and to find address of non-exported symbol.
*/
static const struct ads_entry ads_entries[ADS_END] = {
ADS_ENTRY(ADS_SDATA, _sdata),
ADS_ENTRY(ADS_BSS_END, __bss_stop),
ADS_ENTRY(ADS_PER_CPU_START, __per_cpu_start),
ADS_ENTRY(ADS_PER_CPU_END, __per_cpu_end),
ADS_ENTRY(ADS_TEXT, _text),
ADS_ENTRY(ADS_SEND, _end),
ADS_ENTRY(ADS_LINUX_BANNER, linux_banner),
ADS_ENTRY(ADS_TOTAL_CMA, &totalcma_pages),
ADS_ENTRY(ADS_SLAB_CACHES, &slab_caches),
ADS_ENTRY(ADS_SLAB_MUTEX, &slab_mutex),
};
/*
* ads_per_cpu_entries array contains all the per_cpu variable address information.
*/
static const struct ads_entry ads_per_cpu_entries[ADS_DEBUG_PER_CPU_END] = {
#ifdef CONFIG_ARM64
ADS_PER_CPU_ENTRY(ADS_IRQ_STACK_PTR, irq_stack_ptr),
#endif
#ifdef CONFIG_X86
ADS_PER_CPU_ENTRY(ADS_IRQ_STACK_PTR, hardirq_stack_ptr),
#endif
};
/*
* android_debug_symbol - Provide address inforamtion of debug symbol.
* @symbol: Index of debug symbol array.
*
* Return address of core kernel symbol on success and a negative errno will be
* returned in error cases.
*
*/
void *android_debug_symbol(enum android_debug_symbol symbol)
{
if (symbol >= ADS_END)
return ERR_PTR(-EINVAL);
return ads_entries[symbol].addr;
}
EXPORT_SYMBOL_NS_GPL(android_debug_symbol, MINIDUMP);
/*
* android_debug_per_cpu_symbol - Provide address inforamtion of per cpu debug symbol.
* @symbol: Index of per cpu debug symbol array.
*
* Return address of core kernel symbol on success and a negative errno will be
* returned in error cases.
*
*/
void *android_debug_per_cpu_symbol(enum android_debug_per_cpu_symbol symbol)
{
if (symbol >= ADS_DEBUG_PER_CPU_END)
return ERR_PTR(-EINVAL);
return ads_per_cpu_entries[symbol].addr;
}
EXPORT_SYMBOL_NS_GPL(android_debug_per_cpu_symbol, MINIDUMP);

View File

@ -66,6 +66,7 @@
#include <trace/hooks/psci.h>
#include <trace/hooks/psi.h>
#include <trace/hooks/bl_hib.h>
#include <trace/hooks/regmap.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
@ -291,6 +292,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_thermal_register);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_thermal_unregister);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_thermal_zone_device);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_thermal_power_cap);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_enable_thermal_power_throttle);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_reclaim_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_failure_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_pageout_swap_entry);
@ -307,3 +309,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_init_aes_encrypt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_skip_swap_map_write);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_post_image_save);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_pageout_skip);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue_smallest_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_one_page_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_regmap_update);

View File

@ -293,7 +293,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;
if (parser->device->ll_driver == &uhid_hid_driver)
if (IS_ENABLED(CONFIG_UHID) && parser->device->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;
/* Total size check: Allow for possible report index byte */
@ -1987,7 +1987,7 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
rsize = hid_compute_report_size(report);
if (hid->ll_driver == &uhid_hid_driver)
if (IS_ENABLED(CONFIG_UHID) && hid->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;
if (report_enum->numbered && rsize >= max_buffer_size)
@ -2398,7 +2398,7 @@ int hid_hw_raw_request(struct hid_device *hdev,
{
unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
if (hdev->ll_driver == &uhid_hid_driver)
if (IS_ENABLED(CONFIG_UHID) && hdev->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;
if (len < 1 || len > max_buffer_size || !buf)
@ -2422,7 +2422,7 @@ int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
{
unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
if (hdev->ll_driver == &uhid_hid_driver)
if (IS_ENABLED(CONFIG_UHID) && hdev->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;
if (len < 1 || len > max_buffer_size || !buf)

View File

@ -2056,7 +2056,7 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
{
struct io_pgtable_ops *pgtbl_ops;
struct protection_domain *domain;
int pgtable = amd_iommu_pgtable;
int pgtable;
int mode = DEFAULT_PGTABLE_LEVEL;
int ret;
@ -2073,6 +2073,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
mode = PAGE_MODE_NONE;
} else if (type == IOMMU_DOMAIN_UNMANAGED) {
pgtable = AMD_IOMMU_V1;
} else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
pgtable = amd_iommu_pgtable;
} else {
return NULL;
}
switch (pgtable) {

View File

@ -22,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
#include <trace/hooks/regmap.h>
static struct platform_driver syscon_driver;
@ -128,6 +129,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
}
}
trace_android_vh_regmap_update(&syscon_config, regmap);
syscon->regmap = regmap;
syscon->np = np;

View File

@ -704,6 +704,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
int switch_on_temp, control_temp;
struct power_allocator_params *params = tz->governor_data;
bool update;
bool enable = true;
lockdep_assert_held(&tz->lock);
@ -714,9 +715,11 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
if (trip != params->trip_max_desired_temperature)
return 0;
trace_android_vh_enable_thermal_power_throttle(&enable);
ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
&switch_on_temp);
if (!ret && (tz->temperature < switch_on_temp)) {
if ((!ret && (tz->temperature < switch_on_temp)) || !enable) {
update = (tz->last_temperature >= switch_on_temp);
tz->passive = 0;
reset_pid_controller(params);

View File

@ -1529,7 +1529,7 @@ static int qcom_geni_serial_remove(struct platform_device *pdev)
return 0;
}
static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
static int qcom_geni_serial_sys_suspend(struct device *dev)
{
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
@ -1546,7 +1546,7 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
return uart_suspend_port(private_data->drv, uport);
}
static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
static int qcom_geni_serial_sys_resume(struct device *dev)
{
int ret;
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
@ -1594,10 +1594,12 @@ static int qcom_geni_serial_sys_hib_resume(struct device *dev)
}
static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend,
qcom_geni_serial_sys_resume)
.restore = qcom_geni_serial_sys_hib_resume,
.thaw = qcom_geni_serial_sys_hib_resume,
.suspend = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
.resume = pm_sleep_ptr(qcom_geni_serial_sys_resume),
.freeze = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
.poweroff = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
.restore = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
.thaw = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
};
static const struct of_device_id qcom_geni_serial_match_table[] = {

View File

@ -20,12 +20,10 @@
#define MAX_QUEUE_SUP GENMASK(7, 0)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
#define UFS_MCQ_MIN_POLL_QUEUES 0
#define QUEUE_EN_OFFSET 31
#define QUEUE_ID_OFFSET 16
#define MAX_DEV_CMD_ENTRIES 2
#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
#define MCQ_QCFG_SIZE 0x40
#define MCQ_ENTRY_SIZE_IN_DWORD 8
@ -99,6 +97,7 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
/**
* ufshcd_mcq_req_to_hwq - find the hardware queue on which the
@ -115,8 +114,7 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
u32 utag = blk_mq_unique_tag(req);
u32 hwq = blk_mq_unique_tag_to_hwq(utag);
/* uhq[0] is used to serve device commands */
return &hba->uhq[hwq + UFSHCD_MCQ_IO_QUEUE_OFFSET];
return &hba->uhq[hwq];
}
/**
@ -160,8 +158,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
/* maxq is 0 based value */
hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;
tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
rw_queues;
tot_queues = read_queues + poll_queues + rw_queues;
if (hba_maxq < tot_queues) {
dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
@ -169,7 +166,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES;
rem = hba_maxq;
if (rw_queues) {
hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
@ -195,7 +192,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
for (i = 0; i < HCTX_MAX_TYPES; i++)
host->nr_hw_queues += hba->nr_queues[i];
hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES;
hba->nr_hw_queues = host->nr_hw_queues;
return 0;
}
@ -249,6 +246,7 @@ u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{
return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{
@ -402,6 +400,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
MCQ_CFG_n(REG_SQATTR, i));
}
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
@ -455,8 +454,6 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
/* The very first HW queue serves device commands */
hba->dev_cmd_queue = &hba->uhq[0];
/* Give dev_cmd_queue the minimal number of entries */
hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
host->host_tagset = 1;
return 0;

View File

@ -84,7 +84,6 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp);
#define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,

View File

@ -3166,7 +3166,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
* not trigger any race conditions.
*/
hba->dev_cmd.complete = NULL;
err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe);
err = ufshcd_get_tr_ocs(lrbp, NULL);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
} else {
@ -3262,7 +3262,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
goto out;
hba->dev_cmd.complete = &wait;
hba->dev_cmd.cqe = NULL;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
@ -5520,6 +5519,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
enum utp_ocs ocs;
lrbp = &hba->lrb[task_tag];
lrbp->compl_time_stamp = ktime_get();
@ -5538,7 +5538,11 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
trace_android_vh_ufs_compl_command(hba, lrbp);
hba->dev_cmd.cqe = cqe;
if (cqe) {
ocs = le32_to_cpu(cqe->status) & MASK_OCS;
lrbp->utr_descriptor_ptr->header.dword_2 =
cpu_to_le32(ocs);
}
ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
complete(hba->dev_cmd.complete);
ufshcd_clk_scaling_update_busy(hba);
@ -5592,7 +5596,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
struct ufs_hw_queue *hwq;
if (is_mcq_enabled(hba)) {
hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
hwq = &hba->uhq[queue_num];
return ufshcd_mcq_poll_cqe_lock(hba, hwq);
}
@ -5646,7 +5650,7 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
hwq_num = blk_mq_unique_tag_to_hwq(utag);
hwq = &hba->uhq[hwq_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
hwq = &hba->uhq[hwq_num];
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);

View File

@ -27,8 +27,14 @@
#include <ufs/unipro.h>
#include "ufs-mediatek.h"
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
#undef CREATE_TRACE_POINTS
#define MAX_SUPP_MAC 64
#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
{ .wmanufacturerid = UFS_ANY_VENDOR,
@ -843,6 +849,38 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
}
}
/*
 * Collect the per-queue MCQ interrupt lines from the platform device.
 * On any failure the whole table is invalidated and mcq_nr_intr is
 * zeroed, so MCQ interrupt setup is skipped later.
 */
static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct platform_device *pdev;
	int irq;
	int i;

	host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
	pdev = container_of(hba->dev, struct platform_device, dev);

	for (i = 0; i < host->mcq_nr_intr; i++) {
		/* irq index 0 is legacy irq, sq/cq irq start from index 1 */
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0)
			break;
		host->mcq_intr_info[i].hba = hba;
		host->mcq_intr_info[i].irq = irq;
		dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
	}

	if (i == host->mcq_nr_intr)
		return;

	dev_err(hba->dev, "get platform mcq irq fail: %d\n", i);

	/* invalidate irq info */
	for (i = 0; i < host->mcq_nr_intr; i++)
		host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;

	host->mcq_nr_intr = 0;
}
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
@ -879,6 +917,8 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Initialize host capability */
ufs_mtk_init_host_caps(hba);
ufs_mtk_init_mcq_irq(hba);
err = ufs_mtk_bind_mphy(hba);
if (err)
goto out_variant_clear;
@ -1176,7 +1216,17 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
else
return err;
if (!hba->mcq_enabled) {
err = ufshcd_make_hba_operational(hba);
} else {
ufs_mtk_config_mcq(hba, false);
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
/* Enable MCQ mode */
ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
REG_UFS_MEM_CFG);
}
if (err)
return err;
@ -1500,6 +1550,121 @@ static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
return 0;
}
/*
 * vops->get_hba_mac: report the MAC (max active commands) value for this
 * controller.  Fixed constant on MTK hosts; not read from hardware.
 */
static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
{
	return MAX_SUPP_MAC;
}
/*
 * vops->op_runtime_config: program the per-operation MCQ register layout
 * (SQ doorbell, SQ interrupt status, CQ doorbell, CQ interrupt status).
 * All operation regions share the same per-queue stride.
 */
static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
{
	struct ufshcd_mcq_opr_info_t *p;
	int i;

	hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
	hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
	hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
	hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;

	for (i = 0; i < OPR_MAX; i++) {
		p = &hba->mcq_opr[i];
		p->stride = REG_UFS_MCQ_STRIDE;
		p->base = hba->mmio_base + p->offset;
	}

	return 0;
}
/*
 * vops->mcq_config_resource: locate the MCQ register window inside the
 * already-mapped controller MMIO region.
 */
static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (host->mcq_nr_intr) {
		hba->mcq_base = hba->mmio_base +
				MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
		return 0;
	}

	/* fail mcq initialization if interrupt is not filled properly */
	dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
	return -EINVAL;
}
/*
 * Per-queue MCQ interrupt handler: acknowledge the CQ interrupt status
 * and, when a completion entry was pushed, drain the completion queue.
 */
static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
{
	struct ufs_mtk_mcq_intr_info *info = __intr_info;
	struct ufs_hba *hba = info->hba;
	int qid = info->qid;
	struct ufs_hw_queue *hwq = &hba->uhq[qid];
	u32 events = ufshcd_mcq_read_cqis(hba, qid);

	if (events)
		ufshcd_mcq_write_cqis(hba, events, qid);

	if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
		ufshcd_mcq_poll_cqe_lock(hba, hwq);

	return IRQ_HANDLED;
}
/*
 * Request one interrupt line per MCQ queue, recording the queue id in
 * each handler's context.  Fails fast on the first invalid or
 * unrequestable IRQ.
 */
static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int i, ret;

	for (i = 0; i < host->mcq_nr_intr; i++) {
		u32 irq = host->mcq_intr_info[i].irq;

		if (irq == MTK_MCQ_INVALID_IRQ) {
			dev_err(hba->dev, "invalid irq. %d\n", i);
			return -ENOPARAM;
		}

		host->mcq_intr_info[i].qid = i;
		ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0,
				       UFSHCD, &host->mcq_intr_info[i]);
		dev_dbg(hba->dev, "request irq %d intr %s\n", irq,
			ret ? "failed" : "");
		if (ret) {
			dev_err(hba->dev, "Cannot request irq %d\n", ret);
			return ret;
		}
	}

	return 0;
}
/*
 * Enable MCQ multi-interrupt operation.  Interrupt lines are requested
 * only once (guarded by mcq_set_intr); the option-register bits are
 * (re)written on every call.
 */
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (!host->mcq_set_intr) {
		int ret;

		/* Disable irq option register */
		ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);

		if (irq) {
			ret = ufs_mtk_config_mcq_irq(hba);
			if (ret)
				return ret;
		}

		host->mcq_set_intr = true;
	}

	ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
	ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);

	return 0;
}
/*
 * vops->config_esi: ESI setup on MTK is the full MCQ configuration with
 * per-queue interrupts requested (irq = true).
 */
static int ufs_mtk_config_esi(struct ufs_hba *hba)
{
	return ufs_mtk_config_mcq(hba, true);
}
/*
* struct ufs_hba_mtk_vops - UFS MTK specific variant operations
*
@ -1523,6 +1688,11 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.event_notify = ufs_mtk_event_notify,
.config_scaling_param = ufs_mtk_config_scaling_param,
.clk_scale_notify = ufs_mtk_clk_scale_notify,
/* mcq vops */
.get_hba_mac = ufs_mtk_get_hba_mac,
.op_runtime_config = ufs_mtk_op_runtime_config,
.mcq_config_resource = ufs_mtk_mcq_config_resource,
.config_esi = ufs_mtk_config_esi,
};
/**
@ -1569,7 +1739,7 @@ static int ufs_mtk_probe(struct platform_device *pdev)
out:
if (err)
dev_info(dev, "probe failed %d\n", err);
dev_err(dev, "probe failed %d\n", err);
of_node_put(reset_node);
return err;

View File

@ -10,11 +10,27 @@
#include <linux/pm_qos.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
/*
* MCQ define and struct
*/
#define UFSHCD_MAX_Q_NR 8
#define MTK_MCQ_INVALID_IRQ 0xFFFF
/* REG_UFS_MMIO_OPT_CTRL_0 160h */
#define EHS_EN BIT(0)
#define PFM_IMPV BIT(1)
#define MCQ_MULTI_INTR_EN BIT(2)
#define MCQ_CMB_INTR_EN BIT(3)
#define MCQ_AH8 BIT(4)
#define MCQ_INTR_EN_MSK (MCQ_MULTI_INTR_EN | MCQ_CMB_INTR_EN)
/*
* Vendor specific UFSHCI Registers
*/
#define REG_UFS_XOUFS_CTRL 0x140
#define REG_UFS_REFCLK_CTRL 0x144
#define REG_UFS_MMIO_OPT_CTRL_0 0x160
#define REG_UFS_EXTREG 0x2100
#define REG_UFS_MPHYCTRL 0x2200
#define REG_UFS_MTK_IP_VER 0x2240
@ -26,6 +42,13 @@
#define REG_UFS_DEBUG_SEL_B2 0x22D8
#define REG_UFS_DEBUG_SEL_B3 0x22DC
#define REG_UFS_MTK_SQD 0x2800
#define REG_UFS_MTK_SQIS 0x2814
#define REG_UFS_MTK_CQD 0x281C
#define REG_UFS_MTK_CQIS 0x2824
#define REG_UFS_MCQ_STRIDE 0x30
/*
* Ref-clk control
*
@ -136,6 +159,12 @@ struct ufs_mtk_hw_ver {
u8 major;
};
struct ufs_mtk_mcq_intr_info {
struct ufs_hba *hba;
u32 irq;
u8 qid;
};
struct ufs_mtk_host {
struct phy *mphy;
struct pm_qos_request pm_qos_req;
@ -155,6 +184,10 @@ struct ufs_mtk_host {
u16 ref_clk_ungating_wait_us;
u16 ref_clk_gating_wait_us;
u32 ip_ver;
bool mcq_set_intr;
int mcq_nr_intr;
struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR];
};
/*

View File

@ -2652,11 +2652,25 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
synchronize_irq(dwc->irq_gadget);
if (!is_on)
if (!is_on) {
ret = dwc3_gadget_soft_disconnect(dwc);
else
ret = dwc3_gadget_soft_connect(dwc);
} else {
/*
* In the Synopsys DWC_usb31 1.90a programming guide section
* 4.1.9, it specifies that for a reconnect after a
* device-initiated disconnect requires a core soft reset
* (DCTL.CSftRst) before enabling the run/stop bit.
*/
ret = dwc3_core_soft_reset(dwc);
if (ret)
goto done;
dwc3_event_buffers_setup(dwc);
__dwc3_gadget_start(dwc);
ret = dwc3_gadget_run_stop(dwc, true);
}
done:
pm_runtime_put(dwc->dev);
return ret;

View File

@ -1094,12 +1094,16 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);
/* ------------------------------------------------------------------------- */
/* Acquire connect_lock before calling this function. */
static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
static int usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
{
int ret;
if (udc->vbus)
usb_gadget_connect_locked(udc->gadget);
ret = usb_gadget_connect_locked(udc->gadget);
else
usb_gadget_disconnect_locked(udc->gadget);
ret = usb_gadget_disconnect_locked(udc->gadget);
return ret;
}
static void vbus_event_work(struct work_struct *work)
@ -1573,12 +1577,21 @@ static int gadget_bind_driver(struct device *dev)
}
usb_gadget_enable_async_callbacks(udc);
udc->allow_connect = true;
usb_udc_connect_control_locked(udc);
ret = usb_udc_connect_control_locked(udc);
if (ret)
goto err_connect_control;
mutex_unlock(&udc->connect_lock);
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
return 0;
err_connect_control:
usb_gadget_disable_async_callbacks(udc);
if (gadget->irq)
synchronize_irq(gadget->irq);
usb_gadget_udc_stop_locked(udc);
err_start:
driver->unbind(udc->gadget);

View File

@ -436,8 +436,8 @@ static int xhci_plat_remove(struct platform_device *dev)
struct clk *reg_clk = xhci->reg_clk;
struct usb_hcd *shared_hcd = xhci->shared_hcd;
pm_runtime_get_sync(&dev->dev);
xhci->xhc_state |= XHCI_STATE_REMOVING;
pm_runtime_get_sync(&dev->dev);
if (shared_hcd) {
usb_remove_hcd(shared_hcd);
@ -453,7 +453,6 @@ static int xhci_plat_remove(struct platform_device *dev)
xhci_vendor_cleanup(xhci);
usb_put_hcd(shared_hcd);
clk_disable_unprepare(clk);
clk_disable_unprepare(reg_clk);
usb_put_hcd(hcd);

View File

@ -1194,7 +1194,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
temp = readl(&xhci->op_regs->status);
/* re-initialize the HC on Restore Error, or Host Controller Error */
if (temp & (STS_SRE | STS_HCE)) {
if ((temp & (STS_SRE | STS_HCE)) &&
!(xhci->xhc_state & XHCI_STATE_REMOVING)) {
reinit_xhc = true;
if (!xhci->broken_suspend)
xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);

View File

@ -2296,6 +2296,11 @@ struct xhci_vendor_ops {
void (*alloc_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
int type, gfp_t flags);
void (*free_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci);

View File

@ -775,8 +775,15 @@ int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
{
int err = -EAGAIN;
if (f2fs_has_inline_dentry(dir))
if (f2fs_has_inline_dentry(dir)) {
/*
* Should get i_xattr_sem to keep the lock order:
* i_xattr_sem -> inode_page lock used by f2fs_setxattr.
*/
f2fs_down_read(&F2FS_I(dir)->i_xattr_sem);
err = f2fs_add_inline_entry(dir, fname, inode, ino, mode);
f2fs_up_read(&F2FS_I(dir)->i_xattr_sem);
}
if (err == -EAGAIN)
err = f2fs_add_regular_entry(dir, fname, inode, ino, mode);

View File

@ -527,9 +527,11 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
if (len > F2FS_NAME_LEN)
return -ERANGE;
if (!ipage)
f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
error = lookup_all_xattrs(inode, ipage, index, len, name,
&entry, &base_addr, &base_size, &is_inline);
if (!ipage)
f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
if (error)
return error;

View File

@ -295,44 +295,19 @@ void *fuse_create_open_finalize(
}
int fuse_release_initialize(struct fuse_bpf_args *fa, struct fuse_release_in *fri,
struct inode *inode, struct file *file)
struct inode *inode, struct fuse_file *ff)
{
struct fuse_file *fuse_file = file->private_data;
/* Always put backing file whatever bpf/userspace says */
fput(fuse_file->backing_file);
fput(ff->backing_file);
*fri = (struct fuse_release_in) {
.fh = ((struct fuse_file *)(file->private_data))->fh,
.fh = ff->fh,
};
*fa = (struct fuse_bpf_args) {
.nodeid = get_fuse_inode(inode)->nodeid,
.opcode = FUSE_RELEASE,
.in_numargs = 1,
.in_args[0].size = sizeof(*fri),
.in_args[0].value = fri,
};
return 0;
}
int fuse_releasedir_initialize(struct fuse_bpf_args *fa,
struct fuse_release_in *fri,
struct inode *inode, struct file *file)
{
struct fuse_file *fuse_file = file->private_data;
/* Always put backing file whatever bpf/userspace says */
fput(fuse_file->backing_file);
*fri = (struct fuse_release_in) {
.fh = ((struct fuse_file *)(file->private_data))->fh,
};
*fa = (struct fuse_bpf_args) {
.nodeid = get_fuse_inode(inode)->nodeid,
.opcode = FUSE_RELEASEDIR,
.opcode = S_ISDIR(inode->i_mode) ? FUSE_RELEASEDIR
: FUSE_RELEASE,
.in_numargs = 1,
.in_args[0].size = sizeof(*fri),
.in_args[0].value = fri,
@ -342,15 +317,14 @@ int fuse_releasedir_initialize(struct fuse_bpf_args *fa,
}
int fuse_release_backing(struct fuse_bpf_args *fa,
struct inode *inode, struct file *file)
struct inode *inode, struct fuse_file *ff)
{
return 0;
}
void *fuse_release_finalize(struct fuse_bpf_args *fa,
struct inode *inode, struct file *file)
struct inode *inode, struct fuse_file *ff)
{
fuse_file_free(file->private_data);
return NULL;
}
@ -992,6 +966,20 @@ void *fuse_file_write_iter_finalize(struct fuse_bpf_args *fa,
return ERR_PTR(fwio->ret);
}
/*
 * Pass a flock request straight through to the backing file: use the
 * backing filesystem's own ->flock when it has one, otherwise fall back
 * to the generic local flock implementation.  The lock's fl_file is
 * redirected so the lock is owned by the backing file.
 */
int fuse_file_flock_backing(struct file *file, int cmd, struct file_lock *fl)
{
	struct fuse_file *ff = file->private_data;
	struct file *backing_file = ff->backing_file;

	fl->fl_file = backing_file;

	if (backing_file->f_op->flock)
		return backing_file->f_op->flock(backing_file, cmd, fl);

	return locks_lock_file_wait(backing_file, fl);
}
ssize_t fuse_backing_mmap(struct file *file, struct vm_area_struct *vma)
{
int ret;
@ -1224,14 +1212,12 @@ int fuse_handle_bpf_prog(struct fuse_entry_bpf *feb, struct inode *parent,
}
/* Cannot change existing program */
if (*bpf && new_bpf) {
if (*bpf) {
if (new_bpf)
bpf_prog_put(new_bpf);
return new_bpf == *bpf ? 0 : -EINVAL;
}
if (*bpf)
bpf_prog_put(*bpf);
*bpf = new_bpf;
return 0;
}

View File

@ -1790,17 +1790,6 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
static int fuse_dir_release(struct inode *inode, struct file *file)
{
#ifdef CONFIG_FUSE_BPF
struct fuse_err_ret fer;
fer = fuse_bpf_backing(inode, struct fuse_release_in,
fuse_releasedir_initialize, fuse_release_backing,
fuse_release_finalize,
inode, file);
if (fer.ret)
return PTR_ERR(fer.result);
#endif
fuse_release_common(file, true);
return 0;
}

View File

@ -104,11 +104,26 @@ static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
kfree(ra);
}
static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
static void fuse_file_put(struct inode *inode, struct fuse_file *ff,
bool sync, bool isdir)
{
if (refcount_dec_and_test(&ff->count)) {
struct fuse_args *args = &ff->release_args->args;
#ifdef CONFIG_FUSE_BPF
struct fuse_err_ret fer;
#endif
if (!refcount_dec_and_test(&ff->count))
return;
#ifdef CONFIG_FUSE_BPF
fer = fuse_bpf_backing(inode, struct fuse_release_in,
fuse_release_initialize, fuse_release_backing,
fuse_release_finalize,
inode, ff);
if (fer.ret) {
fuse_release_end(ff->fm, args, 0);
} else
#endif
if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) {
/* Do nothing when client does not implement 'open' */
fuse_release_end(ff->fm, args, 0);
@ -122,7 +137,6 @@ static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
fuse_release_end(ff->fm, args, -ENOTCONN);
}
kfree(ff);
}
}
struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
@ -343,7 +357,7 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff,
* synchronous RELEASE is allowed (and desirable) in this case
* because the server can be trusted not to screw up.
*/
fuse_file_put(ff, ff->fm->fc->destroy, isdir);
fuse_file_put(ra->inode, ff, ff->fm->fc->destroy, isdir);
}
void fuse_release_common(struct file *file, bool isdir)
@ -361,17 +375,6 @@ static int fuse_release(struct inode *inode, struct file *file)
{
struct fuse_conn *fc = get_fuse_conn(inode);
#ifdef CONFIG_FUSE_BPF
struct fuse_err_ret fer;
fer = fuse_bpf_backing(inode, struct fuse_release_in,
fuse_release_initialize, fuse_release_backing,
fuse_release_finalize,
inode, file);
if (fer.ret)
return PTR_ERR(fer.result);
#endif
/*
* Dirty pages might remain despite write_inode_now() call from
* fuse_flush() due to writes racing with the close.
@ -394,7 +397,7 @@ void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
* iput(NULL) is a no-op and since the refcount is 1 and everything's
* synchronous, we are fine with not doing igrab() here"
*/
fuse_file_put(ff, true, false);
fuse_file_put(&fi->inode, ff, true, false);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
@ -972,8 +975,11 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
unlock_page(page);
put_page(page);
}
if (ia->ff)
fuse_file_put(ia->ff, false, false);
if (ia->ff) {
WARN_ON(!mapping);
fuse_file_put(mapping ? mapping->host : NULL, ia->ff,
false, false);
}
fuse_io_free(ia);
}
@ -1716,7 +1722,7 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa)
__free_page(ap->pages[i]);
if (wpa->ia.ff)
fuse_file_put(wpa->ia.ff, false, false);
fuse_file_put(wpa->inode, wpa->ia.ff, false, false);
kfree(ap->pages);
kfree(wpa);
@ -1971,7 +1977,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
ff = __fuse_write_file_get(fi);
err = fuse_flush_times(inode, ff);
if (ff)
fuse_file_put(ff, false, false);
fuse_file_put(inode, ff, false, false);
return err;
}
@ -2369,7 +2375,7 @@ static int fuse_writepages(struct address_space *mapping,
fuse_writepages_send(&data);
}
if (data.ff)
fuse_file_put(data.ff, false, false);
fuse_file_put(inode, data.ff, false, false);
kfree(data.orig_pages);
out:
@ -2689,12 +2695,18 @@ static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
int err;
#ifdef CONFIG_FUSE_BPF
/* TODO - this is simply passthrough, not a proper BPF filter */
if (ff->backing_file)
return fuse_file_flock_backing(file, cmd, fl);
#endif
if (fc->no_flock) {
err = locks_lock_file_wait(file, fl);
} else {
struct fuse_file *ff = file->private_data;
/* emulate flock with POSIX locks */
ff->flock = true;

View File

@ -1536,14 +1536,11 @@ void *fuse_link_finalize(struct fuse_bpf_args *fa, struct dentry *entry,
struct inode *dir, struct dentry *newent);
int fuse_release_initialize(struct fuse_bpf_args *fa, struct fuse_release_in *fri,
struct inode *inode, struct file *file);
int fuse_releasedir_initialize(struct fuse_bpf_args *fa,
struct fuse_release_in *fri,
struct inode *inode, struct file *file);
struct inode *inode, struct fuse_file *ff);
int fuse_release_backing(struct fuse_bpf_args *fa,
struct inode *inode, struct file *file);
struct inode *inode, struct fuse_file *ff);
void *fuse_release_finalize(struct fuse_bpf_args *fa,
struct inode *inode, struct file *file);
struct inode *inode, struct fuse_file *ff);
int fuse_flush_initialize(struct fuse_bpf_args *fa, struct fuse_flush_in *ffi,
struct file *file, fl_owner_t id);
@ -1667,6 +1664,7 @@ int fuse_file_write_iter_backing(struct fuse_bpf_args *fa,
void *fuse_file_write_iter_finalize(struct fuse_bpf_args *fa,
struct kiocb *iocb, struct iov_iter *from);
int fuse_file_flock_backing(struct file *file, int cmd, struct file_lock *fl);
ssize_t fuse_backing_mmap(struct file *file, struct vm_area_struct *vma);
int fuse_file_fallocate_initialize(struct fuse_bpf_args *fa,

View File

@ -113,6 +113,10 @@ static void fuse_free_inode(struct inode *inode)
kfree(fi->forget);
#ifdef CONFIG_FUSE_DAX
kfree(fi->dax);
#endif
#ifdef CONFIG_FUSE_BPF
if (fi->bpf)
bpf_prog_put(fi->bpf);
#endif
kmem_cache_free(fuse_inode_cachep, fi);
}
@ -123,13 +127,6 @@ static void fuse_evict_inode(struct inode *inode)
/* Will write inode on close/munmap and in all other dirtiers */
WARN_ON(inode->i_state & I_DIRTY_INODE);
#ifdef CONFIG_FUSE_BPF
iput(fi->backing_inode);
if (fi->bpf)
bpf_prog_put(fi->bpf);
fi->bpf = NULL;
#endif
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
if (inode->i_sb->s_flags & SB_ACTIVE) {
@ -149,6 +146,15 @@ static void fuse_evict_inode(struct inode *inode)
}
}
#ifdef CONFIG_FUSE_BPF
/*
 * Drop the reference on the fuse-bpf backing inode when the inode is
 * finally destroyed.  NOTE(review): this was moved out of evict_inode —
 * presumably so the backing inode outlives eviction; confirm against the
 * commit that introduced it.
 */
static void fuse_destroy_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	iput(fi->backing_inode);
}
#endif
static int fuse_reconfigure(struct fs_context *fsc)
{
struct super_block *sb = fsc->root->d_sb;
@ -1209,6 +1215,9 @@ static const struct export_operations fuse_export_operations = {
static const struct super_operations fuse_super_operations = {
.alloc_inode = fuse_alloc_inode,
#ifdef CONFIG_FUSE_BPF
.destroy_inode = fuse_destroy_inode,
#endif
.free_inode = fuse_free_inode,
.evict_inode = fuse_evict_inode,
.write_inode = fuse_write_inode,

View File

@ -918,10 +918,10 @@ static long ioctl_get_read_timeouts(struct mount_info *mi, void __user *arg)
if (copy_from_user(&args, args_usr_ptr, sizeof(args)))
return -EINVAL;
if (args.timeouts_array_size_out > INCFS_DATA_FILE_BLOCK_SIZE)
if (args.timeouts_array_size > INCFS_DATA_FILE_BLOCK_SIZE)
return -EINVAL;
buffer = kzalloc(args.timeouts_array_size_out, GFP_NOFS);
buffer = kzalloc(args.timeouts_array_size, GFP_NOFS);
if (!buffer)
return -ENOMEM;

View File

@ -14,6 +14,7 @@
struct block_buffer {
u32 filled;
bool is_root_hash;
u8 *data;
};
@ -25,6 +26,14 @@ static int hash_one_block(struct inode *inode,
struct block_buffer *next = cur + 1;
int err;
/*
* Safety check to prevent a buffer overflow in case of a filesystem bug
* that allows the file size to change despite deny_write_access(), or a
* bug in the Merkle tree logic itself
*/
if (WARN_ON_ONCE(next->is_root_hash && next->filled != 0))
return -EINVAL;
/* Zero-pad the block if it's shorter than the block size. */
memset(&cur->data[cur->filled], 0, params->block_size - cur->filled);
@ -98,6 +107,7 @@ static int build_merkle_tree(struct file *filp,
}
}
buffers[num_levels].data = root_hash;
buffers[num_levels].is_root_hash = true;
BUILD_BUG_ON(sizeof(level_offset) != sizeof(params->level_start));
memcpy(level_offset, params->level_start, sizeof(level_offset));
@ -348,6 +358,13 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
err = file_permission(filp, MAY_WRITE);
if (err)
return err;
/*
* __kernel_read() is used while building the Merkle tree. So, we can't
* allow file descriptors that were opened for ioctl access only, using
* the special nonstandard access mode 3. O_RDONLY only, please!
*/
if (!(filp->f_mode & FMODE_READ))
return -EBADF;
if (IS_APPEND(inode))
return -EPERM;

View File

@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023, Unisoc (Shanghai) Technologies Co., Ltd
 */

#ifndef _ANDROID_DEBUG_SYMBOLS_H
#define _ANDROID_DEBUG_SYMBOLS_H

/*
 * Keys for looking up addresses of core kernel symbols from Android
 * debug/minidump tooling.  NOTE(review): entries appear to mirror
 * kernel symbols of similar names (_sdata, linux_banner, slab_caches,
 * ...) — confirm against the lookup table in the implementation.
 */
enum android_debug_symbol {
	ADS_SDATA = 0,
	ADS_BSS_END,
	ADS_PER_CPU_START,
	ADS_PER_CPU_END,
	ADS_TEXT,
	ADS_SEND,
	ADS_LINUX_BANNER,
	ADS_TOTAL_CMA,
	ADS_SLAB_CACHES,
	ADS_SLAB_MUTEX,
	ADS_END			/* sentinel: number of entries, not a valid key */
};

/* Keys for per-CPU symbol lookups. */
enum android_debug_per_cpu_symbol {
	ADS_IRQ_STACK_PTR = 0,
	ADS_DEBUG_PER_CPU_END	/* sentinel: number of entries, not a valid key */
};

#ifdef CONFIG_ANDROID_DEBUG_SYMBOLS

/* Resolve @symbol to the corresponding kernel address. */
void *android_debug_symbol(enum android_debug_symbol symbol);
/*
 * Resolve @symbol to the corresponding per-CPU symbol address;
 * returns ERR_PTR(-EINVAL) for out-of-range keys.
 */
void *android_debug_per_cpu_symbol(enum android_debug_per_cpu_symbol symbol);

#else /* !CONFIG_ANDROID_DEBUG_SYMBOLS */

/* Stubs when the feature is compiled out: every lookup yields NULL. */
static inline void *android_debug_symbol(enum android_debug_symbol symbol)
{
	return NULL;
}

static inline void *android_debug_per_cpu_symbol(enum android_debug_per_cpu_symbol symbol)
{
	return NULL;
}

#endif /* CONFIG_ANDROID_DEBUG_SYMBOLS */

#endif /* _ANDROID_DEBUG_SYMBOLS_H */

View File

@ -8,6 +8,9 @@
#include <trace/hooks/vendor_hooks.h>
struct task_struct;
struct cgroup_taskset;
struct cgroup_subsys;
struct cgroup_subsys_state;
DECLARE_HOOK(android_vh_cgroup_set_task,
TP_PROTO(int ret, struct task_struct *task),
TP_ARGS(ret, task));
@ -16,8 +19,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_refrigerator,
TP_PROTO(bool f),
TP_ARGS(f), 1);
struct cgroup_subsys;
struct cgroup_taskset;
DECLARE_HOOK(android_vh_cgroup_attach,
TP_PROTO(struct cgroup_subsys *ss, struct cgroup_taskset *tset),
TP_ARGS(ss, tset))
@ -25,8 +26,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_cgroup_force_kthread_migration,
TP_PROTO(struct task_struct *tsk, struct cgroup *dst_cgrp, bool *force_migration),
TP_ARGS(tsk, dst_cgrp, force_migration), 1);
struct cgroup_taskset;
struct cgroup_subsys;
DECLARE_RESTRICTED_HOOK(android_rvh_cpuset_fork,
TP_PROTO(struct task_struct *p, bool *inherit_cpus),

View File

@ -122,6 +122,13 @@ DECLARE_HOOK(android_vh_mem_cgroup_css_online,
DECLARE_HOOK(android_vh_mem_cgroup_css_offline,
TP_PROTO(struct cgroup_subsys_state *css, struct mem_cgroup *memcg),
TP_ARGS(css, memcg));
DECLARE_HOOK(android_vh_rmqueue_smallest_bypass,
TP_PROTO(struct page **page, struct zone *zone, int order, int migratetype),
TP_ARGS(page, zone, order, migratetype));
DECLARE_HOOK(android_vh_free_one_page_bypass,
TP_PROTO(struct page *page, struct zone *zone, int order, int migratetype,
int fpi_flags, bool *bypass),
TP_ARGS(page, zone, order, migratetype, fpi_flags, bypass));
#endif /* _TRACE_HOOK_MM_H */

View File

@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Android vendor-hook header for regmap: declares a tracepoint-backed hook
 * that vendor modules can attach to. Follows the standard trace-event header
 * pattern (TRACE_SYSTEM / TRACE_INCLUDE_PATH before the multi-read guard). */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM regmap
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_REGMAP_H
#include <trace/hooks/vendor_hooks.h>
/* Forward declarations only — a pointer suffices, so the full regmap
 * definitions need not be pulled in here. */
struct regmap_config;
struct regmap;
/*
 * Following tracepoints are not exported in tracefs and provide a
 * mechanism for vendor modules to hook and extend functionality
 */
DECLARE_HOOK(android_vh_regmap_update,
TP_PROTO(const struct regmap_config *config, struct regmap *map),
TP_ARGS(config, map));
#endif /* _TRACE_HOOK_REGMAP_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@ -48,6 +48,10 @@ DECLARE_HOOK(android_vh_thermal_power_cap,
TP_PROTO(u32 *power_range),
TP_ARGS(power_range));
DECLARE_HOOK(android_vh_enable_thermal_power_throttle,
TP_PROTO(bool *enable),
TP_ARGS(enable));
#endif /* _TRACE_HOOK_THERMAL_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@ -97,18 +97,18 @@ struct utp_upiu_req {
};
struct ufs_arpmb_meta {
__u16 req_resp_type;
__be16 req_resp_type;
__u8 nonce[16];
__u32 write_counter;
__u16 addr_lun;
__u16 block_count;
__u16 result;
__be32 write_counter;
__be16 addr_lun;
__be16 block_count;
__be16 result;
} __attribute__((__packed__));
struct ufs_ehs {
__u8 length;
__u8 ehs_type;
__u16 ehssub_type;
__be16 ehssub_type;
struct ufs_arpmb_meta meta;
__u8 mac_key[32];
} __attribute__((__packed__));

View File

@ -1297,11 +1297,14 @@ void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);

View File

@ -4457,6 +4457,7 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
cft->flags |= __CFTYPE_ONLY_ON_DFL;
return cgroup_add_cftypes(ss, cfts);
}
EXPORT_SYMBOL_GPL(cgroup_add_dfl_cftypes);
/**
* cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies

View File

@ -43,13 +43,13 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
void *vaddr;
int i;
pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
if (!pages)
return NULL;
for (i = 0; i < count; i++)
pages[i] = nth_page(page, i);
vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
kfree(pages);
kvfree(pages);
return vaddr;
}

View File

@ -66,7 +66,7 @@ if [ "$building_out_of_srctree" ]; then
cd $srctree
for f in $dir_list
do find "$f" -name "*.h";
done | cpio --quiet -pd $cpio_dir
done | cpio --quiet -L -pd $cpio_dir
)
fi
@ -74,7 +74,7 @@ fi
# of tree builds having stale headers in srctree. Just silence CPIO for now.
for f in $dir_list;
do find "$f" -name "*.h";
done | cpio --quiet -pdu $cpio_dir >/dev/null 2>&1
done | cpio --quiet -L -pdu $cpio_dir >/dev/null 2>&1
# Remove comments except SDPX lines
find $cpio_dir -type f -print0 |

View File

@ -1078,6 +1078,8 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
raw_spin_unlock_irq(&sem->wait_lock);
rwsem_set_reader_owned(sem);
lockevent_inc(rwsem_rlock_fast);
trace_android_vh_record_rwsem_lock_starttime(
current, jiffies);
return sem;
}
adjustment += RWSEM_FLAG_WAITERS;

View File

@ -6,6 +6,9 @@
#include <trace/hooks/sched.h>
DEFINE_MUTEX(sched_domains_mutex);
#ifdef CONFIG_LOCKDEP
EXPORT_SYMBOL_GPL(sched_domains_mutex);
#endif
/* Protected by sched_domains_mutex: */
static cpumask_var_t sched_domains_tmpmask;

View File

@ -3189,7 +3189,7 @@ static inline int mas_rebalance(struct ma_state *mas,
* tries to combine the data in the same way. If one node contains the
* entire range of the tree, then that node is used as a new root node.
*/
mas_node_count(mas, 1 + empty_count * 3);
mas_node_count(mas, empty_count * 2 - 1);
if (mas_is_err(mas))
return 0;
@ -5607,20 +5607,34 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
{
if (mas_is_start(wr_mas->mas))
return;
if (unlikely(mas_is_paused(wr_mas->mas)))
mas_reset(wr_mas->mas);
goto reset;
if (!mas_is_start(wr_mas->mas)) {
if (mas_is_none(wr_mas->mas)) {
mas_reset(wr_mas->mas);
} else {
wr_mas->r_max = wr_mas->mas->max;
wr_mas->type = mte_node_type(wr_mas->mas->node);
if (mas_is_span_wr(wr_mas))
mas_reset(wr_mas->mas);
}
}
if (unlikely(mas_is_none(wr_mas->mas)))
goto reset;
/*
* A less strict version of mas_is_span_wr() where we allow spanning
* writes within this node. This is to stop partial walks in
* mas_prealloc() from being reset.
*/
if (wr_mas->mas->last > wr_mas->mas->max)
goto reset;
if (wr_mas->entry)
return;
if (mte_is_leaf(wr_mas->mas->node) &&
wr_mas->mas->last == wr_mas->mas->max)
goto reset;
return;
reset:
mas_reset(wr_mas->mas);
}
/* Interface */
@ -5747,25 +5761,25 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
mas_wr_end_piv(&wr_mas);
node_size = mas_wr_node_size(&wr_mas);
/* Slot store can avoid using any nodes */
if (node_size == wr_mas.node_end && wr_mas.offset_end - mas->offset == 1)
return 0;
if (node_size >= mt_slots[wr_mas.type]) {
/* Slot store is possible in some cases */
if ((node_size == mt_slots[wr_mas.type]) &&
(wr_mas.r_min == mas->index || wr_mas.r_max == mas->last))
goto ask_now;
/* Split, worst case for now. */
request = 1 + mas_mt_height(mas) * 2;
goto ask_now;
}
/* New root needs a singe node */
if (unlikely(mte_is_root(mas->node)))
goto ask_now;
/* Appending does not need any nodes */
if (node_size == wr_mas.node_end + 1 && mas->offset == wr_mas.node_end)
return 0;
/* Potential spanning rebalance collapsing a node, use worst-case */
if (node_size - 1 <= mt_min_slots[wr_mas.type])
request = mas_mt_height(mas) * 2 - 1;
/* node store, slot store needs one node */
/* node store needs one node */
ask_now:
mas_node_count_gfp(mas, request, gfp);
mas->mas_flags |= MA_STATE_PREALLOC;

View File

@ -205,7 +205,7 @@ void kasan_init_hw_tags_cpu(void)
* Enable async or asymm modes only when explicitly requested
* through the command line.
*/
kasan_enable_tagging();
kasan_enable_hw_tags();
}
/* kasan_init_hw_tags() is called once on boot CPU. */
@ -373,19 +373,19 @@ void __kasan_poison_vmalloc(const void *start, unsigned long size)
#endif
void kasan_enable_tagging(void)
void kasan_enable_hw_tags(void)
{
if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
hw_enable_tagging_async();
hw_enable_tag_checks_async();
else if (kasan_arg_mode == KASAN_ARG_MODE_ASYMM)
hw_enable_tagging_asymm();
hw_enable_tag_checks_asymm();
else
hw_enable_tagging_sync();
hw_enable_tag_checks_sync();
}
#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
EXPORT_SYMBOL_GPL(kasan_enable_tagging);
EXPORT_SYMBOL_GPL(kasan_enable_hw_tags);
void kasan_force_async_fault(void)
{

View File

@ -401,46 +401,22 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#ifdef CONFIG_KASAN_HW_TAGS
#ifndef arch_enable_tagging_sync
#define arch_enable_tagging_sync()
#endif
#ifndef arch_enable_tagging_async
#define arch_enable_tagging_async()
#endif
#ifndef arch_enable_tagging_asymm
#define arch_enable_tagging_asymm()
#endif
#ifndef arch_force_async_tag_fault
#define arch_force_async_tag_fault()
#endif
#ifndef arch_get_random_tag
#define arch_get_random_tag() (0xFF)
#endif
#ifndef arch_get_mem_tag
#define arch_get_mem_tag(addr) (0xFF)
#endif
#ifndef arch_set_mem_tag_range
#define arch_set_mem_tag_range(addr, size, tag, init) ((void *)(addr))
#endif
#define hw_enable_tagging_sync() arch_enable_tagging_sync()
#define hw_enable_tagging_async() arch_enable_tagging_async()
#define hw_enable_tagging_asymm() arch_enable_tagging_asymm()
#define hw_enable_tag_checks_sync() arch_enable_tag_checks_sync()
#define hw_enable_tag_checks_async() arch_enable_tag_checks_async()
#define hw_enable_tag_checks_asymm() arch_enable_tag_checks_asymm()
#define hw_suppress_tag_checks_start() arch_suppress_tag_checks_start()
#define hw_suppress_tag_checks_stop() arch_suppress_tag_checks_stop()
#define hw_force_async_tag_fault() arch_force_async_tag_fault()
#define hw_get_random_tag() arch_get_random_tag()
#define hw_get_mem_tag(addr) arch_get_mem_tag(addr)
#define hw_set_mem_tag_range(addr, size, tag, init) \
arch_set_mem_tag_range((addr), (size), (tag), (init))
void kasan_enable_tagging(void);
void kasan_enable_hw_tags(void);
#else /* CONFIG_KASAN_HW_TAGS */
#define hw_enable_tagging_sync()
#define hw_enable_tagging_async()
#define hw_enable_tagging_asymm()
static inline void kasan_enable_tagging(void) { }
static inline void kasan_enable_hw_tags(void) { }
#endif /* CONFIG_KASAN_HW_TAGS */

View File

@ -107,7 +107,7 @@ static void kasan_test_exit(struct kunit *test)
kasan_sync_fault_possible()) { \
if (READ_ONCE(test_status.report_found) && \
READ_ONCE(test_status.sync_fault)) \
kasan_enable_tagging(); \
kasan_enable_hw_tags(); \
migrate_enable(); \
} \
WRITE_ONCE(test_status.report_found, false); \

View File

@ -73,10 +73,18 @@ static int __init kasan_set_multi_shot(char *str)
__setup("kasan_multi_shot", kasan_set_multi_shot);
/*
* Used to suppress reports within kasan_disable/enable_current() critical
* sections, which are used for marking accesses to slab metadata.
* This function is used to check whether KASAN reports are suppressed for
* software KASAN modes via kasan_disable/enable_current() critical sections.
*
* This is done to avoid:
* 1. False-positive reports when accessing slab metadata,
* 2. Deadlocking when poisoned memory is accessed by the reporting code.
*
* Hardware Tag-Based KASAN instead relies on:
* For #1: Resetting tags via kasan_reset_tag().
* For #2: Suppression of tag checks via CPU, see report_suppress_start/end().
*/
static bool report_suppressed(void)
static bool report_suppressed_sw(void)
{
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
if (current->kasan_depth)
@ -85,6 +93,30 @@ static bool report_suppressed(void)
return false;
}
static void report_suppress_start(void)
{
#ifdef CONFIG_KASAN_HW_TAGS
/*
* Disable preemption for the duration of printing a KASAN report, as
* hw_suppress_tag_checks_start() disables checks on the current CPU.
*/
preempt_disable();
hw_suppress_tag_checks_start();
#else
kasan_disable_current();
#endif
}
static void report_suppress_stop(void)
{
#ifdef CONFIG_KASAN_HW_TAGS
hw_suppress_tag_checks_stop();
preempt_enable();
#else
kasan_enable_current();
#endif
}
/*
* Used to avoid reporting more than one KASAN bug unless kasan_multi_shot
* is enabled. Note that KASAN tests effectively enable kasan_multi_shot
@ -152,7 +184,7 @@ static void start_report(unsigned long *flags, bool sync)
/* Do not allow LOCKDEP mangling KASAN reports. */
lockdep_off();
/* Make sure we don't end up in loop. */
kasan_disable_current();
report_suppress_start();
spin_lock_irqsave(&report_lock, *flags);
pr_err("==================================================================\n");
}
@ -170,7 +202,7 @@ static void end_report(unsigned long *flags, void *addr)
panic("kasan.fault=panic set ...\n");
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
lockdep_on();
kasan_enable_current();
report_suppress_stop();
}
static void print_error_description(struct kasan_report_info *info)
@ -439,9 +471,13 @@ void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_ty
struct kasan_report_info info;
/*
* Do not check report_suppressed(), as an invalid-free cannot be
* caused by accessing slab metadata and thus should not be
* suppressed by kasan_disable/enable_current() critical sections.
* Do not check report_suppressed_sw(), as an invalid-free cannot be
* caused by accessing poisoned memory and thus should not be suppressed
* by kasan_disable/enable_current() critical sections.
*
* Note that for Hardware Tag-Based KASAN, kasan_report_invalid_free()
* is triggered by explicit tag checks and not by the ones performed by
* the CPU. Thus, reporting invalid-free is not suppressed as well.
*/
if (unlikely(!report_enabled()))
return;
@ -476,7 +512,7 @@ bool kasan_report(unsigned long addr, size_t size, bool is_write,
unsigned long irq_flags;
struct kasan_report_info info;
if (unlikely(report_suppressed()) || unlikely(!report_enabled())) {
if (unlikely(report_suppressed_sw()) || unlikely(!report_enabled())) {
ret = false;
goto out;
}
@ -508,8 +544,9 @@ void kasan_report_async(void)
unsigned long flags;
/*
* Do not check report_suppressed(), as kasan_disable/enable_current()
* critical sections do not affect Hardware Tag-Based KASAN.
* Do not check report_suppressed_sw(), as
* kasan_disable/enable_current() critical sections do not affect
* Hardware Tag-Based KASAN.
*/
if (unlikely(!report_enabled()))
return;

View File

@ -1141,6 +1141,13 @@ static inline void __free_one_page(struct page *page,
unsigned long combined_pfn;
struct page *buddy;
bool to_tail;
bool bypass = false;
trace_android_vh_free_one_page_bypass(page, zone, order,
migratetype, (int)fpi_flags, &bypass);
if (bypass)
return;
VM_BUG_ON(!zone_is_initialized(zone));
VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
@ -3129,7 +3136,11 @@ static __always_inline struct page *
__rmqueue(struct zone *zone, unsigned int order, int migratetype,
unsigned int alloc_flags)
{
struct page *page;
struct page *page = NULL;
trace_android_vh_rmqueue_smallest_bypass(&page, zone, order, migratetype);
if (page)
return page;
retry:
page = __rmqueue_smallest(zone, order, migratetype);

View File

@ -34,6 +34,7 @@ COMMON_GKI_MODULES_LIST = [
"drivers/net/usb/r8153_ecm.ko",
"drivers/net/usb/rtl8150.ko",
"drivers/net/usb/usbnet.ko",
"drivers/net/wwan/wwan.ko",
"drivers/usb/class/cdc-acm.ko",
"drivers/usb/serial/ftdi_sio.ko",
"drivers/usb/serial/usbserial.ko",

View File

@ -675,7 +675,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
ret = snd_vendor_set_pcm_intf(iface, subs->data_endpoint->iface,
subs->data_endpoint->altsetting,
subs->direction, subs);
if (!ret)
if (ret)
goto unlock;
/* reset the pointer */

View File

@ -12,6 +12,7 @@
#include <string.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/mman.h>
#include <sys/mount.h>
@ -1336,6 +1337,50 @@ static int mmap_test(const char *mount_dir)
return result;
}
/*
 * Exercise BSD flock() semantics on a fuse-bpf mount: an exclusive lock
 * taken through one fd must block a second fd on the same mounted file
 * (EAGAIN with LOCK_NB), and a lock held via the mount must also be
 * visible on the backing file until the mount-side fd is closed.
 * Returns TEST_SUCCESS or TEST_FAILURE.
 */
static int flock_test(const char *mount_dir)
{
	const char *file = "file";
	int result = TEST_FAILURE;
	int src_fd = -1;
	int fuse_dev = -1;
	int fd = -1, fd2 = -1;
	int backing_fd = -1;

	TEST(src_fd = open(ft_src, O_DIRECTORY | O_RDONLY | O_CLOEXEC),
	     src_fd != -1);
	TESTEQUAL(mount_fuse(mount_dir, -1, src_fd, &fuse_dev), 0);
	TEST(fd = s_open(s_path(s(mount_dir), s(file)),
			 O_CREAT | O_RDWR | O_CLOEXEC, 0777),
	     fd != -1);
	TEST(fd2 = s_open(s_path(s(mount_dir), s(file)),
			  O_RDWR | O_CLOEXEC, 0777),
	     fd2 != -1);

	/* Exclusive lock on fd must make a non-blocking attempt on fd2 fail. */
	TESTSYSCALL(flock(fd, LOCK_EX | LOCK_NB));
	TESTCONDERR((flock(fd2, LOCK_EX | LOCK_NB)) == -1);
	TESTCOND(errno == EAGAIN);

	/* After unlocking fd, fd2 can take the lock. */
	TESTSYSCALL(flock(fd, LOCK_UN));
	TESTSYSCALL(flock(fd2, LOCK_EX | LOCK_NB));

	/* The lock held via the mount is propagated to the backing file. */
	TEST(backing_fd = s_open(s_path(s(ft_src), s(file)),
				 O_RDONLY | O_CLOEXEC),
	     backing_fd != -1);
	TESTCONDERR((flock(backing_fd, LOCK_EX | LOCK_NB)) == -1);
	TESTCOND(errno == EAGAIN);

	/*
	 * Closing the mount-side fd releases its lock; the backing fd can
	 * then lock the file.  Reset fd2 to -1 (not 0: the out: path would
	 * otherwise close(0) and silently shut stdin).
	 */
	close(fd2);
	fd2 = -1;
	TESTSYSCALL(flock(backing_fd, LOCK_EX | LOCK_NB));

	result = TEST_SUCCESS;
out:
	close(fd);
	close(fd2);
	close(backing_fd);
	umount(mount_dir);
	close(fuse_dev);
	close(src_fd);
	return result;
}
static int readdir_perms_test(const char *mount_dir)
{
int result = TEST_FAILURE;
@ -2091,6 +2136,7 @@ int main(int argc, char *argv[])
MAKE_TEST(bpf_test_no_readdirplus_without_nodeid),
MAKE_TEST(bpf_test_revalidate_handle_backing_fd),
MAKE_TEST(bpf_test_lookup_postfilter),
MAKE_TEST(flock_test),
};
#undef MAKE_TEST