Merge keystone/android14-6.1-keystone-qcom-release.6.1.25 (506939c) into qcom-6.1

* refs/heads/tmp-506939c:
  ANDROID: Update STG for ANDROID_KABI_USE(1, unsigned int saved_state)
  FROMGIT: freezer,sched: Use saved_state to reduce some spurious wakeups
  BACKPORT: FROMGIT: sched/core: Remove ifdeffery for saved_state
  UPSTREAM: sched: Consider task_struct::saved_state in wait_task_inactive()
  UPSTREAM: sched: Unconditionally use full-fat wait_task_inactive()
  UPSTREAM: net: prevent skb corruption on frag list segmentation
  ANDROID: fuse-bpf: Get correct inode in mkdir
  Revert "BACKPORT: FROMGIT: usb: gadget: udc: Handle gadget_connect failure during bind operation"
  UPSTREAM: tcpm: Avoid soft reset when partner does not support get_status
  ANDROID: vendor_hooks: mm: Add tune_swappiness vendor hook in get_swappiness()
  ANDROID: ABI: Update symbols to unisoc whitelist
  ANDROID: ABI: Add to QCOM symbols list
  ANDROID: ABI: update symbol list for galaxy
  BACKPORT: printk: ringbuffer: Fix truncating buffer size min_t cast
  ANDROID: GKI: Add symbols to symbol list for oplus
  ANDROID: signal: Add vendor hook for memory reap
  ANDROID: abi_gki_aarch64_qcom: white list symbols for mglru overshoot
  ANDROID: vendor_hook: Add vendor hook to decide scan abort policy
  UPSTREAM: af_unix: Fix null-ptr-deref in unix_stream_sendpage().
  FROMLIST: ufs: core: fix abnormal scale up after last cmd finish
  FROMLIST: ufs: core: fix abnormal scale up after scale down
  FROMLIST: ufs: core: only suspend clock scaling if scale down
  ANDROID: GKI: update ABI definition
  UPSTREAM: zsmalloc: allow only one active pool compaction context
  ANDROID: GKI: Update Tuxera symbol list
  ANDROID: ABI: Update symbols to qcom whitelist
  UPSTREAM: usb: typec: tcpm: set initial svdm version based on pd revision
  ANDROID: KVM: arm64: Don't update IOMMUs for share/unshare
  ANDROID: cpuidle: teo: Export a function that allows modifying util_threshold
  ANDROID: sched: Add vendor hook for rt util update
  ANDROID: sched: Add vendor hook for util-update related functions
  ANDROID: sched: Add vendor hooks for override sugov behavior
  ANDROID: Add new hook to enable overriding uclamp_validate()
  ANDROID: sched/uclamp: Don't enable uclamp_is_used static key by in-kernel requests
  ANDROID: topology: Add vendor hook for use_amu_fie
  ANDROID: sched: Export symbols needed for vendor hooks
  ANDROID: Update symbol list for Exynos Auto SoCs
  UPSTREAM: netfilter: nf_tables: deactivate catchall elements in next generation
  ANDROID: GKI: Update symbols to symbol list
  ANDROID: GKI: Export four symbols in file net/core/net-trace.c
  UPSTREAM: blk-ioc: fix recursive spin_lock/unlock_irq() in ioc_clear_queue()
  ANDROID: fuse-bpf: Align data structs for 32-bit kernels
  ANDROID: GKI: Update symbol list for xiaomi
  ANDROID: vendor_hooks: export cgroup_threadgroup_rwsem
  ANDROID: GKI: add symbol list file for meizu
  ANDROID: fuse-bpf: Get correct inode in mkdir
  ANDROID: ABI: Update allowed list for QCOM
  UPSTREAM: blk-ioc: protect ioc_destroy_icq() by 'queue_lock'
  ANDROID: GKI: Update symbols to symbol list
  ANDROID: uid_sys_stats: Use llist for deferred work
  UPSTREAM: net: nfc: Fix use-after-free caused by nfc_llcp_find_local
  UPSTREAM: netfilter: nf_tables: disallow rule addition to bound chain via NFTA_RULE_CHAIN_ID
  UPSTREAM: net: tap_open(): set sk_uid from current_fsuid()
  UPSTREAM: usb: typec: ucsi: Fix command cancellation
  UPSTREAM: locks: fix KASAN: use-after-free in trace_event_raw_event_filelock_lock
  ANDROID: kleaf: Remove ptp_kvm.ko from i386 modules
  ANDROID: GKI: Add symbols to symbol list for oplus
  ANDROID: vendor_hooks: Add tune swappiness hook in get_scan_count()
  ANDROID: GKI: Update symbol list for VIVO
  ANDROID: kleaf: get_gki_modules_list add i386 option
  ANDROID: arm as an option for get_gki_modules_list
  UPSTREAM: um: Only disable SSE on clang to work around old GCC bugs
  ANDROID: GKI: Update abi_gki_aarch64_qcom for page_owner symbols
  ANDROID: mm: Export page_owner_inited and __set_page_owner
  ANDROID: Use alias for old rules.
  ANDROID: virt: geniezone: Enable as GKI module for arm64
  ANDROID: Add arch specific gki module list targets
  FROMLIST: virt: geniezone: Add dtb config support
  FROMLIST: virt: geniezone: Add memory region support
  FROMLIST: virt: geniezone: Add ioeventfd support
  FROMLIST: virt: geniezone: Add irqfd support
  FROMLIST: virt: geniezone: Add irqchip support for virtual interrupt injection
  FROMLIST: virt: geniezone: Add vcpu support
  FROMLIST: virt: geniezone: Add GenieZone hypervisor support
  FROMLIST: dt-bindings: hypervisor: Add MediaTek GenieZone hypervisor
  FROMLIST: docs: geniezone: Introduce GenieZone hypervisor
  UPSTREAM: net/sched: cls_route: No longer copy tcf_result on update to avoid use-after-free
  UPSTREAM: net: tun_chr_open(): set sk_uid from current_fsuid()
  UPSTREAM: exfat: check if filename entries exceeds max filename length
  UPSTREAM: net/sched: cls_fw: No longer copy tcf_result on update to avoid use-after-free
  ANDROID: abi_gki_aarch64_qcom: update abi symbols
  ANDROID: cgroup: Add android_rvh_cgroup_force_kthread_migration
  FROMGIT: pstore/ram: Check start of empty przs during init
  UPSTREAM: erofs: avoid infinite loop in z_erofs_do_read_page() when reading beyond EOF
  UPSTREAM: erofs: avoid useless loops in z_erofs_pcluster_readmore() when reading beyond EOF
  UPSTREAM: erofs: Fix detection of atomic context
  UPSTREAM: erofs: fix compact 4B support for 16k block size
  UPSTREAM: erofs: kill hooked chains to avoid loops on deduplicated compressed images
  UPSTREAM: erofs: fix potential overflow calculating xattr_isize
  UPSTREAM: erofs: stop parsing non-compact HEAD index if clusterofs is invalid
  UPSTREAM: erofs: initialize packed inode after root inode is assigned
  ANDROID: GKI: Update ABI for zsmalloc fixes
  BACKPORT: zsmalloc: fix races between modifications of fullness and isolated
  ANDROID: ABI: Update symbols to unisoc whitelist for A14-6.1
  UPSTREAM: zsmalloc: consolidate zs_pool's migrate_lock and size_class's locks
  UPSTREAM: netfilter: nfnetlink_log: always add a timestamp
  ANDROID: virt: gunyah: Do not allocate irq for GH_RM_RESOURCE_NO_VIRQ
  ANDROID: GKI: Add Tuxera symbol list
  ANDROID: ABI: Update oplus symbol list
  ANDROID: vendor_hooks: Add hooks for waking up and exiting control
  ANDROID: GKI: Update symbol list for xiaomi
  ANDROID: vendor_hooks: vendor hook for percpu-rwsem
  ANDROID: fips140: fix the error injection module parameters
  BACKPORT: blk-crypto: dynamically allocate fallback profile
  UPSTREAM: net/sched: cls_u32: No longer copy tcf_result on update to avoid use-after-free
  UPSTREAM: Bluetooth: L2CAP: Fix use-after-free in l2cap_sock_ready_cb
  UPSTREAM: media: usb: siano: Fix warning due to null work_func_t function pointer
  ANDROID: Delete build.config.gki.aarch64.16k.
  FROMGIT: usb: typec: tcpm: Refactor the PPS APDO selection
  UPSTREAM: usb: typec: tcpm: Fix response to vsafe0V event
  ANDROID: Revert "ANDROID: allmodconfig: disable WERROR"
  ANDROID: GKI: update symbol list file for xiaomi
  FROMGIT: usb: typec: tcpm: not sink vbus if operational current is 0mA

Conflicts:
	Documentation/devicetree/bindings
	drivers/virt/Kconfig

Change-Id: I73f03160eff968ab65d417e3e2e6a8df593ffd81
Upstream-Build: ks_qcom-android14-6.1-keystone-qcom-release@10952003 UKQ2.231015.001
Signed-off-by: jianzhou <quic_jianzhou@quicinc.com>
commit aafc325fd1

 BUILD.bazel | 65
@@ -14,7 +14,7 @@ load(
     "kernel_modules_install",
     "merged_kernel_uapi_headers",
 )
-load(":modules.bzl", "COMMON_GKI_MODULES_LIST")
+load(":modules.bzl", "get_gki_modules_list")
 
 package(
     default_visibility = [
@@ -46,10 +46,49 @@ checkpatch(
     checkpatch_pl = "scripts/checkpatch.pl",
 )
 
-write_file(
+# Deprecated - Use arch specific files from below.
+alias(
     name = "gki_system_dlkm_modules",
-    out = "android/gki_system_dlkm_modules",
-    content = COMMON_GKI_MODULES_LIST + [
+    actual = "gki_system_dlkm_modules_arm64",
+    deprecation = """
+Common list for all architectures is deprecated.
+Instead use the file corresponding to the architecture used:
+i.e. `gki_system_dlkm_modules_{arch}`
+""",
+)
+
+alias(
+    name = "android/gki_system_dlkm_modules",
+    actual = "android/gki_system_dlkm_modules_arm64",
+    deprecation = """
+Common list for all architectures is deprecated.
+Instead use the file corresponding to the architecture used:
+i.e. `gki_system_dlkm_modules_{arch}`
+""",
+)
+
+write_file(
+    name = "gki_system_dlkm_modules_arm64",
+    out = "android/gki_system_dlkm_modules_arm64",
+    content = get_gki_modules_list("arm64") + [
         # Ensure new line at the end.
         "",
     ],
 )
 
+write_file(
+    name = "gki_system_dlkm_modules_x86_64",
+    out = "android/gki_system_dlkm_modules_x86_64",
+    content = get_gki_modules_list("x86_64") + [
+        # Ensure new line at the end.
+        "",
+    ],
+)
+
+write_file(
+    name = "gki_system_dlkm_modules_risc64",
+    out = "android/gki_system_dlkm_modules_riscv64",
+    content = get_gki_modules_list("riscv64") + [
+        # Ensure new line at the end.
+        "",
+    ],
@@ -65,10 +104,12 @@ filegroup(
         "android/abi_gki_aarch64_galaxy",
         "android/abi_gki_aarch64_honor",
         "android/abi_gki_aarch64_imx",
+        "android/abi_gki_aarch64_meizu",
         "android/abi_gki_aarch64_mtk",
         "android/abi_gki_aarch64_oplus",
         "android/abi_gki_aarch64_pixel",
         "android/abi_gki_aarch64_qcom",
+        "android/abi_gki_aarch64_tuxera",
         "android/abi_gki_aarch64_unisoc",
         "android/abi_gki_aarch64_virtual_device",
         "android/abi_gki_aarch64_vivo",
@@ -80,7 +121,7 @@ filegroup(
 define_common_kernels(target_configs = {
     "kernel_aarch64": {
         "kmi_symbol_list_strict_mode": True,
-        "module_implicit_outs": COMMON_GKI_MODULES_LIST,
+        "module_implicit_outs": get_gki_modules_list("arm64"),
         "kmi_symbol_list": "android/abi_gki_aarch64",
         "kmi_symbol_list_add_only": True,
         "additional_kmi_symbol_lists": [":aarch64_additional_kmi_symbol_lists"],
@@ -90,12 +131,12 @@ define_common_kernels(target_configs = {
     },
     "kernel_aarch64_16k": {
         "kmi_symbol_list_strict_mode": False,
-        "module_implicit_outs": COMMON_GKI_MODULES_LIST,
+        "module_implicit_outs": get_gki_modules_list("arm64"),
         "make_goals": _GKI_AARCH64_MAKE_GOALS,
     },
     "kernel_aarch64_debug": {
         "kmi_symbol_list_strict_mode": False,
-        "module_implicit_outs": COMMON_GKI_MODULES_LIST,
+        "module_implicit_outs": get_gki_modules_list("arm64"),
         "kmi_symbol_list": "android/abi_gki_aarch64",
         "kmi_symbol_list_add_only": True,
         "additional_kmi_symbol_lists": [":aarch64_additional_kmi_symbol_lists"],
@@ -105,19 +146,19 @@ define_common_kernels(target_configs = {
     },
     "kernel_riscv64": {
         "kmi_symbol_list_strict_mode": False,
-        "module_implicit_outs": COMMON_GKI_MODULES_LIST,
+        "module_implicit_outs": get_gki_modules_list("riscv64"),
         "make_goals": _GKI_RISCV64_MAKE_GOALS,
     },
     "kernel_x86_64": {
         "kmi_symbol_list_strict_mode": False,
-        "module_implicit_outs": COMMON_GKI_MODULES_LIST,
+        "module_implicit_outs": get_gki_modules_list("x86_64"),
         "protected_exports_list": "android/abi_gki_protected_exports_x86_64",
         "protected_modules_list": "android/gki_x86_64_protected_modules",
         "make_goals": _GKI_X86_64_MAKE_GOALS,
     },
     "kernel_x86_64_debug": {
         "kmi_symbol_list_strict_mode": False,
-        "module_implicit_outs": COMMON_GKI_MODULES_LIST,
+        "module_implicit_outs": get_gki_modules_list("x86_64"),
         "protected_exports_list": "android/abi_gki_protected_exports_x86_64",
         "protected_modules_list": "android/gki_x86_64_protected_modules",
         "make_goals": _GKI_X86_64_MAKE_GOALS,
@@ -487,7 +528,7 @@ kernel_build(
         "modules",
         "rk3399-rock-pi-4b.dtb",
     ],
-    module_outs = COMMON_GKI_MODULES_LIST + _ROCKPI4_MODULE_OUTS + _ROCKPI4_WATCHDOG_MODULE_OUTS,
+    module_outs = get_gki_modules_list("arm64") + _ROCKPI4_MODULE_OUTS + _ROCKPI4_WATCHDOG_MODULE_OUTS,
     visibility = ["//visibility:private"],
 )
 
@@ -510,7 +551,7 @@ kernel_build(
         "modules",
         "rk3399-rock-pi-4b.dtb",
     ],
-    module_outs = COMMON_GKI_MODULES_LIST + _ROCKPI4_MODULE_OUTS,
+    module_outs = get_gki_modules_list("arm64") + _ROCKPI4_MODULE_OUTS,
     visibility = ["//visibility:private"],
 )
 Documentation/virt/geniezone/introduction.rst | 86 (new file)
@@ -0,0 +1,86 @@
.. SPDX-License-Identifier: GPL-2.0

======================
GenieZone Introduction
======================

Overview
========
GenieZone hypervisor (gzvm) is a type-1 hypervisor that supports various
virtual machine types and provides security features such as TEE-like
scenarios and secure boot. It can create guest VMs for security use cases and
has virtualization capabilities for both platform and interrupt. Although the
hypervisor can be booted independently, it requires the assistance of the
GenieZone hypervisor kernel driver (gzvm-ko) to leverage the Linux kernel for
vCPU scheduling, memory management, inter-VM communication and virtio backend
support.

Supported Architecture
======================
GenieZone currently supports only MediaTek ARM64 SoCs.

Features
========

- vCPU Management

The VM manager provides vCPUs on the basis of time sharing on physical CPUs.
It requires the Linux kernel in the host VM for vCPU scheduling and VM power
management.

- Memory Management

Direct use of physical memory from VMs is forbidden; memory access is instead
dictated by the privilege models managed by the GenieZone hypervisor for
security reasons. With the help of gzvm-ko, the hypervisor is able to
manipulate memory as objects.

- Virtual Platform

We emulate a virtual mobile platform for the guest OS running in a guest VM.
The platform supports various architecture-defined devices, such as the
virtual arch timer, GIC, MMIO, PSCI, exception watching, etc.

- Inter-VM Communication

Communication among guest VMs is provided mainly via RPC. More communication
mechanisms based on VirtIO-vsock will be provided in the future.

- Device Virtualization

The solution is provided using the well-known VirtIO. gzvm-ko redirects MMIO
traps back to the VMM, where the virtual devices are mostly emulated.
Ioeventfd is implemented using eventfd for signaling the host VM that some IO
events in guest VMs need to be processed.

- Interrupt virtualization

All interrupts during guest VM execution, both virtual and physical, are
handled by the GenieZone hypervisor with the help of gzvm-ko. When no guest VM
is running, physical interrupts are handled by the host VM directly for
performance reasons. Irqfd is also implemented using eventfd for accepting
vIRQ requests in gzvm-ko.

Platform architecture component
===============================

- vm

The vm component is responsible for setting up capability and memory
management for the protected VMs. The capability is mainly about lifecycle
control and boot context initialization. The memory management is highly
integrated with the ARM 2-stage translation tables to convert VA to IPA to PA
under the security measures required by protected VMs.

- vcpu

The vcpu component is the core of virtualizing an aarch64 physical CPU, and it
controls the vCPU lifecycle, including creating, running and destroying.
With a self-defined exit handler, the vm component is able to act accordingly
before being terminated.

- vgic

The vgic component exposes control interfaces to the Linux kernel via irqchip,
and we intend to support all SPIs, PPIs, and SGIs. For virtual interrupts, the
GenieZone hypervisor writes to the list registers and triggers vIRQ injection
in guest VMs via the GIC.
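Both of the eventfd-backed paths described above (ioeventfd for guest-to-host IO notification, irqfd for host-to-guest vIRQ requests) build on the plain counter semantics of eventfd(2). A minimal userspace sketch of just that primitive — illustrative only, not GenieZone's actual uapi; the producer/consumer roles are assumptions standing in for the trap handler and the VMM's poll loop:

/* eventfd signaling round trip (userspace sketch). */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	uint64_t val = 1, out;
	int efd = eventfd(0, 0);

	if (efd < 0)
		return 1;

	/* Producer ("guest IO happened"): bump the eventfd counter. */
	if (write(efd, &val, sizeof(val)) != sizeof(val))
		return 1;

	/* Consumer (the VMM's poll loop) drains the counter. */
	if (read(efd, &out, sizeof(out)) != sizeof(out))
		return 1;
	printf("drained eventfd counter: %llu\n", (unsigned long long)out);

	close(efd);
	return 0;
}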
@@ -16,6 +16,7 @@ Linux Virtualization Support
    coco/sev-guest
    hyperv/index
    gunyah/index
+   geniezone/introduction
 
 .. only:: html and subproject
 
 MAINTAINERS | 13
@@ -8665,6 +8665,19 @@ F:	include/vdso/
 F:	kernel/time/vsyscall.c
 F:	lib/vdso/
 
+GENIEZONE HYPERVISOR DRIVER
+M:	Yingshiuan Pan <yingshiuan.pan@mediatek.com>
+M:	Ze-Yu Wang <ze-yu.wang@mediatek.com>
+M:	Yi-De Wu <yi-de.wu@mediatek.com>
+F:	Documentation/devicetree/bindings/hypervisor/mediatek,geniezone-hyp.yaml
+F:	Documentation/virt/geniezone/
+F:	arch/arm64/geniezone/
+F:	arch/arm64/include/uapi/asm/gzvm_arch.h
+F:	drivers/virt/geniezone/
+F:	include/linux/gzvm_drv.h
+F:	include/uapi/asm-generic/gzvm_arch.h
+F:	include/uapi/linux/gzvm.h
+
 GENWQE (IBM Generic Workqueue Card)
 M:	Frank Haverkamp <haver@linux.ibm.com>
 S:	Supported
@@ -1,2 +1,2 @@
-67f6300e6146dcf804628c0479bfd135970dcd2b
-android14-6.1-2023-08_r8
+7c8cdb3894d30a697255b8940719f9fde86e2741
+android14-6.1-2023-09_r3
(one file's diff suppressed because it is too large)
@@ -1,94 +1,386 @@
[abi_symbol_list]
# commonly used symbols
  alloc_workqueue
  alt_cb_patch_nops
  __arch_copy_from_user
  __arch_copy_to_user
  arm64_use_ng_mappings
  blocking_notifier_call_chain
  blocking_notifier_chain_register
  cancel_delayed_work_sync
  __check_object_size
  __class_create
  class_destroy
  clk_disable
  clk_enable
  __clk_get_hw
  clk_get_rate
  clk_hw_get_parent
  clk_prepare
  clk_register
  clk_register_clkdev
  clk_register_fixed_factor
  clk_register_fixed_rate
  clk_set_rate
  clk_unprepare
  complete
  __const_udelay
  cpu_hwcaps
  cpu_number
  debugfs_create_dir
  debugfs_create_file
  debugfs_create_u32
  debugfs_remove
  default_llseek
  delayed_work_timer_fn
  dev_driver_string
  _dev_err
  device_create
  device_create_file
  device_destroy
  device_for_each_child
  device_get_match_data
  device_remove_file
  device_set_wakeup_capable
  device_wakeup_disable
  device_wakeup_enable
  _dev_info
  devm_clk_get
  devm_free_irq
  devm_i2c_new_dummy_device
  devm_ioremap
  devm_ioremap_resource
  devm_kfree
  devm_kmalloc
  devm_mfd_add_devices
  devm_pinctrl_get
  devm_regulator_register
  devm_request_threaded_irq
  _dev_warn
  disable_irq
  disable_irq_nosync
  dma_alloc_attrs
  dma_free_attrs
  dma_map_page_attrs
  dma_release_channel
  dma_request_chan
  dma_set_coherent_mask
  dma_set_mask
  dma_unmap_page_attrs
  enable_irq
  fortify_panic
  free_irq
  get_device
  gic_nonsecure_priorities
  gpiod_direction_input
  gpiod_direction_output_raw
  gpiod_get_raw_value
  gpiod_set_raw_value
  gpio_to_desc
  handle_level_irq
  i2c_add_numbered_adapter
  i2c_del_driver
  i2c_register_driver
  i2c_unregister_device
  __init_swait_queue_head
  init_timer_key
  ioremap_prot
  iounmap
  irq_get_irq_data
  irq_modify_status
  irq_of_parse_and_map
  irq_set_chip_and_handler_name
  irq_set_chip_data
  irq_set_irq_wake
  is_vmalloc_addr
  jiffies
  kfree
  __kmalloc
  kmalloc_caches
  kmalloc_trace
  kstrtoint
  ktime_get
  ktime_get_mono_fast_ns
  __list_add_valid
  __list_del_entry_valid
  log_post_read_mmio
  log_post_write_mmio
  log_read_mmio
  log_write_mmio
  loops_per_jiffy
  memcpy
  memdup_user
  memset
  module_layout
  __msecs_to_jiffies
  msleep
  __mutex_init
  mutex_lock
  mutex_unlock
  of_address_to_resource
  of_alias_get_id
  of_clk_add_provider
  of_clk_src_onecell_get
  of_device_is_available
  of_find_compatible_node
  of_find_device_by_node
  of_find_matching_node_and_match
  of_find_node_by_name
  of_find_property
  of_get_child_by_name
  of_get_named_gpio_flags
  of_get_next_child
  of_get_property
  of_get_regulator_init_data
  of_iomap
  of_match_node
  __of_parse_phandle_with_args
  of_property_read_string
  of_property_read_string_helper
  of_property_read_variable_u32_array
  of_root
  panic
  param_ops_int
  param_ops_uint
  pinctrl_lookup_state
  pinctrl_select_state
  __platform_driver_register
  platform_driver_unregister
  platform_get_irq
  platform_get_irq_optional
  platform_get_resource
  platform_get_resource_byname
  __pm_runtime_disable
  pm_runtime_enable
  __pm_runtime_idle
  __pm_runtime_resume
  pm_runtime_set_autosuspend_delay
  __pm_runtime_suspend
  __pm_runtime_use_autosuspend
  _printk
  put_device
  __put_task_struct
  queue_delayed_work_on
  queue_work_on
  _raw_spin_lock
  _raw_spin_lock_irqsave
  _raw_spin_unlock
  _raw_spin_unlock_irqrestore
  rdev_get_drvdata
  rdev_get_id
  register_pm_notifier
  register_syscore_ops
  regmap_read
  regmap_update_bits_base
  regmap_write
  regulator_disable
  regulator_enable
  regulator_list_voltage_linear
  regulator_map_voltage_linear
  sched_clock
  seq_lseek
  seq_printf
  seq_puts
  seq_read
  sg_init_table
  simple_open
  simple_read_from_buffer
  simple_write_to_buffer
  single_open
  single_release
  snprintf
  sprintf
  sscanf
  __stack_chk_fail
  strcasecmp
  strcmp
  strlcat
  strlen
  strncmp
  strnlen
  strscpy
  strstr
  syscon_regmap_lookup_by_phandle
  sysfs_create_group
  sysfs_emit
  system_wq
  __udelay
  usleep_range_state
  wait_for_completion_timeout
  __warn_printk

# required by cmupmucal.ko
  debugfs_create_x32
  kernel_kobj
  kobject_create_and_add
  kstrtouint
  of_find_node_by_type

# required by dmatest.ko
  __dma_request_channel
  finish_wait
  get_random_bytes
  init_wait_entry
  __init_waitqueue_head
  kmem_cache_create
  kmem_cache_destroy
  kthread_create_on_node
  kthread_should_stop
  kthread_stop
  mempool_alloc
  mempool_alloc_slab
  mempool_create
  mempool_destroy
  mempool_free
  mempool_free_slab
  param_get_bool
  param_get_string
  param_ops_bool
  param_ops_string
  param_set_bool
  param_set_copystring
  prepare_to_wait_event
  refcount_warn_saturate
  schedule
  schedule_timeout
  set_freezable
  set_user_nice
  strim
  __wake_up
  wake_up_process

# required by dwc3-exynosauto-usb.ko
  cancel_work_sync
  device_create_managed_software_node
  device_property_present
  devm_phy_get
  devm_regulator_get
  of_match_device
  of_platform_populate
  phy_exit
  phy_init
  phy_power_off
  phy_power_on
  platform_device_add
  platform_device_add_resources
  platform_device_alloc
  platform_device_del
  platform_device_put
  platform_device_unregister
  platform_get_irq_byname_optional
  __pm_relax
  pm_runtime_allow
  pm_runtime_forbid
  __pm_runtime_set_status
  __pm_stay_awake
  sysfs_remove_group
  typec_register_partner
  typec_register_port
  typec_set_data_role
  typec_set_pwr_opmode
  typec_set_pwr_role
  typec_unregister_partner
  typec_unregister_port
  unregister_pm_notifier
  usb_gadget_set_state
  usb_otg_state_string
  wakeup_source_register
  wakeup_source_unregister

# required by ect_parser.ko
  memstart_addr
  of_reserved_mem_lookup
  strcpy
  vmap

# required by exynos-acme.ko
  __bitmap_and
  bitmap_parselist
  __bitmap_weight
  __cpu_active_mask
  cpufreq_cpu_get
  cpufreq_cpu_get_raw
  cpufreq_cpu_put
  __cpufreq_driver_target
  cpufreq_freq_transition_begin
  cpufreq_freq_transition_end
  cpufreq_frequency_table_verify
  cpufreq_generic_attr
  cpufreq_register_driver
  cpufreq_table_index_unsorted
  cpuhp_tasks_frozen
  __cpu_online_mask
  dev_pm_opp_add
  _find_first_bit
  freq_qos_add_request
  freq_qos_update_request
  get_cpu_device
  nr_cpu_ids
  scnprintf
  sysfs_create_file_ns
  system_state

# required by exynos-bts.ko
  devm_kstrdup
  _dev_notice

# required by exynos-chipid_v2.ko
  kasprintf
  of_device_is_available
  of_find_matching_node_and_match
  of_find_node_opts_by_path
  of_iomap
  of_property_read_string
  soc_device_register
  subsys_system_register

# required by exynos-pd.ko
  atomic_notifier_call_chain
  kstrdup
  of_genpd_add_provider_simple
  pm_genpd_add_subdomain
  pm_genpd_init

# required by exynos-pd_el3.ko
  __arm_smccc_smc

# required by exynos-pm-time.ko
  jiffies_to_msecs

# required by exynos-pm.ko
  of_property_count_elems_of_size

# required by exynos_acpm.ko
  generic_file_llseek
  simple_attr_open
  simple_attr_read
  simple_attr_release
  simple_attr_write
  wait_for_completion_interruptible_timeout

# required by exynos_pm_qos.ko
  blocking_notifier_chain_unregister
  kstrtoint_from_user
  misc_register
  noop_llseek
  _raw_read_lock_irqsave
  _raw_read_unlock_irqrestore
  _raw_write_lock_irqsave
  _raw_write_unlock_irqrestore
  __usecs_to_jiffies

# required by exynos_tty.ko
  atomic_notifier_chain_register
  clk_disable
  clk_enable
  clk_get_rate
  clk_set_rate
  __const_udelay
  dev_driver_string
  device_create_file
  devm_clk_get
  devm_ioremap
  devm_kfree
  devm_pinctrl_get
  disable_irq_nosync
  dma_get_slave_caps
  dma_map_page_attrs
  dma_release_channel
  dma_request_chan
  dma_sync_single_for_cpu
  dma_sync_single_for_device
  dma_unmap_page_attrs
  do_SAK
  enable_irq
  free_irq
  gic_nonsecure_priorities
  handle_sysrq
  iomem_resource
  is_vmalloc_addr
  jiffies
  kmalloc_large
  of_alias_get_id
  of_get_property
  of_match_node
  oops_in_progress
  panic_notifier_list
  pinctrl_lookup_state
  pinctrl_select_state
  platform_driver_unregister
  platform_get_irq
  platform_get_resource
  _raw_spin_trylock
  register_console
  regmap_read
  regmap_update_bits_base
  __release_region
  __request_region
  request_threaded_irq
  sched_clock
  sg_init_table
  sscanf
  syscon_regmap_lookup_by_phandle
  sysrq_mask
  tty_flip_buffer_push
  tty_insert_flip_string_fixed_flag
@@ -107,50 +399,158 @@
  uart_unregister_driver
  uart_update_timeout
  uart_write_wakeup
  __warn_printk

# required by exynosauto_v920_thermal.ko
  devm_thermal_of_zone_register
  devm_thermal_of_zone_unregister
  of_device_is_compatible
  of_thermal_get_ntrips
  strncpy
  thermal_zone_device_update

# required by i2c-dev.ko
  bus_register_notifier
  bus_unregister_notifier
  cdev_device_add
  cdev_device_del
  cdev_init
  device_initialize
  dev_set_name
  i2c_adapter_type
  i2c_bus_type
  i2c_for_each_dev
  i2c_get_adapter
  i2c_put_adapter
  i2c_smbus_xfer
  i2c_transfer
  i2c_transfer_buffer_flags
  i2c_verify_client
  register_chrdev_region
  unregister_chrdev_region

# required by i2c-exynosauto.ko
  cpu_bit_bitmap
  i2c_del_adapter
  __irq_apply_affinity_hint

# required by phy-exynosauto-usbdrd-super.ko
  __clk_is_enabled
  __devm_of_phy_provider_register
  devm_phy_create
  gpio_request

# required by pinctrl-samsung-core.ko
  device_get_next_child_node
  devm_gpiochip_add_data_with_key
  devm_kmemdup
  devm_pinctrl_register
  fwnode_handle_put
  fwnode_property_present
  generic_handle_domain_irq
  gpiochip_generic_free
  gpiochip_generic_request
  gpiochip_get_data
  gpiochip_lock_as_irq
  gpiochip_unlock_as_irq
  handle_edge_irq
  irq_create_mapping_affinity
  __irq_domain_add
  irq_domain_remove
  irq_domain_xlate_twocell
  irq_set_chained_handler_and_data
  kmemdup
  krealloc
  of_device_get_match_data
  of_fwnode_ops
  of_node_name_eq
  of_prop_next_string
  pinctrl_add_gpio_range
  pinctrl_dev_get_drvdata
  pinctrl_remove_gpio_range

# required by pl330.ko
  alt_cb_patch_nops
  amba_driver_register
  amba_driver_unregister
  debugfs_create_file
  dev_err_probe
  devm_free_irq
  devm_ioremap_resource
  __devm_reset_control_get
  dma_alloc_attrs
  dma_async_device_register
  dma_async_device_unregister
  dma_async_tx_descriptor_init
  dmaengine_unmap_put
  dma_free_attrs
  dma_get_slave_channel
  dma_map_resource
  dma_unmap_resource
  __kmalloc
  ktime_get_mono_fast_ns
  __list_del_entry_valid
  loops_per_jiffy
  of_dma_controller_free
  of_dma_controller_register
  pm_runtime_force_resume
  pm_runtime_force_suspend
  pm_runtime_irq_safe
  __pm_runtime_resume
  pm_runtime_set_autosuspend_delay
  __pm_runtime_suspend
  __pm_runtime_use_autosuspend
  _raw_spin_lock
  _raw_spin_unlock
  reset_control_assert
  reset_control_deassert
  seq_lseek
  seq_printf
  seq_puts
  seq_read
  sg_next
  single_open
  single_release
  tasklet_kill
  __tasklet_schedule
  tasklet_setup

# required by pwm-samsung.ko
  devm_platform_ioremap_resource
  of_prop_next_u32
  pwmchip_add
  pwmchip_remove
  pwm_get_chip_data
  pwm_set_chip_data

# required by rtc-s2vps02.ko
  devm_rtc_device_register
  pm_wakeup_ws_event
  rtc_update_irq
  rtc_valid_tm

# required by s2vps02-master-mfd.ko
  destroy_workqueue
  __devm_irq_alloc_descs
  handle_nested_irq

# required by spi-exynosauto.ko
  gpio_free
  gpio_request_one
  __spi_alloc_controller
  spi_controller_resume
  spi_controller_suspend
  spi_delay_exec
  spi_finalize_current_message
  spi_register_controller
  spi_unregister_controller

# required by spidev.ko
  device_property_match_string
  driver_unregister
  _raw_spin_lock_irq
  _raw_spin_unlock_irq
  __register_chrdev
  __spi_register_driver
  spi_setup
  spi_sync
  stream_open
  __unregister_chrdev

# required by ufs-exynosauto-core.ko
  _find_next_bit
  flush_work
  kstrtoull
  of_property_read_variable_u16_array
  of_property_read_variable_u8_array
  scsi_dma_unmap
  scsi_done
  ufshcd_auto_hibern8_update
  ufshcd_config_pwr_mode
  ufshcd_dme_get_attr
  ufshcd_dme_set_attr
  ufshcd_dump_regs
  ufshcd_link_recovery
  ufshcd_pltfrm_init
  ufshcd_release
  ufshcd_remove
  ufshcd_shutdown
  ufshcd_system_resume
  ufshcd_system_suspend
@@ -359,6 +359,7 @@
  __traceiter_android_vh_wq_lockup_pool
  __traceiter_block_rq_insert
  __traceiter_console
  __traceiter_error_report_end
  __traceiter_hrtimer_expire_entry
  __traceiter_hrtimer_expire_exit
  __traceiter_irq_handler_entry
@@ -400,6 +401,7 @@
  __tracepoint_android_vh_watchdog_timer_softlockup
  __tracepoint_android_vh_wq_lockup_pool
  __tracepoint_block_rq_insert
  __tracepoint_error_report_end
  __tracepoint_console
  __tracepoint_hrtimer_expire_entry
  __tracepoint_hrtimer_expire_exit
 android/abi_gki_aarch64_meizu | 14 (new file)
@@ -0,0 +1,14 @@
[abi_symbol_list]
  __traceiter_android_vh_tune_scan_type
  __traceiter_android_vh_tune_swappiness
  __tracepoint_android_vh_tune_swappiness
  __tracepoint_android_vh_tune_scan_type
  __traceiter_android_rvh_sk_alloc
  __traceiter_android_rvh_sk_free
  __tracepoint_android_rvh_sk_alloc
  __tracepoint_android_rvh_sk_free
  __traceiter_android_vh_alloc_pages_slowpath
  __tracepoint_android_vh_tune_swappiness
  __tracepoint_android_vh_tune_scan_type
  __tracepoint_android_vh_alloc_pages_slowpath
@@ -32,12 +32,15 @@
  iio_channel_get
  iio_channel_release
  iio_get_channel_type
  ip_local_deliver
  ip6_local_out
  ip6_route_me_harder
  ip_route_me_harder
  ipv6_find_hdr
  iov_iter_advance
  is_ashmem_file
  jiffies_64_to_clock_t
  kick_process
  ktime_get_coarse_real_ts64
  memory_cgrp_subsys
  memory_cgrp_subsys_enabled_key
@@ -46,8 +49,11 @@
  mmc_wait_for_cmd
  nf_ct_attach
  nf_ct_delete
  nf_register_net_hook
  nf_register_net_hooks
  nf_unregister_net_hook
  nf_unregister_net_hooks
  nr_running
  of_css
  __page_file_index
  __page_mapcount
@@ -65,6 +71,7 @@
  remove_proc_subtree
  rtc_read_alarm
  rtc_set_alarm
  __rtnl_link_unregister
  sdio_memcpy_fromio
  sdio_memcpy_toio
  sdio_set_block_size
@@ -90,6 +97,9 @@
  __traceiter_android_vh_account_process_tick_gran
  __traceiter_android_vh_account_task_time
  __traceiter_android_vh_do_futex
  __traceiter_android_vh_exit_check
  __traceiter_android_vh_exit_signal_whether_wake
  __traceiter_android_vh_freeze_whether_wake
  __traceiter_android_vh_futex_sleep_start
  __traceiter_android_vh_futex_wait_end
  __traceiter_android_vh_futex_wait_start
@@ -120,6 +130,7 @@
  __traceiter_android_vh_check_folio_look_around_ref
  __traceiter_android_vh_dup_task_struct
  __traceiter_android_vh_exit_signal
  __traceiter_android_vh_killed_process
  __traceiter_android_vh_look_around
  __traceiter_android_vh_look_around_migrate_folio
  __traceiter_android_vh_mem_cgroup_id_remove
@@ -150,6 +161,10 @@
  __traceiter_block_rq_issue
  __traceiter_block_rq_merge
  __traceiter_block_rq_requeue
  __traceiter_net_dev_queue
  __traceiter_net_dev_xmit
  __traceiter_netif_receive_skb
  __traceiter_netif_rx
  __traceiter_sched_stat_blocked
  __traceiter_sched_stat_iowait
  __traceiter_sched_stat_runtime
@@ -158,6 +173,7 @@
  __traceiter_sched_waking
  __traceiter_task_rename
  __traceiter_android_vh_test_clear_look_around_ref
  __traceiter_android_vh_tune_swappiness
  __tracepoint_android_rvh_post_init_entity_util_avg
  __tracepoint_android_rvh_rtmutex_force_update
  __tracepoint_android_vh_account_process_tick_gran
@@ -182,12 +198,16 @@
  __tracepoint_android_vh_check_folio_look_around_ref
  __tracepoint_android_vh_do_futex
  __tracepoint_android_vh_dup_task_struct
  __tracepoint_android_vh_exit_check
  __tracepoint_android_vh_exit_signal
  __tracepoint_android_vh_killed_process
  __tracepoint_android_vh_exit_signal_whether_wake
  __tracepoint_android_vh_mem_cgroup_id_remove
  __tracepoint_android_vh_mem_cgroup_css_offline
  __tracepoint_android_vh_mem_cgroup_css_online
  __tracepoint_android_vh_mem_cgroup_free
  __tracepoint_android_vh_mem_cgroup_alloc
  __tracepoint_android_vh_freeze_whether_wake
  __tracepoint_android_vh_futex_sleep_start
  __tracepoint_android_vh_futex_wait_end
  __tracepoint_android_vh_futex_wait_start
@@ -218,12 +238,17 @@
  __tracepoint_android_vh_sync_txn_recvd
  __tracepoint_android_vh_task_blocks_on_rtmutex
  __tracepoint_android_vh_test_clear_look_around_ref
  __tracepoint_android_vh_tune_swappiness
  __tracepoint_block_bio_queue
  __tracepoint_block_getrq
  __tracepoint_block_rq_complete
  __tracepoint_block_rq_issue
  __tracepoint_block_rq_merge
  __tracepoint_block_rq_requeue
  __tracepoint_net_dev_queue
  __tracepoint_net_dev_xmit
  __tracepoint_netif_receive_skb
  __tracepoint_netif_rx
  __tracepoint_sched_stat_blocked
  __tracepoint_sched_stat_iowait
  __tracepoint_sched_stat_runtime
@@ -240,5 +265,6 @@
  wait_for_completion_io_timeout
  wait_for_completion_killable_timeout
  wakeup_source_remove
  wake_up_state
  wq_worker_comm
  zero_pfn
@@ -73,6 +73,7 @@
  bin2hex
  bio_endio
  bio_end_io_acct_remapped
  bio_split
  bio_start_io_acct
  bitmap_allocate_region
  __bitmap_and
@@ -93,6 +94,8 @@
  bit_wait
  bit_wait_timeout
  __blk_alloc_disk
  blk_crypto_keyslot_index
  blk_crypto_register
  blkdev_get_by_dev
  blk_execute_rq
  blk_execute_rq_nowait
@@ -813,6 +816,22 @@
  dma_unmap_sg_attrs
  dma_vmap_noncontiguous
  dma_vunmap_noncontiguous
  dm_bufio_client_create
  dm_bufio_client_destroy
  dm_bufio_mark_buffer_dirty
  dm_bufio_new
  dm_bufio_read
  dm_bufio_release
  dm_bufio_write_dirty_buffers
  dm_disk
  dm_get_device
  dm_kobject_release
  dm_read_arg_group
  dm_register_target
  dm_shift_arg
  dm_table_get_md
  dm_table_get_mode
  dm_unregister_target
  do_trace_netlink_extack
  do_trace_rcu_torture_read
  double_rq_lock
@@ -2212,6 +2231,7 @@
  page_ext_put
  page_is_ram
  page_mapping
  page_owner_inited
  page_pinner_inited
  __page_pinner_put_page
  page_pool_alloc_pages
@@ -2887,6 +2907,7 @@
  set_normalized_timespec64
  set_page_dirty_lock
  __SetPageMovable
  __set_page_owner
  set_task_cpu
  setup_udp_tunnel_sock
  set_user_nice
@@ -3903,6 +3924,7 @@
  vhost_add_used_and_signal
  vhost_dev_check_owner
  vhost_dev_cleanup
  vhost_dev_flush
  vhost_dev_init
  vhost_dev_ioctl
  vhost_dev_stop
 android/abi_gki_aarch64_tuxera | 280 (new file)
@@ -0,0 +1,280 @@
[abi_symbol_list]
  alt_cb_patch_nops
  __arch_copy_from_user
  __arch_copy_to_user
  autoremove_wake_function
  balance_dirty_pages_ratelimited
  bcmp
  __bforget
  __bh_read_batch
  bio_add_page
  bio_alloc_bioset
  bio_put
  __bitmap_weight
  bit_waitqueue
  blkdev_issue_discard
  blkdev_issue_flush
  blk_finish_plug
  blk_start_plug
  __blockdev_direct_IO
  block_dirty_folio
  block_invalidate_folio
  block_is_partially_uptodate
  __breadahead
  __bread_gfp
  __brelse
  buffer_migrate_folio
  call_rcu
  capable
  capable_wrt_inode_uidgid
  __check_object_size
  clean_bdev_aliases
  clear_inode
  clear_page
  clear_page_dirty_for_io
  copy_page_from_iter_atomic
  cpu_hwcaps
  create_empty_buffers
  current_umask
  d_add
  d_add_ci
  d_instantiate
  d_make_root
  d_obtain_alias
  down_read
  down_write
  down_write_trylock
  dput
  drop_nlink
  d_splice_alias
  dump_stack
  end_buffer_read_sync
  end_buffer_write_sync
  end_page_writeback
  errseq_set
  fault_in_iov_iter_readable
  fault_in_safe_writeable
  fget
  fiemap_fill_next_extent
  fiemap_prep
  file_check_and_advance_wb_err
  filemap_add_folio
  filemap_dirty_folio
  filemap_fault
  filemap_fdatawait_range
  filemap_fdatawrite
  filemap_fdatawrite_range
  filemap_flush
  __filemap_set_wb_err
  filemap_write_and_wait_range
  file_remove_privs
  file_update_time
  file_write_and_wait_range
  finish_wait
  flush_dcache_page
  __folio_alloc
  __folio_cancel_dirty
  __folio_lock
  __folio_put
  folio_wait_bit
  folio_write_one
  fortify_panic
  fput
  freezer_active
  freezing_slow_path
  fs_bio_set
  generic_error_remove_page
  generic_file_direct_write
  generic_file_llseek
  generic_file_mmap
  generic_file_open
  generic_file_read_iter
  generic_file_splice_read
  generic_fillattr
  generic_perform_write
  generic_read_dir
  generic_write_checks
  __getblk_gfp
  gic_nonsecure_priorities
  grab_cache_page_write_begin
  iget5_locked
  igrab
  ihold
  ilookup5
  inc_nlink
  in_group_p
  __init_rwsem
  init_special_inode
  init_wait_entry
  __init_waitqueue_head
  inode_dio_wait
  inode_init_once
  inode_init_owner
  inode_maybe_inc_iversion
  inode_newsize_ok
  inode_set_flags
  __insert_inode_hash
  invalidate_bdev
  invalidate_inode_pages2_range
  invalidate_mapping_pages
  io_schedule
  iov_iter_advance
  iov_iter_alignment
  iov_iter_get_pages2
  iov_iter_single_seg_count
  iput
  is_bad_inode
  iter_file_splice_write
  iunique
  jiffies
  jiffies_to_msecs
  kasan_flag_enabled
  kfree
  kill_block_super
  __kmalloc
  kmalloc_caches
  kmalloc_trace
  kmem_cache_alloc
  kmem_cache_alloc_lru
  kmem_cache_create
  kmem_cache_create_usercopy
  kmem_cache_destroy
  kmem_cache_free
  krealloc
  kthread_complete_and_exit
  kthread_create_on_node
  kthread_should_stop
  kthread_stop
  ktime_get_coarse_real_ts64
  kvfree
  __list_add_valid
  __list_del_entry_valid
  load_nls
  load_nls_default
  __lock_buffer
  make_bad_inode
  mark_buffer_async_write
  mark_buffer_dirty
  mark_buffer_write_io_error
  __mark_inode_dirty
  mark_page_accessed
  memcmp
  memcpy
  memmove
  memset
  mktime64
  mnt_drop_write_file
  mnt_want_write_file
  mount_bdev
  mpage_readahead
  mpage_read_folio
  __msecs_to_jiffies
  __mutex_init
  mutex_lock
  mutex_trylock
  mutex_unlock
  new_inode
  notify_change
  pagecache_get_page
  page_cache_next_miss
  page_cache_prev_miss
  page_pinner_inited
  __page_pinner_put_page
  pagevec_lookup_range_tag
  __pagevec_release
  page_zero_new_buffers
  __percpu_down_read
  preempt_schedule
  preempt_schedule_notrace
  prepare_to_wait
  prepare_to_wait_event
  _printk
  __printk_ratelimit
  ___ratelimit
  _raw_read_lock
  _raw_read_lock_irqsave
  _raw_read_unlock
  _raw_read_unlock_irqrestore
  _raw_spin_lock
  _raw_spin_lock_irqsave
  _raw_spin_unlock
  _raw_spin_unlock_irqrestore
  _raw_write_lock
  _raw_write_lock_irqsave
  _raw_write_unlock
  _raw_write_unlock_irqrestore
  rcu_barrier
  rcuwait_wake_up
  readahead_gfp_mask
  read_cache_page
  redirty_page_for_writepage
  __refrigerator
  register_filesystem
  __remove_inode_hash
  sb_min_blocksize
  sb_set_blocksize
  schedule
  schedule_timeout
  schedule_timeout_interruptible
  security_inode_init_security
  seq_printf
  setattr_prepare
  set_freezable
  set_nlink
  set_page_dirty
  __set_page_dirty_nobuffers
  set_page_writeback
  set_user_nice
  simple_strtol
  simple_strtoul
  simple_strtoull
  snprintf
  sprintf
  sscanf
  __stack_chk_fail
  strchr
  strcmp
  strlen
  strncasecmp
  strncmp
  strsep
  strstr
  submit_bh
  submit_bio
  sync_blockdev
  __sync_dirty_buffer
  sync_dirty_buffer
  sync_filesystem
  sync_inode_metadata
  sys_tz
  tag_pages_for_writeback
  time64_to_tm
  timestamp_truncate
  touch_atime
  _trace_android_vh_record_pcpu_rwsem_starttime
  _trace_android_vh_record_pcpu_rwsem_time_early
  truncate_inode_pages
  truncate_inode_pages_final
  truncate_pagecache
  truncate_setsize
  try_to_writeback_inodes_sb
  unload_nls
  unlock_buffer
  unlock_new_inode
  unlock_page
  unregister_filesystem
  up_read
  up_write
  vfree
  vfs_fsync_range
  __vmalloc
  vmalloc
  vsnprintf
  vzalloc
  __wait_on_buffer
  wake_bit_function
  __wake_up
  wake_up_process
  __warn_printk
  write_inode_now
  xa_load
@@ -412,6 +412,7 @@
  param_ops_int
  param_ops_uint
  pcpu_nr_pages
  percpu_counter_batch
  __per_cpu_offset
  perf_trace_buf_alloc
  perf_trace_run_bpf_submit
@@ -1912,6 +1913,8 @@
# required by trusty-log.ko
  vm_map_ram
  vm_unmap_ram
# required by sprd_time_sync_cp.ko
  pvclock_gtod_register_notifier

# required by trusty-pm.ko
  unregister_syscore_ops
@@ -807,6 +807,7 @@
  blk_bio_list_merge
  blk_execute_rq
  blk_execute_rq_nowait
  blk_fill_rwbs
  blk_mq_alloc_request
  blk_mq_alloc_sq_tag_set
  blk_mq_alloc_tag_set
@@ -1,3 +1,4 @@
+arch/arm64/geniezone/gzvm.ko
 drivers/bluetooth/btbcm.ko
 drivers/bluetooth/btqca.ko
 drivers/bluetooth/btsdio.ko
@@ -5,6 +5,7 @@ obj-$(CONFIG_XEN) += xen/
 obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/
 obj-$(CONFIG_GUNYAH) += gunyah/
 obj-$(CONFIG_CRYPTO) += crypto/
+obj-$(CONFIG_MTK_GZVM) += geniezone/
 
 # for cleaning
 subdir- += boot
@@ -551,6 +551,7 @@ CONFIG_GUNYAH=y
 CONFIG_GUNYAH_VCPU=y
 CONFIG_GUNYAH_IRQFD=y
 CONFIG_GUNYAH_IOEVENTFD=y
+CONFIG_MTK_GZVM=m
 CONFIG_VHOST_VSOCK=y
 CONFIG_STAGING=y
 CONFIG_ASHMEM=y
 arch/arm64/geniezone/Makefile | 9 (new file)
@@ -0,0 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Main Makefile for gzvm, this one includes drivers/virt/geniezone/Makefile
#
include $(srctree)/drivers/virt/geniezone/Makefile

gzvm-y += vm.o vcpu.o vgic.o

obj-$(CONFIG_MTK_GZVM) += gzvm.o
 arch/arm64/geniezone/gzvm_arch_common.h | 110 (new file)
@@ -0,0 +1,110 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#ifndef __GZVM_ARCH_COMMON_H__
#define __GZVM_ARCH_COMMON_H__

#include <linux/arm-smccc.h>

enum {
	GZVM_FUNC_CREATE_VM = 0,
	GZVM_FUNC_DESTROY_VM = 1,
	GZVM_FUNC_CREATE_VCPU = 2,
	GZVM_FUNC_DESTROY_VCPU = 3,
	GZVM_FUNC_SET_MEMREGION = 4,
	GZVM_FUNC_RUN = 5,
	GZVM_FUNC_GET_ONE_REG = 8,
	GZVM_FUNC_SET_ONE_REG = 9,
	GZVM_FUNC_IRQ_LINE = 10,
	GZVM_FUNC_CREATE_DEVICE = 11,
	GZVM_FUNC_PROBE = 12,
	GZVM_FUNC_ENABLE_CAP = 13,
	GZVM_FUNC_INFORM_EXIT = 14,
	GZVM_FUNC_MEMREGION_PURPOSE = 15,
	GZVM_FUNC_SET_DTB_CONFIG = 16,
	NR_GZVM_FUNC,
};

#define SMC_ENTITY_MTK		59
#define GZVM_FUNCID_START	(0x1000)
#define GZVM_HCALL_ID(func)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32,	\
			   SMC_ENTITY_MTK, (GZVM_FUNCID_START + (func)))

#define MT_HVC_GZVM_CREATE_VM		GZVM_HCALL_ID(GZVM_FUNC_CREATE_VM)
#define MT_HVC_GZVM_DESTROY_VM		GZVM_HCALL_ID(GZVM_FUNC_DESTROY_VM)
#define MT_HVC_GZVM_CREATE_VCPU		GZVM_HCALL_ID(GZVM_FUNC_CREATE_VCPU)
#define MT_HVC_GZVM_DESTROY_VCPU	GZVM_HCALL_ID(GZVM_FUNC_DESTROY_VCPU)
#define MT_HVC_GZVM_SET_MEMREGION	GZVM_HCALL_ID(GZVM_FUNC_SET_MEMREGION)
#define MT_HVC_GZVM_RUN			GZVM_HCALL_ID(GZVM_FUNC_RUN)
#define MT_HVC_GZVM_GET_ONE_REG		GZVM_HCALL_ID(GZVM_FUNC_GET_ONE_REG)
#define MT_HVC_GZVM_SET_ONE_REG		GZVM_HCALL_ID(GZVM_FUNC_SET_ONE_REG)
#define MT_HVC_GZVM_IRQ_LINE		GZVM_HCALL_ID(GZVM_FUNC_IRQ_LINE)
#define MT_HVC_GZVM_CREATE_DEVICE	GZVM_HCALL_ID(GZVM_FUNC_CREATE_DEVICE)
#define MT_HVC_GZVM_PROBE		GZVM_HCALL_ID(GZVM_FUNC_PROBE)
#define MT_HVC_GZVM_ENABLE_CAP		GZVM_HCALL_ID(GZVM_FUNC_ENABLE_CAP)
#define MT_HVC_GZVM_INFORM_EXIT		GZVM_HCALL_ID(GZVM_FUNC_INFORM_EXIT)
#define MT_HVC_GZVM_MEMREGION_PURPOSE	GZVM_HCALL_ID(GZVM_FUNC_MEMREGION_PURPOSE)
#define MT_HVC_GZVM_SET_DTB_CONFIG	GZVM_HCALL_ID(GZVM_FUNC_SET_DTB_CONFIG)

#define GIC_V3_NR_LRS		16

/**
 * gzvm_hypcall_wrapper() - the wrapper for hvc calls
 * @a0-a7: arguments passed in registers 0 to 7
 * @res: result values from registers 0 to 3
 *
 * Return: The wrapper helps the caller convert a geniezone errno to a Linux
 * errno.
 */
static inline int gzvm_hypcall_wrapper(unsigned long a0, unsigned long a1,
				       unsigned long a2, unsigned long a3,
				       unsigned long a4, unsigned long a5,
				       unsigned long a6, unsigned long a7,
				       struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
	return gzvm_err_to_errno(res->a0);
}

static inline u16 get_vmid_from_tuple(unsigned int tuple)
{
	return (u16)(tuple >> 16);
}

static inline u16 get_vcpuid_from_tuple(unsigned int tuple)
{
	return (u16)(tuple & 0xffff);
}

/**
 * struct gzvm_vcpu_hwstate: Sync architecture state back to host for handling
 * @nr_lrs: The available LRs (list registers) in the SoC.
 * @__pad: an explicit '__u32 __pad;' in the middle to make the actual
 *         layout clear.
 * @lr: The array of LRs (list registers).
 *
 * - Keep the same layout as the hypervisor data struct.
 * - Sync list registers back for acking virtual device interrupt status.
 */
struct gzvm_vcpu_hwstate {
	__le32 nr_lrs;
	__le32 __pad;
	__le64 lr[GIC_V3_NR_LRS];
};

static inline unsigned int
assemble_vm_vcpu_tuple(u16 vmid, u16 vcpuid)
{
	return ((unsigned int)vmid << 16 | vcpuid);
}

static inline void
disassemble_vm_vcpu_tuple(unsigned int tuple, u16 *vmid, u16 *vcpuid)
{
	*vmid = get_vmid_from_tuple(tuple);
	*vcpuid = get_vcpuid_from_tuple(tuple);
}

#endif /* __GZVM_ARCH_COMMON_H__ */
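Most of the MT_HVC_GZVM_* calls above pack the VM and vCPU ids into a single 32-bit a1 argument via assemble_vm_vcpu_tuple(). The packing can be sanity-checked in isolation; a standalone sketch (plain C, with a stdint stand-in for the kernel's u16, assuming the helpers behave exactly as defined above):

#include <assert.h>
#include <stdint.h>

typedef uint16_t u16;

/* Mirrors assemble_vm_vcpu_tuple()/disassemble_vm_vcpu_tuple() above. */
static unsigned int assemble_vm_vcpu_tuple(u16 vmid, u16 vcpuid)
{
	return ((unsigned int)vmid << 16 | vcpuid);
}

static void disassemble_vm_vcpu_tuple(unsigned int tuple, u16 *vmid, u16 *vcpuid)
{
	*vmid = (u16)(tuple >> 16);
	*vcpuid = (u16)(tuple & 0xffff);
}

int main(void)
{
	u16 vmid, vcpuid;

	/* Round trip: vmid in the high half, vcpuid in the low half. */
	disassemble_vm_vcpu_tuple(assemble_vm_vcpu_tuple(3, 7), &vmid, &vcpuid);
	assert(vmid == 3 && vcpuid == 7);
	return 0;
}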
 arch/arm64/geniezone/vcpu.c | 88 (new file)
@@ -0,0 +1,88 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <linux/arm-smccc.h>
#include <linux/err.h>
#include <linux/uaccess.h>

#include <linux/gzvm.h>
#include <linux/gzvm_drv.h>
#include "gzvm_arch_common.h"

int gzvm_arch_vcpu_update_one_reg(struct gzvm_vcpu *vcpu, __u64 reg_id,
				  bool is_write, __u64 *data)
{
	struct arm_smccc_res res;
	unsigned long a1;
	int ret;

	/* reg id follows KVM's encoding */
	switch (reg_id & GZVM_REG_ARM_COPROC_MASK) {
	case GZVM_REG_ARM_CORE:
		break;
	default:
		return -EOPNOTSUPP;
	}

	a1 = assemble_vm_vcpu_tuple(vcpu->gzvm->vm_id, vcpu->vcpuid);
	if (!is_write) {
		ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_GET_ONE_REG,
					   a1, reg_id, 0, 0, 0, 0, 0, &res);
		if (ret == 0)
			*data = res.a1;
	} else {
		ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_SET_ONE_REG,
					   a1, reg_id, *data, 0, 0, 0, 0, &res);
	}

	return ret;
}

int gzvm_arch_vcpu_run(struct gzvm_vcpu *vcpu, __u64 *exit_reason)
{
	struct arm_smccc_res res;
	unsigned long a1;
	int ret;

	a1 = assemble_vm_vcpu_tuple(vcpu->gzvm->vm_id, vcpu->vcpuid);
	ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_RUN, a1, 0, 0, 0, 0, 0,
				   0, &res);
	*exit_reason = res.a1;
	return ret;
}

int gzvm_arch_destroy_vcpu(u16 vm_id, int vcpuid)
{
	struct arm_smccc_res res;
	unsigned long a1;

	a1 = assemble_vm_vcpu_tuple(vm_id, vcpuid);
	gzvm_hypcall_wrapper(MT_HVC_GZVM_DESTROY_VCPU, a1, 0, 0, 0, 0, 0, 0,
			     &res);

	return 0;
}

/**
 * gzvm_arch_create_vcpu() - Call smc to gz hypervisor to create vcpu
 * @vm_id: vm id
 * @vcpuid: vcpu id
 * @run: Virtual address of vcpu->run
 *
 * Return: The wrapper helps the caller convert a geniezone errno to a Linux
 * errno.
 */
int gzvm_arch_create_vcpu(u16 vm_id, int vcpuid, void *run)
{
	struct arm_smccc_res res;
	unsigned long a1, a2;
	int ret;

	a1 = assemble_vm_vcpu_tuple(vm_id, vcpuid);
	a2 = (__u64)virt_to_phys(run);
	ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_CREATE_VCPU, a1, a2, 0, 0, 0, 0,
				   0, &res);

	return ret;
}
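The one-reg dispatch in gzvm_arch_vcpu_update_one_reg() keys on the coproc field of a KVM-style register id, as its comment notes. A standalone sketch of that dispatch using KVM's published field layout (coproc field at bits 16-27 of the id) — that GZVM_REG_ARM_COPROC_MASK/GZVM_REG_ARM_CORE mirror these exact values is an assumption here, not confirmed by this diff:

#include <stdint.h>
#include <stdio.h>

/* KVM's aarch64 encoding; assumed to match the GZVM_REG_ARM_* constants. */
#define REG_ARM_COPROC_MASK 0x000000000fff0000ULL
#define REG_ARM_CORE        0x0000000000100000ULL

static const char *reg_class(uint64_t reg_id)
{
	/* Same shape as the switch in gzvm_arch_vcpu_update_one_reg(). */
	switch (reg_id & REG_ARM_COPROC_MASK) {
	case REG_ARM_CORE:
		return "core register: handled via GET/SET_ONE_REG";
	default:
		return "unsupported class: -EOPNOTSUPP";
	}
}

int main(void)
{
	printf("%s\n", reg_class(REG_ARM_CORE | 0x26));
	printf("%s\n", reg_class(0x0000000000200000ULL));
	return 0;
}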
 arch/arm64/geniezone/vgic.c | 108 (new file)
@ -0,0 +1,108 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2023 MediaTek Inc.
|
||||
*/
|
||||
|
||||
#include <linux/irqchip/arm-gic-v3.h>
|
||||
#include <linux/gzvm.h>
|
||||
#include <linux/gzvm_drv.h>
|
||||
#include "gzvm_arch_common.h"
|
||||
|
||||
/**
|
||||
* is_irq_valid() - Check the irq number and irq_type are matched
|
||||
* @irq: interrupt number
|
||||
* @irq_type: interrupt type
|
||||
*
|
||||
* Return:
|
||||
* true if irq is valid else false.
|
||||
*/
|
||||
static bool is_irq_valid(u32 irq, u32 irq_type)
|
||||
{
|
||||
switch (irq_type) {
|
||||
case GZVM_IRQ_TYPE_CPU:
|
||||
/* 0 ~ 15: SGI */
|
||||
if (likely(irq <= GZVM_IRQ_CPU_FIQ))
|
||||
return true;
|
||||
break;
|
||||
case GZVM_IRQ_TYPE_PPI:
|
||||
/* 16 ~ 31: PPI */
|
||||
if (likely(irq >= GZVM_VGIC_NR_SGIS &&
|
||||
irq < GZVM_VGIC_NR_PRIVATE_IRQS))
|
||||
return true;
|
||||
break;
|
||||
case GZVM_IRQ_TYPE_SPI:
|
||||
/* 32 ~ : SPT */
|
||||
if (likely(irq >= GZVM_VGIC_NR_PRIVATE_IRQS))
|
||||
return true;
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* gzvm_vgic_inject_irq() - Inject virtual interrupt to a VM
|
||||
* @gzvm: Pointer to struct gzvm
|
||||
* @vcpu_idx: vcpu index, only valid if PPI
|
||||
* @irq_type: Interrupt type
|
||||
* @irq: irq number
|
||||
* @level: 1 if true else 0
|
||||
*
|
||||
* Return:
|
||||
* * 0 - Success.
|
||||
* * Negative - Failure.
|
||||
*/
|
||||
static int gzvm_vgic_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,
|
||||
u32 irq_type, u32 irq, bool level)
|
||||
{
|
||||
unsigned long a1 = assemble_vm_vcpu_tuple(gzvm->vm_id, vcpu_idx);
|
||||
struct arm_smccc_res res;
|
||||
|
||||
if (!unlikely(is_irq_valid(irq, irq_type)))
|
||||
return -EINVAL;
|
||||
|
||||
gzvm_hypcall_wrapper(MT_HVC_GZVM_IRQ_LINE, a1, irq, level,
|
||||
0, 0, 0, 0, &res);
|
||||
if (res.a0) {
|
||||
pr_err("Failed to set IRQ level (%d) to irq#%u on vcpu %d with ret=%d\n",
|
||||
level, irq, vcpu_idx, (int)res.a0);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gzvm_vgic_inject_spi() - Inject virtual spi interrupt
|
||||
* @gzvm: Pointer to struct gzvm
|
||||
* @vcpu_idx: vcpu index
|
||||
* @spi_irq: This is spi interrupt number (starts from 0 instead of 32)
|
||||
* @level: 1 if true else 0
|
||||
*
|
||||
* Return:
|
||||
* * 0 if succeed else other negative values indicating each errors
|
||||
*/
|
||||
static int gzvm_vgic_inject_spi(struct gzvm *gzvm, unsigned int vcpu_idx,
|
||||
u32 spi_irq, bool level)
|
||||
{
|
||||
return gzvm_vgic_inject_irq(gzvm, 0, GZVM_IRQ_TYPE_SPI,
|
||||
spi_irq + GZVM_VGIC_NR_PRIVATE_IRQS,
|
||||
level);
|
||||
}
|
||||
|
||||
int gzvm_arch_create_device(u16 vm_id, struct gzvm_create_device *gzvm_dev)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
|
||||
return gzvm_hypcall_wrapper(MT_HVC_GZVM_CREATE_DEVICE, vm_id,
|
||||
virt_to_phys(gzvm_dev), 0, 0, 0, 0, 0,
|
||||
&res);
|
||||
}
|
||||
|
||||
int gzvm_arch_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,
|
||||
u32 irq_type, u32 irq, bool level)
|
||||
{
|
||||
/* default use spi */
|
||||
return gzvm_vgic_inject_spi(gzvm, vcpu_idx, irq, level);
|
||||
}
|
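Because gzvm_vgic_inject_spi() rebases the caller-visible SPI number past the 32 private interrupts, a usage sketch of the entry point above looks like this (the address-space numbers are only illustrative):

/* Example: injecting the interrupt a GICv3 guest sees as SPI 64.
 * VMM-facing SPI numbers start at 0, so irq = 32 here becomes
 * 32 + GZVM_VGIC_NR_PRIVATE_IRQS = 64 inside the virtual GIC.
 */
gzvm_arch_inject_irq(gzvm, /*vcpu_idx=*/0, GZVM_IRQ_TYPE_SPI,
		     /*irq=*/32, /*level=*/true);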
arch/arm64/geniezone/vm.c (new file, 242 lines)
@@ -0,0 +1,242 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <asm/sysreg.h>
#include <linux/arm-smccc.h>
#include <linux/err.h>
#include <linux/uaccess.h>

#include <linux/gzvm.h>
#include <linux/gzvm_drv.h>
#include "gzvm_arch_common.h"

#define PAR_PA47_MASK ((((1UL << 48) - 1) >> 12) << 12)

int gzvm_arch_inform_exit(u16 vm_id)
{
	struct arm_smccc_res res;

	arm_smccc_hvc(MT_HVC_GZVM_INFORM_EXIT, vm_id, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == 0)
		return 0;

	return -ENXIO;
}

int gzvm_arch_probe(void)
{
	struct arm_smccc_res res;

	arm_smccc_hvc(MT_HVC_GZVM_PROBE, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == 0)
		return 0;

	return -ENXIO;
}

int gzvm_arch_set_memregion(u16 vm_id, size_t buf_size,
			    phys_addr_t region)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_SET_MEMREGION, vm_id,
				    buf_size, region, 0, 0, 0, 0, &res);
}

static int gzvm_cap_arm_vm_ipa_size(void __user *argp)
{
	__u64 value = CONFIG_ARM64_PA_BITS;

	if (copy_to_user(argp, &value, sizeof(__u64)))
		return -EFAULT;

	return 0;
}

int gzvm_arch_check_extension(struct gzvm *gzvm, __u64 cap, void __user *argp)
{
	int ret = -EOPNOTSUPP;

	switch (cap) {
	case GZVM_CAP_ARM_PROTECTED_VM: {
		__u64 success = 1;

		if (copy_to_user(argp, &success, sizeof(__u64)))
			return -EFAULT;
		ret = 0;
		break;
	}
	case GZVM_CAP_ARM_VM_IPA_SIZE: {
		ret = gzvm_cap_arm_vm_ipa_size(argp);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

/**
 * gzvm_arch_create_vm() - create vm
 * @vm_type: VM type. Only supports Linux VM now.
 *
 * Return:
 * * positive value - VM ID
 * * -ENOMEM - Not enough memory for storing VM data
 */
int gzvm_arch_create_vm(unsigned long vm_type)
{
	struct arm_smccc_res res;
	int ret;

	ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_CREATE_VM, vm_type, 0, 0, 0, 0,
				   0, 0, &res);

	return ret ? ret : res.a1;
}

int gzvm_arch_destroy_vm(u16 vm_id)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_DESTROY_VM, vm_id, 0, 0, 0, 0,
				    0, 0, &res);
}

int gzvm_arch_memregion_purpose(struct gzvm *gzvm,
				struct gzvm_userspace_memory_region *mem)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_MEMREGION_PURPOSE, gzvm->vm_id,
				    mem->guest_phys_addr, mem->memory_size,
				    mem->flags, 0, 0, 0, &res);
}

int gzvm_arch_set_dtb_config(struct gzvm *gzvm, struct gzvm_dtb_config *cfg)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_SET_DTB_CONFIG, gzvm->vm_id,
				    cfg->dtb_addr, cfg->dtb_size, 0, 0, 0, 0,
				    &res);
}

static int gzvm_vm_arch_enable_cap(struct gzvm *gzvm,
				   struct gzvm_enable_cap *cap,
				   struct arm_smccc_res *res)
{
	return gzvm_hypcall_wrapper(MT_HVC_GZVM_ENABLE_CAP, gzvm->vm_id,
				    cap->cap, cap->args[0], cap->args[1],
				    cap->args[2], cap->args[3], cap->args[4],
				    res);
}

/**
 * gzvm_vm_ioctl_get_pvmfw_size() - Get pvmfw size from the hypervisor
 *                                  (returned in x1) and hand it back to
 *                                  userspace in args
 * @gzvm: Pointer to struct gzvm.
 * @cap: Pointer to struct gzvm_enable_cap.
 * @argp: Pointer to struct gzvm_enable_cap in user space.
 *
 * Return:
 * * 0 - Success
 * * -EINVAL - Hypervisor returned invalid results
 * * -EFAULT - Failed to copy back to the userspace buffer
 */
static int gzvm_vm_ioctl_get_pvmfw_size(struct gzvm *gzvm,
					struct gzvm_enable_cap *cap,
					void __user *argp)
{
	struct arm_smccc_res res = {0};

	if (gzvm_vm_arch_enable_cap(gzvm, cap, &res) != 0)
		return -EINVAL;

	cap->args[1] = res.a1;
	if (copy_to_user(argp, cap, sizeof(*cap)))
		return -EFAULT;

	return 0;
}

/**
 * gzvm_vm_ioctl_cap_pvm() - Process GZVM_CAP_ARM_PROTECTED_VM's subcommands
 * @gzvm: Pointer to struct gzvm.
 * @cap: Pointer to struct gzvm_enable_cap.
 * @argp: Pointer to struct gzvm_enable_cap in user space.
 *
 * Return:
 * * 0 - Success
 * * -EINVAL - Invalid subcommand or arguments
 */
static int gzvm_vm_ioctl_cap_pvm(struct gzvm *gzvm,
				 struct gzvm_enable_cap *cap,
				 void __user *argp)
{
	int ret = -EINVAL;
	struct arm_smccc_res res = {0};

	switch (cap->args[0]) {
	case GZVM_CAP_ARM_PVM_SET_PVMFW_IPA:
		fallthrough;
	case GZVM_CAP_ARM_PVM_SET_PROTECTED_VM:
		ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
		break;
	case GZVM_CAP_ARM_PVM_GET_PVMFW_SIZE:
		ret = gzvm_vm_ioctl_get_pvmfw_size(gzvm, cap, argp);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
				  struct gzvm_enable_cap *cap,
				  void __user *argp)
{
	int ret = -EINVAL;

	switch (cap->cap) {
	case GZVM_CAP_ARM_PROTECTED_VM:
		ret = gzvm_vm_ioctl_cap_pvm(gzvm, cap, argp);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * gzvm_hva_to_pa_arch() - convert hva to pa in an arch-specific way
 * @hva: Host virtual address.
 *
 * Return: 0 on translation error
 */
u64 gzvm_hva_to_pa_arch(u64 hva)
{
	u64 par;
	unsigned long flags;

	local_irq_save(flags);
	asm volatile("at s1e1r, %0" :: "r" (hva));
	isb();
	par = read_sysreg_par();
	local_irq_restore(flags);

	if (par & SYS_PAR_EL1_F)
		return 0;

	return par & PAR_PA47_MASK;
}
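gzvm_hva_to_pa_arch() performs the stage-1 translation with the AT S1E1R instruction and reads PAR_EL1; PAR_PA47_MASK then keeps physical-address bits [47:12], since bits [11:0] are the page offset carried over from the original VA. A small compile-time check makes the mask's value explicit:

#include <linux/build_bug.h>

/* PAR_PA47_MASK selects PAR_EL1 bits [47:12]: ((1<<48)-1) cleared of
 * its low 12 bits is exactly 0x0000fffffffff000.
 */
static_assert(PAR_PA47_MASK == 0x0000fffffffff000UL);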
arch/arm64/include/uapi/asm/gzvm_arch.h (new file, 54 lines)
@@ -0,0 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#ifndef __GZVM_ARCH_H__
#define __GZVM_ARCH_H__

#include <linux/types.h>

#define GZVM_CAP_ARM_VM_IPA_SIZE	165
#define GZVM_CAP_ARM_PROTECTED_VM	0xffbadab1

/* sub-commands put in args[0] for GZVM_CAP_ARM_PROTECTED_VM */
#define GZVM_CAP_ARM_PVM_SET_PVMFW_IPA		0
#define GZVM_CAP_ARM_PVM_GET_PVMFW_SIZE		1
/* GZVM_CAP_ARM_PVM_SET_PROTECTED_VM only sets protected but does not load pvmfw */
#define GZVM_CAP_ARM_PVM_SET_PROTECTED_VM	2

/*
 * Architecture-specific registers are to be defined in arch headers and
 * ORed with the arch identifier.
 */
#define GZVM_REG_ARM		0x4000000000000000ULL
#define GZVM_REG_ARM64		0x6000000000000000ULL

#define GZVM_REG_SIZE_SHIFT	52
#define GZVM_REG_SIZE_MASK	0x00f0000000000000ULL
#define GZVM_REG_SIZE_U8	0x0000000000000000ULL
#define GZVM_REG_SIZE_U16	0x0010000000000000ULL
#define GZVM_REG_SIZE_U32	0x0020000000000000ULL
#define GZVM_REG_SIZE_U64	0x0030000000000000ULL
#define GZVM_REG_SIZE_U128	0x0040000000000000ULL
#define GZVM_REG_SIZE_U256	0x0050000000000000ULL
#define GZVM_REG_SIZE_U512	0x0060000000000000ULL
#define GZVM_REG_SIZE_U1024	0x0070000000000000ULL
#define GZVM_REG_SIZE_U2048	0x0080000000000000ULL

#define GZVM_REG_ARCH_MASK	0xff00000000000000ULL

/* If you need to interpret the index values, here is the key: */
#define GZVM_REG_ARM_COPROC_MASK	0x000000000FFF0000
#define GZVM_REG_ARM_COPROC_SHIFT	16

/* Normal registers are mapped as coprocessor 16. */
#define GZVM_REG_ARM_CORE	(0x0010 << GZVM_REG_ARM_COPROC_SHIFT)
#define GZVM_REG_ARM_CORE_REG(name)	\
	(offsetof(struct gzvm_regs, name) / sizeof(__u32))

#define GZVM_VGIC_NR_SGIS		16
#define GZVM_VGIC_NR_PPIS		16
#define GZVM_VGIC_NR_PRIVATE_IRQS	(GZVM_VGIC_NR_SGIS + GZVM_VGIC_NR_PPIS)

#endif /* __GZVM_ARCH_H__ */
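These macros compose KVM-style register IDs like the ones gzvm_arch_vcpu_update_one_reg() decodes above. A sketch of building one; the "regs.pc" field name inside struct gzvm_regs is an assumption for illustration, as that struct's layout is defined elsewhere in the gzvm UAPI:

/* Sketch: a 64-bit arm64 core-register ID in the KVM-compatible encoding:
 * arch identifier | size | coprocessor class | register index.
 */
__u64 reg_id = GZVM_REG_ARM64 | GZVM_REG_SIZE_U64 |
	       GZVM_REG_ARM_CORE | GZVM_REG_ARM_CORE_REG(regs.pc);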
@@ -22,6 +22,8 @@
 #include <asm/cputype.h>
 #include <asm/topology.h>

+#include <trace/hooks/topology.h>
+
 #ifdef CONFIG_ACPI
 static bool __init acpi_cpu_is_threaded(int cpu)
 {
@@ -151,6 +153,11 @@ static void amu_scale_freq_tick(void)
 {
 	u64 prev_core_cnt, prev_const_cnt;
 	u64 core_cnt, const_cnt, scale;
+	bool use_amu_fie = true;
+
+	trace_android_vh_use_amu_fie(&use_amu_fie);
+	if (!use_amu_fie)
+		return;

 	prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
 	prev_core_cnt = this_cpu_read(arch_core_cycles_prev);
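The new android_vh_use_amu_fie hook lets a vendor module veto AMU-based frequency invariance at runtime. A sketch of how such a module would attach to it; the handler name and the policy are made up, and register_trace_android_vh_use_amu_fie() follows the usual naming convention generated by the Android vendor-hook machinery rather than being quoted from this patch:

/* Hypothetical vendor-module handler: opt this SoC out of AMU-based FIE. */
static void example_use_amu_fie(void *unused, bool *use_amu_fie)
{
	*use_amu_fie = false;
}

static int __init example_init(void)
{
	return register_trace_android_vh_use_amu_fie(example_use_amu_fie, NULL);
}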
@@ -1048,9 +1048,20 @@ static int __host_check_page_state_range(u64 addr, u64 size,
 static int __host_set_page_state_range(u64 addr, u64 size,
				       enum pkvm_page_state state)
 {
+	bool update_iommu = true;
 	enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);

-	return host_stage2_idmap_locked(addr, size, prot, true);
+	/*
+	 * Sharing and unsharing host pages shouldn't change the IOMMU page
+	 * tables, so avoid the extra page-table walks for the IOMMU.
+	 * HOWEVER, THIS WILL NOT WORK ONCE DEVICE ASSIGNMENT IS SUPPORTED,
+	 * AS THE GUEST MIGHT HAVE ACCESS TO DMA; since Android-14 doesn't
+	 * support device assignment, this is fine.
+	 */
+	if ((state == PKVM_PAGE_OWNED) || (state == PKVM_PAGE_SHARED_OWNED))
+		update_iommu = false;
+
+	return host_stage2_idmap_locked(addr, size, prot, update_iommu);
 }

 static int host_request_owned_transition(u64 *completer_addr,
@@ -3,9 +3,14 @@ core-y += arch/x86/crypto/

 #
 # Disable SSE and other FP/SIMD instructions to match normal x86
 # This is required to work around issues in older LLVM versions, but breaks
 # GCC versions < 11. See:
 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652
 #
 ifeq ($(CONFIG_CC_IS_CLANG),y)
 KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
 KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
 endif

 ifeq ($(CONFIG_X86_32),y)
         START := 0x8048000
@@ -78,7 +78,7 @@ static struct blk_crypto_fallback_keyslot {
 	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
 } *blk_crypto_keyslots;

-static struct blk_crypto_profile blk_crypto_fallback_profile;
+static struct blk_crypto_profile *blk_crypto_fallback_profile;
 static struct workqueue_struct *blk_crypto_wq;
 static mempool_t *blk_crypto_bounce_page_pool;
 static struct bio_set crypto_bio_split;
@@ -294,7 +294,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
 	 * this bio's algorithm and key.
 	 */
-	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
					bc->bc_key, &slot);
 	if (blk_st != BLK_STS_OK) {
 		src_bio->bi_status = blk_st;
@@ -397,7 +397,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
 	 * this bio's algorithm and key.
 	 */
-	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
					bc->bc_key, &slot);
 	if (blk_st != BLK_STS_OK) {
 		bio->bi_status = blk_st;
@@ -501,7 +501,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
 		return false;
 	}

-	if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
+	if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
					&bc->bc_key->crypto_cfg)) {
 		bio->bi_status = BLK_STS_NOTSUPP;
 		return false;
@@ -528,7 +528,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)

 int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
 {
-	return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
+	return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
 }

 static bool blk_crypto_fallback_inited;
@@ -536,7 +536,6 @@ static int blk_crypto_fallback_init(void)
 {
 	int i;
 	int err;
-	struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;

 	if (blk_crypto_fallback_inited)
 		return 0;
@@ -547,19 +546,28 @@ static int blk_crypto_fallback_init(void)
 	if (err)
 		goto out;

-	err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
-	if (err)
+	/* Dynamic allocation is needed because of lockdep_register_key(). */
+	blk_crypto_fallback_profile =
+		kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
+	if (!blk_crypto_fallback_profile) {
+		err = -ENOMEM;
 		goto fail_free_bioset;
+	}
+
+	err = blk_crypto_profile_init(blk_crypto_fallback_profile,
+				      blk_crypto_num_keyslots);
+	if (err)
+		goto fail_free_profile;
 	err = -ENOMEM;

-	profile->ll_ops = blk_crypto_fallback_ll_ops;
-	profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
-	profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_STANDARD;
+	blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
+	blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+	blk_crypto_fallback_profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_STANDARD;

 	/* All blk-crypto modes have a crypto API fallback. */
 	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
-		profile->modes_supported[i] = 0xFFFFFFFF;
-	profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+		blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
+	blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

 	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
@@ -600,7 +608,9 @@ static int blk_crypto_fallback_init(void)
 fail_free_wq:
 	destroy_workqueue(blk_crypto_wq);
 fail_destroy_profile:
-	blk_crypto_profile_destroy(profile);
+	blk_crypto_profile_destroy(blk_crypto_fallback_profile);
+fail_free_profile:
+	kfree(blk_crypto_fallback_profile);
 fail_free_bioset:
 	bioset_exit(&crypto_bio_split);
 out:
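The "Dynamic allocation is needed because of lockdep_register_key()" comment refers to lockdep's rule that runtime-registered lock class keys must live in dynamically allocated memory, not in static storage. A minimal sketch of the resulting pattern, assuming only that blk_crypto_profile_init() registers such a key internally:

/* Sketch: because the profile's init path registers a lockdep key, the
 * profile itself must be heap-allocated rather than a static variable.
 */
struct blk_crypto_profile *p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
	return -ENOMEM;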
@@ -77,6 +77,10 @@ static void ioc_destroy_icq(struct io_cq *icq)
 	struct elevator_type *et = q->elevator->type;

 	lockdep_assert_held(&ioc->lock);
+	lockdep_assert_held(&q->queue_lock);
+
+	if (icq->flags & ICQ_DESTROYED)
+		return;

 	radix_tree_delete(&ioc->icq_tree, icq->q->id);
 	hlist_del_init(&icq->ioc_node);
@@ -128,12 +132,7 @@ static void ioc_release_fn(struct work_struct *work)
 		spin_lock(&q->queue_lock);
 		spin_lock(&ioc->lock);

-		/*
-		 * The icq may have been destroyed when the ioc lock
-		 * was released.
-		 */
-		if (!(icq->flags & ICQ_DESTROYED))
-			ioc_destroy_icq(icq);
+		ioc_destroy_icq(icq);

 		spin_unlock(&q->queue_lock);
 		rcu_read_unlock();
@@ -171,23 +170,20 @@ static bool ioc_delay_free(struct io_context *ioc)
  */
 void ioc_clear_queue(struct request_queue *q)
 {
-	LIST_HEAD(icq_list);
-
 	spin_lock_irq(&q->queue_lock);
-	list_splice_init(&q->icq_list, &icq_list);
-	spin_unlock_irq(&q->queue_lock);
-
 	rcu_read_lock();
-	while (!list_empty(&icq_list)) {
+	while (!list_empty(&q->icq_list)) {
 		struct io_cq *icq =
-			list_entry(icq_list.next, struct io_cq, q_node);
+			list_first_entry(&q->icq_list, struct io_cq, q_node);

-		spin_lock_irq(&icq->ioc->lock);
-		if (!(icq->flags & ICQ_DESTROYED))
-			ioc_destroy_icq(icq);
-		spin_unlock_irq(&icq->ioc->lock);
+		/*
+		 * Other context won't hold ioc lock to wait for queue_lock, see
+		 * details in ioc_release_fn().
+		 */
+		spin_lock(&icq->ioc->lock);
+		ioc_destroy_icq(icq);
+		spin_unlock(&icq->ioc->lock);
 	}
 	rcu_read_unlock();
+	spin_unlock_irq(&q->queue_lock);
 }
 #else /* CONFIG_BLK_ICQ */
 static inline void ioc_exit_icqs(struct io_context *ioc)
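The key change is the lock nesting: ioc_clear_queue() now takes q->queue_lock once as the outer IRQ-disabling lock and uses the plain spin_lock() variant for the inner ioc lock, the same order ioc_release_fn() uses. A comment-only illustration of the resulting discipline:

/* Lock ordering after this change (both call sites agree):
 *
 *	spin_lock_irq(&q->queue_lock);	// outer lock, disables IRQs once
 *	spin_lock(&icq->ioc->lock);	// inner lock, no nested _irq variant
 *	ioc_destroy_icq(icq);
 *	spin_unlock(&icq->ioc->lock);
 *	spin_unlock_irq(&q->queue_lock);
 *
 * The old code nested spin_lock_irq()/spin_unlock_irq() inside another
 * IRQ-disabling critical section, so the inner unlock re-enabled
 * interrupts while queue_lock was still held.
 */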
@@ -4,7 +4,6 @@ POST_DEFCONFIG_CMDS="update_config"
 function update_config() {
     ${KERNEL_DIR}/scripts/config --file ${OUT_DIR}/.config \
          -e UNWINDER_FRAME_POINTER \
          -d WERROR \
          -d SAMPLES \
          -d BPFILTER \
          -e RANDSTRUCT_NONE \
@@ -1,5 +0,0 @@
-. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.aarch64
-
-DEFCONFIG=16k_gki_defconfig
-PRE_DEFCONFIG_CMDS="mkdir -p \${OUT_DIR}/arch/arm64/configs/ && cat ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/16k_gki.fragment > \${OUT_DIR}/arch/arm64/configs/${DEFCONFIG};"
-POST_DEFCONFIG_CMDS=""
@@ -17,9 +17,9 @@
  * related macros to be expanded as they would be for built-in code; e.g.,
  * module_init() adds the function to the .initcalls section of the binary.
  *
- * The .c file that contains the real module_init() for fips140.ko is then
- * responsible for redefining MODULE, and the real module_init() is responsible
- * for executing all the initcalls that were collected into .initcalls.
+ * The .c files that contain the real module_init, module license, and module
+ * parameters for fips140.ko are then responsible for redefining MODULE. The
+ * real module_init executes all initcalls that were collected into .initcalls.
  */
 #undef MODULE
@@ -20,6 +20,14 @@
 __inline_maybe_unused notrace
 #undef BUILD_FIPS140_KO

+/*
+ * Since this .c file contains real module parameters for fips140.ko, it needs
+ * to be compiled normally, so undo the hacks that were done in fips140-defs.h.
+ */
+#define MODULE
+#undef KBUILD_MODFILE
+#undef __DISABLE_EXPORTS
+
 #include <linux/cdev.h>
 #include <linux/fs.h>
 #include <linux/module.h>
@@ -85,6 +85,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_set_priority);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_restore_priority);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wakeup_ilocked);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_send_sig_info);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_killed_process);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_start);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_finish);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_init);
@@ -322,4 +323,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around_migrate_folio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_test_clear_look_around_ref);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_scan_type);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_swappiness);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_signal_whether_wake);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_check);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freeze_whether_wake);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_use_amu_fie);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_scan_abort_check_wmarks);
@@ -34,6 +34,12 @@ static DEFINE_PER_CPU(u32, freq_factor) = 1;

 static bool supports_scale_freq_counters(const struct cpumask *cpus)
 {
+	bool use_amu_fie = true;
+
+	trace_android_vh_use_amu_fie(&use_amu_fie);
+	if (!use_amu_fie)
+		return false;
+
 	return cpumask_subset(cpus, &scale_freq_counters_mask);
 }
@@ -202,6 +202,19 @@ struct teo_cpu {

 static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);

+unsigned long teo_cpu_get_util_threshold(int cpu)
+{
+	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, cpu);
+
+	return cpu_data->util_threshold;
+}
+EXPORT_SYMBOL_GPL(teo_cpu_get_util_threshold);
+
+void teo_cpu_set_util_threshold(int cpu, unsigned long util)
+{
+	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, cpu);
+
+	cpu_data->util_threshold = util;
+}
+EXPORT_SYMBOL_GPL(teo_cpu_set_util_threshold);
+
 /**
  * teo_cpu_is_utilized - Check if the CPU's util is above the threshold
  * @cpu: Target CPU
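Both helpers are exported so a vendor module can tune the TEO governor's per-CPU utilization threshold. A sketch of the intended use; the tuning policy below is invented for illustration:

/* Hypothetical vendor-module usage: halve the idle governor's
 * utilization threshold on CPU 0.
 */
unsigned long th = teo_cpu_get_util_threshold(0);
teo_cpu_set_util_threshold(0, th / 2);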
@@ -179,7 +179,8 @@ static void smsusb_stop_streaming(struct smsusb_device_t *dev)

 	for (i = 0; i < MAX_URBS; i++) {
 		usb_kill_urb(&dev->surbs[i].urb);
-		cancel_work_sync(&dev->surbs[i].wq);
+		if (dev->surbs[i].wq.func)
+			cancel_work_sync(&dev->surbs[i].wq);

 		if (dev->surbs[i].cb) {
 			smscore_putbuffer(dev->coredev, dev->surbs[i].cb);
@@ -533,7 +533,7 @@ static int tap_open(struct inode *inode, struct file *file)
 	q->sock.state = SS_CONNECTED;
 	q->sock.file = file;
 	q->sock.ops = &tap_socket_ops;
-	sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
+	sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
 	q->sk.sk_write_space = tap_sock_write_space;
 	q->sk.sk_destruct = tap_sock_destruct;
 	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
@@ -3449,7 +3449,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 	tfile->socket.file = file;
 	tfile->socket.ops = &tun_socket_ops;

-	sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
+	sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());

 	tfile->sk.sk_write_space = tun_sock_write_space;
 	tfile->sk.sk_sndbuf = INT_MAX;
@@ -294,7 +294,6 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
@@ -1417,9 +1416,10 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
 		return;
 	}
 	hba->clk_scaling.is_suspended = true;
+	hba->clk_scaling.window_start_t = 0;
 	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

-	__ufshcd_suspend_clkscaling(hba);
+	devfreq_suspend_device(hba->devfreq);
 }

 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
@@ -1464,6 +1464,13 @@ static int ufshcd_devfreq_target(struct device *dev,
 		return 0;
 	}

+	/* Skip scaling clock when clock scaling is suspended */
+	if (hba->clk_scaling.is_suspended) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		dev_warn(hba->dev, "clock scaling is suspended, skip");
+		return 0;
+	}
+
 	if (!hba->clk_scaling.active_reqs)
 		sched_clk_scaling_suspend_work = true;

@@ -1495,7 +1502,7 @@ static int ufshcd_devfreq_target(struct device *dev,
 		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

 out:
-	if (sched_clk_scaling_suspend_work)
+	if (sched_clk_scaling_suspend_work && !scale_up)
 		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

@@ -1601,16 +1608,6 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
 	dev_pm_opp_remove(hba->dev, clki->max_freq);
 }

-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
-{
-	unsigned long flags;
-
-	devfreq_suspend_device(hba->devfreq);
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->clk_scaling.window_start_t = 0;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
-
 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 {
 	unsigned long flags;
@@ -1623,11 +1620,12 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 	if (!hba->clk_scaling.is_suspended) {
 		suspend = true;
 		hba->clk_scaling.is_suspended = true;
+		hba->clk_scaling.window_start_t = 0;
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);

 	if (suspend)
-		__ufshcd_suspend_clkscaling(hba);
+		devfreq_suspend_device(hba->devfreq);
 }

 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
@@ -2748,6 +2748,13 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
 			port->sink_cap_done = true;
 			tcpm_set_state(port, ready_state(port), 0);
 			break;
+		/*
+		 * Some port partners do not support GET_STATUS; avoid soft
+		 * resetting the link to prevent redundant power re-negotiation.
+		 */
+		case GET_STATUS_SEND:
+			tcpm_set_state(port, ready_state(port), 0);
+			break;
 		case SRC_READY:
 		case SNK_READY:
 			if (port->vdm_state > VDM_STATE_READY) {
@@ -3248,23 +3255,12 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
 	return ret;
 }

-#define min_pps_apdo_current(x, y)	\
-	min(pdo_pps_apdo_max_current(x), pdo_pps_apdo_max_current(y))
-
 static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
 {
-	unsigned int i, j, max_mw = 0, max_mv = 0;
-	unsigned int min_src_mv, max_src_mv, src_ma, src_mw;
-	unsigned int min_snk_mv, max_snk_mv;
-	unsigned int max_op_mv;
-	u32 pdo, src, snk;
-	unsigned int src_pdo = 0, snk_pdo = 0;
+	unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
+	unsigned int src_pdo = 0;
+	u32 pdo, src;

 	/*
	 * Select the source PPS APDO providing the most power while staying
	 * within the board's limits. We skip the first PDO as this is always
	 * 5V 3A.
	 */
 	for (i = 1; i < port->nr_source_caps; ++i) {
 		pdo = port->source_caps[i];

@@ -3275,54 +3271,17 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
				continue;
			}

-			min_src_mv = pdo_pps_apdo_min_voltage(pdo);
-			max_src_mv = pdo_pps_apdo_max_voltage(pdo);
+			if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
+			    port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
+				continue;
+
 			src_ma = pdo_pps_apdo_max_current(pdo);
-			src_mw = (src_ma * max_src_mv) / 1000;
-
-			/*
-			 * Now search through the sink PDOs to find a matching
-			 * PPS APDO. Again skip the first sink PDO as this will
-			 * always be 5V 3A.
-			 */
-			for (j = 1; j < port->nr_snk_pdo; j++) {
-				pdo = port->snk_pdo[j];
-
-				switch (pdo_type(pdo)) {
-				case PDO_TYPE_APDO:
-					if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
-						tcpm_log(port,
-							 "Not PPS APDO (sink), ignoring");
-						continue;
-					}
-
-					min_snk_mv =
-						pdo_pps_apdo_min_voltage(pdo);
-					max_snk_mv =
-						pdo_pps_apdo_max_voltage(pdo);
-					break;
-				default:
-					tcpm_log(port,
-						 "Not APDO type (sink), ignoring");
-					continue;
-				}
-
-				if (min_src_mv <= max_snk_mv &&
-				    max_src_mv >= min_snk_mv) {
-					max_op_mv = min(max_src_mv, max_snk_mv);
-					src_mw = (max_op_mv * src_ma) / 1000;
-					/* Prefer higher voltages if available */
-					if ((src_mw == max_mw &&
-					     max_op_mv > max_mv) ||
-					    src_mw > max_mw) {
-						src_pdo = i;
-						snk_pdo = j;
-						max_mw = src_mw;
-						max_mv = max_op_mv;
-					}
-				}
-			}
+			max_op_ma = min(src_ma, port->pps_data.req_op_curr);
+			op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
+			if (op_mw > max_temp_mw) {
+				src_pdo = i;
+				max_temp_mw = op_mw;
+			}

			break;
		default:
			tcpm_log(port, "Not APDO type (source), ignoring");
@@ -3332,16 +3291,10 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)

 	if (src_pdo) {
 		src = port->source_caps[src_pdo];
-		snk = port->snk_pdo[snk_pdo];
-
-		port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src),
-						  pdo_pps_apdo_min_voltage(snk));
-		port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
-						  pdo_pps_apdo_max_voltage(snk));
-		port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
-		port->pps_data.req_out_volt = min(port->pps_data.req_max_volt,
-						  max(port->pps_data.req_min_volt,
-						      port->pps_data.req_out_volt));
+		port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
+		port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
+		port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
 		port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
						 port->pps_data.req_op_curr);
 	}
@@ -3459,32 +3412,16 @@ static int tcpm_pd_send_request(struct tcpm_port *port)
 static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
 {
 	unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
-	enum pd_pdo_type type;
 	unsigned int src_pdo_index;
-	u32 pdo;

 	src_pdo_index = tcpm_pd_select_pps_apdo(port);
 	if (!src_pdo_index)
 		return -EOPNOTSUPP;

-	pdo = port->source_caps[src_pdo_index];
-	type = pdo_type(pdo);
-
-	switch (type) {
-	case PDO_TYPE_APDO:
-		if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
-			tcpm_log(port, "Invalid APDO selected!");
-			return -EINVAL;
-		}
-		max_mv = port->pps_data.req_max_volt;
-		max_ma = port->pps_data.req_max_curr;
-		out_mv = port->pps_data.req_out_volt;
-		op_ma = port->pps_data.req_op_curr;
-		break;
-	default:
-		tcpm_log(port, "Invalid PDO selected!");
-		return -EINVAL;
-	}
+	max_mv = port->pps_data.req_max_volt;
+	max_ma = port->pps_data.req_max_curr;
+	out_mv = port->pps_data.req_out_volt;
+	op_ma = port->pps_data.req_op_curr;

 	flags = RDO_USB_COMM | RDO_NO_SUSPEND;

@@ -3923,6 +3860,29 @@ static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
 	}
 }

+static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
+{
+	switch (port->negotiated_rev) {
+	case PD_REV30:
+		break;
+	/*
+	 * 6.4.4.2.3 Structured VDM Version
+	 * 2.0 states "At this time, there is only one version (1.0) defined.
+	 * This field Shall be set to zero to indicate Version 1.0."
+	 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
+	 * To ensure that we follow the Power Delivery revision we are currently
+	 * operating on, downgrade the SVDM version to the highest one supported
+	 * by the Power Delivery revision.
+	 */
+	case PD_REV20:
+		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
+		break;
+	default:
+		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
+		break;
+	}
+}
+
 static void run_state_machine(struct tcpm_port *port)
 {
 	int ret;
@@ -4167,10 +4127,12 @@ static void run_state_machine(struct tcpm_port *port)
		 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
		 * port->explicit_contract to decide whether to send the command.
		 */
-		if (port->explicit_contract)
+		if (port->explicit_contract) {
+			tcpm_set_initial_svdm_version(port);
 			mod_send_discover_delayed_work(port, 0);
-		else
+		} else {
 			port->send_discover = false;
+		}

		/*
		 * 6.3.5
@@ -4312,7 +4274,9 @@ static void run_state_machine(struct tcpm_port *port)
 		if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
 			current_lim = PD_P_SNK_STDBY_MW / 5;
 		tcpm_set_current_limit(port, current_lim, 5000);
-		tcpm_set_charge(port, true);
+		/* Not sink vbus if operational current is 0mA */
+		tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));

 		if (!port->pd_supported)
 			tcpm_set_state(port, SNK_READY, 0);
 		else
@@ -4471,10 +4435,12 @@ static void run_state_machine(struct tcpm_port *port)
		 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
		 * port->explicit_contract.
		 */
-		if (port->explicit_contract)
+		if (port->explicit_contract) {
+			tcpm_set_initial_svdm_version(port);
 			mod_send_discover_delayed_work(port, 0);
-		else
+		} else {
 			port->send_discover = false;
+		}

 		power_supply_changed(port->psy);
 		break;
@@ -4601,7 +4567,8 @@ static void run_state_machine(struct tcpm_port *port)
 			tcpm_set_current_limit(port,
					       tcpm_get_current_limit(port),
					       5000);
-			tcpm_set_charge(port, true);
+			/* Not sink vbus if operational current is 0mA */
+			tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
 		}
 		if (port->ams == HARD_RESET)
 			tcpm_ams_finish(port);
@@ -5378,6 +5345,10 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
 		/* Do nothing, vbus drop expected */
 		break;

+	case SNK_HARD_RESET_WAIT_VBUS:
+		/* Do nothing, it's OK to receive vbus off events */
+		break;
+
 	default:
 		if (port->pwr_role == TYPEC_SINK && port->attached)
 			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
@@ -5429,6 +5400,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
 	case SNK_DEBOUNCED:
 		/* Do nothing, still waiting for VSAFE5V for connect */
 		break;
+	case SNK_HARD_RESET_WAIT_VBUS:
+		/* Do nothing, it's OK to receive vbus off events */
+		break;
 	default:
 		if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
 			tcpm_set_state(port, SNK_UNATTACHED, 0);
@@ -5945,12 +5919,6 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
 		goto port_unlock;
 	}

-	if (req_out_volt < port->pps_data.min_volt ||
-	    req_out_volt > port->pps_data.max_volt) {
-		ret = -EINVAL;
-		goto port_unlock;
-	}
-
 	target_mw = (port->current_limit * req_out_volt) / 1000;
 	if (target_mw < port->operating_snk_mw) {
 		ret = -EINVAL;
@@ -6479,11 +6447,7 @@ static int tcpm_psy_set_prop(struct power_supply *psy,
 		ret = tcpm_psy_set_online(port, val);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-		if (val->intval < port->pps_data.min_volt * 1000 ||
-		    val->intval > port->pps_data.max_volt * 1000)
-			ret = -EINVAL;
-		else
-			ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
+		ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_NOW:
 		if (val->intval > port->pps_data.max_curr * 1000)
@@ -48,9 +48,11 @@ source "drivers/virt/nitro_enclaves/Kconfig"

 source "drivers/virt/acrn/Kconfig"

-source "drivers/virt/gunyah/Kconfig"
-
 source "drivers/virt/coco/efi_secret/Kconfig"

 source "drivers/virt/coco/sev-guest/Kconfig"

+source "drivers/virt/gunyah/Kconfig"
+
+source "drivers/virt/geniezone/Kconfig"
 endif
drivers/virt/geniezone/Kconfig (new file, 16 lines)
@@ -0,0 +1,16 @@
# SPDX-License-Identifier: GPL-2.0-only

config MTK_GZVM
	tristate "GenieZone Hypervisor driver for guest VM operation"
	depends on ARM64
	help
	  This driver, gzvm, enables running guest VMs on the MTK GenieZone
	  hypervisor. It exports KVM-like interfaces for a VMM (e.g., crosvm)
	  to operate guest VMs on the GenieZone hypervisor.

	  The GenieZone hypervisor currently supports only MediaTek SoCs and
	  the arm64 architecture.

	  Select M if you want it built as a module (gzvm.ko).

	  If unsure, say N.
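For reference, enabling the driver as a module amounts to a one-line defconfig fragment:

# Example defconfig fragment selecting the GenieZone driver as gzvm.ko:
CONFIG_MTK_GZVM=m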
drivers/virt/geniezone/Makefile (new file, 11 lines)
@@ -0,0 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for the GenieZone driver. This file should be included from the
# arch Makefile so that only one .ko is generated.
#

GZVM_DIR ?= ../../../drivers/virt/geniezone

gzvm-y := $(GZVM_DIR)/gzvm_main.o $(GZVM_DIR)/gzvm_vm.o \
	  $(GZVM_DIR)/gzvm_vcpu.o $(GZVM_DIR)/gzvm_irqfd.o \
	  $(GZVM_DIR)/gzvm_ioeventfd.o
drivers/virt/geniezone/gzvm_common.h (new file, 12 lines)
@@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#ifndef __GZ_COMMON_H__
#define __GZ_COMMON_H__

int gzvm_irqchip_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,
			    u32 irq_type, u32 irq, bool level);

#endif /* __GZ_COMMON_H__ */
drivers/virt/geniezone/gzvm_ioeventfd.c (new file, 273 lines)
@@ -0,0 +1,273 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <linux/gzvm.h>
#include <linux/gzvm_drv.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/slab.h>

struct gzvm_ioevent {
	struct list_head list;
	__u64 addr;
	__u32 len;
	struct eventfd_ctx *evt_ctx;
	__u64 datamatch;
	bool wildcard;
};

/**
 * ioeventfd_check_collision() - Check for collisions; assumes gzvm->slots_lock
 *                               is held.
 * @gzvm: Pointer to gzvm.
 * @p: Pointer to gzvm_ioevent.
 *
 * Return:
 * * true - collision found
 * * false - no collision
 */
static bool ioeventfd_check_collision(struct gzvm *gzvm, struct gzvm_ioevent *p)
{
	struct gzvm_ioevent *_p;

	list_for_each_entry(_p, &gzvm->ioevents, list)
		if (_p->addr == p->addr &&
		    (!_p->len || !p->len ||
		     (_p->len == p->len &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static void gzvm_ioevent_release(struct gzvm_ioevent *p)
{
	eventfd_ctx_put(p->evt_ctx);
	list_del(&p->list);
	kfree(p);
}

static bool gzvm_ioevent_in_range(struct gzvm_ioevent *p, __u64 addr, int len,
				  const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->len)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->len)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	WARN_ON_ONCE(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

static int gzvm_deassign_ioeventfd(struct gzvm *gzvm,
				   struct gzvm_ioeventfd *args)
{
	struct gzvm_ioevent *p, *tmp;
	struct eventfd_ctx *evt_ctx;
	int ret = -ENOENT;
	bool wildcard;

	evt_ctx = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(evt_ctx))
		return PTR_ERR(evt_ctx);

	wildcard = !(args->flags & GZVM_IOEVENTFD_FLAG_DATAMATCH);

	mutex_lock(&gzvm->lock);

	list_for_each_entry_safe(p, tmp, &gzvm->ioevents, list) {
		if (p->evt_ctx != evt_ctx ||
		    p->addr != args->addr ||
		    p->len != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		gzvm_ioevent_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&gzvm->lock);

	/* drop the reference taken at the top of this function */
	eventfd_ctx_put(evt_ctx);

	return ret;
}

static int gzvm_assign_ioeventfd(struct gzvm *gzvm, struct gzvm_ioeventfd *args)
{
	struct eventfd_ctx *evt_ctx;
	struct gzvm_ioevent *evt;
	int ret;

	evt_ctx = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(evt_ctx))
		return PTR_ERR(evt_ctx);

	evt = kmalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		/* don't leak the eventfd reference on allocation failure */
		ret = -ENOMEM;
		goto err_free;
	}
	*evt = (struct gzvm_ioevent) {
		.addr = args->addr,
		.len = args->len,
		.evt_ctx = evt_ctx,
	};
	if (args->flags & GZVM_IOEVENTFD_FLAG_DATAMATCH) {
		evt->datamatch = args->datamatch;
		evt->wildcard = false;
	} else {
		evt->wildcard = true;
	}

	if (ioeventfd_check_collision(gzvm, evt)) {
		ret = -EEXIST;
		goto err_free;
	}

	mutex_lock(&gzvm->lock);
	list_add_tail(&evt->list, &gzvm->ioevents);
	mutex_unlock(&gzvm->lock);

	return 0;

err_free:
	kfree(evt);	/* kfree(NULL) is a no-op on the allocation-failure path */
	eventfd_ctx_put(evt_ctx);
	return ret;
}

/**
 * gzvm_ioeventfd_check_valid() - Check whether the user arguments are valid.
 * @args: Pointer to gzvm_ioeventfd.
 *
 * Return:
 * * true if the user arguments are valid.
 * * false if the user arguments are invalid.
 */
static bool gzvm_ioeventfd_check_valid(struct gzvm_ioeventfd *args)
{
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return false;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return false;

	/* check for extra flags that we don't understand */
	if (args->flags & ~GZVM_IOEVENTFD_VALID_FLAG_MASK)
		return false;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & GZVM_IOEVENTFD_FLAG_DATAMATCH))
		return false;

	/* gzvm does not support pio bus ioeventfd */
	if (args->flags & GZVM_IOEVENTFD_FLAG_PIO)
		return false;

	return true;
}

/**
 * gzvm_ioeventfd() - Register an ioevent in the ioevent list.
 * @gzvm: Pointer to gzvm.
 * @args: Pointer to gzvm_ioeventfd.
 *
 * Return:
 * * 0 - Success.
 * * Negative - Failure.
 */
int gzvm_ioeventfd(struct gzvm *gzvm, struct gzvm_ioeventfd *args)
{
	if (!gzvm_ioeventfd_check_valid(args))
		return -EINVAL;

	if (args->flags & GZVM_IOEVENTFD_FLAG_DEASSIGN)
		return gzvm_deassign_ioeventfd(gzvm, args);
	return gzvm_assign_ioeventfd(gzvm, args);
}

/**
 * gzvm_ioevent_write() - Traverse this VM's registered ioeventfds and signal
 *                        the one matching this MMIO write, if any.
 * @vcpu: Pointer to vcpu.
 * @addr: mmio address.
 * @len: mmio size.
 * @val: Pointer to the written value.
 *
 * Return:
 * * true if this io was already sent to an ioeventfd's listener.
 * * false if no ioeventfd registering this mmio write was found.
 */
bool gzvm_ioevent_write(struct gzvm_vcpu *vcpu, __u64 addr, int len,
			const void *val)
{
	struct gzvm_ioevent *e;

	list_for_each_entry(e, &vcpu->gzvm->ioevents, list) {
		if (gzvm_ioevent_in_range(e, addr, len, val)) {
			eventfd_signal(e->evt_ctx, 1);
			return true;
		}
	}
	return false;
}

int gzvm_init_ioeventfd(struct gzvm *gzvm)
{
	INIT_LIST_HEAD(&gzvm->ioevents);

	return 0;
}
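A sketch of the argument block a VMM would hand to gzvm_ioeventfd() to register a doorbell that fires only when the guest writes a specific value. The field names come from the code above; the eventfd variable, the MMIO address, and the ioctl plumbing that delivers the struct into the kernel are assumptions for illustration:

/* Hypothetical registration: signal doorbell_eventfd when the guest
 * performs a 4-byte write of 0x1 to the given MMIO address.
 */
struct gzvm_ioeventfd args = {
	.fd        = doorbell_eventfd,	/* eventfd to signal */
	.addr      = 0x10013000,	/* guest MMIO address (made up) */
	.len       = 4,			/* must be 0, 1, 2, 4, or 8 */
	.datamatch = 0x1,
	.flags     = GZVM_IOEVENTFD_FLAG_DATAMATCH,
};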
drivers/virt/geniezone/gzvm_irqfd.c (new file, 566 lines; truncated below)
@@ -0,0 +1,566 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <linux/eventfd.h>
#include <linux/syscalls.h>
#include <linux/gzvm_drv.h>
#include "gzvm_common.h"

struct gzvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned int gsi;
	void (*irq_acked)(struct gzvm_irq_ack_notifier *ian);
};

/**
 * struct gzvm_kernel_irqfd_resampler - irqfd resampler descriptor.
 * @gzvm: Pointer to gzvm.
 * @list: List of resampling struct _irqfd objects sharing this gsi.
 *        RCU list modified under gzvm->irqfds.resampler_lock.
 * @notifier: gzvm irq ack notifier.
 * @link: Entry in the list gzvm->irqfd.resampler_list.
 *        Used for sharing resamplers among irqfds on the same gsi.
 *        Accessed and modified under gzvm->irqfds.resampler_lock.
 *
 * Resampling irqfds are a special variety of irqfds used to emulate
 * level triggered interrupts. The interrupt is asserted on eventfd
 * trigger. On acknowledgment through the irq ack notifier, the
 * interrupt is de-asserted and userspace is notified through the
 * resamplefd. All resamplers on the same gsi are de-asserted
 * together, so we don't need to track the state of each individual
 * user. We can also therefore share the same irq source ID.
 */
struct gzvm_kernel_irqfd_resampler {
	struct gzvm *gzvm;

	struct list_head list;
	struct gzvm_irq_ack_notifier notifier;

	struct list_head link;
};

/**
 * struct gzvm_kernel_irqfd: gzvm kernel irqfd descriptor.
 * @gzvm: Pointer to struct gzvm.
 * @wait: Wait queue entry.
 * @gsi: Used for level IRQ fast-path.
 * @resampler: The resampler used by this irqfd (resampler-only).
 * @resamplefd: Eventfd notified on resample (resampler-only).
 * @resampler_link: Entry in the list of irqfds for a resampler (resampler-only).
 * @eventfd: Used for setup/shutdown.
 * @list: struct list_head.
 * @pt: struct poll_table_struct.
 * @shutdown: struct work_struct.
 */
struct gzvm_kernel_irqfd {
	struct gzvm *gzvm;
	wait_queue_entry_t wait;

	int gsi;

	struct gzvm_kernel_irqfd_resampler *resampler;

	struct eventfd_ctx *resamplefd;

	struct list_head resampler_link;

	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct work_struct shutdown;
};

static struct workqueue_struct *irqfd_cleanup_wq;

/**
 * irqfd_set_spi() - Have the irqfd inject a virtual interrupt.
 * @gzvm: Pointer to gzvm.
 * @irq_source_id: irq source id.
 * @irq: This is the spi interrupt number (starts from 0 instead of 32).
 * @level: irq trigger level.
 * @line_status: irq status.
 */
static void irqfd_set_spi(struct gzvm *gzvm, int irq_source_id, u32 irq,
			  int level, bool line_status)
{
	if (level)
		gzvm_irqchip_inject_irq(gzvm, irq_source_id, 0, irq, level);
}

/**
 * irqfd_resampler_ack() - Notify all of the resampler irqfds using this GSI
 *                         when the IRQ is de-asserted once.
 * @ian: Pointer to gzvm_irq_ack_notifier.
 *
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI. We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void irqfd_resampler_ack(struct gzvm_irq_ack_notifier *ian)
{
	struct gzvm_kernel_irqfd_resampler *resampler;
	struct gzvm *gzvm;
	struct gzvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(ian,
				 struct gzvm_kernel_irqfd_resampler, notifier);
	gzvm = resampler->gzvm;

	irqfd_set_spi(gzvm, GZVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		      resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&gzvm->irq_srcu);

	list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
				 srcu_read_lock_held(&gzvm->irq_srcu)) {
		eventfd_signal(irqfd->resamplefd, 1);
	}

	srcu_read_unlock(&gzvm->irq_srcu, idx);
}

static void gzvm_register_irq_ack_notifier(struct gzvm *gzvm,
					   struct gzvm_irq_ack_notifier *ian)
{
	mutex_lock(&gzvm->irq_lock);
	hlist_add_head_rcu(&ian->link, &gzvm->irq_ack_notifier_list);
	mutex_unlock(&gzvm->irq_lock);
}

static void gzvm_unregister_irq_ack_notifier(struct gzvm *gzvm,
					     struct gzvm_irq_ack_notifier *ian)
{
	mutex_lock(&gzvm->irq_lock);
	hlist_del_init_rcu(&ian->link);
	mutex_unlock(&gzvm->irq_lock);
	synchronize_srcu(&gzvm->irq_srcu);
}

static void irqfd_resampler_shutdown(struct gzvm_kernel_irqfd *irqfd)
{
	struct gzvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct gzvm *gzvm = resampler->gzvm;

	mutex_lock(&gzvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&gzvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		gzvm_unregister_irq_ack_notifier(gzvm, &resampler->notifier);
		irqfd_set_spi(gzvm, GZVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			      resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&gzvm->irqfds.resampler_lock);
}

/**
 * irqfd_shutdown() - Race-free decouple logic (ordering is critical).
 * @work: Pointer to work_struct.
 */
static void irqfd_shutdown(struct work_struct *work)
{
	struct gzvm_kernel_irqfd *irqfd =
		container_of(work, struct gzvm_kernel_irqfd, shutdown);
	struct gzvm *gzvm = irqfd->gzvm;
	u64 cnt;

	/* Make sure irqfd has been initialized in assign path. */
	synchronize_srcu(&gzvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/**
 * irqfd_is_active() - Assumes gzvm->irqfds.lock is held.
 * @irqfd: Pointer to gzvm_kernel_irqfd.
 *
 * Return:
 * * true - irqfd is active.
 */
static bool irqfd_is_active(struct gzvm_kernel_irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/**
 * irqfd_deactivate() - Mark the irqfd as inactive and schedule it for removal.
 *                      Assumes gzvm->irqfds.lock is held.
 * @irqfd: Pointer to gzvm_kernel_irqfd.
 */
static void irqfd_deactivate(struct gzvm_kernel_irqfd *irqfd)
{
	if (!irqfd_is_active(irqfd))
		return;

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/**
 * irqfd_wakeup() - Callback of the irqfd wait queue; woken by a write to the
 *                  irqfd to perform virtual interrupt injection.
 * @wait: Pointer to wait_queue_entry_t.
 * @mode: Unused.
 * @sync: Unused.
 * @key: Carries flags about the Epoll events.
 *
 * Return:
 * * 0 - Success
 */
static int irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync,
			void *key)
{
	struct gzvm_kernel_irqfd *irqfd =
		container_of(wait, struct gzvm_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);
	struct gzvm *gzvm = irqfd->gzvm;

	if (flags & EPOLLIN) {
		u64 cnt;

		eventfd_ctx_do_read(irqfd->eventfd, &cnt);
		/* gzvm's irq injection is not blocked, don't need workq */
		irqfd_set_spi(gzvm, GZVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi,
			      1, false);
	}

	if (flags & EPOLLHUP) {
		/* The eventfd is closing, detach from GZVM */
		unsigned long iflags;

		spin_lock_irqsave(&gzvm->irqfds.lock, iflags);

		/*
		 * Check again whether someone deactivated the irqfd before
		 * we could acquire the irqfds.lock.
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&gzvm->irqfds.lock, iflags);
	}

	return 0;
}

static void irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
				    poll_table *pt)
{
	struct gzvm_kernel_irqfd *irqfd =
		container_of(pt, struct gzvm_kernel_irqfd, pt);
	add_wait_queue_priority(wqh, &irqfd->wait);
}

static int gzvm_irqfd_assign(struct gzvm *gzvm, struct gzvm_irqfd *args)
{
	struct gzvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	__poll_t events;
	int idx;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
	if (!irqfd)
		return -ENOMEM;

	irqfd->gzvm = gzvm;
	irqfd->gsi = args->gsi;
	irqfd->resampler = NULL;

	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & GZVM_IRQFD_FLAG_RESAMPLE) {
		struct gzvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&gzvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &gzvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler),
					    GFP_KERNEL_ACCOUNT);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&gzvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->gzvm = gzvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &gzvm->irqfds.resampler_list);
			gzvm_register_irq_ack_notifier(gzvm,
						       &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&gzvm->irq_srcu);

		mutex_unlock(&gzvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&gzvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &gzvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		pr_err("already used: gsi=%d fd=%d\n", args->gsi, args->fd);
		ret = -EBUSY;
spin_unlock_irq(&gzvm->irqfds.lock);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
idx = srcu_read_lock(&gzvm->irq_srcu);
|
||||
|
||||
list_add_tail(&irqfd->list, &gzvm->irqfds.items);
|
||||
|
||||
spin_unlock_irq(&gzvm->irqfds.lock);
|
||||
|
||||
/*
|
||||
* Check if there was an event already pending on the eventfd
|
||||
* before we registered, and trigger it as if we didn't miss it.
|
||||
*/
|
||||
events = vfs_poll(f.file, &irqfd->pt);
|
||||
|
||||
/* In case there is already a pending event */
|
||||
if (events & EPOLLIN)
|
||||
irqfd_set_spi(gzvm, GZVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
|
||||
irqfd->gsi, 1, false);
|
||||
|
||||
srcu_read_unlock(&gzvm->irq_srcu, idx);
|
||||
|
||||
/*
|
||||
* do not drop the file until the irqfd is fully initialized, otherwise
|
||||
* we might race against the EPOLLHUP
|
||||
*/
|
||||
fdput(f);
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
if (irqfd->resampler)
|
||||
irqfd_resampler_shutdown(irqfd);
|
||||
|
||||
if (resamplefd && !IS_ERR(resamplefd))
|
||||
eventfd_ctx_put(resamplefd);
|
||||
|
||||
if (eventfd && !IS_ERR(eventfd))
|
||||
eventfd_ctx_put(eventfd);
|
||||
|
||||
fdput(f);
|
||||
|
||||
out:
|
||||
kfree(irqfd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void gzvm_notify_acked_gsi(struct gzvm *gzvm, int gsi)
|
||||
{
|
||||
struct gzvm_irq_ack_notifier *gian;
|
||||
|
||||
hlist_for_each_entry_srcu(gian, &gzvm->irq_ack_notifier_list,
|
||||
link, srcu_read_lock_held(&gzvm->irq_srcu))
|
||||
if (gian->gsi == gsi)
|
||||
gian->irq_acked(gian);
|
||||
}
|
||||
|
||||
void gzvm_notify_acked_irq(struct gzvm *gzvm, unsigned int gsi)
|
||||
{
|
||||
int idx;
|
||||
|
||||
idx = srcu_read_lock(&gzvm->irq_srcu);
|
||||
gzvm_notify_acked_gsi(gzvm, gsi);
|
||||
srcu_read_unlock(&gzvm->irq_srcu, idx);
|
||||
}
|
||||
|
||||
/**
|
||||
* gzvm_irqfd_deassign() - Shutdown any irqfd's that match fd+gsi.
|
||||
* @gzvm: Pointer to gzvm.
|
||||
* @args: Pointer to gzvm_irqfd.
|
||||
*
|
||||
* Return:
|
||||
* * 0 - Success.
|
||||
* * Negative value - Failure.
|
||||
*/
|
||||
static int gzvm_irqfd_deassign(struct gzvm *gzvm, struct gzvm_irqfd *args)
|
||||
{
|
||||
struct gzvm_kernel_irqfd *irqfd, *tmp;
|
||||
struct eventfd_ctx *eventfd;
|
||||
|
||||
eventfd = eventfd_ctx_fdget(args->fd);
|
||||
if (IS_ERR(eventfd))
|
||||
return PTR_ERR(eventfd);
|
||||
|
||||
spin_lock_irq(&gzvm->irqfds.lock);
|
||||
|
||||
list_for_each_entry_safe(irqfd, tmp, &gzvm->irqfds.items, list) {
|
||||
if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi)
|
||||
irqfd_deactivate(irqfd);
|
||||
}
|
||||
|
||||
spin_unlock_irq(&gzvm->irqfds.lock);
|
||||
eventfd_ctx_put(eventfd);
|
||||
|
||||
/*
|
||||
* Block until we know all outstanding shutdown jobs have completed
|
||||
* so that we guarantee there will not be any more interrupts on this
|
||||
* gsi once this deassign function returns.
|
||||
*/
|
||||
flush_workqueue(irqfd_cleanup_wq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int gzvm_irqfd(struct gzvm *gzvm, struct gzvm_irqfd *args)
|
||||
{
|
||||
for (int i = 0; i < ARRAY_SIZE(args->pad); i++) {
|
||||
if (args->pad[i])
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (args->flags &
|
||||
~(GZVM_IRQFD_FLAG_DEASSIGN | GZVM_IRQFD_FLAG_RESAMPLE))
|
||||
return -EINVAL;
|
||||
|
||||
if (args->flags & GZVM_IRQFD_FLAG_DEASSIGN)
|
||||
return gzvm_irqfd_deassign(gzvm, args);
|
||||
|
||||
return gzvm_irqfd_assign(gzvm, args);
|
||||
}
|
||||
|
||||
/**
|
||||
* gzvm_vm_irqfd_init() - Initialize irqfd data structure per VM
|
||||
*
|
||||
* @gzvm: Pointer to struct gzvm.
|
||||
*
|
||||
* Return:
|
||||
* * 0 - Success.
|
||||
* * Negative - Failure.
|
||||
*/
|
||||
int gzvm_vm_irqfd_init(struct gzvm *gzvm)
|
||||
{
|
||||
mutex_init(&gzvm->irq_lock);
|
||||
|
||||
spin_lock_init(&gzvm->irqfds.lock);
|
||||
INIT_LIST_HEAD(&gzvm->irqfds.items);
|
||||
INIT_LIST_HEAD(&gzvm->irqfds.resampler_list);
|
||||
if (init_srcu_struct(&gzvm->irq_srcu))
|
||||
return -EINVAL;
|
||||
INIT_HLIST_HEAD(&gzvm->irq_ack_notifier_list);
|
||||
mutex_init(&gzvm->irqfds.resampler_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gzvm_vm_irqfd_release() - This function is called as the gzvm VM fd is being
|
||||
* released. Shutdown all irqfds that still remain open.
|
||||
* @gzvm: Pointer to gzvm.
|
||||
*/
|
||||
void gzvm_vm_irqfd_release(struct gzvm *gzvm)
|
||||
{
|
||||
struct gzvm_kernel_irqfd *irqfd, *tmp;
|
||||
|
||||
spin_lock_irq(&gzvm->irqfds.lock);
|
||||
|
||||
list_for_each_entry_safe(irqfd, tmp, &gzvm->irqfds.items, list)
|
||||
irqfd_deactivate(irqfd);
|
||||
|
||||
spin_unlock_irq(&gzvm->irqfds.lock);
|
||||
|
||||
/*
|
||||
* Block until we know all outstanding shutdown jobs have completed.
|
||||
*/
|
||||
flush_workqueue(irqfd_cleanup_wq);
|
||||
}
|
||||
|
||||
/**
|
||||
* gzvm_drv_irqfd_init() - Erase flushing work items when a VM exits.
|
||||
*
|
||||
* Return:
|
||||
* * 0 - Success.
|
||||
* * Negative - Failure.
|
||||
*
|
||||
* Create a host-wide workqueue for issuing deferred shutdown requests
|
||||
* aggregated from all vm* instances. We need our own isolated
|
||||
* queue to ease flushing work items when a VM exits.
|
||||
*/
|
||||
int gzvm_drv_irqfd_init(void)
|
||||
{
|
||||
irqfd_cleanup_wq = alloc_workqueue("gzvm-irqfd-cleanup", 0, 0);
|
||||
if (!irqfd_cleanup_wq)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void gzvm_drv_irqfd_exit(void)
|
||||
{
|
||||
destroy_workqueue(irqfd_cleanup_wq);
|
||||
}
|
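As a usage illustration of the assign path above, here is a minimal userspace sketch that wires an eventfd into a VM through the GZVM_IRQFD ioctl. The gzvm_irqfd field names (fd, gsi, flags, pad) are read off the assign code; treat the exact uapi layout as an assumption, not the authoritative definition.

/* Minimal sketch, assuming the gzvm_irqfd fields used by the assign path. */
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/gzvm.h>

int wire_irqfd(int vm_fd, uint32_t gsi)
{
        struct gzvm_irqfd args;
        int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);

        if (efd < 0)
                return -1;
        memset(&args, 0, sizeof(args));         /* args.pad[] must be zero */
        args.fd = efd;
        args.gsi = gsi;
        args.flags = 0;                         /* no GZVM_IRQFD_FLAG_RESAMPLE */
        if (ioctl(vm_fd, GZVM_IRQFD, &args) < 0)
                return -1;
        /* writing a counter value to efd now injects interrupt 'gsi' */
        return efd;
}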
drivers/virt/geniezone/gzvm_main.c (new file, 143 lines)
@@ -0,0 +1,143 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gzvm_drv.h>

/**
 * gzvm_err_to_errno() - Convert geniezone return value to standard errno
 *
 * @err: Return value from geniezone function return
 *
 * Return: Standard errno
 */
int gzvm_err_to_errno(unsigned long err)
{
        int gz_err = (int)err;

        switch (gz_err) {
        case 0:
                return 0;
        case ERR_NO_MEMORY:
                return -ENOMEM;
        case ERR_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case ERR_NOT_IMPLEMENTED:
                return -EOPNOTSUPP;
        case ERR_FAULT:
                return -EFAULT;
        default:
                break;
        }

        return -EINVAL;
}

/**
 * gzvm_dev_ioctl_check_extension() - Check if the given capability is
 *                                    supported or not
 *
 * @gzvm: Pointer to struct gzvm
 * @args: Pointer in u64 from userspace
 *
 * Return:
 * * 0 - Supported, no error
 * * -EOPNOTSUPP - Unsupported
 * * -EFAULT - Failed to get data from userspace
 */
long gzvm_dev_ioctl_check_extension(struct gzvm *gzvm, unsigned long args)
{
        __u64 cap;
        void __user *argp = (void __user *)args;

        if (copy_from_user(&cap, argp, sizeof(uint64_t)))
                return -EFAULT;
        return gzvm_arch_check_extension(gzvm, cap, argp);
}

static long gzvm_dev_ioctl(struct file *filp, unsigned int cmd,
                           unsigned long user_args)
{
        long ret = -ENOTTY;

        switch (cmd) {
        case GZVM_CREATE_VM:
                ret = gzvm_dev_ioctl_create_vm(user_args);
                break;
        case GZVM_CHECK_EXTENSION:
                if (!user_args)
                        return -EINVAL;
                ret = gzvm_dev_ioctl_check_extension(NULL, user_args);
                break;
        default:
                ret = -ENOTTY;
        }

        return ret;
}

static const struct file_operations gzvm_chardev_ops = {
        .unlocked_ioctl = gzvm_dev_ioctl,
        .llseek = noop_llseek,
};

static struct miscdevice gzvm_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = KBUILD_MODNAME,
        .fops = &gzvm_chardev_ops,
};

static int gzvm_drv_probe(struct platform_device *pdev)
{
        int ret;

        if (gzvm_arch_probe() != 0) {
                dev_err(&pdev->dev, "No available conduit found\n");
                return -ENODEV;
        }

        ret = misc_register(&gzvm_dev);
        if (ret)
                return ret;

        return gzvm_drv_irqfd_init();
}

static int gzvm_drv_remove(struct platform_device *pdev)
{
        gzvm_drv_irqfd_exit();
        gzvm_destroy_all_vms();
        misc_deregister(&gzvm_dev);
        return 0;
}

static const struct of_device_id gzvm_of_match[] = {
        { .compatible = "mediatek,geniezone-hyp", },
        { /* sentinel */ },
};

static struct platform_driver gzvm_driver = {
        .probe = gzvm_drv_probe,
        .remove = gzvm_drv_remove,
        .driver = {
                .name = KBUILD_MODNAME,
                .owner = THIS_MODULE,
                .of_match_table = gzvm_of_match,
        },
};

module_platform_driver(gzvm_driver);

MODULE_DEVICE_TABLE(of, gzvm_of_match);
MODULE_AUTHOR("MediaTek");
MODULE_DESCRIPTION("GenieZone interface for VMM");
MODULE_LICENSE("GPL");
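A companion sketch of the device-node flow handled above: open /dev/gzvm, probe a capability, then ask for a VM fd. GZVM_CAP_ARM_VM_IPA_SIZE is taken from the uapi header's comment later in this merge; whether vm_type 0 denotes a Linux VM is an assumption.

/* Minimal sketch of the /dev/gzvm ioctl flow; vm_type 0 is assumed. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/gzvm.h>

int create_vm(void)
{
        uint64_t cap = GZVM_CAP_ARM_VM_IPA_SIZE;        /* capability to probe */
        int dev_fd = open("/dev/gzvm", O_RDWR | O_CLOEXEC);

        if (dev_fd < 0)
                return -1;
        if (ioctl(dev_fd, GZVM_CHECK_EXTENSION, &cap))
                return -1;      /* -EOPNOTSUPP: capability unsupported */
        return ioctl(dev_fd, GZVM_CREATE_VM, 0);        /* returns a VM fd */
}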
drivers/virt/geniezone/gzvm_vcpu.c (new file, 276 lines)
@@ -0,0 +1,276 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <asm/sysreg.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gzvm_drv.h>

/* maximum size needed for holding an integer */
#define ITOA_MAX_LEN 12

static long gzvm_vcpu_update_one_reg(struct gzvm_vcpu *vcpu,
                                     void __user *argp,
                                     bool is_write)
{
        struct gzvm_one_reg reg;
        void __user *reg_addr;
        u64 data = 0;
        u64 reg_size;
        long ret;

        if (copy_from_user(&reg, argp, sizeof(reg)))
                return -EFAULT;

        reg_addr = (void __user *)reg.addr;
        reg_size = (reg.id & GZVM_REG_SIZE_MASK) >> GZVM_REG_SIZE_SHIFT;
        reg_size = BIT(reg_size);

        if (is_write) {
                if (copy_from_user(&data, reg_addr, reg_size))
                        return -EFAULT;
        }

        ret = gzvm_arch_vcpu_update_one_reg(vcpu, reg.id, is_write, &data);

        if (ret)
                return ret;

        if (!is_write) {
                if (copy_to_user(reg_addr, &data, reg_size))
                        return -EFAULT;
        }

        return 0;
}

/**
 * gzvm_vcpu_handle_mmio() - Handle mmio in kernel space.
 * @vcpu: Pointer to vcpu.
 *
 * Return:
 * * true - This mmio exit has been processed.
 * * false - This mmio exit has not been processed, requires userspace.
 */
static bool gzvm_vcpu_handle_mmio(struct gzvm_vcpu *vcpu)
{
        __u64 addr;
        __u32 len;
        const void *val_ptr;

        /* So far, we don't have an in-kernel mmio read handler */
        if (!vcpu->run->mmio.is_write)
                return false;
        addr = vcpu->run->mmio.phys_addr;
        len = vcpu->run->mmio.size;
        val_ptr = &vcpu->run->mmio.data;

        return gzvm_ioevent_write(vcpu, addr, len, val_ptr);
}

/**
 * gzvm_vcpu_run() - Handle vcpu run ioctl, entry point to guest and exit
 *                   point from guest
 * @vcpu: Pointer to struct gzvm_vcpu
 * @argp: Pointer to struct gzvm_vcpu_run in userspace
 *
 * Return:
 * * 0 - Success.
 * * Negative - Failure.
 */
static long gzvm_vcpu_run(struct gzvm_vcpu *vcpu, void __user *argp)
{
        bool need_userspace = false;
        u64 exit_reason = 0;

        if (copy_from_user(vcpu->run, argp, sizeof(struct gzvm_vcpu_run)))
                return -EFAULT;

        for (int i = 0; i < ARRAY_SIZE(vcpu->run->padding1); i++) {
                if (vcpu->run->padding1[i])
                        return -EINVAL;
        }

        if (vcpu->run->immediate_exit == 1)
                return -EINTR;

        while (!need_userspace && !signal_pending(current)) {
                gzvm_arch_vcpu_run(vcpu, &exit_reason);

                switch (exit_reason) {
                case GZVM_EXIT_MMIO:
                        if (!gzvm_vcpu_handle_mmio(vcpu))
                                need_userspace = true;
                        break;
                /*
                 * It's geniezone's responsibility to fill the corresponding
                 * data structure.
                 */
                case GZVM_EXIT_HYPERCALL:
                        fallthrough;
                case GZVM_EXIT_EXCEPTION:
                        fallthrough;
                case GZVM_EXIT_DEBUG:
                        fallthrough;
                case GZVM_EXIT_FAIL_ENTRY:
                        fallthrough;
                case GZVM_EXIT_INTERNAL_ERROR:
                        fallthrough;
                case GZVM_EXIT_SYSTEM_EVENT:
                        fallthrough;
                case GZVM_EXIT_SHUTDOWN:
                        need_userspace = true;
                        break;
                case GZVM_EXIT_IRQ:
                        fallthrough;
                case GZVM_EXIT_GZ:
                        break;
                case GZVM_EXIT_UNKNOWN:
                        fallthrough;
                default:
                        pr_err("vcpu unknown exit\n");
                        need_userspace = true;
                        goto out;
                }
        }

out:
        if (copy_to_user(argp, vcpu->run, sizeof(struct gzvm_vcpu_run)))
                return -EFAULT;
        if (signal_pending(current)) {
                /* invoke hvc to inform gz of this exit */
                gzvm_arch_inform_exit(vcpu->gzvm->vm_id);
                return -ERESTARTSYS;
        }
        return 0;
}

static long gzvm_vcpu_ioctl(struct file *filp, unsigned int ioctl,
                            unsigned long arg)
{
        int ret = -ENOTTY;
        void __user *argp = (void __user *)arg;
        struct gzvm_vcpu *vcpu = filp->private_data;

        switch (ioctl) {
        case GZVM_RUN:
                ret = gzvm_vcpu_run(vcpu, argp);
                break;
        case GZVM_GET_ONE_REG:
                /* !is_write */
                ret = gzvm_vcpu_update_one_reg(vcpu, argp, false);
                break;
        case GZVM_SET_ONE_REG:
                /* is_write */
                ret = gzvm_vcpu_update_one_reg(vcpu, argp, true);
                break;
        default:
                break;
        }

        return ret;
}

static const struct file_operations gzvm_vcpu_fops = {
        .unlocked_ioctl = gzvm_vcpu_ioctl,
        .llseek = noop_llseek,
};

/* caller must hold the vm lock */
static void gzvm_destroy_vcpu(struct gzvm_vcpu *vcpu)
{
        if (!vcpu)
                return;

        gzvm_arch_destroy_vcpu(vcpu->gzvm->vm_id, vcpu->vcpuid);
        /* clean guest's data */
        memset(vcpu->run, 0, GZVM_VCPU_RUN_MAP_SIZE);
        free_pages_exact(vcpu->run, GZVM_VCPU_RUN_MAP_SIZE);
        kfree(vcpu);
}

/**
 * gzvm_destroy_vcpus() - Destroy all vcpus, caller has to hold the vm lock
 *
 * @gzvm: vm struct that owns the vcpus
 */
void gzvm_destroy_vcpus(struct gzvm *gzvm)
{
        int i;

        for (i = 0; i < GZVM_MAX_VCPUS; i++) {
                gzvm_destroy_vcpu(gzvm->vcpus[i]);
                gzvm->vcpus[i] = NULL;
        }
}

/* create_vcpu_fd() - Allocates an inode for the vcpu. */
static int create_vcpu_fd(struct gzvm_vcpu *vcpu)
{
        /* sizeof("gzvm-vcpu:") + max(strlen(itoa(vcpuid))) + null */
        char name[10 + ITOA_MAX_LEN + 1];

        snprintf(name, sizeof(name), "gzvm-vcpu:%d", vcpu->vcpuid);
        return anon_inode_getfd(name, &gzvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}

/**
 * gzvm_vm_ioctl_create_vcpu() - for GZVM_CREATE_VCPU
 * @gzvm: Pointer to struct gzvm
 * @cpuid: equals arg
 *
 * Return: Fd of vcpu, negative errno if error occurs
 */
int gzvm_vm_ioctl_create_vcpu(struct gzvm *gzvm, u32 cpuid)
{
        struct gzvm_vcpu *vcpu;
        int ret;

        if (cpuid >= GZVM_MAX_VCPUS)
                return -EINVAL;

        vcpu = kzalloc(sizeof(*vcpu), GFP_KERNEL);
        if (!vcpu)
                return -ENOMEM;

        /*
         * Allocate 2 pages for data sharing between driver and gz hypervisor:
         *
         * |-    page 0           -|-   page 1         -|
         * |gzvm_vcpu_run|.........|hwstate|............|
         */
        vcpu->run = alloc_pages_exact(GZVM_VCPU_RUN_MAP_SIZE,
                                      GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        if (!vcpu->run) {
                ret = -ENOMEM;
                goto free_vcpu;
        }
        vcpu->hwstate = (void *)vcpu->run + PAGE_SIZE;
        vcpu->vcpuid = cpuid;
        vcpu->gzvm = gzvm;
        mutex_init(&vcpu->lock);

        ret = gzvm_arch_create_vcpu(gzvm->vm_id, vcpu->vcpuid, vcpu->run);
        if (ret < 0)
                goto free_vcpu_run;

        ret = create_vcpu_fd(vcpu);
        if (ret < 0)
                goto free_vcpu_run;
        gzvm->vcpus[cpuid] = vcpu;

        return ret;

free_vcpu_run:
        free_pages_exact(vcpu->run, GZVM_VCPU_RUN_MAP_SIZE);
free_vcpu:
        kfree(vcpu);
        return ret;
}
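To show how the exit loop above surfaces to its caller, here is a sketch of the matching userspace run loop; the exit_reason field name on struct gzvm_vcpu_run is assumed from context rather than quoted from the uapi header.

/* Minimal sketch of a vcpu run loop; run->exit_reason is assumed. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/gzvm.h>

int run_vcpu(int vm_fd, uint32_t cpuid, struct gzvm_vcpu_run *run)
{
        int vcpu_fd = ioctl(vm_fd, GZVM_CREATE_VCPU, cpuid);

        if (vcpu_fd < 0)
                return -1;
        for (;;) {
                if (ioctl(vcpu_fd, GZVM_RUN, run) < 0)
                        return -1;      /* includes -EINTR on signals */
                /* only exits the kernel cannot handle come back here */
                if (run->exit_reason == GZVM_EXIT_SHUTDOWN)
                        return 0;
                /* decode run->mmio and friends for the other reasons */
        }
}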
drivers/virt/geniezone/gzvm_vm.c (new file, 539 lines)
@@ -0,0 +1,539 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/kdev_t.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gzvm_drv.h>
#include "gzvm_common.h"

static DEFINE_MUTEX(gzvm_list_lock);
static LIST_HEAD(gzvm_list);

/**
 * hva_to_pa_fast() - converts hva to pa in generic fast way
 * @hva: Host virtual address.
 *
 * Return: 0 if translation error
 */
static u64 hva_to_pa_fast(u64 hva)
{
        struct page *page[1];
        u64 pfn;

        if (get_user_page_fast_only(hva, 0, page)) {
                pfn = page_to_phys(page[0]);
                put_page(page[0]);
                return pfn;
        } else {
                return 0;
        }
}

/**
 * hva_to_pa_slow() - note that this function may sleep
 * @hva: Host virtual address.
 *
 * Return: 0 if translation error
 */
static u64 hva_to_pa_slow(u64 hva)
{
        struct page *page;
        int npages;
        u64 pfn;

        npages = get_user_pages_unlocked(hva, 1, &page, 0);
        if (npages != 1)
                return 0;

        pfn = page_to_phys(page);
        put_page(page);

        return pfn;
}

static u64 gzvm_gfn_to_hva_memslot(struct gzvm_memslot *memslot, u64 gfn)
{
        u64 offset = gfn - memslot->base_gfn;

        return memslot->userspace_addr + offset * PAGE_SIZE;
}

static u64 __gzvm_gfn_to_pfn_memslot(struct gzvm_memslot *memslot, u64 gfn)
{
        u64 hva, pa;

        hva = gzvm_gfn_to_hva_memslot(memslot, gfn);

        pa = gzvm_hva_to_pa_arch(hva);
        if (pa != 0)
                return PHYS_PFN(pa);

        pa = hva_to_pa_fast(hva);
        if (pa)
                return PHYS_PFN(pa);

        pa = hva_to_pa_slow(hva);
        if (pa)
                return PHYS_PFN(pa);

        return 0;
}

/**
 * gzvm_gfn_to_pfn_memslot() - Translate gfn (guest ipa) to pfn (host pa),
 *                             result is in @pfn
 * @memslot: Pointer to struct gzvm_memslot.
 * @gfn: Guest frame number.
 * @pfn: Host page frame number.
 *
 * Return:
 * * 0 - Succeed
 * * -EFAULT - Failed to convert
 */
static int gzvm_gfn_to_pfn_memslot(struct gzvm_memslot *memslot, u64 gfn,
                                   u64 *pfn)
{
        u64 __pfn;

        if (!memslot)
                return -EFAULT;

        __pfn = __gzvm_gfn_to_pfn_memslot(memslot, gfn);
        if (__pfn == 0) {
                *pfn = 0;
                return -EFAULT;
        }

        *pfn = __pfn;

        return 0;
}

/**
 * fill_constituents() - Populate pa to buffer until full
 * @consti: Pointer to struct mem_region_addr_range.
 * @consti_cnt: Constituent count.
 * @max_nr_consti: Maximum number of constituents.
 * @gfn: Guest frame number.
 * @total_pages: Total page count.
 * @slot: Pointer to struct gzvm_memslot.
 *
 * Return: how many pages we've filled in, negative if error
 */
static int fill_constituents(struct mem_region_addr_range *consti,
                             int *consti_cnt, int max_nr_consti, u64 gfn,
                             u32 total_pages, struct gzvm_memslot *slot)
{
        u64 pfn, prev_pfn, gfn_end;
        int nr_pages = 1;
        int i = 0;

        if (unlikely(total_pages == 0))
                return -EINVAL;
        gfn_end = gfn + total_pages;

        /* entry 0 */
        if (gzvm_gfn_to_pfn_memslot(slot, gfn, &pfn) != 0)
                return -EFAULT;
        consti[0].address = PFN_PHYS(pfn);
        consti[0].pg_cnt = 1;
        gfn++;
        prev_pfn = pfn;

        while (i < max_nr_consti && gfn < gfn_end) {
                if (gzvm_gfn_to_pfn_memslot(slot, gfn, &pfn) != 0)
                        return -EFAULT;
                if (pfn == (prev_pfn + 1)) {
                        consti[i].pg_cnt++;
                } else {
                        i++;
                        if (i >= max_nr_consti)
                                break;
                        consti[i].address = PFN_PHYS(pfn);
                        consti[i].pg_cnt = 1;
                }
                prev_pfn = pfn;
                gfn++;
                nr_pages++;
        }
        if (i != max_nr_consti)
                i++;
        *consti_cnt = i;

        return nr_pages;
}

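As a worked example of the encoding above: if four consecutive gfns resolve to host pfns 0x100, 0x101, 0x102 and 0x200, fill_constituents() emits two constituents, {address = PFN_PHYS(0x100), pg_cnt = 3} and {address = PFN_PHYS(0x200), pg_cnt = 1}, then returns nr_pages = 4 with *consti_cnt set to 2.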
/* register_memslot_addr_range() - Register memory region to GZ */
static int
register_memslot_addr_range(struct gzvm *gzvm, struct gzvm_memslot *memslot)
{
        struct gzvm_memory_region_ranges *region;
        u32 buf_size;
        int max_nr_consti, remain_pages;
        u64 gfn, gfn_end;

        buf_size = PAGE_SIZE * 2;
        region = alloc_pages_exact(buf_size, GFP_KERNEL);
        if (!region)
                return -ENOMEM;
        max_nr_consti = (buf_size - sizeof(*region)) /
                        sizeof(struct mem_region_addr_range);

        region->slot = memslot->slot_id;
        remain_pages = memslot->npages;
        gfn = memslot->base_gfn;
        gfn_end = gfn + remain_pages;
        while (gfn < gfn_end) {
                int nr_pages;

                nr_pages = fill_constituents(region->constituents,
                                             &region->constituent_cnt,
                                             max_nr_consti, gfn,
                                             remain_pages, memslot);
                if (nr_pages < 0) {
                        pr_err("Failed to fill constituents\n");
                        free_pages_exact(region, buf_size);
                        return nr_pages;
                }
                region->gpa = PFN_PHYS(gfn);
                region->total_pages = nr_pages;

                remain_pages -= nr_pages;
                gfn += nr_pages;

                if (gzvm_arch_set_memregion(gzvm->vm_id, buf_size,
                                            virt_to_phys(region))) {
                        pr_err("Failed to register memregion to hypervisor\n");
                        free_pages_exact(region, buf_size);
                        return -EFAULT;
                }
        }
        free_pages_exact(region, buf_size);
        return 0;
}

/**
 * gzvm_vm_ioctl_set_memory_region() - Set memory region of guest
 * @gzvm: Pointer to struct gzvm.
 * @mem: Input memory region from user.
 *
 * Return:
 * * -ENXIO - memslot is out-of-range
 * * -EFAULT - Cannot find corresponding vma
 * * -EINVAL - region size and vma size do not match
 */
static int
gzvm_vm_ioctl_set_memory_region(struct gzvm *gzvm,
                                struct gzvm_userspace_memory_region *mem)
{
        int ret;
        struct vm_area_struct *vma;
        struct gzvm_memslot *memslot;
        unsigned long size;
        __u32 slot;

        slot = mem->slot;
        if (slot >= GZVM_MAX_MEM_REGION)
                return -ENXIO;
        memslot = &gzvm->memslot[slot];

        vma = vma_lookup(gzvm->mm, mem->userspace_addr);
        if (!vma)
                return -EFAULT;

        size = vma->vm_end - vma->vm_start;
        if (size != mem->memory_size)
                return -EINVAL;

        memslot->base_gfn = __phys_to_pfn(mem->guest_phys_addr);
        memslot->npages = size >> PAGE_SHIFT;
        memslot->userspace_addr = mem->userspace_addr;
        memslot->vma = vma;
        memslot->flags = mem->flags;
        memslot->slot_id = mem->slot;

        ret = gzvm_arch_memregion_purpose(gzvm, mem);
        if (ret) {
                pr_err("Failed to config memory region for the specified purpose\n");
                return -EFAULT;
        }
        return register_memslot_addr_range(gzvm, memslot);
}

int gzvm_irqchip_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,
                            u32 irq_type, u32 irq, bool level)
{
        return gzvm_arch_inject_irq(gzvm, vcpu_idx, irq_type, irq, level);
}

static int gzvm_vm_ioctl_irq_line(struct gzvm *gzvm,
                                  struct gzvm_irq_level *irq_level)
{
        u32 irq = irq_level->irq;
        u32 irq_type, vcpu_idx, vcpu2_idx, irq_num;
        bool level = irq_level->level;

        irq_type = FIELD_GET(GZVM_IRQ_LINE_TYPE, irq);
        vcpu_idx = FIELD_GET(GZVM_IRQ_LINE_VCPU, irq);
        vcpu2_idx = FIELD_GET(GZVM_IRQ_LINE_VCPU2, irq) * (GZVM_IRQ_VCPU_MASK + 1);
        irq_num = FIELD_GET(GZVM_IRQ_LINE_NUM, irq);

        return gzvm_irqchip_inject_irq(gzvm, vcpu_idx + vcpu2_idx, irq_type,
                                       irq_num, level);
}

static int gzvm_vm_ioctl_create_device(struct gzvm *gzvm, void __user *argp)
{
        struct gzvm_create_device *gzvm_dev;
        void *dev_data = NULL;
        int ret;

        gzvm_dev = (struct gzvm_create_device *)alloc_pages_exact(PAGE_SIZE,
                                                                  GFP_KERNEL);
        if (!gzvm_dev)
                return -ENOMEM;
        if (copy_from_user(gzvm_dev, argp, sizeof(*gzvm_dev))) {
                ret = -EFAULT;
                goto err_free_dev;
        }

        if (gzvm_dev->attr_addr != 0 && gzvm_dev->attr_size != 0) {
                size_t attr_size = gzvm_dev->attr_size;
                void __user *attr_addr = (void __user *)gzvm_dev->attr_addr;

                /* Size of device specific data should not be over a page. */
                if (attr_size > PAGE_SIZE) {
                        ret = -EINVAL;
                        goto err_free_dev;
                }

                dev_data = alloc_pages_exact(attr_size, GFP_KERNEL);
                if (!dev_data) {
                        ret = -ENOMEM;
                        goto err_free_dev;
                }

                if (copy_from_user(dev_data, attr_addr, attr_size)) {
                        ret = -EFAULT;
                        goto err_free_dev_data;
                }
                gzvm_dev->attr_addr = virt_to_phys(dev_data);
        }

        ret = gzvm_arch_create_device(gzvm->vm_id, gzvm_dev);
err_free_dev_data:
        if (dev_data)
                free_pages_exact(dev_data, gzvm_dev->attr_size);
err_free_dev:
        free_pages_exact(gzvm_dev, PAGE_SIZE);
        return ret;
}

static int gzvm_vm_ioctl_enable_cap(struct gzvm *gzvm,
                                    struct gzvm_enable_cap *cap,
                                    void __user *argp)
{
        return gzvm_vm_ioctl_arch_enable_cap(gzvm, cap, argp);
}

/* gzvm_vm_ioctl() - Ioctl handler of VM fd */
static long gzvm_vm_ioctl(struct file *filp, unsigned int ioctl,
                          unsigned long arg)
{
        long ret = -ENOTTY;
        void __user *argp = (void __user *)arg;
        struct gzvm *gzvm = filp->private_data;

        switch (ioctl) {
        case GZVM_CHECK_EXTENSION: {
                ret = gzvm_dev_ioctl_check_extension(gzvm, arg);
                break;
        }
        case GZVM_CREATE_VCPU: {
                ret = gzvm_vm_ioctl_create_vcpu(gzvm, arg);
                break;
        }
        case GZVM_SET_USER_MEMORY_REGION: {
                struct gzvm_userspace_memory_region userspace_mem;

                if (copy_from_user(&userspace_mem, argp, sizeof(userspace_mem))) {
                        ret = -EFAULT;
                        goto out;
                }
                ret = gzvm_vm_ioctl_set_memory_region(gzvm, &userspace_mem);
                break;
        }
        case GZVM_IRQ_LINE: {
                struct gzvm_irq_level irq_event;

                if (copy_from_user(&irq_event, argp, sizeof(irq_event))) {
                        ret = -EFAULT;
                        goto out;
                }
                ret = gzvm_vm_ioctl_irq_line(gzvm, &irq_event);
                break;
        }
        case GZVM_CREATE_DEVICE: {
                ret = gzvm_vm_ioctl_create_device(gzvm, argp);
                break;
        }
        case GZVM_IRQFD: {
                struct gzvm_irqfd data;

                if (copy_from_user(&data, argp, sizeof(data))) {
                        ret = -EFAULT;
                        goto out;
                }
                ret = gzvm_irqfd(gzvm, &data);
                break;
        }
        case GZVM_IOEVENTFD: {
                struct gzvm_ioeventfd data;

                if (copy_from_user(&data, argp, sizeof(data))) {
                        ret = -EFAULT;
                        goto out;
                }
                ret = gzvm_ioeventfd(gzvm, &data);
                break;
        }
        case GZVM_ENABLE_CAP: {
                struct gzvm_enable_cap cap;

                if (copy_from_user(&cap, argp, sizeof(cap))) {
                        ret = -EFAULT;
                        goto out;
                }
                ret = gzvm_vm_ioctl_enable_cap(gzvm, &cap, argp);
                break;
        }
        case GZVM_SET_DTB_CONFIG: {
                struct gzvm_dtb_config cfg;

                if (copy_from_user(&cfg, argp, sizeof(cfg))) {
                        ret = -EFAULT;
                        goto out;
                }
                ret = gzvm_arch_set_dtb_config(gzvm, &cfg);
                break;
        }
        default:
                ret = -ENOTTY;
        }
out:
        return ret;
}

static void gzvm_destroy_vm(struct gzvm *gzvm)
{
        pr_debug("VM-%u is going to be destroyed\n", gzvm->vm_id);

        mutex_lock(&gzvm->lock);

        gzvm_vm_irqfd_release(gzvm);
        gzvm_destroy_vcpus(gzvm);
        gzvm_arch_destroy_vm(gzvm->vm_id);

        mutex_lock(&gzvm_list_lock);
        list_del(&gzvm->vm_list);
        mutex_unlock(&gzvm_list_lock);

        mutex_unlock(&gzvm->lock);

        kfree(gzvm);
}

static int gzvm_vm_release(struct inode *inode, struct file *filp)
{
        struct gzvm *gzvm = filp->private_data;

        gzvm_destroy_vm(gzvm);
        return 0;
}

static const struct file_operations gzvm_vm_fops = {
        .release        = gzvm_vm_release,
        .unlocked_ioctl = gzvm_vm_ioctl,
        .llseek         = noop_llseek,
};

static struct gzvm *gzvm_create_vm(unsigned long vm_type)
{
        int ret;
        struct gzvm *gzvm;

        gzvm = kzalloc(sizeof(*gzvm), GFP_KERNEL);
        if (!gzvm)
                return ERR_PTR(-ENOMEM);

        ret = gzvm_arch_create_vm(vm_type);
        if (ret < 0) {
                kfree(gzvm);
                return ERR_PTR(ret);
        }

        gzvm->vm_id = ret;
        gzvm->mm = current->mm;
        mutex_init(&gzvm->lock);

        ret = gzvm_vm_irqfd_init(gzvm);
        if (ret) {
                pr_err("Failed to initialize irqfd\n");
                kfree(gzvm);
                return ERR_PTR(ret);
        }

        ret = gzvm_init_ioeventfd(gzvm);
        if (ret) {
                pr_err("Failed to initialize ioeventfd\n");
                kfree(gzvm);
                return ERR_PTR(ret);
        }

        mutex_lock(&gzvm_list_lock);
        list_add(&gzvm->vm_list, &gzvm_list);
        mutex_unlock(&gzvm_list_lock);

        pr_debug("VM-%u is created\n", gzvm->vm_id);

        return gzvm;
}

/**
 * gzvm_dev_ioctl_create_vm() - Create vm fd
 * @vm_type: VM type. Only supports Linux VM now.
 *
 * Return: fd of vm, negative if error
 */
int gzvm_dev_ioctl_create_vm(unsigned long vm_type)
{
        struct gzvm *gzvm;

        gzvm = gzvm_create_vm(vm_type);
        if (IS_ERR(gzvm))
                return PTR_ERR(gzvm);

        return anon_inode_getfd("gzvm-vm", &gzvm_vm_fops, gzvm,
                                O_RDWR | O_CLOEXEC);
}

void gzvm_destroy_all_vms(void)
{
        struct gzvm *gzvm, *tmp;

        mutex_lock(&gzvm_list_lock);
        if (list_empty(&gzvm_list))
                goto out;

        list_for_each_entry_safe(gzvm, tmp, &gzvm_list, vm_list)
                gzvm_destroy_vm(gzvm);

out:
        mutex_unlock(&gzvm_list_lock);
}
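A sketch of feeding the memory-region ioctl above from userspace, using struct gzvm_userspace_memory_region as defined in the uapi header later in this merge; note the handler requires the region to coincide with a single vma of exactly memory_size bytes.

/* Minimal sketch of registering guest RAM; slot 0 is an arbitrary choice. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/gzvm.h>

int map_guest_ram(int vm_fd, uint64_t gpa, size_t size)
{
        struct gzvm_userspace_memory_region region = {
                .slot = 0,                      /* < GZVM_MAX_MEM_REGION */
                .flags = 0,
                .guest_phys_addr = gpa,
                .memory_size = size,            /* must equal the vma size */
        };
        void *backing = mmap(NULL, size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (backing == MAP_FAILED)
                return -1;
        region.userspace_addr = (uint64_t)(uintptr_t)backing;
        return ioctl(vm_fd, GZVM_SET_USER_MEMORY_REGION, &region);
}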
@@ -300,7 +300,7 @@ struct gh_resource *gh_rm_alloc_resource(struct gh_rm *rm, struct gh_rm_hyp_reso
 	ghrsc->capid = le64_to_cpu(hyp_resource->cap_id);
 	ghrsc->irq = IRQ_NOTCONNECTED;
 	ghrsc->rm_label = le32_to_cpu(hyp_resource->resource_label);
-	if (hyp_resource->virq) {
+	if (hyp_resource->virq && hyp_resource->virq != GH_RM_RESOURCE_NO_VIRQ) {
 		struct gh_irq_chip_data irq_data = {
 			.gh_virq = le32_to_cpu(hyp_resource->virq),
 		};
@@ -18,6 +18,10 @@ arch/arm64/crypto/sha256-glue.c
 arch/arm64/crypto/sha512-ce-core.S
 arch/arm64/crypto/sha512-ce-glue.c
 arch/arm64/crypto/sha512-glue.c
+arch/arm64/geniezone/gzvm_arch_common.h
+arch/arm64/geniezone/vcpu.c
+arch/arm64/geniezone/vgic.c
+arch/arm64/geniezone/vm.c
 arch/arm64/gunyah/gunyah_hypercall.c
 arch/arm64/include/asm/acpi.h
 arch/arm64/include/asm/alternative-macros.h
@@ -206,6 +210,7 @@ arch/arm64/include/uapi/asm/bitsperlong.h
 arch/arm64/include/uapi/asm/bpf_perf_event.h
 arch/arm64/include/uapi/asm/byteorder.h
 arch/arm64/include/uapi/asm/fcntl.h
+arch/arm64/include/uapi/asm/gzvm_arch.h
 arch/arm64/include/uapi/asm/hwcap.h
 arch/arm64/include/uapi/asm/kvm.h
 arch/arm64/include/uapi/asm/mman.h
@@ -1874,6 +1879,12 @@ drivers/video/hdmi.c
 drivers/video/of_display_timing.c
 drivers/video/of_videomode.c
 drivers/video/videomode.c
+drivers/virt/geniezone/gzvm_common.h
+drivers/virt/geniezone/gzvm_ioeventfd.c
+drivers/virt/geniezone/gzvm_irqfd.c
+drivers/virt/geniezone/gzvm_main.c
+drivers/virt/geniezone/gzvm_vcpu.c
+drivers/virt/geniezone/gzvm_vm.c
 drivers/virt/gunyah/gunyah_ioeventfd.c
 drivers/virt/gunyah/gunyah_irqfd.c
 drivers/virt/gunyah/gunyah_platform_hooks.c
@@ -2902,6 +2913,7 @@ include/linux/gpio_keys.h
 include/linux/gunyah.h
 include/linux/gunyah_rsc_mgr.h
 include/linux/gunyah_vm_mgr.h
+include/linux/gzvm_drv.h
 include/linux/hardirq.h
 include/linux/hash.h
 include/linux/hashtable.h
@@ -4391,6 +4403,7 @@ include/uapi/linux/gen_stats.h
 include/uapi/linux/genetlink.h
 include/uapi/linux/gpio.h
 include/uapi/linux/gunyah.h
+include/uapi/linux/gzvm.h
 include/uapi/linux/hash_info.h
 include/uapi/linux/hdlc/ioctl.h
 include/uapi/linux/hdreg.h
@@ -34,6 +34,7 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
 {
 	int i;
 	struct exfat_entry_set_cache *es;
+	unsigned int uni_len = 0, len;
 
 	es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES);
 	if (!es)
@@ -52,7 +53,10 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
 		if (exfat_get_entry_type(ep) != TYPE_EXTEND)
 			break;
 
-		exfat_extract_uni_name(ep, uniname);
+		len = exfat_extract_uni_name(ep, uniname);
+		uni_len += len;
+		if (len != EXFAT_FILE_NAME_LEN || uni_len >= MAX_NAME_LENGTH)
+			break;
 		uniname += EXFAT_FILE_NAME_LEN;
 	}
 
@@ -1027,7 +1031,8 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
 		if (entry_type == TYPE_EXTEND) {
 			unsigned short entry_uniname[16], unichar;
 
-			if (step != DIRENT_STEP_NAME) {
+			if (step != DIRENT_STEP_NAME ||
+			    name_len >= MAX_NAME_LENGTH) {
 				step = DIRENT_STEP_FILE;
 				continue;
 			}
@@ -1446,32 +1446,34 @@ int fuse_mkdir_initialize(
 
 int fuse_mkdir_backing(
 		struct fuse_bpf_args *fa,
-		struct inode *dir, struct dentry *entry, umode_t mode)
+		struct inode *dir_inode, struct dentry *entry, umode_t mode)
 {
 	int err = 0;
 	const struct fuse_mkdir_in *fmi = fa->in_args[0].value;
-	struct fuse_inode *fuse_inode = get_fuse_inode(dir);
-	struct inode *backing_inode = fuse_inode->backing_inode;
+	struct fuse_inode *dir_fuse_inode = get_fuse_inode(dir_inode);
+	struct inode *dir_backing_inode = dir_fuse_inode->backing_inode;
 	struct path backing_path = {};
 	struct inode *inode = NULL;
-	struct dentry *d;
 
 	//TODO Actually deal with changing the backing entry in mkdir
 	get_fuse_backing_path(entry, &backing_path);
 	if (!backing_path.dentry)
 		return -EBADF;
 
-	inode_lock_nested(backing_inode, I_MUTEX_PARENT);
+	inode_lock_nested(dir_backing_inode, I_MUTEX_PARENT);
 	mode = fmi->mode;
-	if (!IS_POSIXACL(backing_inode))
+	if (!IS_POSIXACL(dir_backing_inode))
 		mode &= ~fmi->umask;
-	err = vfs_mkdir(&init_user_ns, backing_inode, backing_path.dentry, mode);
+	err = vfs_mkdir(&init_user_ns, dir_backing_inode, backing_path.dentry,
+			mode);
 	if (err)
 		goto out;
 	if (d_really_is_negative(backing_path.dentry) ||
 	    unlikely(d_unhashed(backing_path.dentry))) {
-		d = lookup_one_len(entry->d_name.name, backing_path.dentry->d_parent,
-				   entry->d_name.len);
+		struct dentry *d = lookup_one_len(entry->d_name.name,
+						  backing_path.dentry->d_parent,
+						  entry->d_name.len);
+
 		if (IS_ERR(d)) {
 			err = PTR_ERR(d);
 			goto out;
@@ -1479,14 +1481,19 @@ int fuse_mkdir_backing(
 		dput(backing_path.dentry);
 		backing_path.dentry = d;
 	}
-	inode = fuse_iget_backing(dir->i_sb, fuse_inode->nodeid, backing_inode);
+	inode = fuse_iget_backing(dir_inode->i_sb, 0,
+				  backing_path.dentry->d_inode);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
 		goto out;
 	}
 	d_instantiate(entry, inode);
+	if (get_fuse_inode(inode)->bpf)
+		bpf_prog_put(get_fuse_inode(inode)->bpf);
+	get_fuse_inode(inode)->bpf = get_fuse_dentry(entry)->bpf;
+	get_fuse_dentry(entry)->bpf = NULL;
 out:
-	inode_unlock(backing_inode);
+	inode_unlock(dir_backing_inode);
 	path_put(&backing_path);
 	return err;
 }
@@ -1300,6 +1300,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 out:
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
+	trace_posix_lock_inode(inode, request, error);
 	/*
 	 * Free any unused locks.
 	 */
@@ -1308,7 +1309,6 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 	if (new_fl2)
 		locks_free_lock(new_fl2);
 	locks_dispose_list(&dispose);
-	trace_posix_lock_inode(inode, request, error);
 
 	return error;
 }
@@ -518,7 +518,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
 	sig ^= PERSISTENT_RAM_SIG;
 
 	if (prz->buffer->sig == sig) {
-		if (buffer_size(prz) == 0) {
+		if (buffer_size(prz) == 0 && buffer_start(prz) == 0) {
 			pr_debug("found existing empty buffer\n");
 			return 0;
 		}
@@ -314,4 +314,12 @@ extern s64 cpuidle_governor_latency_req(unsigned int cpu);
 #define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \
 	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1)
 
+#ifdef CONFIG_CPU_IDLE_GOV_TEO
+unsigned long teo_cpu_get_util_threshold(int cpu);
+void teo_cpu_set_util_threshold(int cpu, unsigned long util);
+#else
+static inline unsigned long teo_cpu_get_util_threshold(int cpu) { return -1; }
+static inline void teo_cpu_set_util_threshold(int cpu, unsigned long util) {}
+#endif
+
 #endif /* _LINUX_CPUIDLE_H */
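Given the pair declared above, a minimal sketch of how a vendor module might use it; the halving policy is invented purely for illustration.

/* Illustrative vendor-side use of the teo threshold hooks above. */
#include <linux/cpuidle.h>

static void tighten_teo_threshold(int cpu)
{
        unsigned long cur = teo_cpu_get_util_threshold(cpu);

        /* engage deeper idle states at half the current utilization */
        teo_cpu_set_util_threshold(cpu, cur / 2);
}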
@@ -139,6 +139,7 @@ struct gh_rm_hyp_resource {
 	__le32 resource_label;
 	__le64 cap_id;
 	__le32 virq_handle;
+#define GH_RM_RESOURCE_NO_VIRQ 0xFFFFFFFF
 	__le32 virq;
 	__le64 base;
 	__le64 size;
include/linux/gzvm_drv.h (new file, 158 lines)
@@ -0,0 +1,158 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#ifndef __GZVM_DRV_H__
#define __GZVM_DRV_H__

#include <linux/eventfd.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/miscdevice.h>
#include <linux/gzvm.h>
#include <linux/srcu.h>

#define GZVM_VCPU_MMAP_SIZE  PAGE_SIZE
#define INVALID_VM_ID   0xffff

/*
 * These are the definitions of APIs between GenieZone hypervisor and driver,
 * there's no need to be visible to uapi. Furthermore, we need GenieZone
 * specific error codes in order to map them to Linux errno.
 */
#define NO_ERROR            (0)
#define ERR_NO_MEMORY       (-5)
#define ERR_NOT_SUPPORTED   (-24)
#define ERR_NOT_IMPLEMENTED (-27)
#define ERR_FAULT           (-40)
#define GZVM_USERSPACE_IRQ_SOURCE_ID            0
#define GZVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID       1

/*
 * The following data structures are for data transferring between driver and
 * hypervisor, and they're aligned with hypervisor definitions.
 */
#define GZVM_MAX_VCPUS           8
#define GZVM_MAX_MEM_REGION     10

#define GZVM_VCPU_RUN_MAP_SIZE          (PAGE_SIZE * 2)

/* struct mem_region_addr_range - Identical to ffa memory constituent */
struct mem_region_addr_range {
        /* the base IPA of the constituent memory region, aligned to 4 kiB */
        __u64 address;
        /* the number of 4 kiB pages in the constituent memory region */
        __u32 pg_cnt;
        __u32 reserved;
};

struct gzvm_memory_region_ranges {
        __u32 slot;
        __u32 constituent_cnt;
        __u64 total_pages;
        __u64 gpa;
        struct mem_region_addr_range constituents[];
};

/* struct gzvm_memslot - VM's memory slot descriptor */
struct gzvm_memslot {
        u64 base_gfn;                   /* begin of guest page frame */
        unsigned long npages;           /* number of pages this slot covers */
        unsigned long userspace_addr;   /* corresponding userspace va */
        struct vm_area_struct *vma;     /* vma related to this userspace addr */
        u32 flags;
        u32 slot_id;
};

struct gzvm_vcpu {
        struct gzvm *gzvm;
        int vcpuid;
        /* lock of vcpu */
        struct mutex lock;
        struct gzvm_vcpu_run *run;
        struct gzvm_vcpu_hwstate *hwstate;
};

struct gzvm {
        struct gzvm_vcpu *vcpus[GZVM_MAX_VCPUS];
        /* userspace tied to this vm */
        struct mm_struct *mm;
        struct gzvm_memslot memslot[GZVM_MAX_MEM_REGION];
        /* lock for list_add */
        struct mutex lock;

        struct {
                /* lock for irqfds list operation */
                spinlock_t lock;
                struct list_head items;
                struct list_head resampler_list;
                /* lock for irqfds resampler */
                struct mutex resampler_lock;
        } irqfds;

        struct list_head ioevents;

        struct list_head vm_list;
        u16 vm_id;

        struct hlist_head irq_ack_notifier_list;
        struct srcu_struct irq_srcu;
        /* lock for irq injection */
        struct mutex irq_lock;
};

long gzvm_dev_ioctl_check_extension(struct gzvm *gzvm, unsigned long args);
int gzvm_dev_ioctl_create_vm(unsigned long vm_type);

int gzvm_err_to_errno(unsigned long err);

void gzvm_destroy_all_vms(void);

void gzvm_destroy_vcpus(struct gzvm *gzvm);

/* arch-dependent functions */
int gzvm_arch_probe(void);
int gzvm_arch_set_memregion(u16 vm_id, size_t buf_size,
                            phys_addr_t region);
int gzvm_arch_check_extension(struct gzvm *gzvm, __u64 cap, void __user *argp);
int gzvm_arch_create_vm(unsigned long vm_type);
int gzvm_arch_destroy_vm(u16 vm_id);
int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
                                  struct gzvm_enable_cap *cap,
                                  void __user *argp);

u64 gzvm_hva_to_pa_arch(u64 hva);
int gzvm_vm_ioctl_create_vcpu(struct gzvm *gzvm, u32 cpuid);
int gzvm_arch_vcpu_update_one_reg(struct gzvm_vcpu *vcpu, __u64 reg_id,
                                  bool is_write, __u64 *data);
int gzvm_arch_create_vcpu(u16 vm_id, int vcpuid, void *run);
int gzvm_arch_vcpu_run(struct gzvm_vcpu *vcpu, __u64 *exit_reason);
int gzvm_arch_destroy_vcpu(u16 vm_id, int vcpuid);
int gzvm_arch_inform_exit(u16 vm_id);

int gzvm_arch_create_device(u16 vm_id, struct gzvm_create_device *gzvm_dev);
int gzvm_arch_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,
                         u32 irq_type, u32 irq, bool level);

void gzvm_notify_acked_irq(struct gzvm *gzvm, unsigned int gsi);
int gzvm_irqfd(struct gzvm *gzvm, struct gzvm_irqfd *args);
int gzvm_drv_irqfd_init(void);
void gzvm_drv_irqfd_exit(void);
int gzvm_vm_irqfd_init(struct gzvm *gzvm);
void gzvm_vm_irqfd_release(struct gzvm *gzvm);

int gzvm_arch_memregion_purpose(struct gzvm *gzvm,
                                struct gzvm_userspace_memory_region *mem);
int gzvm_arch_set_dtb_config(struct gzvm *gzvm, struct gzvm_dtb_config *args);

int gzvm_init_ioeventfd(struct gzvm *gzvm);
int gzvm_ioeventfd(struct gzvm *gzvm, struct gzvm_ioeventfd *args);
bool gzvm_ioevent_write(struct gzvm_vcpu *vcpu, __u64 addr, int len,
                        const void *val);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr);
void add_wait_queue_priority(struct wait_queue_head *wq_head,
                             struct wait_queue_entry *wq_entry);

#endif /* __GZVM_DRV_H__ */
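A compile-time restatement of the two-page layout encoded by GZVM_VCPU_RUN_MAP_SIZE above (gzvm_vcpu_run in page 0, hwstate in page 1, as gzvm_vm_ioctl_create_vcpu() sets it up); the assertions are illustrative, not part of this merge.

/* illustrative only: run must fit in page 0 so hwstate can start at page 1 */
static_assert(GZVM_VCPU_RUN_MAP_SIZE == 2 * PAGE_SIZE);
static_assert(sizeof(struct gzvm_vcpu_run) <= PAGE_SIZE);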
@@ -112,4 +112,6 @@ extern void oom_killer_enable(void);
 
 extern struct task_struct *find_lock_task_mm(struct task_struct *p);
 
+/* call for adding killed process to reaper. */
+extern void add_to_oom_reaper(struct task_struct *p);
 #endif /* _INCLUDE_LINUX_OOM_H */
@@ -758,10 +758,8 @@ struct task_struct {
 #endif
 	unsigned int			__state;
 
-#ifdef CONFIG_PREEMPT_RT
-	/* saved state for "spinlock sleepers" */
-	unsigned int			saved_state;
-#endif
+	/* moved to ANDROID_KABI_USE(1, unsigned int saved_state) */
 
 	/*
 	 * This begins the randomizable portion of task_struct. Only
@@ -1548,7 +1546,7 @@ struct task_struct {
 	 */
 	union rv_task_monitor		rv[RV_PER_TASK_MONITORS];
 #endif
-	ANDROID_KABI_RESERVE(1);
+	ANDROID_KABI_USE(1, unsigned int saved_state);
 	ANDROID_KABI_RESERVE(2);
 	ANDROID_KABI_RESERVE(3);
 	ANDROID_KABI_RESERVE(4);
@@ -2029,15 +2027,12 @@ static __always_inline void scheduler_ipi(void)
 	 */
 	preempt_fold_need_resched();
 }
-extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
 #else
 static inline void scheduler_ipi(void) { }
-static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
-{
-	return 1;
-}
 #endif
 
+extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
+
 /*
  * Set thread flags in other task's structures.
  * See asm/thread_info.h for TIF_xxxx flags available:
@@ -105,6 +105,19 @@ DECLARE_HOOK(android_vh_task_blocks_on_rtmutex,
 DECLARE_HOOK(android_vh_rtmutex_waiter_prio,
 	TP_PROTO(struct task_struct *task, int *waiter_prio),
 	TP_ARGS(task, waiter_prio));
+
+DECLARE_HOOK(android_vh_exit_signal_whether_wake,
+	TP_PROTO(struct task_struct *p, bool *wake),
+	TP_ARGS(p, wake));
+
+DECLARE_HOOK(android_vh_exit_check,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p));
+
+DECLARE_HOOK(android_vh_freeze_whether_wake,
+	TP_PROTO(struct task_struct *t, bool *wake),
+	TP_ARGS(t, wake));
+
 #endif /* _TRACE_HOOK_DTASK_H */
 
 /* This part must be outside protection */
@@ -321,6 +321,11 @@ DECLARE_HOOK(android_vh_setscheduler_uclamp,
 	TP_PROTO(struct task_struct *tsk, int clamp_id, unsigned int value),
 	TP_ARGS(tsk, clamp_id, value));
 
+DECLARE_HOOK(android_vh_uclamp_validate,
+	TP_PROTO(struct task_struct *p, const struct sched_attr *attr,
+		bool user, int *ret, bool *done),
+	TP_ARGS(p, attr, user, ret, done));
+
 DECLARE_HOOK(android_vh_update_topology_flags_workfn,
 	TP_PROTO(void *unused),
 	TP_ARGS(unused));
@@ -389,7 +394,37 @@ DECLARE_HOOK(android_vh_mmput,
 	TP_PROTO(struct mm_struct *mm),
 	TP_ARGS(mm));
 
-/* macro versions of hooks are no longer required */
+struct sched_attr;
+DECLARE_HOOK(android_vh_set_sugov_sched_attr,
+	TP_PROTO(struct sched_attr *attr),
+	TP_ARGS(attr));
+DECLARE_RESTRICTED_HOOK(android_rvh_set_iowait,
+	TP_PROTO(struct task_struct *p, struct rq *rq, int *should_iowait_boost),
+	TP_ARGS(p, rq, should_iowait_boost), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_attach_entity_load_avg,
+	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_detach_entity_load_avg,
+	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_load_avg,
+	TP_PROTO(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(now, cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_remove_entity_load_avg,
+	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_blocked_fair,
+	TP_PROTO(struct rq *rq),
+	TP_ARGS(rq), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_rt_rq_load_avg,
+	TP_PROTO(u64 now, struct rq *rq, struct task_struct *tsk, int running),
+	TP_ARGS(now, rq, tsk, running), 1);
+
 #endif /* _TRACE_HOOK_SCHED_H */
 /* This part must be outside protection */
@@ -14,6 +14,9 @@ DECLARE_HOOK(android_vh_do_send_sig_info,
 DECLARE_HOOK(android_vh_exit_signal,
 	TP_PROTO(struct task_struct *task),
 	TP_ARGS(task));
+DECLARE_HOOK(android_vh_killed_process,
+	TP_PROTO(struct task_struct *killer, struct task_struct *dst, bool *reap),
+	TP_ARGS(killer, dst, reap));
 #endif /* _TRACE_HOOK_SIGNAL_H */
 /* This part must be outside protection */
 #include <trace/define_trace.h>
@@ -24,6 +24,10 @@ DECLARE_HOOK(android_vh_arch_set_freq_scale,
 
 #endif
 
+DECLARE_HOOK(android_vh_use_amu_fie,
+	TP_PROTO(bool *use_amu_fie),
+	TP_ARGS(use_amu_fie));
+
 #endif /* _TRACE_HOOK_TOPOLOGY_H */
 /* This part must be outside protection */
 #include <trace/define_trace.h>
@@ -43,6 +43,9 @@ enum scan_balance;
 DECLARE_HOOK(android_vh_tune_scan_type,
 	TP_PROTO(enum scan_balance *scan_type),
 	TP_ARGS(scan_type));
+DECLARE_HOOK(android_vh_tune_swappiness,
+	TP_PROTO(int *swappiness),
+	TP_ARGS(swappiness));
 DECLARE_HOOK(android_vh_scan_abort_check_wmarks,
 	TP_PROTO(bool *check_wmarks),
 	TP_ARGS(check_wmarks));
@@ -34,3 +34,4 @@ mandatory-y += termbits.h
 mandatory-y += termios.h
 mandatory-y += types.h
 mandatory-y += unistd.h
+mandatory-y += gzvm_arch.h
include/uapi/asm-generic/gzvm_arch.h (new file, 10 lines)
@@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#ifndef __ASM_GENERIC_GZVM_ARCH_H
#define __ASM_GENERIC_GZVM_ARCH_H
/* geniezone only supports aarch64 platform for now */

#endif /* __ASM_GENERIC_GZVM_ARCH_H */
@@ -56,15 +56,29 @@ struct fuse_in_postfilter_header {
 /** One input argument of a request */
 struct fuse_bpf_in_arg {
 	uint32_t size;
-	const void *value;
-	const void *end_offset;
+	uint32_t padding;
+	union {
+		const void *value;
+		uint64_t padding2;
+	};
+	union {
+		const void *end_offset;
+		uint64_t padding3;
+	};
 };
 
 /** One output argument of a request */
 struct fuse_bpf_arg {
 	uint32_t size;
-	void *value;
-	void *end_offset;
+	uint32_t padding;
+	union {
+		void *value;
+		uint64_t padding2;
+	};
+	union {
+		void *end_offset;
+		uint64_t padding3;
+	};
 };
 
 #define FUSE_MAX_IN_ARGS 5
@@ -80,6 +94,7 @@ struct fuse_bpf_args {
 	uint32_t in_numargs;
 	uint32_t out_numargs;
 	uint32_t flags;
+	uint32_t padding;
 	struct fuse_bpf_in_arg in_args[FUSE_MAX_IN_ARGS];
 	struct fuse_bpf_arg out_args[FUSE_MAX_OUT_ARGS];
 };
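The point of the unions above is that each pointer now shares storage with a uint64_t, so the member offsets stop depending on sizeof(void *) and the struct layout matches between 32-bit and 64-bit kernels. A standalone sketch (illustration only, not part of the patch; assumes nothing beyond C11) that demonstrates the invariant:

/*
 * Each pointer is paired with a uint64_t, so sizeof() and member
 * offsets are the same on 32-bit and 64-bit builds.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_arg {
	uint32_t size;
	uint32_t padding;
	union {
		const void *value;
		uint64_t padding2;
	};
	union {
		const void *end_offset;
		uint64_t padding3;
	};
};

int main(void)
{
	/* 4 + 4 + 8 + 8 bytes regardless of pointer width */
	static_assert(sizeof(struct demo_arg) == 24, "layout must not vary");
	printf("sizeof(struct demo_arg) = %zu\n", sizeof(struct demo_arg));
	return 0;
}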
include/uapi/linux/gzvm.h (new file, 343 lines)

@@ -0,0 +1,343 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

/**
 * DOC: UAPI of GenieZone Hypervisor
 *
 * This file declares common data structure shared among user space,
 * kernel space, and GenieZone hypervisor.
 */
#ifndef __GZVM_H__
#define __GZVM_H__

#include <linux/const.h>
#include <linux/types.h>
#include <linux/ioctl.h>

#include <asm/gzvm_arch.h>

/* GZVM ioctls */
#define GZVM_IOC_MAGIC			0x92	/* gz */

/* ioctls for /dev/gzvm fds */
#define GZVM_CREATE_VM		_IO(GZVM_IOC_MAGIC, 0x01) /* Returns a Geniezone VM fd */

/*
 * Check if the given capability is supported or not.
 * The argument is capability. Ex. GZVM_CAP_ARM_PROTECTED_VM or GZVM_CAP_ARM_VM_IPA_SIZE
 * return is 0 (supported, no error)
 * return is -EOPNOTSUPP (unsupported)
 * return is -EFAULT (failed to get the argument from userspace)
 */
#define GZVM_CHECK_EXTENSION	_IO(GZVM_IOC_MAGIC, 0x03)

/* ioctls for VM fds */
/* for GZVM_SET_MEMORY_REGION */
struct gzvm_memory_region {
	__u32 slot;
	__u32 flags;
	__u64 guest_phys_addr;
	__u64 memory_size; /* bytes */
};

#define GZVM_SET_MEMORY_REGION	_IOW(GZVM_IOC_MAGIC, 0x40, \
				     struct gzvm_memory_region)
/*
 * GZVM_CREATE_VCPU receives as a parameter the vcpu slot,
 * and returns a vcpu fd.
 */
#define GZVM_CREATE_VCPU	_IO(GZVM_IOC_MAGIC, 0x41)

/* for GZVM_SET_USER_MEMORY_REGION */
struct gzvm_userspace_memory_region {
	__u32 slot;
	__u32 flags;
	__u64 guest_phys_addr;
	/* bytes */
	__u64 memory_size;
	/* start of the userspace allocated memory */
	__u64 userspace_addr;
};

#define GZVM_SET_USER_MEMORY_REGION _IOW(GZVM_IOC_MAGIC, 0x46, \
					 struct gzvm_userspace_memory_region)

/* for GZVM_IRQ_LINE, irq field index values */
#define GZVM_IRQ_VCPU_MASK	0xff
#define GZVM_IRQ_LINE_TYPE	GENMASK(27, 24)
#define GZVM_IRQ_LINE_VCPU	GENMASK(23, 16)
#define GZVM_IRQ_LINE_VCPU2	GENMASK(31, 28)
#define GZVM_IRQ_LINE_NUM	GENMASK(15, 0)

/* irq_type field */
#define GZVM_IRQ_TYPE_CPU	0
#define GZVM_IRQ_TYPE_SPI	1
#define GZVM_IRQ_TYPE_PPI	2

/* out-of-kernel GIC cpu interrupt injection irq_number field */
#define GZVM_IRQ_CPU_IRQ	0
#define GZVM_IRQ_CPU_FIQ	1

struct gzvm_irq_level {
	union {
		__u32 irq;
		__s32 status;
	};
	__u32 level;
};

#define GZVM_IRQ_LINE		_IOW(GZVM_IOC_MAGIC, 0x61, \
				     struct gzvm_irq_level)

enum gzvm_device_type {
	GZVM_DEV_TYPE_ARM_VGIC_V3_DIST = 0,
	GZVM_DEV_TYPE_ARM_VGIC_V3_REDIST = 1,
	GZVM_DEV_TYPE_MAX,
};

/**
 * struct gzvm_create_device: For GZVM_CREATE_DEVICE.
 * @dev_type: Device type.
 * @id: Device id.
 * @flags: Bypass to hypervisor to handle them and these are flags of virtual
 *         devices.
 * @dev_addr: Device ipa address in VM's view.
 * @dev_reg_size: Device register range size.
 * @attr_addr: If user -> kernel, this is user virtual address of device
 *             specific attributes (if needed). If kernel->hypervisor,
 *             this is ipa.
 * @attr_size: This attr_size is the buffer size in bytes of each attribute
 *             needed from various devices. The attribute here refers to the
 *             additional data passed from VMM(e.g. Crosvm) to GenieZone
 *             hypervisor when virtual devices were to be created. Thus,
 *             we need attr_addr and attr_size in the gzvm_create_device
 *             structure to keep track of the attribute mentioned.
 *
 * Store information needed to create device.
 */
struct gzvm_create_device {
	__u32 dev_type;
	__u32 id;
	__u64 flags;
	__u64 dev_addr;
	__u64 dev_reg_size;
	__u64 attr_addr;
	__u64 attr_size;
};

#define GZVM_CREATE_DEVICE	_IOWR(GZVM_IOC_MAGIC, 0xe0, \
				      struct gzvm_create_device)

/*
 * ioctls for vcpu fds
 */
#define GZVM_RUN		_IO(GZVM_IOC_MAGIC, 0x80)

/* VM exit reason */
enum {
	GZVM_EXIT_UNKNOWN = 0x92920000,
	GZVM_EXIT_MMIO = 0x92920001,
	GZVM_EXIT_HYPERCALL = 0x92920002,
	GZVM_EXIT_IRQ = 0x92920003,
	GZVM_EXIT_EXCEPTION = 0x92920004,
	GZVM_EXIT_DEBUG = 0x92920005,
	GZVM_EXIT_FAIL_ENTRY = 0x92920006,
	GZVM_EXIT_INTERNAL_ERROR = 0x92920007,
	GZVM_EXIT_SYSTEM_EVENT = 0x92920008,
	GZVM_EXIT_SHUTDOWN = 0x92920009,
	GZVM_EXIT_GZ = 0x9292000a,
};

/**
 * struct gzvm_vcpu_run: Same purpose as kvm_run, this struct is
 *			 shared between userspace, kernel and
 *			 GenieZone hypervisor
 * @exit_reason: The reason why gzvm_vcpu_run has stopped running the vCPU
 * @immediate_exit: Polled when the vcpu is scheduled.
 *                  If set, immediately returns -EINTR
 * @padding1: Reserved for future-proof and must be zero filled
 * @mmio: The nested struct in anonymous union. Handle mmio in host side
 * @phys_addr: The address guest tries to access
 * @data: The value to be written (is_write is 1) or
 *        be filled by user for reads (is_write is 0)
 * @size: The size of written data.
 *        Only the first `size` bytes of `data` are handled
 * @reg_nr: The register number where the data is stored
 * @is_write: 1 for VM to perform a write or 0 for VM to perform a read
 * @fail_entry: The nested struct in anonymous union.
 *              Handle invalid entry address at the first run
 * @hardware_entry_failure_reason: The reason codes about hardware entry failure
 * @cpu: The current processor number via smp_processor_id()
 * @exception: The nested struct in anonymous union.
 *             Handle exception occurred in VM
 * @exception: Which exception vector
 * @error_code: Exception error codes
 * @hypercall: The nested struct in anonymous union.
 *             Some hypercalls issued from VM must be handled
 * @args: The hypercall's arguments
 * @internal: The nested struct in anonymous union. The errors from hypervisor
 * @suberror: The errors codes about GZVM_EXIT_INTERNAL_ERROR
 * @ndata: The number of elements used in data[]
 * @data: Keep the detailed information about GZVM_EXIT_INTERNAL_ERROR
 * @system_event: The nested struct in anonymous union.
 *                VM's PSCI must be handled by host
 * @type: System event type.
 *        Ex. GZVM_SYSTEM_EVENT_SHUTDOWN or GZVM_SYSTEM_EVENT_RESET...etc.
 * @ndata: The number of elements used in data[]
 * @data: Keep the detailed information about GZVM_EXIT_SYSTEM_EVENT
 * @padding: Fix it to a reasonable size future-proof for keeping the same
 *           struct size when adding new variables in the union is needed
 *
 * Keep identical layout between the 3 modules
 */
struct gzvm_vcpu_run {
	/* to userspace */
	__u32 exit_reason;
	__u8 immediate_exit;
	__u8 padding1[3];
	/* union structure of collection of guest exit reason */
	union {
		/* GZVM_EXIT_MMIO */
		struct {
			/* from FAR_EL2 */
			__u64 phys_addr;
			__u8 data[8];
			/* from ESR_EL2 as */
			__u64 size;
			/* from ESR_EL2 */
			__u32 reg_nr;
			/* from ESR_EL2 */
			__u8 is_write;
		} mmio;
		/* GZVM_EXIT_FAIL_ENTRY */
		struct {
			__u64 hardware_entry_failure_reason;
			__u32 cpu;
		} fail_entry;
		/* GZVM_EXIT_EXCEPTION */
		struct {
			__u32 exception;
			__u32 error_code;
		} exception;
		/* GZVM_EXIT_HYPERCALL */
		struct {
			__u64 args[8];	/* in-out */
		} hypercall;
		/* GZVM_EXIT_INTERNAL_ERROR */
		struct {
			__u32 suberror;
			__u32 ndata;
			__u64 data[16];
		} internal;
		/* GZVM_EXIT_SYSTEM_EVENT */
		struct {
#define GZVM_SYSTEM_EVENT_SHUTDOWN	1
#define GZVM_SYSTEM_EVENT_RESET		2
#define GZVM_SYSTEM_EVENT_CRASH		3
#define GZVM_SYSTEM_EVENT_WAKEUP	4
#define GZVM_SYSTEM_EVENT_SUSPEND	5
#define GZVM_SYSTEM_EVENT_SEV_TERM	6
#define GZVM_SYSTEM_EVENT_S2IDLE	7
			__u32 type;
			__u32 ndata;
			__u64 data[16];
		} system_event;
		char padding[256];
	};
};

/* for GZVM_ENABLE_CAP */
struct gzvm_enable_cap {
	/* in */
	__u64 cap;
	/**
	 * we have total 5 (8 - 3) registers can be used for
	 * additional args
	 */
	__u64 args[5];
};

#define GZVM_ENABLE_CAP		_IOW(GZVM_IOC_MAGIC, 0xa3, \
				     struct gzvm_enable_cap)

/* for GZVM_GET/SET_ONE_REG */
struct gzvm_one_reg {
	__u64 id;
	__u64 addr;
};

#define GZVM_GET_ONE_REG	_IOW(GZVM_IOC_MAGIC, 0xab, \
				     struct gzvm_one_reg)
#define GZVM_SET_ONE_REG	_IOW(GZVM_IOC_MAGIC, 0xac, \
				     struct gzvm_one_reg)

#define GZVM_REG_GENERIC	0x0000000000000000ULL

#define GZVM_IRQFD_FLAG_DEASSIGN	BIT(0)
/*
 * GZVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
 * the irqfd to operate in resampling mode for level triggered interrupt
 * emulation.
 */
#define GZVM_IRQFD_FLAG_RESAMPLE	BIT(1)

/**
 * struct gzvm_irqfd: gzvm irqfd descriptor
 * @fd: File descriptor.
 * @gsi: Used for level IRQ fast-path.
 * @flags: FLAG_DEASSIGN or FLAG_RESAMPLE.
 * @resamplefd: The file descriptor of the resampler.
 * @pad: Reserved for future-proof.
 */
struct gzvm_irqfd {
	__u32 fd;
	__u32 gsi;
	__u32 flags;
	__u32 resamplefd;
	__u8  pad[16];
};

#define GZVM_IRQFD	_IOW(GZVM_IOC_MAGIC, 0x76, struct gzvm_irqfd)

enum {
	gzvm_ioeventfd_flag_nr_datamatch = 0,
	gzvm_ioeventfd_flag_nr_pio = 1,
	gzvm_ioeventfd_flag_nr_deassign = 2,
	gzvm_ioeventfd_flag_nr_max,
};

#define GZVM_IOEVENTFD_FLAG_DATAMATCH	(1 << gzvm_ioeventfd_flag_nr_datamatch)
#define GZVM_IOEVENTFD_FLAG_PIO		(1 << gzvm_ioeventfd_flag_nr_pio)
#define GZVM_IOEVENTFD_FLAG_DEASSIGN	(1 << gzvm_ioeventfd_flag_nr_deassign)
#define GZVM_IOEVENTFD_VALID_FLAG_MASK	((1 << gzvm_ioeventfd_flag_nr_max) - 1)

struct gzvm_ioeventfd {
	__u64 datamatch;
	/* private: legal pio/mmio address */
	__u64 addr;
	/* private: 1, 2, 4, or 8 bytes; or 0 to ignore length */
	__u32 len;
	__s32 fd;
	__u32 flags;
	__u8  pad[36];
};

#define GZVM_IOEVENTFD	_IOW(GZVM_IOC_MAGIC, 0x79, struct gzvm_ioeventfd)

/**
 * struct gzvm_dtb_config: store address and size of dtb passed from userspace
 *
 * @dtb_addr: dtb address set by VMM (guest memory)
 * @dtb_size: dtb size
 */
struct gzvm_dtb_config {
	__u64 dtb_addr;
	__u64 dtb_size;
};

#define GZVM_SET_DTB_CONFIG	_IOW(GZVM_IOC_MAGIC, 0xff, \
				     struct gzvm_dtb_config)

#endif /* __GZVM_H__ */
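The ioctl surface above mirrors the KVM API shape: a device node yields a VM fd, the VM fd yields vcpu fds, and vcpus are driven through a shared run structure. A hypothetical userspace sketch of that flow follows; the /dev/gzvm node name, header availability, and the call order are all assumptions modeled on KVM-style VMMs, not something the patch itself specifies.

/*
 * Hypothetical VMM skeleton driving the GZVM ioctls (illustration only;
 * assumes <linux/gzvm.h> is installed and /dev/gzvm exists).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/gzvm.h>

int main(void)
{
	int dev_fd = open("/dev/gzvm", O_RDWR);
	if (dev_fd < 0) { perror("open /dev/gzvm"); return 1; }

	int vm_fd = ioctl(dev_fd, GZVM_CREATE_VM, 0);
	if (vm_fd < 0) { perror("GZVM_CREATE_VM"); return 1; }

	/* Back 16 MiB of guest RAM with anonymous memory. */
	size_t ram_size = 16 << 20;
	void *ram = mmap(NULL, ram_size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct gzvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = 0x80000000,
		.memory_size = ram_size,
		.userspace_addr = (unsigned long)ram,
	};
	if (ioctl(vm_fd, GZVM_SET_USER_MEMORY_REGION, &region) < 0)
		perror("GZVM_SET_USER_MEMORY_REGION");

	int vcpu_fd = ioctl(vm_fd, GZVM_CREATE_VCPU, 0);
	if (vcpu_fd < 0) { perror("GZVM_CREATE_VCPU"); return 1; }

	/*
	 * A real VMM would now loop on GZVM_RUN and dispatch on
	 * gzvm_vcpu_run.exit_reason (GZVM_EXIT_MMIO,
	 * GZVM_EXIT_SYSTEM_EVENT, ...), the way a KVM VMM does.
	 */
	close(vcpu_fd);
	close(vm_fd);
	close(dev_fd);
	return 0;
}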
@@ -7,6 +7,7 @@
 #include "cgroup-internal.h"
 
 #include <trace/events/cgroup.h>
+#include <trace/hooks/dtask.h>
 
 /*
  * Propagate the cgroup frozen state upwards by the cgroup tree.
@@ -155,17 +156,21 @@ void cgroup_leave_frozen(bool always_leave)
 static void cgroup_freeze_task(struct task_struct *task, bool freeze)
 {
 	unsigned long flags;
+	bool wake = true;
 
 	/* If the task is about to die, don't bother with freezing it. */
 	if (!lock_task_sighand(task, &flags))
 		return;
 
+	trace_android_vh_freeze_whether_wake(task, &wake);
 	if (freeze) {
 		task->jobctl |= JOBCTL_TRAP_FREEZE;
-		signal_wake_up(task, false);
+		if (wake)
+			signal_wake_up(task, false);
 	} else {
 		task->jobctl &= ~JOBCTL_TRAP_FREEZE;
-		wake_up_process(task);
+		if (wake)
+			wake_up_process(task);
 	}
 
 	unlock_task_sighand(task, &flags);
@@ -73,6 +73,7 @@
 #include <asm/unistd.h>
 #include <asm/mmu_context.h>
 #include <trace/hooks/mm.h>
+#include <trace/hooks/dtask.h>
 
 /*
  * The default value should be high enough to not crash a system that randomly
@@ -827,6 +828,8 @@ void __noreturn do_exit(long code)
 	io_uring_files_cancel();
 	exit_signals(tsk);	/* sets PF_EXITING */
 
+	trace_android_vh_exit_check(current);
+
 	/* sync mm's RSS info before statistics gathering */
 	if (tsk->mm)
 		sync_mm_rss(tsk->mm);
@@ -74,7 +74,11 @@ bool __refrigerator(bool check_kthr_stop)
 	for (;;) {
 		bool freeze;
 
+		raw_spin_lock_irq(&current->pi_lock);
 		set_current_state(TASK_FROZEN);
+		/* unstale saved_state so that __thaw_task() will wake us up */
+		current->saved_state = TASK_RUNNING;
+		raw_spin_unlock_irq(&current->pi_lock);
 
 		spin_lock_irq(&freezer_lock);
 		freeze = freezing(current) && !(check_kthr_stop && kthread_should_stop());
@@ -133,6 +137,7 @@ static int __set_task_frozen(struct task_struct *p, void *arg)
 	WARN_ON_ONCE(debug_locks && p->lockdep_depth);
 #endif
 
+	p->saved_state = p->__state;
 	WRITE_ONCE(p->__state, TASK_FROZEN);
 	return TASK_FROZEN;
 }
@@ -174,42 +179,34 @@ bool freeze_task(struct task_struct *p)
 }
 
 /*
- * The special task states (TASK_STOPPED, TASK_TRACED) keep their canonical
- * state in p->jobctl. If either of them got a wakeup that was missed because
- * TASK_FROZEN, then their canonical state reflects that and the below will
- * refuse to restore the special state and instead issue the wakeup.
+ * Restore the saved_state before the task entered freezer. For typical task
+ * in the __refrigerator(), saved_state == TASK_RUNNING so nothing happens
+ * here. For tasks which were TASK_NORMAL | TASK_FREEZABLE, their initial state
+ * is restored unless they got an expected wakeup (see ttwu_state_match()).
+ * Returns 1 if the task state was restored.
  */
-static int __set_task_special(struct task_struct *p, void *arg)
+static int __restore_freezer_state(struct task_struct *p, void *arg)
 {
-	unsigned int state = 0;
+	unsigned int state = p->saved_state;
 
-	if (p->jobctl & JOBCTL_TRACED)
-		state = TASK_TRACED;
-
-	else if (p->jobctl & JOBCTL_STOPPED)
-		state = TASK_STOPPED;
-
-	if (state)
+	if (state != TASK_RUNNING) {
 		WRITE_ONCE(p->__state, state);
+		return 1;
+	}
 
-	return state;
+	return 0;
 }
 
 void __thaw_task(struct task_struct *p)
 {
-	unsigned long flags, flags2;
+	unsigned long flags;
 
 	spin_lock_irqsave(&freezer_lock, flags);
 	if (WARN_ON_ONCE(freezing(p)))
		goto unlock;
 
-	if (lock_task_sighand(p, &flags2)) {
-		/* TASK_FROZEN -> TASK_{STOPPED,TRACED} */
-		bool ret = task_call_func(p, __set_task_special, NULL);
-		unlock_task_sighand(p, &flags2);
-		if (ret)
-			goto unlock;
-	}
+	if (task_call_func(p, __restore_freezer_state, NULL))
+		goto unlock;
 
 	wake_up_state(p, TASK_FROZEN);
unlock:
@@ -1735,7 +1735,7 @@ static bool copy_data(struct prb_data_ring *data_ring,
 	if (!buf || !buf_size)
 		return true;
 
-	data_size = min_t(u16, buf_size, len);
+	data_size = min_t(unsigned int, buf_size, len);
 
 	memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
 	return true;
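The one-character-looking change matters because min_t() casts both operands to the named type before comparing, so a buffer size of 64 KiB or more wraps to a small u16 value and the copy length collapses. A standalone demonstration (illustration only, outside the kernel):

/* Why min_t(u16, ...) truncates: the cast wraps 65536 to 0. */
#include <stdint.h>
#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int buf_size = 65536;	/* > U16_MAX */
	unsigned int len = 200;

	printf("min_t(u16,  ...) = %u\n",
	       (unsigned int)min_t(uint16_t, buf_size, len));	/* 0 */
	printf("min_t(uint, ...) = %u\n",
	       min_t(unsigned int, buf_size, len));		/* 200 */
	return 0;
}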
@@ -1880,10 +1880,16 @@ static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
 #endif
 
 static int uclamp_validate(struct task_struct *p,
-			   const struct sched_attr *attr)
+			   const struct sched_attr *attr, bool user)
 {
 	int util_min = p->uclamp_req[UCLAMP_MIN].value;
 	int util_max = p->uclamp_req[UCLAMP_MAX].value;
+	bool done = false;
+	int ret = 0;
+
+	trace_android_vh_uclamp_validate(p, attr, user, &ret, &done);
+	if (done)
+		return ret;
 
 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
 		util_min = attr->sched_util_min;
@@ -1905,11 +1911,19 @@ static int uclamp_validate(struct task_struct *p,
 	/*
 	 * We have valid uclamp attributes; make sure uclamp is enabled.
 	 *
-	 * We need to do that here, because enabling static branches is a
-	 * blocking operation which obviously cannot be done while holding
+	 * We need to do that here, because enabling static branches is
+	 * a blocking operation which obviously cannot be done while holding
 	 * scheduler locks.
+	 *
+	 * We only enable the static key if this was initiated by a user space
+	 * request. There should be no in-kernel users of uclamp except to
+	 * implement things like inheritance like in binder. These in-kernel
+	 * callers can rightfully be called sometimes in in_atomic() context,
+	 * which is an invalid context to enable the key in. The enabling path
+	 * unconditionally holds the cpus_read_lock() which might_sleep().
 	 */
-	static_branch_enable(&sched_uclamp_used);
+	if (user)
+		static_branch_enable(&sched_uclamp_used);
 
 	return 0;
 }
@@ -2050,7 +2064,7 @@ static void __init init_uclamp(void)
 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
 static inline int uclamp_validate(struct task_struct *p,
-				  const struct sched_attr *attr)
+				  const struct sched_attr *attr, bool user)
 {
 	return -EOPNOTSUPP;
 }
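The path that reaches uclamp_validate() with user == true is a sched_setattr() call carrying util-clamp flags. A hedged userspace sketch of such a caller (the flag values follow the sched uapi; the raw syscall is used because glibc does not wrap sched_setattr()):

/* Request util clamps for the current task via sched_setattr(). */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SCHED_FLAG_UTIL_CLAMP_MIN	0x20
#define SCHED_FLAG_UTIL_CLAMP_MAX	0x40

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
	uint32_t sched_util_min;
	uint32_t sched_util_max;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy = 0;	/* SCHED_OTHER */
	attr.sched_flags = SCHED_FLAG_UTIL_CLAMP_MIN | SCHED_FLAG_UTIL_CLAMP_MAX;
	attr.sched_util_min = 128;	/* valid range is 0..1024 */
	attr.sched_util_max = 512;

	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		perror("sched_setattr");
	return 0;
}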
@@ -2235,6 +2249,149 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 }
 EXPORT_SYMBOL_GPL(check_preempt_curr);
 
+static __always_inline
+int __task_state_match(struct task_struct *p, unsigned int state)
+{
+	if (READ_ONCE(p->__state) & state)
+		return 1;
+
+	if (READ_ONCE(p->saved_state) & state)
+		return -1;
+
+	return 0;
+}
+
+static __always_inline
+int task_state_match(struct task_struct *p, unsigned int state)
+{
+	int match;
+
+	/*
+	 * Serialize against current_save_and_set_rtlock_wait_state(),
+	 * current_restore_rtlock_saved_state(), and __refrigerator().
+	 */
+	raw_spin_lock_irq(&p->pi_lock);
+	match = __task_state_match(p, state);
+	raw_spin_unlock_irq(&p->pi_lock);
+
+	return match;
+}
+
+/*
+ * wait_task_inactive - wait for a thread to unschedule.
+ *
+ * Wait for the thread to block in any of the states set in @match_state.
+ * If it changes, i.e. @p might have woken up, then return zero. When we
+ * succeed in waiting for @p to be off its CPU, we return a positive number
+ * (its total switch count). If a second call a short while later returns the
+ * same number, the caller can be sure that @p has remained unscheduled the
+ * whole time.
+ *
+ * The caller must ensure that the task *will* unschedule sometime soon,
+ * else this function might spin for a *long* time. This function can't
+ * be called with interrupts off, or it may introduce deadlock with
+ * smp_call_function() if an IPI is sent by the same process we are
+ * waiting to become inactive.
+ */
+unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
+{
+	int running, queued, match;
+	struct rq_flags rf;
+	unsigned long ncsw;
+	struct rq *rq;
+
+	for (;;) {
+		/*
+		 * We do the initial early heuristics without holding
+		 * any task-queue locks at all. We'll only try to get
+		 * the runqueue lock when things look like they will
+		 * work out!
+		 */
+		rq = task_rq(p);
+
+		/*
+		 * If the task is actively running on another CPU
+		 * still, just relax and busy-wait without holding
+		 * any locks.
+		 *
+		 * NOTE! Since we don't hold any locks, it's not
+		 * even sure that "rq" stays as the right runqueue!
+		 * But we don't care, since "task_on_cpu()" will
+		 * return false if the runqueue has changed and p
+		 * is actually now running somewhere else!
+		 */
+		while (task_on_cpu(rq, p)) {
+			if (!task_state_match(p, match_state))
+				return 0;
+			cpu_relax();
+		}
+
+		/*
+		 * Ok, time to look more closely! We need the rq
+		 * lock now, to be *sure*. If we're wrong, we'll
+		 * just go back and repeat.
+		 */
+		rq = task_rq_lock(p, &rf);
+		trace_sched_wait_task(p);
+		running = task_on_cpu(rq, p);
+		queued = task_on_rq_queued(p);
+		ncsw = 0;
+		if ((match = __task_state_match(p, match_state))) {
+			/*
+			 * When matching on p->saved_state, consider this task
+			 * still queued so it will wait.
+			 */
+			if (match < 0)
+				queued = 1;
+			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+		}
+		task_rq_unlock(rq, p, &rf);
+
+		/*
+		 * If it changed from the expected state, bail out now.
+		 */
+		if (unlikely(!ncsw))
+			break;
+
+		/*
+		 * Was it really running after all now that we
+		 * checked with the proper locks actually held?
+		 *
+		 * Oops. Go back and try again..
+		 */
+		if (unlikely(running)) {
+			cpu_relax();
+			continue;
+		}
+
+		/*
+		 * It's not enough that it's not actively running,
+		 * it must be off the runqueue _entirely_, and not
+		 * preempted!
+		 *
+		 * So if it was still runnable (but just not actively
+		 * running right now), it's preempted, and we should
+		 * yield - it could be a while.
+		 */
+		if (unlikely(queued)) {
+			ktime_t to = NSEC_PER_SEC / HZ;
+
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
+			continue;
+		}
+
+		/*
+		 * Ahh, all good. It wasn't running, and it wasn't
+		 * runnable, which means that it will never become
+		 * running in the future either. We're all done!
+		 */
+		break;
+	}
+
+	return ncsw;
+}
+
 #ifdef CONFIG_SMP
 
 static void
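The tri-state return of __task_state_match() is the whole trick: 1 means the live __state matched, -1 means only saved_state matched (a frozen or rtlock-blocked task that must still be treated as waiting), and 0 means the task woke to something else entirely. A small model (illustration only; the state values are stand-ins):

/* Model of the __task_state_match() contract. */
#include <stdio.h>

struct task { unsigned int state, saved_state; };

static int task_state_match(const struct task *p, unsigned int match)
{
	if (p->state & match)
		return 1;	/* live state matches */
	if (p->saved_state & match)
		return -1;	/* frozen/rtlock: match, keep waiting */
	return 0;		/* woke to something else: bail out */
}

int main(void)
{
	struct task frozen = {
		.state = 0x8000,	/* TASK_FROZEN */
		.saved_state = 0x0002,	/* TASK_UNINTERRUPTIBLE */
	};

	printf("match frozen bit      -> %d\n", task_state_match(&frozen, 0x8000));
	printf("match saved bit       -> %d\n", task_state_match(&frozen, 0x0002));
	printf("match unrelated state -> %d\n", task_state_match(&frozen, 0x0001));
	return 0;
}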
@@ -2588,6 +2745,7 @@ int push_cpu_stop(void *arg)
 	put_task_struct(p);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(push_cpu_stop);
 
 /*
  * sched_class::set_cpus_allowed must do the below, but is not required to
@@ -3348,114 +3506,6 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
 }
 EXPORT_SYMBOL_GPL(migrate_swap);
 
-/*
- * wait_task_inactive - wait for a thread to unschedule.
- *
- * Wait for the thread to block in any of the states set in @match_state.
- * If it changes, i.e. @p might have woken up, then return zero. When we
- * succeed in waiting for @p to be off its CPU, we return a positive number
- * (its total switch count). If a second call a short while later returns the
- * same number, the caller can be sure that @p has remained unscheduled the
- * whole time.
- *
- * The caller must ensure that the task *will* unschedule sometime soon,
- * else this function might spin for a *long* time. This function can't
- * be called with interrupts off, or it may introduce deadlock with
- * smp_call_function() if an IPI is sent by the same process we are
- * waiting to become inactive.
- */
-unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
-{
-	int running, queued;
-	struct rq_flags rf;
-	unsigned long ncsw;
-	struct rq *rq;
-
-	for (;;) {
-		/*
-		 * We do the initial early heuristics without holding
-		 * any task-queue locks at all. We'll only try to get
-		 * the runqueue lock when things look like they will
-		 * work out!
-		 */
-		rq = task_rq(p);
-
-		/*
-		 * If the task is actively running on another CPU
-		 * still, just relax and busy-wait without holding
-		 * any locks.
-		 *
-		 * NOTE! Since we don't hold any locks, it's not
-		 * even sure that "rq" stays as the right runqueue!
-		 * But we don't care, since "task_on_cpu()" will
-		 * return false if the runqueue has changed and p
-		 * is actually now running somewhere else!
-		 */
-		while (task_on_cpu(rq, p)) {
-			if (!(READ_ONCE(p->__state) & match_state))
-				return 0;
-			cpu_relax();
-		}
-
-		/*
-		 * Ok, time to look more closely! We need the rq
-		 * lock now, to be *sure*. If we're wrong, we'll
-		 * just go back and repeat.
-		 */
-		rq = task_rq_lock(p, &rf);
-		trace_sched_wait_task(p);
-		running = task_on_cpu(rq, p);
-		queued = task_on_rq_queued(p);
-		ncsw = 0;
-		if (READ_ONCE(p->__state) & match_state)
-			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-		task_rq_unlock(rq, p, &rf);
-
-		/*
-		 * If it changed from the expected state, bail out now.
-		 */
-		if (unlikely(!ncsw))
-			break;
-
-		/*
-		 * Was it really running after all now that we
-		 * checked with the proper locks actually held?
-		 *
-		 * Oops. Go back and try again..
-		 */
-		if (unlikely(running)) {
-			cpu_relax();
-			continue;
-		}
-
-		/*
-		 * It's not enough that it's not actively running,
-		 * it must be off the runqueue _entirely_, and not
-		 * preempted!
-		 *
-		 * So if it was still runnable (but just not actively
-		 * running right now), it's preempted, and we should
-		 * yield - it could be a while.
-		 */
-		if (unlikely(queued)) {
-			ktime_t to = NSEC_PER_SEC / HZ;
-
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
-			continue;
-		}
-
-		/*
-		 * Ahh, all good. It wasn't running, and it wasn't
-		 * runnable, which means that it will never become
-		 * running in the future either. We're all done!
-		 */
-		break;
-	}
-
-	return ncsw;
-}
-
 /***
  * kick_process - kick a running thread to enter/exit the kernel
  * @p: the to-be-kicked thread
@@ -3993,34 +4043,37 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 * The caller holds p::pi_lock if p != current or has preemption
 * disabled when p == current.
 *
- * The rules of PREEMPT_RT saved_state:
+ * The rules of saved_state:
 *
 * The related locking code always holds p::pi_lock when updating
 * p::saved_state, which means the code is fully serialized in both cases.
 *
- * The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
- * bits set. This allows to distinguish all wakeup scenarios.
+ * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
+ * No other bits set. This allows to distinguish all wakeup scenarios.
+ *
+ * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
+ * allows us to prevent early wakeup of tasks before they can be run on
+ * asymmetric ISA architectures (eg ARMv9).
 */
static __always_inline
bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
{
+	int match;
+
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
			     state != TASK_RTLOCK_WAIT);
	}
 
-	if (READ_ONCE(p->__state) & state) {
-		*success = 1;
-		return true;
-	}
+	*success = !!(match = __task_state_match(p, state));
 
#ifdef CONFIG_PREEMPT_RT
	/*
	 * Saved state preserves the task state across blocking on
-	 * an RT lock. If the state matches, set p::saved_state to
-	 * TASK_RUNNING, but do not wake the task because it waits
-	 * for a lock wakeup. Also indicate success because from
-	 * the regular waker's point of view this has succeeded.
+	 * an RT lock or TASK_FREEZABLE tasks. If the state matches,
+	 * set p::saved_state to TASK_RUNNING, but do not wake the task
+	 * because it waits for a lock wakeup or __thaw_task(). Also
+	 * indicate success because from the regular waker's point of
+	 * view this has succeeded.
	 *
	 * After acquiring the lock the task will restore p::__state
	 * from p::saved_state which ensures that the regular
@@ -4028,12 +4081,10 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
	 * p::saved_state to TASK_RUNNING so any further tests will
	 * not result in false positives vs. @success
	 */
-	if (p->saved_state & state) {
+	if (match < 0)
		p->saved_state = TASK_RUNNING;
-		*success = 1;
-	}
#endif
-	return false;
+
+	return match > 0;
}
 
/*
@ -4028,12 +4081,10 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
|
||||
* p::saved_state to TASK_RUNNING so any further tests will
|
||||
* not result in false positives vs. @success
|
||||
*/
|
||||
if (p->saved_state & state) {
|
||||
if (match < 0)
|
||||
p->saved_state = TASK_RUNNING;
|
||||
*success = 1;
|
||||
}
|
||||
#endif
|
||||
return false;
|
||||
|
||||
return match > 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -4455,6 +4506,7 @@ int wake_up_state(struct task_struct *p, unsigned int state)
 {
 	return try_to_wake_up(p, state, 0);
 }
+EXPORT_SYMBOL(wake_up_state);
 
 /*
  * Perform scheduler related setup for a newly forked process p.
@@ -5364,6 +5416,7 @@ unsigned int nr_running(void)
 
 	return sum;
 }
+EXPORT_SYMBOL(nr_running);
 
 /*
  * Check if only the current task is running on the CPU.
@@ -7646,7 +7699,7 @@ static int __sched_setscheduler(struct task_struct *p,
 
 	/* Update task specific "requested" clamps */
 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
-		retval = uclamp_validate(p, attr);
+		retval = uclamp_validate(p, attr, user);
 		if (retval)
 			return retval;
 	}
@@ -603,6 +603,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
 	if (policy->fast_switch_enabled)
 		return 0;
 
+	trace_android_vh_set_sugov_sched_attr(&attr);
 	kthread_init_work(&sg_policy->work, sugov_work);
 	kthread_init_worker(&sg_policy->worker);
 	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
@@ -3350,6 +3350,7 @@ void reweight_task(struct task_struct *p, int prio)
 	reweight_entity(cfs_rq, se, weight);
 	load->inv_weight = sched_prio_to_wmult[prio];
 }
+EXPORT_SYMBOL_GPL(reweight_task);
 
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 
@@ -4128,6 +4129,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	else
 		se->avg.load_sum = 1;
 
+	trace_android_rvh_attach_entity_load_avg(cfs_rq, se);
+
 	enqueue_load_avg(cfs_rq, se);
 	cfs_rq->avg.util_avg += se->avg.util_avg;
 	cfs_rq->avg.util_sum += se->avg.util_sum;
@@ -4151,6 +4154,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+	trace_android_rvh_detach_entity_load_avg(cfs_rq, se);
+
 	dequeue_load_avg(cfs_rq, se);
 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
 	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
@@ -4195,6 +4200,8 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	decayed = update_cfs_rq_load_avg(now, cfs_rq);
 	decayed |= propagate_entity_load_avg(se);
 
+	trace_android_rvh_update_load_avg(now, cfs_rq, se);
+
 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
 
 		/*
@@ -4252,6 +4259,8 @@ static void remove_entity_load_avg(struct sched_entity *se)
 
 	sync_entity_load_avg(se);
 
+	trace_android_rvh_remove_entity_load_avg(cfs_rq, se);
+
 	raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
 	++cfs_rq->removed.nr;
 	cfs_rq->removed.util_avg += se->avg.util_avg;
@@ -6129,6 +6138,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
 	int task_new = !(flags & ENQUEUE_WAKEUP);
+	int should_iowait_boost;
 
 	/*
 	 * The code below (indirectly) updates schedutil which looks at
@@ -6143,7 +6153,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	 * utilization updates, so do it here explicitly with the IOWAIT flag
 	 * passed.
 	 */
-	if (p->in_iowait)
+	should_iowait_boost = p->in_iowait;
+	trace_android_rvh_set_iowait(p, rq, &should_iowait_boost);
+	if (should_iowait_boost)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
 	for_each_sched_entity(se) {
@@ -8752,6 +8764,8 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
 	bool decayed = false;
 	int cpu = cpu_of(rq);
 
+	trace_android_rvh_update_blocked_fair(rq);
+
 	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
@@ -176,7 +176,7 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
  * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
  *          = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
  */
-static __always_inline int
+int
 ___update_load_sum(u64 now, struct sched_avg *sa,
 		  unsigned long load, unsigned long runnable, int running)
 {
@@ -228,6 +228,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
 
 	return 1;
 }
+EXPORT_SYMBOL_GPL(___update_load_sum);
 
 /*
  * When syncing *_avg with *_sum, we must take into account the current
@@ -253,7 +254,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
  * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
 * if it's more convenient.
 */
-static __always_inline void
+void
 ___update_load_avg(struct sched_avg *sa, unsigned long load)
 {
 	u32 divider = get_pelt_divider(sa);
@@ -265,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
 	sa->runnable_avg = div_u64(sa->runnable_sum, divider);
 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
 }
+EXPORT_SYMBOL_GPL(___update_load_avg);
 
 /*
  * sched_entity:
@@ -1848,6 +1848,7 @@ static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool f
 	 */
 	if (rq->curr->sched_class != &rt_sched_class)
 		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+	trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 0);
 
 	rt_queue_push_tasks(rq);
 }
@@ -1918,6 +1919,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	update_curr_rt(rq);
 
 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
+	trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1);
 
 	/*
 	 * The previous task needs to be made eligible for pushing
@@ -2737,6 +2739,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 
 	update_curr_rt(rq);
 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
+	trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1);
 
 	watchdog(rq, p);
@@ -99,3 +99,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_irqtime_account_process_tick);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_post_init_entity_util_avg);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_effective_cpu_util);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmput);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_uclamp_validate);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_sugov_sched_attr);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_iowait);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_attach_entity_load_avg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_detach_entity_load_avg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_avg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_remove_entity_load_avg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_blocked_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_rt_rq_load_avg);
@@ -45,6 +45,7 @@
 #include <linux/posix-timers.h>
 #include <linux/cgroup.h>
 #include <linux/audit.h>
+#include <linux/oom.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/signal.h>
@@ -58,6 +59,7 @@
 
 #undef CREATE_TRACE_POINTS
 #include <trace/hooks/signal.h>
+#include <trace/hooks/dtask.h>
 /*
  * SLAB caches for signal bits.
 */
@@ -1001,6 +1003,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 {
 	struct signal_struct *signal = p->signal;
 	struct task_struct *t;
+	bool wake;
 
 	/*
	 * Now find a thread we can wake up to take the signal off the queue.
@@ -1060,7 +1063,10 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 			trace_android_vh_exit_signal(t);
 			task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
 			sigaddset(&t->pending.signal, SIGKILL);
-			signal_wake_up(t, 1);
+			wake = true;
+			trace_android_vh_exit_signal_whether_wake(t, &wake);
+			if (wake)
+				signal_wake_up(t, 1);
 		} while_each_thread(p, t);
 		return;
 	}
@@ -1443,8 +1449,16 @@ int group_send_sig_info(int sig, struct kernel_siginfo *info,
 	ret = check_kill_permission(sig, info, p);
 	rcu_read_unlock();
 
-	if (!ret && sig)
+	if (!ret && sig) {
 		ret = do_send_sig_info(sig, info, p, type);
+		if (!ret && sig == SIGKILL) {
+			bool reap = false;
+
+			trace_android_vh_killed_process(current, p, &reap);
+			if (reap)
+				add_to_oom_reaper(p);
+		}
+	}
 
 	return ret;
 }
@@ -745,6 +745,19 @@ static inline void queue_oom_reaper(struct task_struct *tsk)
 }
 #endif /* CONFIG_MMU */
 
+/**
+ * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
+ * under task_lock or operate on the current).
+ */
+static void __mark_oom_victim(struct task_struct *tsk)
+{
+	struct mm_struct *mm = tsk->mm;
+
+	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
+		mmgrab(tsk->signal->oom_mm);
+	}
+}
+
 /**
  * mark_oom_victim - mark the given task as OOM victim
  * @tsk: task to mark
@@ -757,16 +770,13 @@ static inline void queue_oom_reaper(struct task_struct *tsk)
  */
 static void mark_oom_victim(struct task_struct *tsk)
 {
-	struct mm_struct *mm = tsk->mm;
-
 	WARN_ON(oom_killer_disabled);
 	/* OOM killer might race with memcg OOM */
 	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
 		return;
 
-	/* oom_mm is bound to the signal struct life time. */
-	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
-		mmgrab(tsk->signal->oom_mm);
+	__mark_oom_victim(tsk);
 
 	/*
 	 * Make sure that the task is woken up from uninterruptible sleep
@@ -1260,3 +1270,16 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
 	return -ENOSYS;
 #endif /* CONFIG_MMU */
 }
+
+void add_to_oom_reaper(struct task_struct *p)
+{
+	p = find_lock_task_mm(p);
+	if (!p)
+		return;
+
+	if (task_will_free_mem(p)) {
+		__mark_oom_victim(p);
+		queue_oom_reaper(p);
+	}
+	task_unlock(p);
+}
@@ -36,6 +36,7 @@ struct page_owner {
 
 static bool page_owner_enabled __initdata;
 DEFINE_STATIC_KEY_FALSE(page_owner_inited);
+EXPORT_SYMBOL_GPL(page_owner_inited);
 
 static depot_stack_handle_t dummy_handle;
 static depot_stack_handle_t failure_handle;
@@ -214,6 +215,7 @@ noinline void __set_page_owner(struct page *page, unsigned short order,
 	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
 	page_ext_put(page_ext);
 }
+EXPORT_SYMBOL_GPL(__set_page_owner);
 
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
@@ -2966,6 +2966,8 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 		goto out;
 	}
 
+	trace_android_vh_tune_swappiness(&swappiness);
+
 	/*
 	 * Global reclaim will swap to prevent OOM even with no
 	 * swappiness, but memcg users want to use this knob to
@@ -3219,6 +3221,7 @@ static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
 
 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
 {
+	int swappiness;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 
@@ -3229,7 +3232,10 @@ static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
 	    mem_cgroup_get_nr_swap_pages(memcg) <= 0)
 		return 0;
 
-	return mem_cgroup_swappiness(memcg);
+	swappiness = mem_cgroup_swappiness(memcg);
+	trace_android_vh_tune_swappiness(&swappiness);
+
+	return swappiness;
 }
 
 static int get_nr_gens(struct lruvec *lruvec, int type)
@@ -246,6 +246,7 @@ struct zs_pool {
 	struct work_struct free_work;
 #endif
 	spinlock_t lock;
+	atomic_t compaction_in_progress;
 };
 
 struct zspage {
@@ -2100,6 +2101,15 @@ unsigned long zs_compact(struct zs_pool *pool)
 	struct size_class *class;
 	unsigned long pages_freed = 0;
 
+	/*
+	 * Pool compaction is performed under pool->lock so it is basically
+	 * single-threaded. Having more than one thread in __zs_compact()
+	 * will increase pool->lock contention, which will impact other
+	 * zsmalloc operations that need pool->lock.
+	 */
+	if (atomic_xchg(&pool->compaction_in_progress, 1))
+		return 0;
+
 	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
 		class = pool->size_class[i];
 		if (class->index != i)
@@ -2107,6 +2117,7 @@ unsigned long zs_compact(struct zs_pool *pool)
 		pages_freed += __zs_compact(pool, class);
 	}
 	atomic_long_add(pages_freed, &pool->stats.pages_compacted);
+	atomic_set(&pool->compaction_in_progress, 0);
 
 	return pages_freed;
 }
@@ -2193,6 +2204,7 @@ struct zs_pool *zs_create_pool(const char *name)
 
 	init_deferred_free(pool);
 	spin_lock_init(&pool->lock);
+	atomic_set(&pool->compaction_in_progress, 0);
 
 	pool->name = kstrdup(name, GFP_KERNEL);
 	if (!pool->name)
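The atomic_xchg() guard is a classic single-flight pattern: the first caller swaps the flag from 0 to 1 and does the work; every concurrent caller sees the old value 1 and returns immediately instead of contending on pool->lock. A userspace sketch of the same idiom (illustration only; the freed-page count is a stand-in for the real work):

/* Single-flight guard via atomic exchange, as in zs_compact(). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int compaction_in_progress;

static unsigned long compact(void)
{
	if (atomic_exchange(&compaction_in_progress, 1))
		return 0;	/* someone else is already compacting */

	unsigned long pages_freed = 42;	/* stand-in for __zs_compact() */

	atomic_store(&compaction_in_progress, 0);
	return pages_freed;
}

int main(void)
{
	printf("first caller frees %lu pages\n", compact());
	return 0;
}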
modules.bzl (55 lines changed)

@@ -6,7 +6,7 @@ This module contains a full list of kernel modules
 compiled by GKI.
 """
 
-COMMON_GKI_MODULES_LIST = [
+_COMMON_GKI_MODULES_LIST = [
     # keep sorted
     "drivers/block/zram/zram.ko",
     "drivers/bluetooth/btbcm.ko",
@@ -68,3 +68,56 @@ COMMON_GKI_MODULES_LIST = [
     "net/tipc/diag.ko",
     "net/tipc/tipc.ko",
 ]
+
+# Deprecated - Use `get_gki_modules_list` function instead.
+COMMON_GKI_MODULES_LIST = _COMMON_GKI_MODULES_LIST
+
+_ARM_GKI_MODULES_LIST = [
+    # keep sorted
+]
+
+_ARM64_GKI_MODULES_LIST = [
+    # keep sorted
+    "arch/arm64/geniezone/gzvm.ko",
+]
+
+_RISCV64_GKI_MODULES_LIST = [
+    # keep sorted
+]
+
+_X86_GKI_MODULES_LIST = [
+    # keep sorted
+]
+
+_X86_64_GKI_MODULES_LIST = [
+    # keep sorted
+]
+
+# buildifier: disable=unnamed-macro
+def get_gki_modules_list(arch = None):
+    """ Provides the list of GKI modules.
+
+    Args:
+      arch: One of [arm, arm64, i386, x86_64, riscv64].
+
+    Returns:
+      The list of GKI modules for the given |arch|.
+    """
+    gki_modules_list = [] + _COMMON_GKI_MODULES_LIST
+    if arch == "arm":
+        gki_modules_list += _ARM_GKI_MODULES_LIST
+    elif arch == "arm64":
+        gki_modules_list += _ARM64_GKI_MODULES_LIST
+    elif arch == "i386":
+        gki_modules_list += _X86_GKI_MODULES_LIST
+    elif arch == "x86_64":
+        gki_modules_list += _X86_64_GKI_MODULES_LIST
+    elif arch == "riscv64":
+        gki_modules_list += _RISCV64_GKI_MODULES_LIST
+    else:
+        fail("{}: arch {} not supported. Use one of [arm, arm64, i386, x86_64, riscv64]".format(
+            str(native.package_relative_label(":x")).removesuffix(":x"),
+            arch,
+        ))
+
+    return gki_modules_list
@@ -46,6 +46,7 @@ static const struct proto_ops l2cap_sock_ops;
 static void l2cap_sock_init(struct sock *sk, struct sock *parent);
 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
 				     int proto, gfp_t prio, int kern);
+static void l2cap_sock_cleanup_listen(struct sock *parent);
 
 bool l2cap_is_socket(struct socket *sock)
 {
@@ -1415,6 +1416,7 @@ static int l2cap_sock_release(struct socket *sock)
 	if (!sk)
 		return 0;
 
+	l2cap_sock_cleanup_listen(sk);
 	bt_sock_unlink(&l2cap_sk_list, sk);
 
 	err = l2cap_sock_shutdown(sock, SHUT_RDWR);
@@ -60,3 +60,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll);
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_send_reset);
 EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_bad_csum);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(net_dev_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(net_dev_xmit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(netif_receive_skb);
+EXPORT_TRACEPOINT_SYMBOL_GPL(netif_rx);
@@ -3533,8 +3533,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
 			NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
 			return PTR_ERR(chain);
 		}
-		if (nft_chain_is_bound(chain))
-			return -EOPNOTSUPP;
 
 	} else if (nla[NFTA_RULE_CHAIN_ID]) {
 		chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID]);
@@ -3546,6 +3544,9 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
 		return -EINVAL;
 	}
 
+	if (nft_chain_is_bound(chain))
+		return -EOPNOTSUPP;
+
 	if (nla[NFTA_RULE_HANDLE]) {
 		handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
 		rule = __nft_rule_lookup(chain, handle);
@@ -6687,6 +6688,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx,
 		ret = __nft_set_catchall_flush(ctx, set, &elem);
 		if (ret < 0)
 			break;
+		nft_set_elem_change_active(ctx->net, set, ext);
 	}
 
 	return ret;
@@ -460,7 +460,6 @@ __build_packet_message(struct nfnl_log_net *log,
 	sk_buff_data_t old_tail = inst->skb->tail;
 	struct sock *sk;
 	const unsigned char *hwhdrp;
-	ktime_t tstamp;
 
 	nlh = nfnl_msg_put(inst->skb, 0, 0,
 			   nfnl_msg_type(NFNL_SUBSYS_ULOG, NFULNL_MSG_PACKET),
@@ -589,10 +588,9 @@ __build_packet_message(struct nfnl_log_net *log,
 		goto nla_put_failure;
 	}
 
-	tstamp = skb_tstamp_cond(skb, false);
-	if (hooknum <= NF_INET_FORWARD && tstamp) {
+	if (hooknum <= NF_INET_FORWARD) {
+		struct timespec64 kts = ktime_to_timespec64(skb_tstamp_cond(skb, true));
 		struct nfulnl_msg_packet_timestamp ts;
-		struct timespec64 kts = ktime_to_timespec64(tstamp);
 		ts.sec = cpu_to_be64(kts.tv_sec);
 		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
@@ -201,7 +201,6 @@ void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s);
 void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s);
 void nfc_llcp_socket_remote_param_init(struct nfc_llcp_sock *sock);
 struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
-struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local);
 int nfc_llcp_local_put(struct nfc_llcp_local *local);
 u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
 			 struct nfc_llcp_sock *sock);
|
@ -359,6 +359,7 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
|
||||
struct sk_buff *skb;
|
||||
struct nfc_llcp_local *local;
|
||||
u16 size = 0;
|
||||
int err;
|
||||
|
||||
local = nfc_llcp_find_local(dev);
|
||||
if (local == NULL)
|
||||
@ -368,8 +369,10 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
|
||||
size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
|
||||
|
||||
skb = alloc_skb(size, GFP_KERNEL);
|
||||
if (skb == NULL)
|
||||
return -ENOMEM;
|
||||
if (skb == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
|
||||
|
||||
@ -379,8 +382,11 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
|
||||
|
||||
nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX);
|
||||
|
||||
return nfc_data_exchange(dev, local->target_idx, skb,
|
||||
err = nfc_data_exchange(dev, local->target_idx, skb,
|
||||
nfc_llcp_recv, local);
|
||||
out:
|
||||
nfc_llcp_local_put(local);
|
||||
return err;
|
||||
}
|
||||
|
||||
int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
|
||||
|
@@ -17,6 +17,8 @@
 static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
 
 static LIST_HEAD(llcp_devices);
+/* Protects llcp_devices list */
+static DEFINE_SPINLOCK(llcp_devices_lock);
 
 static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb);
 
@@ -141,7 +143,7 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device,
 	write_unlock(&local->raw_sockets.lock);
 }
 
-struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
+static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
 {
 	kref_get(&local->ref);
 
@@ -169,7 +171,6 @@ static void local_release(struct kref *ref)
 
 	local = container_of(ref, struct nfc_llcp_local, ref);
 
-	list_del(&local->list);
 	local_cleanup(local);
 	kfree(local);
 }
@@ -282,12 +283,33 @@ static void nfc_llcp_sdreq_timer(struct timer_list *t)
 struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
 {
 	struct nfc_llcp_local *local;
+	struct nfc_llcp_local *res = NULL;
 
+	spin_lock(&llcp_devices_lock);
 	list_for_each_entry(local, &llcp_devices, list)
-		if (local->dev == dev)
-			return local;
+		if (local->dev == dev) {
+			res = nfc_llcp_local_get(local);
+			break;
+		}
+	spin_unlock(&llcp_devices_lock);
 
-	pr_debug("No device found\n");
+	return res;
+}
+
+static struct nfc_llcp_local *nfc_llcp_remove_local(struct nfc_dev *dev)
+{
+	struct nfc_llcp_local *local, *tmp;
+
+	spin_lock(&llcp_devices_lock);
+	list_for_each_entry_safe(local, tmp, &llcp_devices, list)
+		if (local->dev == dev) {
+			list_del(&local->list);
+			spin_unlock(&llcp_devices_lock);
+			return local;
+		}
+	spin_unlock(&llcp_devices_lock);
+
+	pr_warn("Shutting down device not found\n");
 
 	return NULL;
 }
@@ -608,12 +630,15 @@ u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
 
 	*general_bytes_len = local->gb_len;
 
+	nfc_llcp_local_put(local);
+
 	return local->gb;
 }
 
 int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len)
 {
 	struct nfc_llcp_local *local;
+	int err;
 
 	if (gb_len < 3 || gb_len > NFC_MAX_GT_LEN)
 		return -EINVAL;
@@ -630,12 +655,16 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len)
 
 	if (memcmp(local->remote_gb, llcp_magic, 3)) {
 		pr_err("MAC does not support LLCP\n");
-		return -EINVAL;
+		err = -EINVAL;
+		goto out;
 	}
 
-	return nfc_llcp_parse_gb_tlv(local,
-				     &local->remote_gb[3],
-				     local->remote_gb_len - 3);
+	err = nfc_llcp_parse_gb_tlv(local,
+				    &local->remote_gb[3],
+				    local->remote_gb_len - 3);
+out:
+	nfc_llcp_local_put(local);
+	return err;
 }
 
 static u8 nfc_llcp_dsap(const struct sk_buff *pdu)
@@ -1517,6 +1546,8 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
 
 	__nfc_llcp_recv(local, skb);
 
+	nfc_llcp_local_put(local);
+
 	return 0;
 }
@@ -1533,6 +1564,8 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
 
 	/* Close and purge all existing sockets */
 	nfc_llcp_socket_release(local, true, 0);
+
+	nfc_llcp_local_put(local);
 }
 
 void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
@@ -1558,6 +1591,8 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
 		mod_timer(&local->link_timer,
 			  jiffies + msecs_to_jiffies(local->remote_lto));
 	}
+
+	nfc_llcp_local_put(local);
 }
 
 int nfc_llcp_register_device(struct nfc_dev *ndev)
@@ -1608,7 +1643,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
 
 void nfc_llcp_unregister_device(struct nfc_dev *dev)
 {
-	struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
+	struct nfc_llcp_local *local = nfc_llcp_remove_local(dev);
 
 	if (local == NULL) {
 		pr_debug("No such device\n");
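The rule this use-after-free fix enforces is that a lookup must take its reference while still holding the lock that keeps the object on the list, and every caller must pair the lookup with a put. A compact model of the idiom (plain C with stdatomic instead of kref, list locking elided for brevity; illustration only):

/* Find-and-get under one lookup, paired with put, as in the LLCP fix. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct local {
	atomic_int ref;
	int dev_id;
};

static struct local *table[4];

/* Take the reference inside the lookup so the object cannot vanish. */
static struct local *find_local(int dev_id)
{
	for (int i = 0; i < 4; i++) {
		struct local *l = table[i];
		if (l && l->dev_id == dev_id) {
			atomic_fetch_add(&l->ref, 1);	/* nfc_llcp_local_get() */
			return l;
		}
	}
	return NULL;
}

static void put_local(struct local *l)
{
	if (atomic_fetch_sub(&l->ref, 1) == 1)		/* nfc_llcp_local_put() */
		free(l);
}

int main(void)
{
	struct local *l = calloc(1, sizeof(*l));
	atomic_init(&l->ref, 1);	/* the device list's reference */
	l->dev_id = 7;
	table[0] = l;

	struct local *found = find_local(7);
	if (found) {
		printf("dev %d, refcount now %d\n",
		       found->dev_id, atomic_load(&found->ref));
		put_local(found);	/* drop the lookup reference */
	}
	table[0] = NULL;
	put_local(l);			/* drop the list reference; frees */
	return 0;
}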
@ -99,7 +99,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
|
||||
}
|
||||
|
||||
llcp_sock->dev = dev;
|
||||
llcp_sock->local = nfc_llcp_local_get(local);
|
||||
llcp_sock->local = local;
|
||||
llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
|
||||
llcp_sock->service_name_len = min_t(unsigned int,
|
||||
llcp_addr.service_name_len,
|
||||
@ -186,7 +186,7 @@ static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr,
|
||||
}
|
||||
|
||||
llcp_sock->dev = dev;
|
||||
llcp_sock->local = nfc_llcp_local_get(local);
|
||||
llcp_sock->local = local;
|
||||
llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
|
||||
|
||||
nfc_llcp_sock_link(&local->raw_sockets, sk);
|
||||
@@ -696,22 +696,22 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
 	if (dev->dep_link_up == false) {
 		ret = -ENOLINK;
 		device_unlock(&dev->dev);
-		goto put_dev;
+		goto sock_llcp_put_local;
 	}
 	device_unlock(&dev->dev);
 
 	if (local->rf_mode == NFC_RF_INITIATOR &&
 	    addr->target_idx != local->target_idx) {
 		ret = -ENOLINK;
-		goto put_dev;
+		goto sock_llcp_put_local;
 	}
 
 	llcp_sock->dev = dev;
-	llcp_sock->local = nfc_llcp_local_get(local);
+	llcp_sock->local = local;
 	llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
 	if (llcp_sock->ssap == LLCP_SAP_MAX) {
 		ret = -ENOMEM;
-		goto sock_llcp_put_local;
+		goto sock_llcp_nullify;
 	}
 
 	llcp_sock->reserved_ssap = llcp_sock->ssap;
@@ -757,11 +757,13 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
 sock_llcp_release:
 	nfc_llcp_put_ssap(local, llcp_sock->ssap);
 
-sock_llcp_put_local:
-	nfc_llcp_local_put(llcp_sock->local);
+sock_llcp_nullify:
 	llcp_sock->local = NULL;
 	llcp_sock->dev = NULL;
 
+sock_llcp_put_local:
+	nfc_llcp_local_put(local);
+
 put_dev:
 	nfc_put_device(dev);
 
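The connect() hunks re-point the early -ENOLINK failures at a label that drops the lookup reference, and split the old sock_llcp_put_local label into nullify-then-put so teardown runs in reverse order of setup. A sketch continuing the ones above (reserve_ssap() is a stand-in for nfc_llcp_get_local_ssap(); all names are illustrative):

static int reserve_ssap(struct local *l)
{
	(void)l;
	return 0;	/* pretend a slot was available */
}

static int connect_sock(struct sock_priv *sp, int dev_id, int link_up)
{
	struct local *l = find_local(dev_id);	/* +1 reference */
	int ret;

	if (!l)
		return -1;

	if (!link_up) {
		ret = -1;
		goto put_local;		/* nothing assigned yet, just drop the ref */
	}

	sp->local = l;			/* reference transferred to the socket */

	if (reserve_ssap(l) < 0) {
		ret = -1;
		goto nullify;		/* undo the assignment before the put */
	}
	return 0;

nullify:
	sp->local = NULL;
put_local:
	if (--l->refcnt == 0)
		free(l);
	return ret;
}

Each failure jumps to the deepest label whose resources were already set up, which is exactly what the label split in the hunk buys.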
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
@@ -1039,11 +1039,14 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg) {
 		rc = -ENOMEM;
-		goto exit;
+		goto put_local;
 	}
 
 	rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq);
 
+put_local:
+	nfc_llcp_local_put(local);
+
 exit:
 	device_unlock(&dev->dev);
 
@@ -1105,7 +1108,7 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
 	if (info->attrs[NFC_ATTR_LLC_PARAM_LTO]) {
 		if (dev->dep_link_up) {
 			rc = -EINPROGRESS;
-			goto exit;
+			goto put_local;
 		}
 
 		local->lto = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_LTO]);
@@ -1117,6 +1120,9 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
 	if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX])
 		local->miux = cpu_to_be16(miux);
 
+put_local:
+	nfc_llcp_local_put(local);
+
 exit:
 	device_unlock(&dev->dev);
 
@@ -1172,7 +1178,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
 
 	if (rc != 0) {
 		rc = -EINVAL;
-		goto exit;
+		goto put_local;
 	}
 
 	if (!sdp_attrs[NFC_SDP_ATTR_URI])
@@ -1191,7 +1197,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
 		sdreq = nfc_llcp_build_sdreq_tlv(tid, uri, uri_len);
 		if (sdreq == NULL) {
 			rc = -ENOMEM;
-			goto exit;
+			goto put_local;
 		}
 
 		tlvs_len += sdreq->tlv_len;
@@ -1201,10 +1207,14 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
 
 	if (hlist_empty(&sdreq_list)) {
 		rc = -EINVAL;
-		goto exit;
+		goto put_local;
 	}
 
 	rc = nfc_llcp_send_snl_sdreq(local, &sdreq_list, tlvs_len);
 
+put_local:
+	nfc_llcp_local_put(local);
+
 exit:
 	device_unlock(&dev->dev);
 
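All three netlink hunks converge on the same two-level unwind: put_local drops the reference returned by the lookup, exit undoes the device lock taken before it. A sketch continuing the ones above (device_lock()/do_request() are stand-ins for device_lock(&dev->dev) and the genl request body):

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

static void device_lock(void)   { pthread_mutex_lock(&dev_mutex); }
static void device_unlock(void) { pthread_mutex_unlock(&dev_mutex); }

static int do_request(struct local *l)
{
	(void)l;
	return 0;
}

static int genl_op(int dev_id)
{
	int rc = 0;
	struct local *l;

	device_lock();
	l = find_local(dev_id);
	if (!l) {
		rc = -1;
		goto exit;		/* no reference taken, skip the put */
	}

	if (do_request(l) < 0) {
		rc = -1;
		goto put_local;		/* was: goto exit, leaking the reference */
	}

put_local:
	if (--l->refcnt == 0)
		free(l);
exit:
	device_unlock();
	return rc;
}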
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
@@ -52,6 +52,7 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len);
 u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
 int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
 struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
+int nfc_llcp_local_put(struct nfc_llcp_local *local);
 int __init nfc_llcp_init(void);
 void nfc_llcp_exit(void);
 void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
@@ -265,7 +265,6 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
 		return -ENOBUFS;
 
 	fnew->id = f->id;
-	fnew->res = f->res;
 	fnew->ifindex = f->ifindex;
 	fnew->tp = f->tp;
 
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
@@ -511,7 +511,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
 	if (fold) {
 		f->id = fold->id;
 		f->iif = fold->iif;
-		f->res = fold->res;
 		f->handle = fold->handle;
 
 		f->tp = fold->tp;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
@@ -813,7 +813,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
 
 	new->ifindex = n->ifindex;
 	new->fshift = n->fshift;
-	new->res = n->res;
 	new->flags = n->flags;
 	RCU_INIT_POINTER(new->ht_down, ht);
 
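The three one-line removals stop copying the old filter's result into its replacement on update. The copied struct carries a pointer the old filter is bound to; duplicating it without taking a new binding leaves the replacement pointing at memory the old filter's teardown can free. A self-contained sketch of that bug class (illustrative types only, not the real tcf_result):

#include <stdlib.h>
#include <string.h>

struct class_ref { int bound; };

struct result {
	struct class_ref *class;	/* owned via a bind count */
};

struct filter {
	struct result res;
};

static void unbind(struct filter *f)
{
	if (f->res.class && --f->res.class->bound == 0)
		free(f->res.class);
	f->res.class = NULL;
}

int main(void)
{
	struct filter old = { .res.class = calloc(1, sizeof(struct class_ref)) };
	struct filter new_f;

	old.res.class->bound = 1;

	memset(&new_f, 0, sizeof(new_f));
	/* new_f.res = old.res;  <-- the removed copy: new_f would point at
	 * old's class without bumping ->bound */
	unbind(&old);		/* frees the class; a copied pointer in
				 * new_f would now be dangling */
	unbind(&new_f);		/* safe: class is NULL */
	return 0;
}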
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
@@ -2290,6 +2290,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
 
 	if (false) {
 alloc_skb:
+		spin_unlock(&other->sk_receive_queue.lock);
 		unix_state_unlock(other);
 		mutex_unlock(&unix_sk(other)->iolock);
 		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
@@ -2329,6 +2330,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
 		init_scm = false;
 	}
 
+	spin_lock(&other->sk_receive_queue.lock);
 	skb = skb_peek_tail(&other->sk_receive_queue);
 	if (tail && tail == skb) {
 		skb = newskb;
@@ -2359,14 +2361,11 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
 	refcount_add(size, &sk->sk_wmem_alloc);
 
 	if (newskb) {
-		err = unix_scm_to_skb(&scm, skb, false);
-		if (err)
-			goto err_state_unlock;
-		spin_lock(&other->sk_receive_queue.lock);
+		unix_scm_to_skb(&scm, skb, false);
 		__skb_queue_tail(&other->sk_receive_queue, newskb);
-		spin_unlock(&other->sk_receive_queue.lock);
 	}
 
+	spin_unlock(&other->sk_receive_queue.lock);
 	unix_state_unlock(other);
 	mutex_unlock(&unix_sk(other)->iolock);
 
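The af_unix hunks widen the sk_receive_queue.lock critical section: the lock is now taken before skb_peek_tail() and held until the new skb is queued, so the peeked tail cannot be freed by a concurrent reader in between. A self-contained sketch of that peek-then-link pattern under one lock (hypothetical queue types, not the kernel's skb lists):

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; int len; };

struct queue {
	pthread_mutex_t lock;
	struct node *head, *tail;
};

static void append_or_merge(struct queue *q, struct node *n)
{
	pthread_mutex_lock(&q->lock);		/* taken before the peek */
	if (q->tail && q->tail->len < 16) {
		q->tail->len += n->len;		/* merge into the peeked tail */
		free(n);
	} else {
		n->next = NULL;
		if (q->tail)
			q->tail->next = n;
		else
			q->head = n;
		q->tail = n;
	}
	pthread_mutex_unlock(&q->lock);		/* released only after linking */
}

int main(void)
{
	struct queue q;
	struct node *n = calloc(1, sizeof(*n));

	pthread_mutex_init(&q.lock, NULL);
	q.head = q.tail = NULL;
	n->len = 8;
	append_or_merge(&q, n);
	free(q.head);
	return 0;
}

Dropping and re-taking the lock between the peek and the append, as the removed lines did, is what opened the use-after-free window.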
diff --git a/tools/testing/selftests/filesystems/fuse/fuse_test.c b/tools/testing/selftests/filesystems/fuse/fuse_test.c
@@ -2047,6 +2047,38 @@ static int bpf_test_create_and_remove_bpf(const char *mount_dir)
 	return result;
 }
 
+static int bpf_test_mkdir_and_remove_bpf(const char *mount_dir)
+{
+	const char *dir = "dir";
+
+	int result = TEST_FAILURE;
+	int src_fd = -1;
+	int bpf_fd = -1;
+	int fuse_dev = -1;
+	int fd = -1;
+	int fd2 = -1;
+
+	TEST(src_fd = open(ft_src, O_DIRECTORY | O_RDONLY | O_CLOEXEC),
+	     src_fd != -1);
+	TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_mkdir_remove", &bpf_fd,
+				  NULL, NULL), 0);
+	TESTEQUAL(mount_fuse_no_init(mount_dir, bpf_fd, src_fd, &fuse_dev), 0);
+	TEST(fd = s_mkdir(s_path(s(mount_dir), s(dir)), 0777),
+	     fd != -1);
+	TEST(fd2 = s_open(s_path(s(mount_dir), s(dir)), O_RDONLY),
+	     fd2 != -1);
+
+	result = TEST_SUCCESS;
+out:
+	close(fd2);
+	close(fd);
+	close(fuse_dev);
+	close(bpf_fd);
+	close(src_fd);
+	umount(mount_dir);
+	return result;
+}
+
 static void parse_range(const char *ranges, bool *run_test, size_t tests)
 {
 	size_t i;
@@ -2175,6 +2207,7 @@ int main(int argc, char *argv[])
 		MAKE_TEST(bpf_test_lookup_postfilter),
 		MAKE_TEST(flock_test),
 		MAKE_TEST(bpf_test_create_and_remove_bpf),
+		MAKE_TEST(bpf_test_mkdir_and_remove_bpf),
 	};
 #undef MAKE_TEST
 
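The new selftest exercises the fuse-bpf mkdir fix by creating a directory through the mount and immediately opening it, which fails if mkdir wired up the wrong inode. It leans on the suite's TEST()/TESTEQUAL() idiom: evaluate, check, and jump to the shared out: cleanup label on failure. A minimal sketch of that idiom with simplified stand-in macros (not the suite's real definitions):

#include <stdio.h>

#define TEST_FAILURE 1
#define TEST_SUCCESS 0

#define TEST(stmt, cond)						\
	do {								\
		stmt;							\
		if (!(cond)) {						\
			fprintf(stderr, "FAIL: %s\n", #cond);		\
			goto out;					\
		}							\
	} while (0)

#define TESTEQUAL(expr, val) TEST((void)0, (expr) == (val))

static int demo_test(void)
{
	int result = TEST_FAILURE;
	int fd = -1;

	TEST(fd = 3, fd != -1);		/* stand-in for open()/s_mkdir() */
	TESTEQUAL(fd - 3, 0);

	result = TEST_SUCCESS;
out:
	/* cleanup runs on both success and failure, like the real tests */
	return result;
}

int main(void)
{
	return demo_test();
}

Because every TEST() jumps to out:, the file descriptors in the real test are closed and the mount is torn down no matter where a check fails.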
Some files were not shown because too many files have changed in this diff.