Merge remote-tracking branch into HEAD

* keystone/mirror-android14-6.1-2023-10: (91 commits)
  ANDROID: abi_gki_aarch64_qcom: update abi symbols
  ANDROID: vendor hooks: Enable Vendor hook to register smmu driver to dedicated iommu bus defined by vendor.
  UPSTREAM: netfilter: xt_sctp: validate the flag_info count
  UPSTREAM: mm/mglru: make memcg_lru->lock irq safe
  UPSTREAM: iommu/amd: Fix possible memory leak of 'domain'
  UPSTREAM: selftests/tc-testing: Remove configs that no longer exist
  ANDROID: abi_gki_aarch64_qcom: update abi symbols
  ANDROID: ABI: Update symbol list for imx
  ANDROID: GKI: add allowed list for Exynosauto SoC
  UPSTREAM: ufs: core: wlun send SSU timeout recovery
  UPSTREAM: PM: domains: fix integer overflow issues in genpd_parse_state()
  ANDROID: mm: vh for compaction begin/end
  UPSTREAM: netfilter: xt_u32: validate user space input
  UPSTREAM: netfilter: nfnetlink_osf: avoid OOB read
  UPSTREAM: ipv4: fix null-deref in ipv4_link_failure
  UPSTREAM: net/sched: Retire rsvp classifier
  UPSTREAM: usb: core: stop USB enumeration if too many retries
  ANDROID: KVM: arm64: Add missing hyp events for forwarded SMCs
  ANDROID: KVM: arm64: Store hyp address in the host fp state array
  ANDROID: KVM: arm64: Allocate host fp/simd state later in initialization
  ...

Change-Id: I9af1e1c86cc68a612c09f580a986fe2a32489a45
Signed-off-by: Omkar Sai Sandeep Katadi <okatadi@google.com>
This commit is contained in:
Omkar Sai Sandeep Katadi 2023-10-26 19:17:35 +00:00
commit 21207eb8bb
112 changed files with 12950 additions and 1470 deletions

View File

@ -4,6 +4,7 @@
load("@bazel_skylib//rules:write_file.bzl", "write_file")
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
load("//build/kernel/kleaf:common_kernels.bzl", "define_common_kernels")
load("//build/kernel/kleaf:constants.bzl", "X86_64_OUTS")
load(
"//build/kernel/kleaf:kernel.bzl",
"checkpatch",
@ -98,6 +99,7 @@ filegroup(
name = "aarch64_additional_kmi_symbol_lists",
srcs = [
# keep sorted
"android/abi_gki_aarch64_asus",
"android/abi_gki_aarch64_db845c",
"android/abi_gki_aarch64_exynos",
"android/abi_gki_aarch64_exynosauto",
@ -109,6 +111,7 @@ filegroup(
"android/abi_gki_aarch64_oplus",
"android/abi_gki_aarch64_pixel",
"android/abi_gki_aarch64_qcom",
"android/abi_gki_aarch64_rockchip",
"android/abi_gki_aarch64_tuxera",
"android/abi_gki_aarch64_unisoc",
"android/abi_gki_aarch64_virtual_device",
@ -165,6 +168,101 @@ define_common_kernels(target_configs = {
},
})
# Microdroid is not a real device. The kernel image is built with special
# configs to reduce the size. Hence, not using mixed build.
# Builds the aarch64 Microdroid kernel from the common aarch64 kernel
# sources with a dedicated build config; only the "Image" make goal is
# requested, and the listed kernel artifacts are declared as outputs.
kernel_build(
name = "kernel_aarch64_microdroid",
srcs = ["//common:kernel_aarch64_sources"],
outs = [
"Image",
"System.map",
"modules.builtin",
"modules.builtin.modinfo",
"vmlinux",
"vmlinux.symvers",
],
build_config = "build.config.microdroid.aarch64",
make_goals = [
"Image",
],
)
# Copies the aarch64 Microdroid kernel build artifacts into a flat
# distribution directory (out/kernel_aarch64_microdroid/dist), logging
# at "info" verbosity.
copy_to_dist_dir(
name = "kernel_aarch64_microdroid_dist",
data = [
":kernel_aarch64_microdroid",
],
dist_dir = "out/kernel_aarch64_microdroid/dist",
flat = True,
log = "info",
)
# Microdroid is not a real device. The kernel image is built with special
# configs to reduce the size. Hence, not using mixed build.
# Builds the x86_64 Microdroid kernel from the common x86_64 kernel
# sources with a dedicated build config; outputs come from the shared
# X86_64_OUTS list and only the "bzImage" make goal is requested.
kernel_build(
name = "kernel_x86_64_microdroid",
srcs = ["//common:kernel_x86_64_sources"],
outs = X86_64_OUTS,
arch = "x86_64",
build_config = "build.config.microdroid.x86_64",
make_goals = [
"bzImage",
],
)
# Copies the x86_64 Microdroid kernel build artifacts into a flat
# distribution directory (out/kernel_x86_64_microdroid/dist), logging
# at "info" verbosity.
copy_to_dist_dir(
name = "kernel_x86_64_microdroid_dist",
data = [
":kernel_x86_64_microdroid",
],
dist_dir = "out/kernel_x86_64_microdroid/dist",
flat = True,
log = "info",
)
# Builds the aarch64 crashdump kernel from the common aarch64 kernel
# sources with its own build config; only the "Image" artifact is
# produced and only the "Image" make goal is requested.
kernel_build(
name = "kernel_aarch64_crashdump",
srcs = ["//common:kernel_aarch64_sources"],
outs = [
"Image",
],
build_config = "build.config.crashdump.aarch64",
make_goals = [
"Image",
],
)
# Copies the aarch64 crashdump kernel build artifacts into a flat
# distribution directory (out/kernel_aarch64_crashdump/dist), logging
# at "info" verbosity.
copy_to_dist_dir(
name = "kernel_aarch64_crashdump_dist",
data = [
":kernel_aarch64_crashdump",
],
dist_dir = "out/kernel_aarch64_crashdump/dist",
flat = True,
log = "info",
)
# Builds the x86_64 crashdump kernel from the common x86_64 kernel
# sources with its own build config; outputs come from the shared
# X86_64_OUTS list and only the "bzImage" make goal is requested.
kernel_build(
name = "kernel_x86_64_crashdump",
srcs = ["//common:kernel_x86_64_sources"],
outs = X86_64_OUTS,
arch = "x86_64",
build_config = "build.config.crashdump.x86_64",
make_goals = [
"bzImage",
],
)
# Copies the x86_64 crashdump kernel build artifacts into a flat
# distribution directory (out/kernel_x86_64_crashdump/dist), logging
# at "info" verbosity.
copy_to_dist_dir(
name = "kernel_x86_64_crashdump_dist",
data = [
":kernel_x86_64_crashdump",
],
dist_dir = "out/kernel_x86_64_crashdump/dist",
flat = True,
log = "info",
)
_DB845C_MODULE_OUTS = [
# keep sorted
"crypto/michael_mic.ko",

View File

@ -264,6 +264,17 @@ Description:
attached to the port will not be detected, initialized,
or enumerated.
What: /sys/bus/usb/devices/.../<hub_interface>/port<X>/early_stop
Date: Sep 2022
Contact: Ray Chi <raychi@google.com>
Description:
Some USB hosts have watchdog mechanisms that may force the device
into ramdump if port initialization takes too long. This attribute
limits each port to two attempts so that port initialization fails
quickly. In addition, if a port marked with early_stop has failed to
initialize, it will ignore all future connections until this
attribute is cleared.
What: /sys/bus/usb/devices/.../<hub_interface>/port<X>/state
Date: June 2023
Contact: Roy Luo <royluo@google.com>

View File

@ -2015,31 +2015,33 @@ that attribute:
no-change
Do not modify the I/O priority class.
none-to-rt
For requests that do not have an I/O priority class (NONE),
change the I/O priority class into RT. Do not modify
the I/O priority class of other requests.
promote-to-rt
For requests that have a non-RT I/O priority class, change it into RT.
Also change the priority level of these requests to 4. Do not modify
the I/O priority of requests that have priority class RT.
restrict-to-be
For requests that do not have an I/O priority class or that have I/O
priority class RT, change it into BE. Do not modify the I/O priority
class of requests that have priority class IDLE.
priority class RT, change it into BE. Also change the priority level
of these requests to 0. Do not modify the I/O priority class of
requests that have priority class IDLE.
idle
Change the I/O priority class of all requests into IDLE, the lowest
I/O priority class.
none-to-rt
Deprecated. Just an alias for promote-to-rt.
The following numerical values are associated with the I/O priority policies:
+-------------+---+
| no-change | 0 |
+-------------+---+
| none-to-rt | 1 |
+-------------+---+
| rt-to-be | 2 |
+-------------+---+
| all-to-idle | 3 |
+-------------+---+
+----------------+---+
| no-change | 0 |
+----------------+---+
| rt-to-be | 2 |
+----------------+---+
| all-to-idle | 3 |
+----------------+---+
The numerical value that corresponds to each I/O priority class is as follows:
@ -2055,9 +2057,13 @@ The numerical value that corresponds to each I/O priority class is as follows:
The algorithm to set the I/O priority class for a request is as follows:
- Translate the I/O priority class policy into a number.
- Change the request I/O priority class into the maximum of the I/O priority
class policy number and the numerical I/O priority class.
- If I/O priority class policy is promote-to-rt, change the request I/O
priority class to IOPRIO_CLASS_RT and change the request I/O priority
level to 4.
- If I/O priority class policy is not promote-to-rt, translate the I/O priority
class policy into a number, then change the request I/O priority class
into the maximum of the I/O priority class policy number and the numerical
I/O priority class.
PID
---

View File

@ -52,6 +52,30 @@ properties:
Address and Length pairs. Specifies regions of memory that are
acceptable to allocate from.
iommu-addresses:
$ref: /schemas/types.yaml#/definitions/phandle-array
description: >
A list of phandle and specifier pairs that describe static IO virtual
address space mappings and carveouts associated with a given reserved
memory region. The phandle in the first cell refers to the device for
which the mapping or carveout is to be created.
The specifier consists of an address/size pair and denotes the IO
virtual address range of the region for the given device. The exact
format depends on the values of the "#address-cells" and "#size-cells"
properties of the device referenced via the phandle.
When used in combination with a "reg" property, an IOVA mapping is to
be established for this memory region. One example where this can be
useful is to create an identity mapping for physical memory that the
firmware has configured some hardware to access (such as a bootsplash
framebuffer).
If no "reg" property is specified, the "iommu-addresses" property
defines carveout regions in the IOVA space for the given device. This
can be useful if a certain memory region should not be mapped through
the IOMMU.
no-map:
type: boolean
description: >
@ -89,12 +113,69 @@ allOf:
- no-map
oneOf:
- required:
- reg
- oneOf:
- required:
- reg
- required:
- size
- required:
- size
- oneOf:
# IOMMU reservations
- required:
- iommu-addresses
# IOMMU mappings
- required:
- reg
- iommu-addresses
additionalProperties: true
examples:
- |
/ {
compatible = "foo";
model = "foo";
#address-cells = <2>;
#size-cells = <2>;
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;
adsp_resv: reservation-adsp {
/*
* Restrict IOVA mappings for ADSP buffers to the 512 MiB region
* from 0x40000000 - 0x5fffffff. Anything outside is reserved by
* the ADSP for I/O memory and private memory allocations.
*/
iommu-addresses = <&adsp 0x0 0x00000000 0x00 0x40000000>,
<&adsp 0x0 0x60000000 0xff 0xa0000000>;
};
fb: framebuffer@90000000 {
reg = <0x0 0x90000000 0x0 0x00800000>;
iommu-addresses = <&dc0 0x0 0x90000000 0x0 0x00800000>;
};
};
bus@0 {
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0x40000000>;
adsp: adsp@2990000 {
reg = <0x2990000 0x2000>;
memory-region = <&adsp_resv>;
};
dc0: display@15200000 {
reg = <0x15200000 0x10000>;
memory-region = <&fb>;
};
};
};
...

View File

@ -34,8 +34,14 @@ Here are the main features of EROFS:
- Little endian on-disk design;
- 4KiB block size and 32-bit block addresses, therefore 16TiB address space
at most for now;
- Block-based distribution and file-based distribution over fscache are
supported;
- Support multiple devices to refer to external blobs, which can be used
for container images;
- 32-bit block addresses for each device, therefore 16TiB address space at
most with 4KiB block size for now;
- Two inode layouts for different requirements:

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,11 @@
[abi_symbol_list]
# aura sync
hid_unregister_driver
hid_hw_raw_request
hid_open_report
hid_hw_start
hid_hw_stop
__hid_register_driver
hid_hw_output_report
hid_hw_open
hid_hw_close

File diff suppressed because it is too large Load Diff

View File

@ -492,6 +492,9 @@
dma_get_sgtable_attrs
dma_get_slave_channel
dma_heap_add
dma_heap_buffer_alloc
dma_heap_buffer_free
dma_heap_find
dma_heap_get_dev
dma_heap_get_drvdata
dma_heap_get_name
@ -1164,6 +1167,8 @@
kvfree
kvfree_call_rcu
kvmalloc_node
led_classdev_register_ext
led_classdev_unregister
led_init_default_state_get
__list_add_valid
__list_del_entry_valid
@ -1820,6 +1825,7 @@
schedule
schedule_hrtimeout
schedule_timeout
schedule_timeout_idle
schedule_timeout_uninterruptible
scmi_driver_register
scmi_driver_unregister

View File

@ -2645,12 +2645,17 @@
__traceiter_android_vh_check_bpf_syscall
__traceiter_android_vh_check_file_open
__traceiter_android_vh_check_mmap_file
__traceiter_android_vh_compaction_exit
__traceiter_android_vh_compaction_try_to_compact_pages_exit
__traceiter_android_vh_cpufreq_fast_switch
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_iommu_iovad_alloc_iova
__traceiter_android_vh_iommu_iovad_free_iova
__traceiter_android_vh_is_fpsimd_save
__traceiter_android_vh_mm_alloc_pages_direct_reclaim_enter
__traceiter_android_vh_mm_alloc_pages_direct_reclaim_exit
__traceiter_android_vh_mm_alloc_pages_may_oom_exit
__traceiter_android_vh_rwsem_init
__traceiter_android_vh_rwsem_wake
__traceiter_android_vh_rwsem_write_finished
@ -2661,6 +2666,7 @@
__traceiter_android_vh_show_suspend_epoch_val
__traceiter_android_vh_syscall_prctl_finished
__traceiter_android_vh_ufs_clock_scaling
__traceiter_android_vh_vmscan_kswapd_done
__traceiter_cpu_frequency
__traceiter_gpu_mem_total
__traceiter_ipi_entry
@ -2740,12 +2746,17 @@
__tracepoint_android_vh_check_bpf_syscall
__tracepoint_android_vh_check_file_open
__tracepoint_android_vh_check_mmap_file
__tracepoint_android_vh_compaction_exit
__tracepoint_android_vh_compaction_try_to_compact_pages_exit
__tracepoint_android_vh_cpufreq_fast_switch
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_iommu_iovad_alloc_iova
__tracepoint_android_vh_iommu_iovad_free_iova
__tracepoint_android_vh_is_fpsimd_save
__tracepoint_android_vh_mm_alloc_pages_direct_reclaim_enter
__tracepoint_android_vh_mm_alloc_pages_direct_reclaim_exit
__tracepoint_android_vh_mm_alloc_pages_may_oom_exit
__tracepoint_android_vh_rwsem_init
__tracepoint_android_vh_rwsem_wake
__tracepoint_android_vh_rwsem_write_finished
@ -2756,6 +2767,7 @@
__tracepoint_android_vh_show_suspend_epoch_val
__tracepoint_android_vh_syscall_prctl_finished
__tracepoint_android_vh_ufs_clock_scaling
__tracepoint_android_vh_vmscan_kswapd_done
__tracepoint_cpu_frequency
__tracepoint_gpu_mem_total
__tracepoint_ipi_entry

View File

@ -20,6 +20,9 @@
down_read_trylock
drm_crtc_vblank_waitqueue
filp_close
folio_add_lru
folio_mapping
folio_referenced
for_each_kernel_tracepoint
freq_qos_add_notifier
freq_qos_remove_notifier
@ -42,11 +45,14 @@
jiffies_64_to_clock_t
kick_process
ktime_get_coarse_real_ts64
mem_cgroup_update_lru_size
memory_cgrp_subsys
memory_cgrp_subsys_enabled_key
mem_cgroup_from_id
mipi_dsi_generic_write
mmc_wait_for_cmd
__mod_lruvec_state
__mod_zone_page_state
nf_ct_attach
nf_ct_delete
nf_register_net_hook
@ -62,6 +68,7 @@
prepare_to_wait_exclusive
proc_symlink
public_key_verify_signature
put_pages_list
radix_tree_lookup_slot
radix_tree_replace_slot
_raw_write_trylock
@ -69,6 +76,7 @@
register_tcf_proto_ops
regulator_map_voltage_linear_range
remove_proc_subtree
root_mem_cgroup
rtc_read_alarm
rtc_set_alarm
__rtnl_link_unregister
@ -108,6 +116,7 @@
__traceiter_android_vh_futex_wake_up_q_finish
__traceiter_android_vh_record_mutex_lock_starttime
__traceiter_android_vh_record_pcpu_rwsem_starttime
__traceiter_android_vh_percpu_rwsem_wq_add
__traceiter_android_vh_record_rtmutex_lock_starttime
__traceiter_android_vh_record_rwsem_lock_starttime
__traceiter_android_vh_alter_mutex_list_add
@ -174,6 +183,11 @@
__traceiter_task_rename
__traceiter_android_vh_test_clear_look_around_ref
__traceiter_android_vh_tune_swappiness
__traceiter_android_vh_alloc_oem_binder_struct
__traceiter_android_vh_binder_transaction_received
__traceiter_android_vh_free_oem_binder_struct
__traceiter_android_vh_binder_special_task
__traceiter_android_vh_binder_free_buf
__tracepoint_android_rvh_post_init_entity_util_avg
__tracepoint_android_rvh_rtmutex_force_update
__tracepoint_android_vh_account_process_tick_gran
@ -226,6 +240,7 @@
__tracepoint_android_vh_mutex_unlock_slowpath
__tracepoint_android_vh_record_mutex_lock_starttime
__tracepoint_android_vh_record_pcpu_rwsem_starttime
__tracepoint_android_vh_percpu_rwsem_wq_add
__tracepoint_android_vh_record_rtmutex_lock_starttime
__tracepoint_android_vh_record_rwsem_lock_starttime
__tracepoint_android_vh_rtmutex_waiter_prio
@ -256,6 +271,11 @@
__tracepoint_sched_stat_wait
__tracepoint_sched_waking
__tracepoint_task_rename
__tracepoint_android_vh_alloc_oem_binder_struct
__tracepoint_android_vh_binder_transaction_received
__tracepoint_android_vh_free_oem_binder_struct
__tracepoint_android_vh_binder_special_task
__tracepoint_android_vh_binder_free_buf
__trace_puts
try_to_free_mem_cgroup_pages
typec_mux_get_drvdata

View File

@ -1,4 +1,5 @@
[abi_symbol_list]
activate_task
add_cpu
add_timer
add_timer_on
@ -30,6 +31,7 @@
__arch_clear_user
__arch_copy_from_user
__arch_copy_to_user
arch_freq_scale
arch_timer_read_counter
argv_free
argv_split
@ -42,6 +44,7 @@
atomic_notifier_chain_register
atomic_notifier_chain_unregister
autoremove_wake_function
available_idle_cpu
backlight_device_set_brightness
badblocks_check
badblocks_clear
@ -49,13 +52,19 @@
badblocks_init
badblocks_set
badblocks_show
balance_push_callback
bcmp
bdev_end_io_acct
bdev_nr_zones
bdev_start_io_acct
bin2hex
bio_add_page
bio_alloc_bioset
bio_chain
bio_endio
bio_end_io_acct_remapped
bio_init
bio_put
bio_start_io_acct
__bitmap_and
__bitmap_andnot
@ -76,6 +85,9 @@
bitmap_zalloc
blk_abort_request
__blk_alloc_disk
blk_check_plugged
blkdev_get_by_dev
blkdev_put
blk_execute_rq_nowait
__blk_mq_alloc_disk
blk_mq_alloc_tag_set
@ -114,6 +126,8 @@
blocking_notifier_chain_unregister
bpf_trace_run1
bpf_trace_run10
bpf_trace_run11
bpf_trace_run12
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
@ -143,11 +157,13 @@
cdev_device_del
cdev_init
__check_object_size
check_preempt_curr
__class_create
class_destroy
class_interface_unregister
__class_register
class_unregister
cleancache_register_ops
clear_page
__ClearPageMovable
clk_disable
@ -204,22 +220,36 @@
_copy_from_iter
__copy_overflow
_copy_to_iter
__cpu_active_mask
cpu_all_bits
cpu_bit_bitmap
cpufreq_add_update_util_hook
cpufreq_cpu_get
cpufreq_cpu_get_raw
cpufreq_cpu_put
cpufreq_disable_fast_switch
cpufreq_driver_fast_switch
cpufreq_driver_resolve_freq
__cpufreq_driver_target
cpufreq_driver_target
cpufreq_enable_fast_switch
cpufreq_freq_transition_begin
cpufreq_freq_transition_end
cpufreq_frequency_table_verify
cpufreq_generic_attr
cpufreq_get
cpufreq_get_policy
cpufreq_policy_transition_delay_us
cpufreq_quick_get
cpufreq_register_driver
cpufreq_register_governor
cpufreq_register_notifier
cpufreq_remove_update_util_hook
cpufreq_table_index_unsorted
cpufreq_this_cpu_can_update
cpufreq_update_util_data
cpu_hotplug_disable
cpu_hotplug_enable
__cpuhp_remove_state
__cpuhp_setup_state
__cpuhp_setup_state_cpuslocked
@ -227,15 +257,19 @@
__cpuhp_state_remove_instance
cpuhp_tasks_frozen
cpu_hwcaps
cpuidle_driver_state_disabled
cpuidle_get_driver
cpu_latency_qos_add_request
cpu_latency_qos_remove_request
cpu_latency_qos_update_request
cpumask_local_spread
cpu_number
__cpu_online_mask
cpu_pm_register_notifier
cpu_pm_unregister_notifier
__cpu_possible_mask
__cpu_present_mask
cpupri_find_fitness
cpu_scale
cpus_read_lock
cpus_read_unlock
@ -275,6 +309,7 @@
csum_partial
csum_tcpudp_nofold
_ctype
deactivate_task
debugfs_attr_read
debugfs_attr_write
debugfs_create_atomic_t
@ -282,6 +317,7 @@
debugfs_create_devm_seqfile
debugfs_create_dir
debugfs_create_file
debugfs_create_file_unsafe
debugfs_create_size_t
debugfs_create_symlink
debugfs_create_u16
@ -326,6 +362,7 @@
__dev_get_by_index
dev_get_by_index
dev_get_by_name
dev_get_stats
device_add
device_add_disk
device_add_groups
@ -340,6 +377,7 @@
device_get_child_node_count
device_get_dma_attr
device_get_match_data
device_get_named_child_node
device_get_next_child_node
device_initialize
device_link_add
@ -394,13 +432,17 @@
devm_ioremap_resource
devm_ioremap_wc
devm_iounmap
__devm_irq_alloc_descs
devm_kasprintf
devm_kfree
devm_kmalloc
devm_kmemdup
devm_krealloc
devm_kstrdup
devm_kstrdup_const
devm_led_classdev_register_ext
devm_memremap
devm_memunmap
devm_mfd_add_devices
devm_nvmem_register
__devm_of_phy_provider_register
@ -420,18 +462,21 @@
__devm_regmap_init
__devm_regmap_init_i2c
__devm_regmap_init_spi
__devm_regmap_init_spmi_ext
devm_regulator_bulk_get
devm_regulator_get
devm_regulator_get_exclusive
devm_regulator_get_optional
devm_regulator_put
devm_regulator_register
devm_request_any_context_irq
__devm_request_region
devm_request_threaded_irq
devm_rtc_device_register
devm_snd_soc_register_component
devm_thermal_of_cooling_device_register
devm_thermal_of_zone_register
devm_thermal_of_zone_unregister
devm_usb_get_phy_by_phandle
_dev_notice
dev_pm_domain_attach_by_name
@ -459,6 +504,7 @@
__devres_alloc_node
devres_free
dev_set_name
dev_vprintk_emit
_dev_warn
disable_irq
disable_irq_nosync
@ -486,6 +532,7 @@
dmabuf_page_pool_free
dmabuf_page_pool_get_size
dma_buf_put
dma_buf_set_name
dma_buf_unmap_attachment
dma_buf_vmap
dma_buf_vunmap
@ -542,19 +589,25 @@
drain_workqueue
driver_register
driver_unregister
drm_add_edid_modes
drm_add_modes_noedid
drm_atomic_add_affected_connectors
drm_atomic_add_affected_planes
drm_atomic_bridge_chain_disable
drm_atomic_bridge_chain_post_disable
drm_atomic_commit
drm_atomic_get_connector_state
drm_atomic_get_crtc_state
drm_atomic_get_new_connector_for_encoder
drm_atomic_get_new_private_obj_state
drm_atomic_get_old_connector_for_encoder
drm_atomic_get_old_private_obj_state
drm_atomic_get_plane_state
drm_atomic_get_private_obj_state
drm_atomic_helper_bridge_destroy_state
drm_atomic_helper_bridge_duplicate_state
drm_atomic_helper_bridge_reset
drm_atomic_helper_calc_timestamping_constants
drm_atomic_helper_check_modeset
drm_atomic_helper_check_planes
drm_atomic_helper_check_plane_state
@ -567,7 +620,10 @@
drm_atomic_helper_commit_planes
drm_atomic_helper_commit_tail
__drm_atomic_helper_connector_destroy_state
drm_atomic_helper_connector_destroy_state
__drm_atomic_helper_connector_duplicate_state
drm_atomic_helper_connector_duplicate_state
drm_atomic_helper_connector_reset
__drm_atomic_helper_crtc_destroy_state
__drm_atomic_helper_crtc_duplicate_state
__drm_atomic_helper_crtc_reset
@ -583,6 +639,7 @@
drm_atomic_helper_setup_commit
drm_atomic_helper_shutdown
drm_atomic_helper_swap_state
drm_atomic_helper_update_legacy_modeset_state
drm_atomic_helper_update_plane
drm_atomic_helper_wait_for_dependencies
drm_atomic_helper_wait_for_flip_done
@ -610,12 +667,17 @@
drm_connector_list_iter_next
drm_connector_register
drm_connector_unregister
drm_connector_update_edid_property
drm_crtc_add_crc_entry
drm_crtc_arm_vblank_event
drm_crtc_cleanup
__drm_crtc_commit_free
drm_crtc_commit_wait
drm_crtc_enable_color_mgmt
drm_crtc_handle_vblank
drm_crtc_init_with_planes
drm_crtc_send_vblank_event
drm_crtc_vblank_count
drm_crtc_vblank_count_and_time
drm_crtc_vblank_get
drm_crtc_vblank_off
@ -623,10 +685,20 @@
drm_crtc_vblank_put
drm_crtc_wait_one_vblank
___drm_dbg
__drm_debug
drm_detect_monitor_audio
__drm_dev_dbg
drm_dev_printk
drm_dev_put
drm_dev_register
drm_dev_unregister
drm_display_mode_from_cea_vic
drm_display_mode_to_videomode
drm_do_get_edid
drm_edid_duplicate
drm_edid_get_monitor_name
drm_edid_is_valid
drm_edid_to_sad
drm_encoder_cleanup
drm_encoder_init
__drm_err
@ -648,6 +720,7 @@
drm_gem_private_object_init
drm_gem_vm_close
drm_gem_vm_open
drm_get_edid
drm_get_format_info
drm_helper_mode_fill_fb_struct
drm_helper_probe_single_connector_modes
@ -655,10 +728,13 @@
drm_kms_helper_hotplug_event
drm_kms_helper_poll_fini
drm_kms_helper_poll_init
drm_match_cea_mode
drmm_kmalloc
drmm_mode_config_init
drm_mode_config_reset
drm_mode_convert_to_umode
drm_mode_copy
drm_mode_destroy
drm_mode_duplicate
drm_mode_equal
drm_mode_equal_no_clocks
@ -672,9 +748,11 @@
drm_modeset_drop_locks
drm_modeset_lock
drm_modeset_lock_all_ctx
drm_modeset_lock_single_interruptible
drm_modeset_unlock
drm_mode_vrefresh
drm_object_attach_property
drm_object_property_set_value
drm_open
drm_panel_add
drm_panel_disable
@ -724,10 +802,13 @@
dump_backtrace
dump_stack
dw_handle_msi_irq
dw_pcie_find_capability
dw_pcie_host_init
dw_pcie_read
dw_pcie_read_dbi
dw_pcie_setup_rc
dw_pcie_write
dw_pcie_write_dbi
__dynamic_dev_dbg
__dynamic_pr_debug
em_cpu_get
@ -755,6 +836,9 @@
__fdget
fd_install
fget
file_path
filp_close
filp_open_block
find_extend_vma
_find_first_and_bit
_find_first_bit
@ -765,12 +849,14 @@
_find_next_bit
_find_next_zero_bit
find_pid_ns
find_task_by_vpid
find_vma_intersection
finish_wait
flush_dcache_page
flush_delayed_work
flush_work
__flush_workqueue
__folio_lock
__folio_put
folio_wait_bit
fortify_panic
@ -791,6 +877,9 @@
freq_qos_add_request
freq_qos_remove_request
freq_qos_update_request
fs_bio_set
fsnotify
__fsnotify_parent
full_name_hash
fwnode_get_name
fwnode_gpiod_get_index
@ -823,6 +912,22 @@
get_cpu_iowait_time_us
get_device
__get_free_pages
get_governor_parent_kobj
gether_cleanup
gether_connect
gether_disconnect
gether_get_dev_addr
gether_get_host_addr
gether_get_host_addr_u8
gether_get_ifname
gether_get_qmult
gether_register_netdev
gether_set_dev_addr
gether_set_gadget
gether_set_host_addr
gether_set_ifname
gether_set_qmult
gether_setup_name_default
get_net_ns_by_fd
get_net_ns_by_pid
get_pid_task
@ -832,6 +937,8 @@
__get_random_u32_below
get_random_u8
get_sg_io_hdr
__get_task_comm
get_task_cred
get_thermal_instance
get_unused_fd_flags
get_user_pages
@ -839,6 +946,10 @@
get_vaddr_frames
gic_nonsecure_priorities
glob_match
gov_attr_set_get
gov_attr_set_init
gov_attr_set_put
governor_sysfs_ops
gpiochip_generic_config
gpiochip_generic_free
gpiochip_generic_request
@ -871,6 +982,7 @@
handle_simple_irq
handle_sysrq
hashlen_string
have_governor_per_policy
hex2bin
hex_dump_to_buffer
hex_to_bin
@ -888,6 +1000,7 @@
hwrng_register
hwrng_unregister
i2c_adapter_type
i2c_add_adapter
i2c_add_numbered_adapter
i2c_bus_type
i2c_del_adapter
@ -965,6 +1078,7 @@
interval_tree_iter_first
interval_tree_iter_next
interval_tree_remove
int_pow
int_sqrt
int_to_scsilun
iomem_resource
@ -1015,7 +1129,9 @@
irq_domain_get_irq_data
irq_domain_remove
irq_domain_set_info
irq_domain_simple_ops
irq_domain_xlate_twocell
irq_force_affinity
irq_get_irq_data
irq_modify_status
irq_of_parse_and_map
@ -1027,6 +1143,8 @@
irq_set_irq_type
irq_set_irq_wake
irq_to_desc
irq_work_queue
irq_work_sync
is_vmalloc_addr
jiffies
jiffies64_to_msecs
@ -1039,6 +1157,7 @@
kernel_param_lock
kernel_param_unlock
kernel_restart
kernfs_path_from_node
key_create_or_update
key_put
keyring_alloc
@ -1064,6 +1183,7 @@
kmem_cache_destroy
kmem_cache_free
kmemdup
kmemdup_nul
kobject_add
kobject_create_and_add
kobject_del
@ -1154,6 +1274,7 @@
mbox_request_channel
mbox_send_message
memchr
memchr_inv
memcmp
memcpy
__memcpy_fromio
@ -1202,6 +1323,7 @@
__msecs_to_jiffies
msleep
msleep_interruptible
mtree_load
__mutex_init
mutex_is_locked
mutex_lock
@ -1238,6 +1360,8 @@
netlink_unregister_notifier
net_ns_type_operations
net_ratelimit
nf_register_net_hooks
nf_unregister_net_hooks
nla_find
nla_memcpy
__nla_parse
@ -1252,6 +1376,7 @@
noop_llseek
nr_cpu_ids
nr_irqs
ns_capable
nsec_to_clock_t
ns_to_timespec64
__num_online_cpus
@ -1284,6 +1409,7 @@
of_find_node_by_phandle
of_find_node_by_type
of_find_node_opts_by_path
of_find_node_with_property
of_find_property
of_fwnode_ops
of_genpd_add_provider_simple
@ -1495,17 +1621,23 @@
prepare_to_wait_event
print_hex_dump
_printk
_printk_deferred
proc_create
proc_create_data
proc_create_single_data
proc_dointvec
proc_dostring
proc_douintvec_minmax
proc_mkdir
proc_mkdir_data
proc_remove
proc_set_size
proc_symlink
pskb_expand_head
__pskb_pull_tail
___pskb_trim
push_cpu_stop
__put_cred
put_device
put_disk
put_iova_domain
@ -1537,6 +1669,8 @@
_raw_spin_lock_bh
_raw_spin_lock_irq
_raw_spin_lock_irqsave
raw_spin_rq_lock_nested
raw_spin_rq_unlock
_raw_spin_trylock
_raw_spin_unlock
_raw_spin_unlock_bh
@ -1634,7 +1768,10 @@
__request_percpu_irq
__request_region
request_threaded_irq
resched_curr
reserve_iova
return_address
reweight_task
rfkill_alloc
rfkill_blocked
rfkill_destroy
@ -1651,6 +1788,7 @@
rht_bucket_nested_insert
__root_device_register
root_device_unregister
root_task_group
round_jiffies
round_jiffies_relative
round_jiffies_up
@ -1668,12 +1806,18 @@
rt_mutex_unlock
rtnl_is_locked
rtnl_lock
rtnl_trylock
rtnl_unlock
runqueues
sched_clock
sched_feat_keys
sched_setattr_nocheck
sched_set_fifo
sched_set_normal
sched_setscheduler
sched_setscheduler_nocheck
sched_show_task
sched_uclamp_used
schedule
schedule_timeout
schedule_timeout_interruptible
@ -1722,6 +1866,7 @@
set_page_dirty
set_page_dirty_lock
__SetPageMovable
set_task_cpu
set_user_nice
sg_alloc_table
sg_alloc_table_from_pages_segment
@ -1789,6 +1934,7 @@
snd_jack_set_key
snd_pcm_format_physical_width
snd_pcm_format_width
snd_pcm_hw_constraint_integer
snd_pcm_hw_constraint_list
snd_pcm_lib_free_pages
snd_pcm_lib_ioctl
@ -1845,6 +1991,7 @@
snd_soc_register_card
snd_soc_register_component
snd_soc_runtime_set_dai_fmt
snd_soc_set_runtime_hwparams
snd_soc_unregister_card
snd_soc_unregister_component
snprintf
@ -1867,6 +2014,10 @@
spi_sync
spi_sync_locked
spi_unregister_controller
spmi_controller_add
spmi_controller_alloc
spmi_controller_remove
__spmi_driver_register
sprintf
sprint_symbol
srcu_init_notifier_head
@ -1876,9 +2027,11 @@
sscanf
__stack_chk_fail
static_key_disable
static_key_enable
static_key_slow_dec
static_key_slow_inc
stop_machine
stop_one_cpu_nowait
strcasecmp
strcat
strchr
@ -1892,6 +2045,7 @@
strlen
strncasecmp
strncat
strnchr
strncmp
strncpy
strncpy_from_user
@ -1904,6 +2058,8 @@
strsep
strspn
strstr
submit_bio
submit_bio_wait
subsys_system_register
suspend_set_ops
__sw_hweight16
@ -1917,6 +2073,8 @@
synchronize_net
synchronize_rcu
syscon_regmap_lookup_by_phandle
sysctl_sched_features
sysctl_sched_latency
sysfs_add_file_to_group
sysfs_add_link_to_group
sysfs_create_file_ns
@ -1946,12 +2104,14 @@
system_wq
sys_tz
task_active_pid_ns
__tasklet_hi_schedule
tasklet_init
tasklet_kill
__tasklet_schedule
tasklet_setup
tasklet_unlock_wait
__task_pid_nr_ns
task_rq_lock
tcpci_get_tcpm_port
tcpci_irq
tcpci_register_port
@ -1962,17 +2122,25 @@
tcpm_pd_transmit_complete
tcpm_port_clean
tcpm_port_is_toggling
tcpm_register_port
tcpm_sink_frs
tcpm_sourcing_vbus
tcpm_tcpc_reset
tcpm_unregister_port
tcpm_vbus_change
teo_cpu_get_util_threshold
teo_cpu_set_util_threshold
thermal_cdev_update
thermal_cooling_device_unregister
thermal_of_cooling_device_register
thermal_pressure
thermal_zone_device_disable
thermal_zone_device_enable
thermal_zone_device_register
thermal_zone_device_unregister
thermal_zone_device_update
thermal_zone_get_temp
thermal_zone_get_zone_by_name
thread_group_cputime_adjusted
time64_to_tm
topology_update_thermal_pressure
@ -1986,17 +2154,58 @@
trace_event_raw_init
trace_event_reg
trace_handle_return
__traceiter_android_rvh_attach_entity_load_avg
__traceiter_android_rvh_audio_usb_offload_disconnect
__traceiter_android_rvh_can_migrate_task
__traceiter_android_rvh_cgroup_force_kthread_migration
__traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_cpu_overutilized
__traceiter_android_rvh_dequeue_task
__traceiter_android_rvh_dequeue_task_fair
__traceiter_android_rvh_detach_entity_load_avg
__traceiter_android_rvh_enqueue_task
__traceiter_android_rvh_enqueue_task_fair
__traceiter_android_rvh_find_lowest_rq
__traceiter_android_rvh_irqs_disable
__traceiter_android_rvh_irqs_enable
__traceiter_android_rvh_post_init_entity_util_avg
__traceiter_android_rvh_preempt_disable
__traceiter_android_rvh_preempt_enable
__traceiter_android_rvh_prepare_prio_fork
__traceiter_android_rvh_remove_entity_load_avg
__traceiter_android_rvh_rtmutex_prepare_setprio
__traceiter_android_rvh_sched_newidle_balance
__traceiter_android_rvh_select_task_rq_fair
__traceiter_android_rvh_select_task_rq_rt
__traceiter_android_rvh_set_cpus_allowed_by_task
__traceiter_android_rvh_set_iowait
__traceiter_android_rvh_setscheduler
__traceiter_android_rvh_set_task_cpu
__traceiter_android_rvh_set_user_nice
__traceiter_android_rvh_typec_tcpci_get_vbus
__traceiter_android_rvh_uclamp_eff_get
__traceiter_android_rvh_update_blocked_fair
__traceiter_android_rvh_update_load_avg
__traceiter_android_rvh_update_rt_rq_load_avg
__traceiter_android_vh_arch_set_freq_scale
__traceiter_android_vh_audio_usb_offload_connect
__traceiter_android_vh_binder_restore_priority
__traceiter_android_vh_binder_set_priority
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_dump_throttled_rt_tasks
__traceiter_android_vh_dup_task_struct
__traceiter_android_vh_enable_thermal_genl_check
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_setscheduler_uclamp
__traceiter_android_vh_sysrq_crash
__traceiter_android_vh_typec_store_partner_src_caps
__traceiter_android_vh_typec_tcpci_override_toggling
__traceiter_android_vh_typec_tcpm_get_timer
__traceiter_android_vh_typec_tcpm_log
__traceiter_android_vh_typec_tcpm_modify_src_caps
__traceiter_android_vh_uclamp_validate
__traceiter_android_vh_ufs_check_int_errors
__traceiter_android_vh_ufs_compl_command
__traceiter_android_vh_ufs_fill_prdt
@ -2006,7 +2215,9 @@
__traceiter_android_vh_ufs_send_uic_command
__traceiter_android_vh_ufs_update_sdev
__traceiter_android_vh_ufs_update_sysfs
__traceiter_android_vh_use_amu_fie
__traceiter_clock_set_rate
__traceiter_cpu_frequency
__traceiter_device_pm_callback_end
__traceiter_device_pm_callback_start
__traceiter_gpu_mem_total
@ -2017,22 +2228,72 @@
__traceiter_mmap_lock_acquire_returned
__traceiter_mmap_lock_released
__traceiter_mmap_lock_start_locking
__traceiter_pelt_cfs_tp
__traceiter_pelt_dl_tp
__traceiter_pelt_irq_tp
__traceiter_pelt_rt_tp
__traceiter_pelt_se_tp
__traceiter_sched_cpu_capacity_tp
__traceiter_sched_overutilized_tp
__traceiter_sched_switch
__traceiter_sched_util_est_cfs_tp
__traceiter_sched_util_est_se_tp
__traceiter_suspend_resume
__traceiter_workqueue_execute_end
__traceiter_workqueue_execute_start
trace_output_call
__tracepoint_android_rvh_attach_entity_load_avg
__tracepoint_android_rvh_audio_usb_offload_disconnect
__tracepoint_android_rvh_can_migrate_task
__tracepoint_android_rvh_cgroup_force_kthread_migration
__tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_cpu_overutilized
__tracepoint_android_rvh_dequeue_task
__tracepoint_android_rvh_dequeue_task_fair
__tracepoint_android_rvh_detach_entity_load_avg
__tracepoint_android_rvh_enqueue_task
__tracepoint_android_rvh_enqueue_task_fair
__tracepoint_android_rvh_find_lowest_rq
__tracepoint_android_rvh_irqs_disable
__tracepoint_android_rvh_irqs_enable
__tracepoint_android_rvh_post_init_entity_util_avg
__tracepoint_android_rvh_preempt_disable
__tracepoint_android_rvh_preempt_enable
__tracepoint_android_rvh_prepare_prio_fork
__tracepoint_android_rvh_remove_entity_load_avg
__tracepoint_android_rvh_rtmutex_prepare_setprio
__tracepoint_android_rvh_sched_newidle_balance
__tracepoint_android_rvh_select_task_rq_fair
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_rvh_set_cpus_allowed_by_task
__tracepoint_android_rvh_set_iowait
__tracepoint_android_rvh_setscheduler
__tracepoint_android_rvh_set_task_cpu
__tracepoint_android_rvh_set_user_nice
__tracepoint_android_rvh_typec_tcpci_get_vbus
__tracepoint_android_rvh_uclamp_eff_get
__tracepoint_android_rvh_update_blocked_fair
__tracepoint_android_rvh_update_load_avg
__tracepoint_android_rvh_update_rt_rq_load_avg
__tracepoint_android_vh_arch_set_freq_scale
__tracepoint_android_vh_audio_usb_offload_connect
__tracepoint_android_vh_binder_restore_priority
__tracepoint_android_vh_binder_set_priority
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_dump_throttled_rt_tasks
__tracepoint_android_vh_dup_task_struct
__tracepoint_android_vh_enable_thermal_genl_check
__tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_setscheduler_uclamp
__tracepoint_android_vh_sysrq_crash
__tracepoint_android_vh_typec_store_partner_src_caps
__tracepoint_android_vh_typec_tcpci_override_toggling
__tracepoint_android_vh_typec_tcpm_get_timer
__tracepoint_android_vh_typec_tcpm_log
__tracepoint_android_vh_typec_tcpm_modify_src_caps
__tracepoint_android_vh_uclamp_validate
__tracepoint_android_vh_ufs_check_int_errors
__tracepoint_android_vh_ufs_compl_command
__tracepoint_android_vh_ufs_fill_prdt
@ -2042,7 +2303,9 @@
__tracepoint_android_vh_ufs_send_uic_command
__tracepoint_android_vh_ufs_update_sdev
__tracepoint_android_vh_ufs_update_sysfs
__tracepoint_android_vh_use_amu_fie
__tracepoint_clock_set_rate
__tracepoint_cpu_frequency
__tracepoint_device_pm_callback_end
__tracepoint_device_pm_callback_start
__tracepoint_gpu_mem_total
@ -2053,9 +2316,18 @@
__tracepoint_mmap_lock_acquire_returned
__tracepoint_mmap_lock_released
__tracepoint_mmap_lock_start_locking
__tracepoint_pelt_cfs_tp
__tracepoint_pelt_dl_tp
__tracepoint_pelt_irq_tp
__tracepoint_pelt_rt_tp
__tracepoint_pelt_se_tp
tracepoint_probe_register
tracepoint_probe_unregister
__tracepoint_sched_cpu_capacity_tp
__tracepoint_sched_overutilized_tp
__tracepoint_sched_switch
__tracepoint_sched_util_est_cfs_tp
__tracepoint_sched_util_est_se_tp
__tracepoint_suspend_resume
__tracepoint_workqueue_execute_end
__tracepoint_workqueue_execute_start
@ -2093,8 +2365,10 @@
uart_unregister_driver
uart_update_timeout
uart_write_wakeup
uclamp_eff_value
__udelay
udp4_hwcsum
ufshcd_auto_hibern8_update
ufshcd_bkops_ctrl
ufshcd_hold
ufshcd_pltfrm_init
@ -2130,29 +2404,42 @@
unregister_virtio_driver
up
update_devfreq
___update_load_avg
___update_load_sum
update_rq_clock
up_read
up_write
usb_add_function
usb_add_hcd
usb_assign_descriptors
usb_copy_descriptors
__usb_create_hcd
usb_disabled
usb_enable_autosuspend
usb_ep_alloc_request
usb_ep_autoconfig
usb_ep_disable
usb_ep_enable
usb_ep_free_request
usb_ep_queue
usb_free_all_descriptors
usb_function_register
usb_function_unregister
usb_gadget_activate
usb_gadget_deactivate
usb_gadget_set_state
usb_gstrings_attach
usb_hcd_is_primary_hcd
usb_hcd_platform_shutdown
usb_hub_find_child
usb_interface_id
usb_os_desc_prepare_interf_dir
usb_otg_state_string
usb_put_function_instance
usb_put_hcd
usb_register_notify
usb_remove_hcd
usb_role_string
usb_role_switch_get_drvdata
usb_role_switch_register
usb_role_switch_unregister
@ -2247,6 +2534,7 @@
vmalloc_user
vmap
vmf_insert_pfn_prot
vm_iomap_memory
vprintk
vprintk_emit
vring_del_virtqueue
@ -2281,12 +2569,16 @@
__xa_alloc
xa_clear_mark
xa_destroy
__xa_erase
xa_erase
xa_find
xa_find_after
xa_get_mark
xa_load
xa_set_mark
xas_find
xas_pause
__xa_store
__xfrm_state_destroy
xfrm_state_lookup_byspi
xfrm_stateonly_find
@ -2294,7 +2586,18 @@
xhci_bus_resume
xhci_bus_suspend
xhci_gen_setup
xhci_get_endpoint_index
xhci_init_driver
xhci_resume
xhci_run
xhci_suspend
zs_compact
zs_create_pool
zs_destroy_pool
zs_free
zs_get_total_pages
zs_huge_class_size
zs_malloc
zs_map_object
zs_pool_stats
zs_unmap_object

View File

@ -3380,11 +3380,13 @@
__traceiter_android_rvh_update_thermal_stats
__traceiter_android_rvh_util_est_update
__traceiter_android_rvh_wake_up_new_task
__traceiter_android_vh_alter_mutex_list_add
__traceiter_android_vh_audio_usb_offload_connect
__traceiter_android_vh_binder_restore_priority
__traceiter_android_vh_binder_set_priority
__traceiter_android_vh_binder_wakeup_ilocked
__traceiter_android_vh_build_sched_domains
__traceiter_android_vh_bus_iommu_probe
__traceiter_android_vh_check_hibernation_swap
__traceiter_android_vh_check_uninterrupt_tasks
__traceiter_android_vh_check_uninterrupt_tasks_done
@ -3526,11 +3528,13 @@
__tracepoint_android_rvh_update_thermal_stats
__tracepoint_android_rvh_util_est_update
__tracepoint_android_rvh_wake_up_new_task
__tracepoint_android_vh_alter_mutex_list_add
__tracepoint_android_vh_audio_usb_offload_connect
__tracepoint_android_vh_binder_restore_priority
__tracepoint_android_vh_binder_set_priority
__tracepoint_android_vh_binder_wakeup_ilocked
__tracepoint_android_vh_build_sched_domains
__tracepoint_android_vh_bus_iommu_probe
__tracepoint_android_vh_check_hibernation_swap
__tracepoint_android_vh_check_uninterrupt_tasks
__tracepoint_android_vh_check_uninterrupt_tasks_done

File diff suppressed because it is too large Load Diff

View File

@ -25,6 +25,9 @@ static inline int syscall_get_nr(struct task_struct *task,
if (IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))
return task_thread_info(task)->abi_syscall;
if (task_thread_info(task)->abi_syscall == -1)
return -1;
return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK;
}

View File

@ -103,6 +103,7 @@ slow_work_pending:
cmp r0, #0
beq no_work_pending
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
str scno, [tsk, #TI_ABI_SYSCALL] @ make sure tracers see update
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
ENDPROC(ret_fast_syscall)

View File

@ -785,8 +785,9 @@ long arch_ptrace(struct task_struct *child, long request,
break;
case PTRACE_SET_SYSCALL:
task_thread_info(child)->abi_syscall = data &
__NR_SYSCALL_MASK;
if (data != -1)
data &= __NR_SYSCALL_MASK;
task_thread_info(child)->abi_syscall = data;
ret = 0;
break;

View File

@ -0,0 +1,81 @@
# CONFIG_WERROR is not set
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NO_HZ_IDLE=y
CONFIG_PREEMPT=y
# CONFIG_CPU_ISOLATION is not set
CONFIG_LOG_BUF_SHIFT=15
CONFIG_LOG_CPU_MAX_BUF_SHIFT=10
# CONFIG_UTS_NS is not set
# CONFIG_TIME_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZ4 is not set
# CONFIG_RD_ZSTD is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB_MERGE_DEFAULT is not set
# CONFIG_SLUB_CPU_PARTIAL is not set
CONFIG_ARM64_VA_BITS_48=y
CONFIG_NR_CPUS=2
CONFIG_CRASH_DUMP=y
# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set
# CONFIG_ARM64_TAGGED_ADDR_ABI is not set
# CONFIG_ARM64_SVE is not set
# CONFIG_EFI is not set
# CONFIG_SUSPEND is not set
CONFIG_JUMP_LABEL=y
# CONFIG_SECCOMP is not set
# CONFIG_STACKPROTECTOR is not set
# CONFIG_VMAP_STACK is not set
# CONFIG_MQ_IOSCHED_DEADLINE is not set
# CONFIG_MQ_IOSCHED_KYBER is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_BINFMT_SCRIPT is not set
# CONFIG_SPECULATIVE_PAGE_FAULT is not set
CONFIG_PCI=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCI_ENDPOINT=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_ARM_SCMI_PROTOCOL=y
# CONFIG_ARM_SMCCC_SOC_ID is not set
# CONFIG_BLK_DEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_DEVMEM is not set
# CONFIG_HWMON is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_VIRTIO_PCI=y
# CONFIG_VIRTIO_PCI_LEGACY is not set
# CONFIG_VHOST_MENU is not set
# CONFIG_ARM_ARCH_TIMER_EVTSTREAM is not set
# CONFIG_FSL_ERRATUM_A008585 is not set
# CONFIG_HISILICON_ERRATUM_161010101 is not set
# CONFIG_ARM64_ERRATUM_858921 is not set
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
CONFIG_TMPFS=y
CONFIG_TMPFS_XATTR=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_XZ_DEC=y
# CONFIG_SYMBOLIC_ERRNAME is not set
# CONFIG_RUNTIME_TESTING_MENU is not set

View File

@ -95,6 +95,7 @@ CONFIG_MODPROBE_PATH="/system/bin/modprobe"
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_CGROUP_IOCOST=y
CONFIG_BLK_CGROUP_IOPRIO=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_IOSCHED_BFQ=y

View File

@ -0,0 +1,216 @@
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_PSI=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_BOOST=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_UTS_NS is not set
# CONFIG_TIME_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
# CONFIG_RD_GZIP is not set
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_ZSTD is not set
CONFIG_BOOT_CONFIG=y
CONFIG_PROFILING=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=32
CONFIG_PARAVIRT_TIME_ACCOUNTING=y
CONFIG_KEXEC_FILE=y
CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_RANDOMIZE_BASE=y
# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
CONFIG_CMDLINE="stack_depot_disable=on kasan.stacktrace=off cgroup_disable=pressure ioremap_guard panic=-1 bootconfig"
CONFIG_CMDLINE_EXTEND=y
# CONFIG_EFI is not set
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_VIRTUALIZATION=y
CONFIG_JUMP_LABEL=y
CONFIG_SHADOW_CALL_STACK=y
CONFIG_CFI_CLANG=y
CONFIG_MODULES=y
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
CONFIG_IOSCHED_BFQ=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
# CONFIG_SLAB_MERGE_DEFAULT is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
CONFIG_ANON_VMA_NAME=y
CONFIG_USERFAULTFD=y
CONFIG_LRU_GEN=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_MIGRATE=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_NET_IPIP=y
CONFIG_NET_IPGRE_DEMUX=y
CONFIG_NET_IPGRE=y
CONFIG_NET_IPVTI=y
CONFIG_INET_ESP=y
CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_VTI=y
CONFIG_IPV6_GRE=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_VSOCKETS=y
CONFIG_VIRTIO_VSOCKETS=y
# CONFIG_WIRELESS is not set
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCIEAER=y
CONFIG_PCI_IOV=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_DW_PLAT_EP=y
CONFIG_PCIE_KIRIN=y
CONFIG_PCI_ENDPOINT=y
CONFIG_FW_LOADER_USER_HELPER=y
# CONFIG_FW_CACHE is not set
CONFIG_ARM_SCMI_PROTOCOL=y
# CONFIG_ARM_SCMI_POWER_DOMAIN is not set
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_OPEN_DICE=y
CONFIG_VCPU_STALL_DETECTOR=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_INIT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_RUNTIME_UARTS=0
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_NULL_TTY=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_CCTRNG=y
# CONFIG_DEVMEM is not set
# CONFIG_DEVPORT is not set
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_CORE=y
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_PL030=y
CONFIG_RTC_DRV_PL031=y
CONFIG_DMABUF_HEAPS=y
CONFIG_DMABUF_SYSFS_STATS=y
CONFIG_UIO=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_STAGING=y
CONFIG_HWSPINLOCK=y
CONFIG_EXT4_FS=y
# CONFIG_EXT4_USE_FOR_EXT2 is not set
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=y
CONFIG_TMPFS=y
CONFIG_EROFS_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_UNICODE=y
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
CONFIG_SECURITY_NETWORK=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_STATIC_USERMODEHELPER=y
CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SECURITY_SELINUX=y
CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_ADIANTUM=y
CONFIG_CRYPTO_HCTR2=y
CONFIG_CRYPTO_CHACHA20POLY1305=y
CONFIG_CRYPTO_BLAKE2B=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_TRACE_MMIO_ACCESS=y
CONFIG_XZ_DEC=y
CONFIG_DMA_RESTRICTED_POOL=y
CONFIG_PRINTK_TIME=y
CONFIG_PRINTK_CALLER=y
CONFIG_DYNAMIC_DEBUG_CORE=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF5=y
CONFIG_DEBUG_INFO_REDUCED=y
CONFIG_DEBUG_INFO_COMPRESSED=y
CONFIG_HEADERS_INSTALL=y
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_UBSAN=y
CONFIG_UBSAN_TRAP=y
CONFIG_UBSAN_LOCAL_BOUNDS=y
# CONFIG_UBSAN_SHIFT is not set
# CONFIG_UBSAN_BOOL is not set
# CONFIG_UBSAN_ENUM is not set
CONFIG_PAGE_OWNER=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_KASAN=y
CONFIG_KASAN_HW_TAGS=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_PANIC_TIMEOUT=-1
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_WQ_WATCHDOG=y
CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_HIST_TRIGGERS=y
CONFIG_PID_IN_CONTEXTIDR=y
# CONFIG_RUNTIME_TESTING_MENU is not set

View File

@ -0,0 +1,358 @@
# CONFIG_MODULE_SIG_ALL is not set
CONFIG_PWRSEQ_SIMPLE=m
CONFIG_AP6XXX=m
CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARM_ROCKCHIP_BUS_DEVFREQ=m
CONFIG_ARM_ROCKCHIP_CPUFREQ=m
CONFIG_ARM_ROCKCHIP_DMC_DEVFREQ=m
CONFIG_BACKLIGHT_PWM=m
CONFIG_BATTERY_CW2015=m
CONFIG_BATTERY_CW2017=m
CONFIG_BATTERY_CW221X=m
CONFIG_BATTERY_RK817=m
CONFIG_BATTERY_RK818=m
CONFIG_BMA2XX_ACC=m
CONFIG_CHARGER_BQ25700=m
CONFIG_CHARGER_BQ25890=m
CONFIG_CHARGER_RK817=m
CONFIG_CHARGER_RK818=m
CONFIG_CHARGER_SC89890=m
CONFIG_CHARGER_SGM41542=m
CONFIG_CHR_DEV_SG=m
CONFIG_COMMON_CLK_PWM=m
CONFIG_COMMON_CLK_RK808=m
CONFIG_COMMON_CLK_ROCKCHIP=m
CONFIG_COMMON_CLK_SCMI=m
CONFIG_COMPASS_AK8963=m
CONFIG_COMPASS_AK8975=m
CONFIG_COMPASS_DEVICE=m
CONFIG_CPUFREQ_DT=m
CONFIG_CPU_FREQ_GOV_ONDEMAND=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m
CONFIG_CPU_PX30=y
CONFIG_CPU_RK3399=y
CONFIG_CPU_RK3562=y
CONFIG_CPU_RK3568=y
CONFIG_CPU_RK3588=y
CONFIG_CRYPTO_AES_ARM64_CE_CCM=m
CONFIG_CRYPTO_DEV_ROCKCHIP=m
CONFIG_CRYPTO_DEV_ROCKCHIP_DEV=m
CONFIG_CRYPTO_SHA1_ARM64_CE=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_DEVFREQ_EVENT_ROCKCHIP_NOCP=m
CONFIG_DMABUF_HEAPS_CMA=m
CONFIG_DMABUF_HEAPS_SYSTEM=m
CONFIG_DRAGONRISE_FF=y
CONFIG_DRM_DISPLAY_CONNECTOR=m
CONFIG_DRM_DW_HDMI_CEC=m
CONFIG_DRM_DW_HDMI_I2S_AUDIO=m
CONFIG_DRM_MAXIM_MAX96745=m
CONFIG_DRM_MAXIM_MAX96755F=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_RK1000_TVE=m
CONFIG_DRM_RK630_TVE=m
CONFIG_DRM_ROCKCHIP=m
CONFIG_DRM_ROCKCHIP_RK618=m
CONFIG_DRM_ROCKCHIP_RK628=m
CONFIG_DRM_ROHM_BU18XL82=m
CONFIG_DRM_SII902X=m
CONFIG_DTC_SYMBOLS=y
# CONFIG_DWMAC_GENERIC is not set
# CONFIG_DWMAC_IPQ806X is not set
# CONFIG_DWMAC_QCOM_ETHQOS is not set
# CONFIG_DWMAC_SUN8I is not set
# CONFIG_DWMAC_SUNXI is not set
CONFIG_DW_WATCHDOG=m
CONFIG_FIQ_DEBUGGER=m
CONFIG_FIQ_DEBUGGER_CONSOLE=y
CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE=y
CONFIG_FIQ_DEBUGGER_NO_SLEEP=y
CONFIG_FIQ_DEBUGGER_TRUST_ZONE=y
CONFIG_GPIO_ROCKCHIP=m
CONFIG_GREENASIA_FF=y
CONFIG_GSENSOR_DEVICE=m
CONFIG_GS_DA223=m
CONFIG_GS_KXTJ9=m
CONFIG_GS_LIS3DH=m
CONFIG_GS_LSM303D=m
CONFIG_GS_MC3230=m
CONFIG_GS_MMA7660=m
CONFIG_GS_MMA8452=m
CONFIG_GS_MXC6655XA=m
CONFIG_GS_SC7660=m
CONFIG_GS_SC7A20=m
CONFIG_GS_SC7A30=m
CONFIG_GYROSCOPE_DEVICE=m
CONFIG_GYRO_EWTSA=m
CONFIG_GYRO_L3G20D=m
CONFIG_GYRO_L3G4200D=m
CONFIG_GYRO_LSM330=m
CONFIG_GYRO_MPU6500=m
CONFIG_GYRO_MPU6880=m
CONFIG_HALL_DEVICE=m
CONFIG_HID_A4TECH=m
CONFIG_HID_ACRUX=m
CONFIG_HID_ACRUX_FF=y
CONFIG_HID_ALPS=m
CONFIG_HID_APPLEIR=m
CONFIG_HID_AUREAL=m
CONFIG_HID_BELKIN=m
CONFIG_HID_CHERRY=m
CONFIG_HID_CHICONY=m
CONFIG_HID_CYPRESS=m
CONFIG_HID_DRAGONRISE=m
CONFIG_HID_EMS_FF=m
CONFIG_HID_EZKEY=m
CONFIG_HID_GREENASIA=m
CONFIG_HID_GYRATION=m
CONFIG_HID_HOLTEK=m
CONFIG_HID_ICADE=m
CONFIG_HID_KENSINGTON=m
CONFIG_HID_KEYTOUCH=m
CONFIG_HID_KYE=m
CONFIG_HID_LCPOWER=m
CONFIG_HID_LENOVO=m
CONFIG_HID_MONTEREY=m
CONFIG_HID_NTRIG=m
CONFIG_HID_ORTEK=m
CONFIG_HID_PANTHERLORD=m
CONFIG_HID_PETALYNX=m
CONFIG_HID_PRIMAX=m
CONFIG_HID_SAITEK=m
CONFIG_HID_SAMSUNG=m
CONFIG_HID_SMARTJOYPLUS=m
CONFIG_HID_SPEEDLINK=m
CONFIG_HID_STEELSERIES=m
CONFIG_HID_SUNPLUS=m
CONFIG_HID_THINGM=m
CONFIG_HID_THRUSTMASTER=m
CONFIG_HID_TIVO=m
CONFIG_HID_TOPSEED=m
CONFIG_HID_TWINHAN=m
CONFIG_HID_WALTOP=m
CONFIG_HID_ZEROPLUS=m
CONFIG_HID_ZYDACRON=m
CONFIG_HS_MH248=m
CONFIG_HW_RANDOM_ROCKCHIP=m
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_GPIO=m
CONFIG_I2C_HID_OF=m
CONFIG_I2C_RK3X=m
CONFIG_IEP=m
CONFIG_IIO_BUFFER_CB=m
CONFIG_INPUT_RK805_PWRKEY=m
CONFIG_KEYBOARD_ADC=m
CONFIG_LEDS_GPIO=m
CONFIG_LEDS_RGB13H=m
CONFIG_LEDS_TRIGGER_BACKLIGHT=m
CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
CONFIG_LIGHT_DEVICE=m
CONFIG_LSM330_ACC=m
CONFIG_LS_CM3217=m
CONFIG_LS_CM3218=m
CONFIG_LS_STK3410=m
CONFIG_LS_UCS14620=m
CONFIG_MALI_BIFROST=m
CONFIG_MALI_BIFROST_DEBUG=y
CONFIG_MALI_BIFROST_EXPERT=y
CONFIG_MALI_CSF_SUPPORT=y
CONFIG_MALI_PLATFORM_NAME="rk"
CONFIG_MALI_PWRSOFT_765=y
CONFIG_MFD_RK618=m
CONFIG_MFD_RK628=m
CONFIG_MFD_RK630_I2C=m
CONFIG_MFD_RK806_SPI=m
CONFIG_MFD_RK808=m
CONFIG_MMC_DW=m
CONFIG_MMC_DW_ROCKCHIP=m
CONFIG_MMC_SDHCI_OF_ARASAN=m
CONFIG_MMC_SDHCI_OF_DWCMSHC=m
CONFIG_MPU6500_ACC=m
CONFIG_MPU6880_ACC=m
CONFIG_NVMEM_ROCKCHIP_EFUSE=m
CONFIG_NVMEM_ROCKCHIP_OTP=m
CONFIG_OPTEE=m
CONFIG_PANTHERLORD_FF=y
CONFIG_PCIEASPM_EXT=m
CONFIG_PCIE_DW_ROCKCHIP=m
CONFIG_PCIE_ROCKCHIP_HOST=m
CONFIG_PHY_ROCKCHIP_CSI2_DPHY=m
CONFIG_PHY_ROCKCHIP_DP=m
CONFIG_PHY_ROCKCHIP_EMMC=m
CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY=m
CONFIG_PHY_ROCKCHIP_INNO_HDMI=m
CONFIG_PHY_ROCKCHIP_INNO_USB2=m
CONFIG_PHY_ROCKCHIP_INNO_USB3=m
CONFIG_PHY_ROCKCHIP_NANENG_COMBO_PHY=m
CONFIG_PHY_ROCKCHIP_NANENG_EDP=m
CONFIG_PHY_ROCKCHIP_PCIE=m
CONFIG_PHY_ROCKCHIP_SAMSUNG_DCPHY=m
CONFIG_PHY_ROCKCHIP_SAMSUNG_HDPTX=m
CONFIG_PHY_ROCKCHIP_SAMSUNG_HDPTX_HDMI=m
CONFIG_PHY_ROCKCHIP_SNPS_PCIE3=m
CONFIG_PHY_ROCKCHIP_TYPEC=m
CONFIG_PHY_ROCKCHIP_USB=m
CONFIG_PHY_ROCKCHIP_USBDP=m
CONFIG_PINCTRL_RK805=m
CONFIG_PINCTRL_RK806=m
CONFIG_PINCTRL_ROCKCHIP=m
CONFIG_PL330_DMA=m
CONFIG_PROXIMITY_DEVICE=m
CONFIG_PS_STK3410=m
CONFIG_PS_UCS14620=m
CONFIG_PWM_ROCKCHIP=m
CONFIG_REGULATOR_ACT8865=m
CONFIG_REGULATOR_FAN53555=m
CONFIG_REGULATOR_GPIO=m
CONFIG_REGULATOR_LP8752=m
CONFIG_REGULATOR_MP8865=m
CONFIG_REGULATOR_PWM=m
CONFIG_REGULATOR_RK806=m
CONFIG_REGULATOR_RK808=m
CONFIG_REGULATOR_RK860X=m
CONFIG_REGULATOR_TPS65132=m
CONFIG_REGULATOR_WL2868C=m
CONFIG_REGULATOR_XZ3216=m
CONFIG_RFKILL_RK=m
CONFIG_RK_CONSOLE_THREAD=y
CONFIG_RK_HEADSET=m
CONFIG_ROCKCHIP_ANALOGIX_DP=y
CONFIG_ROCKCHIP_CDN_DP=y
CONFIG_ROCKCHIP_CPUINFO=m
CONFIG_ROCKCHIP_DEBUG=m
CONFIG_ROCKCHIP_DW_DP=y
CONFIG_ROCKCHIP_DW_HDCP2=m
CONFIG_ROCKCHIP_DW_HDMI=y
CONFIG_ROCKCHIP_DW_MIPI_DSI=y
CONFIG_ROCKCHIP_GRF=m
CONFIG_ROCKCHIP_INNO_HDMI=y
CONFIG_ROCKCHIP_IODOMAIN=m
CONFIG_ROCKCHIP_IOMMU=m
CONFIG_ROCKCHIP_IPA=m
CONFIG_ROCKCHIP_LVDS=y
CONFIG_ROCKCHIP_MPP_AV1DEC=y
CONFIG_ROCKCHIP_MPP_IEP2=y
CONFIG_ROCKCHIP_MPP_JPGDEC=y
CONFIG_ROCKCHIP_MPP_RKVDEC=y
CONFIG_ROCKCHIP_MPP_RKVDEC2=y
CONFIG_ROCKCHIP_MPP_RKVENC=y
CONFIG_ROCKCHIP_MPP_RKVENC2=y
CONFIG_ROCKCHIP_MPP_SERVICE=m
CONFIG_ROCKCHIP_MPP_VDPU1=y
CONFIG_ROCKCHIP_MPP_VDPU2=y
CONFIG_ROCKCHIP_MPP_VEPU1=y
CONFIG_ROCKCHIP_MPP_VEPU2=y
CONFIG_ROCKCHIP_MULTI_RGA=m
CONFIG_ROCKCHIP_OPP=m
CONFIG_ROCKCHIP_PHY=m
CONFIG_ROCKCHIP_PM_DOMAINS=m
CONFIG_ROCKCHIP_PVTM=m
CONFIG_ROCKCHIP_RAM_VENDOR_STORAGE=m
CONFIG_ROCKCHIP_REMOTECTL=m
CONFIG_ROCKCHIP_REMOTECTL_PWM=m
CONFIG_ROCKCHIP_RGB=y
CONFIG_ROCKCHIP_RKNPU=m
CONFIG_ROCKCHIP_SARADC=m
CONFIG_ROCKCHIP_SIP=m
CONFIG_ROCKCHIP_SUSPEND_MODE=m
CONFIG_ROCKCHIP_SYSTEM_MONITOR=m
CONFIG_ROCKCHIP_THERMAL=m
CONFIG_ROCKCHIP_TIMER=m
CONFIG_ROCKCHIP_VENDOR_STORAGE=m
CONFIG_ROCKCHIP_VENDOR_STORAGE_UPDATE_LOADER=y
CONFIG_RTC_DRV_HYM8563=m
CONFIG_RTC_DRV_RK808=m
CONFIG_SENSOR_DEVICE=m
CONFIG_SMARTJOYPLUS_FF=y
CONFIG_SND_SIMPLE_CARD=m
CONFIG_SND_SOC_AW883XX=m
CONFIG_SND_SOC_BT_SCO=m
CONFIG_SND_SOC_CX2072X=m
CONFIG_SND_SOC_DUMMY_CODEC=m
CONFIG_SND_SOC_ES7202=m
CONFIG_SND_SOC_ES7210=m
CONFIG_SND_SOC_ES7243E=m
CONFIG_SND_SOC_ES8311=m
CONFIG_SND_SOC_ES8316=m
CONFIG_SND_SOC_ES8323=m
CONFIG_SND_SOC_ES8326=m
CONFIG_SND_SOC_ES8396=m
CONFIG_SND_SOC_RK3328=m
CONFIG_SND_SOC_RK817=m
CONFIG_SND_SOC_RK_CODEC_DIGITAL=m
CONFIG_SND_SOC_RK_DSM=m
CONFIG_SND_SOC_ROCKCHIP=m
CONFIG_SND_SOC_ROCKCHIP_HDMI=m
CONFIG_SND_SOC_ROCKCHIP_I2S=m
CONFIG_SND_SOC_ROCKCHIP_I2S_TDM=m
CONFIG_SND_SOC_ROCKCHIP_MULTICODECS=m
CONFIG_SND_SOC_ROCKCHIP_PDM=m
CONFIG_SND_SOC_ROCKCHIP_SAI=m
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
CONFIG_SND_SOC_ROCKCHIP_SPDIFRX=m
CONFIG_SND_SOC_RT5640=m
CONFIG_SND_SOC_SPDIF=m
CONFIG_SPI_ROCKCHIP=m
CONFIG_SPI_SPIDEV=m
CONFIG_STMMAC_ETH=m
CONFIG_SW_SYNC=m
CONFIG_SYSCON_REBOOT_MODE=m
CONFIG_TEE=m
CONFIG_TEST_POWER=m
CONFIG_TOUCHSCREEN_ELAN5515=m
CONFIG_TOUCHSCREEN_GSL3673=m
CONFIG_TOUCHSCREEN_GSLX680_PAD=m
CONFIG_TOUCHSCREEN_GT1X=m
CONFIG_TYPEC_FUSB302=m
CONFIG_TYPEC_HUSB311=m
CONFIG_UCS12CM0=m
CONFIG_USB_DWC2=m
CONFIG_USB_NET_CDC_MBIM=m
CONFIG_USB_NET_DM9601=m
CONFIG_USB_NET_GL620A=m
CONFIG_USB_NET_KALMIA=m
CONFIG_USB_NET_MCS7830=m
CONFIG_USB_NET_PLUSB=m
CONFIG_USB_NET_SMSC75XX=m
CONFIG_USB_NET_SMSC95XX=m
CONFIG_USB_OHCI_HCD=m
# CONFIG_USB_OHCI_HCD_PCI is not set
CONFIG_USB_OHCI_HCD_PLATFORM=m
CONFIG_USB_PRINTER=m
CONFIG_USB_TRANCEVIBRATOR=m
CONFIG_VIDEO_AW36518=m
CONFIG_VIDEO_AW8601=m
CONFIG_VIDEO_CN3927V=m
CONFIG_VIDEO_DW9714=m
CONFIG_VIDEO_FP5510=m
CONFIG_VIDEO_GC2145=m
CONFIG_VIDEO_GC2385=m
CONFIG_VIDEO_GC4C33=m
CONFIG_VIDEO_GC8034=m
CONFIG_VIDEO_IMX415=m
CONFIG_VIDEO_LT6911UXC=m
CONFIG_VIDEO_LT7911D=m
CONFIG_VIDEO_NVP6188=m
CONFIG_VIDEO_OV02B10=m
CONFIG_VIDEO_OV13850=m
CONFIG_VIDEO_OV13855=m
CONFIG_VIDEO_OV50C40=m
CONFIG_VIDEO_OV5695=m
CONFIG_VIDEO_OV8858=m
CONFIG_VIDEO_RK628_BT1120=m
CONFIG_VIDEO_RK628_CSI=m
CONFIG_VIDEO_RK_IRCUT=m
CONFIG_VIDEO_ROCKCHIP_CIF=m
CONFIG_VIDEO_ROCKCHIP_HDMIRX=m
CONFIG_VIDEO_ROCKCHIP_ISP=m
CONFIG_VIDEO_ROCKCHIP_ISPP=m
CONFIG_VIDEO_ROCKCHIP_RKISP1=m
CONFIG_VIDEO_S5K3L6XX=m
CONFIG_VIDEO_S5KJN1=m
CONFIG_VIDEO_SGM3784=m
CONFIG_VIDEO_THCV244=m
CONFIG_VL6180=m
CONFIG_WIFI_BUILD_MODULE=y
CONFIG_WL_ROCKCHIP=m
# CONFIG_USB_DUMMY_HCD is not set

View File

@ -259,6 +259,8 @@ extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);
extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[];
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

View File

@ -414,10 +414,4 @@ static inline size_t pkvm_host_fp_state_size(void)
return sizeof(struct user_fpsimd_state);
}
static inline unsigned long hyp_host_fp_pages(unsigned long nr_cpus)
{
return PAGE_ALIGN(size_mul(nr_cpus, pkvm_host_fp_state_size())) >>
PAGE_SHIFT;
}
#endif /* __ARM64_KVM_PKVM_H__ */

View File

@ -1642,6 +1642,11 @@ static unsigned long nvhe_percpu_order(void)
return size ? get_order(size) : 0;
}
static inline size_t pkvm_host_fp_state_order(void)
{
return get_order(pkvm_host_fp_state_size());
}
/* A lookup table holding the hypervisor VA for each vector slot */
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
@ -2006,6 +2011,8 @@ static void teardown_hyp_mode(void)
for_each_possible_cpu(cpu) {
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
free_pages(kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu],
pkvm_host_fp_state_order());
}
}
@ -2092,6 +2099,48 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits)
return 0;
}
static int init_pkvm_host_fp_state(void)
{
int cpu;
if (!is_protected_kvm_enabled())
return 0;
/* Allocate pages for protected-mode host-fp state. */
for_each_possible_cpu(cpu) {
struct page *page;
unsigned long addr;
page = alloc_pages(GFP_KERNEL, pkvm_host_fp_state_order());
if (!page)
return -ENOMEM;
addr = (unsigned long)page_address(page);
kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu] = addr;
}
/*
* Don't map the pages in hyp since these are only used in protected
* mode, which will (re)create its own mapping when initialized.
*/
return 0;
}
/*
* Finalizes the initialization of hyp mode, once everything else is initialized
* and the initialziation process cannot fail.
*/
static void finalize_init_hyp_mode(void)
{
int cpu;
for_each_possible_cpu(cpu) {
kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu] =
kern_hyp_va(kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu]);
}
}
/**
* Inits Hyp-mode on all online CPUs
*/
@ -2259,6 +2308,10 @@ static int init_hyp_mode(void)
cpu_prepare_hyp_mode(cpu);
}
err = init_pkvm_host_fp_state();
if (err)
goto out_err;
kvm_hyp_init_symbols();
/* TODO: Real .h interface */
@ -2417,6 +2470,13 @@ int kvm_arch_init(void *opaque)
kvm_info("Hyp mode initialized successfully\n");
}
/*
* This should be called after initialization is done and failure isn't
* possible anymore.
*/
if (!in_hyp_mode)
finalize_init_hyp_mode();
return 0;
out_hyp:

View File

@ -82,8 +82,6 @@ struct pkvm_hyp_vm {
struct pkvm_hyp_vcpu *vcpus[];
};
extern void *host_fp_state;
static inline struct pkvm_hyp_vm *
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
{
@ -107,7 +105,6 @@ extern phys_addr_t pvmfw_base;
extern phys_addr_t pvmfw_size;
void pkvm_hyp_vm_table_init(void *tbl);
void pkvm_hyp_host_fp_init(void *host_fp);
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
unsigned long pgd_hva, unsigned long last_ran_hva);

View File

@ -1383,11 +1383,15 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
handled = kvm_host_ffa_handler(host_ctxt);
if (!handled && smp_load_acquire(&default_host_smc_handler))
handled = default_host_smc_handler(host_ctxt);
if (!handled)
__kvm_hyp_host_forward_smc(host_ctxt);
trace_host_smc(func_id, !handled);
if (!handled) {
trace_hyp_exit();
__kvm_hyp_host_forward_smc(host_ctxt);
trace_hyp_enter();
}
/* SMC was trapped, move ELR past the current PC. */
kvm_skip_host_instr();
}

View File

@ -41,17 +41,15 @@ static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);
*
* Only valid when (fp_state == FP_STATE_GUEST_OWNED) in the hyp vCPU structure.
*/
void *host_fp_state;
unsigned long __ro_after_init kvm_arm_hyp_host_fp_state[NR_CPUS];
static void *__get_host_fpsimd_bytes(void)
{
void *state = host_fp_state +
size_mul(pkvm_host_fp_state_size(), hyp_smp_processor_id());
if (state < host_fp_state)
return NULL;
return state;
/*
* The addresses in this array have been converted to hyp addresses
* in finalize_init_hyp_mode().
*/
return (void *)kvm_arm_hyp_host_fp_state[hyp_smp_processor_id()];
}
struct user_fpsimd_state *get_host_fpsimd_state(struct kvm_vcpu *vcpu)
@ -295,12 +293,6 @@ void pkvm_hyp_vm_table_init(void *tbl)
vm_table = tbl;
}
void pkvm_hyp_host_fp_init(void *host_fp)
{
WARN_ON(host_fp_state);
host_fp_state = host_fp;
}
/*
* Return the hyp vm structure corresponding to the handle.
*/

View File

@ -34,7 +34,6 @@ static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static void *hyp_host_fp_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;
@ -69,10 +68,21 @@ static int divide_memory_pool(void *virt, unsigned long size)
if (!ffa_proxy_pages)
return -ENOMEM;
nr_pages = hyp_host_fp_pages(hyp_nr_cpus);
hyp_host_fp_base = hyp_early_alloc_contig(nr_pages);
if (!hyp_host_fp_base)
return -ENOMEM;
return 0;
}
static int create_hyp_host_fp_mappings(void)
{
void *start, *end;
int ret, i;
for (i = 0; i < hyp_nr_cpus; i++) {
start = (void *)kern_hyp_va(kvm_arm_hyp_host_fp_state[i]);
end = start + PAGE_ALIGN(pkvm_host_fp_state_size());
ret = pkvm_create_mappings(start, end, PAGE_HYP);
if (ret)
return ret;
}
return 0;
}
@ -164,6 +174,8 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
}
create_hyp_host_fp_mappings();
/*
* Map the host sections RO in the hypervisor, but transfer the
* ownership from the host to the hypervisor itself to make sure they
@ -405,7 +417,6 @@ void __noreturn __pkvm_init_finalise(void)
goto out;
pkvm_hyp_vm_table_init(vm_table_base);
pkvm_hyp_host_fp_init(hyp_host_fp_base);
out:
/*
* We tail-called to here from handle___pkvm_init() and will not return,

View File

@ -701,7 +701,7 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
static bool stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
return true;
return false;
}
static bool stage2_pte_is_counted(kvm_pte_t pte, u32 level)

View File

@ -173,7 +173,6 @@ void __init kvm_hyp_reserve(void)
hyp_mem_pages += hyp_vm_table_pages();
hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
hyp_mem_pages += hyp_ffa_proxy_pages();
hyp_mem_pages += hyp_host_fp_pages(num_possible_cpus());
/*
* Try to allocate a PMD-aligned region to reduce TLB pressure once
@ -504,10 +503,6 @@ static int __init finalize_pkvm(void)
if (pkvm_load_early_modules())
pkvm_firmware_rmem_clear();
/* If no DMA protection. */
if (!pkvm_iommu_finalized())
pkvm_firmware_rmem_clear();
/*
* Exclude HYP sections from kmemleak so that they don't get peeked
* at, which would end badly once inaccessible.
@ -516,6 +511,12 @@ static int __init finalize_pkvm(void)
kmemleak_free_part(__hyp_data_start, __hyp_data_end - __hyp_data_start);
kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
flush_deferred_probe_now();
/* If no DMA protection. */
if (!pkvm_iommu_finalized())
pkvm_firmware_rmem_clear();
ret = pkvm_drop_host_privileges();
if (ret) {
pr_err("Failed to de-privilege the host kernel: %d\n", ret);

View File

@ -0,0 +1,87 @@
# CONFIG_WERROR is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_LZ4=y
# CONFIG_SWAP is not set
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NO_HZ_IDLE=y
CONFIG_PREEMPT=y
CONFIG_LOG_BUF_SHIFT=12
CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
# CONFIG_UTS_NS is not set
# CONFIG_TIME_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
# CONFIG_RD_ZSTD is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB_MERGE_DEFAULT is not set
# CONFIG_RETPOLINE is not set
# CONFIG_X86_EXTENDED_PLATFORM is not set
# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
# CONFIG_X86_MCE is not set
# CONFIG_PERF_EVENTS_AMD_UNCORE is not set
# CONFIG_X86_IOPL_IOPERM is not set
# CONFIG_MICROCODE is not set
# CONFIG_X86_5LEVEL is not set
# CONFIG_MTRR_SANITIZER is not set
# CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS is not set
CONFIG_CRASH_DUMP=y
CONFIG_PHYSICAL_START=0x100000
# CONFIG_RANDOMIZE_BASE is not set
CONFIG_LEGACY_VSYSCALL_NONE=y
# CONFIG_SUSPEND is not set
# CONFIG_ACPI is not set
# CONFIG_VIRTUALIZATION is not set
CONFIG_JUMP_LABEL=y
# CONFIG_SECCOMP is not set
# CONFIG_STACKPROTECTOR is not set
# CONFIG_VMAP_STACK is not set
# CONFIG_MQ_IOSCHED_DEADLINE is not set
# CONFIG_MQ_IOSCHED_KYBER is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_BINFMT_SCRIPT is not set
# CONFIG_SPARSEMEM_VMEMMAP is not set
# CONFIG_COMPACTION is not set
CONFIG_PCI=y
CONFIG_PCI_ENDPOINT=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_DMIID is not set
# CONFIG_BLK_DEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO_I8042 is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_DEV_BUS=y
# CONFIG_SERIAL_DEV_CTRL_TTYPORT is not set
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_DEVMEM is not set
# CONFIG_HWMON is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_VIRTIO_PCI=y
# CONFIG_VIRTIO_PCI_LEGACY is not set
# CONFIG_VHOST_MENU is not set
# CONFIG_X86_PLATFORM_DEVICES is not set
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_PAGE_TABLE_ISOLATION is not set
# CONFIG_SYMBOLIC_ERRNAME is not set
# CONFIG_X86_VERBOSE_BOOTUP is not set
# CONFIG_RUNTIME_TESTING_MENU is not set

View File

@ -90,6 +90,7 @@ CONFIG_MODULE_SIG_PROTECT=y
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_CGROUP_IOCOST=y
CONFIG_BLK_CGROUP_IOPRIO=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_IOSCHED_BFQ=y

View File

@ -0,0 +1,290 @@
CONFIG_KERNEL_LZ4=y
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_TASKSTATS=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_PSI=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_BOOST=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_UCLAMP_TASK_GROUP=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
# CONFIG_UTS_NS is not set
# CONFIG_TIME_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
CONFIG_BOOT_CONFIG=y
CONFIG_PROFILING=y
CONFIG_SMP=y
CONFIG_X86_X2APIC=y
CONFIG_HYPERVISOR_GUEST=y
CONFIG_PARAVIRT=y
CONFIG_PARAVIRT_TIME_ACCOUNTING=y
CONFIG_NR_CPUS=32
# CONFIG_X86_MCE is not set
CONFIG_EFI=y
CONFIG_KEXEC_FILE=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="stack_depot_disable=on cgroup_disable=pressure ioremap_guard panic=-1 bootconfig acpi=noirq"
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_CGROUP_IOCOST=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
# CONFIG_SLAB_MERGE_DEFAULT is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
CONFIG_ANON_VMA_NAME=y
CONFIG_USERFAULTFD=y
CONFIG_LRU_GEN=y
CONFIG_DAMON=y
CONFIG_DAMON_PADDR=y
CONFIG_DAMON_RECLAIM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_MIGRATE=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_NET_IPIP=y
CONFIG_NET_IPGRE_DEMUX=y
CONFIG_NET_IPGRE=y
CONFIG_NET_IPVTI=y
CONFIG_INET_ESP=y
CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_VTI=y
CONFIG_IPV6_GRE=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_VSOCKETS=y
CONFIG_VIRTIO_VSOCKETS=y
CONFIG_CGROUP_NET_PRIO=y
# CONFIG_WIRELESS is not set
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCIEAER=y
CONFIG_PCI_MSI=y
CONFIG_PCI_IOV=y
CONFIG_PCIE_DW_PLAT_EP=y
CONFIG_PCI_ENDPOINT=y
CONFIG_FW_LOADER_USER_HELPER=y
# CONFIG_FW_CACHE is not set
CONFIG_OF=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_INIT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_RUNTIME_UARTS=0
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_NULL_TTY=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
# CONFIG_DEVMEM is not set
# CONFIG_DEVPORT is not set
CONFIG_HPET=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_GENERIC_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_THERMAL_NETLINK=y
CONFIG_THERMAL_STATISTICS=y
CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_GOV_USER_SPACE=y
CONFIG_CPU_THERMAL=y
CONFIG_THERMAL_EMULATION=y
# CONFIG_X86_PKG_TEMP_THERMAL is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_CORE=y
CONFIG_MFD_SYSCON=y
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_DMABUF_HEAPS=y
CONFIG_DMABUF_SYSFS_STATS=y
CONFIG_UIO=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_STAGING=y
CONFIG_LIBNVDIMM=y
CONFIG_EXT4_FS=y
# CONFIG_EXT4_USE_FOR_EXT2 is not set
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_EFIVAR_FS is not set
CONFIG_EROFS_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=y
CONFIG_NLS_CODEPAGE_775=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_CODEPAGE_852=y
CONFIG_NLS_CODEPAGE_855=y
CONFIG_NLS_CODEPAGE_857=y
CONFIG_NLS_CODEPAGE_860=y
CONFIG_NLS_CODEPAGE_861=y
CONFIG_NLS_CODEPAGE_862=y
CONFIG_NLS_CODEPAGE_863=y
CONFIG_NLS_CODEPAGE_864=y
CONFIG_NLS_CODEPAGE_865=y
CONFIG_NLS_CODEPAGE_866=y
CONFIG_NLS_CODEPAGE_869=y
CONFIG_NLS_CODEPAGE_936=y
CONFIG_NLS_CODEPAGE_950=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_NLS_CODEPAGE_949=y
CONFIG_NLS_CODEPAGE_874=y
CONFIG_NLS_ISO8859_8=y
CONFIG_NLS_CODEPAGE_1250=y
CONFIG_NLS_CODEPAGE_1251=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_2=y
CONFIG_NLS_ISO8859_3=y
CONFIG_NLS_ISO8859_4=y
CONFIG_NLS_ISO8859_5=y
CONFIG_NLS_ISO8859_6=y
CONFIG_NLS_ISO8859_7=y
CONFIG_NLS_ISO8859_9=y
CONFIG_NLS_ISO8859_13=y
CONFIG_NLS_ISO8859_14=y
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_KOI8_R=y
CONFIG_NLS_KOI8_U=y
CONFIG_NLS_MAC_ROMAN=y
CONFIG_NLS_MAC_CELTIC=y
CONFIG_NLS_MAC_CENTEURO=y
CONFIG_NLS_MAC_CROATIAN=y
CONFIG_NLS_MAC_CYRILLIC=y
CONFIG_NLS_MAC_GAELIC=y
CONFIG_NLS_MAC_GREEK=y
CONFIG_NLS_MAC_ICELAND=y
CONFIG_NLS_MAC_INUIT=y
CONFIG_NLS_MAC_ROMANIAN=y
CONFIG_NLS_MAC_TURKISH=y
CONFIG_NLS_UTF8=y
CONFIG_UNICODE=y
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
CONFIG_SECURITY_NETWORK=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_STATIC_USERMODEHELPER=y
CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SECURITY_SELINUX=y
CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_ADIANTUM=y
CONFIG_CRYPTO_HCTR2=y
CONFIG_CRYPTO_CHACHA20POLY1305=y
CONFIG_CRYPTO_BLAKE2B=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_AES_NI_INTEL=y
CONFIG_CRYPTO_POLYVAL_CLMUL_NI=y
CONFIG_CRYPTO_SHA256_SSSE3=y
CONFIG_CRYPTO_SHA512_SSSE3=y
CONFIG_CRC8=y
CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG_CORE=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF5=y
CONFIG_DEBUG_INFO_REDUCED=y
CONFIG_DEBUG_INFO_COMPRESSED=y
CONFIG_HEADERS_INSTALL=y
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_UBSAN=y
CONFIG_UBSAN_TRAP=y
CONFIG_UBSAN_LOCAL_BOUNDS=y
# CONFIG_UBSAN_SHIFT is not set
# CONFIG_UBSAN_BOOL is not set
# CONFIG_UBSAN_ENUM is not set
CONFIG_PAGE_OWNER=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_KFENCE=y
CONFIG_KFENCE_SAMPLE_INTERVAL=500
CONFIG_KFENCE_NUM_OBJECTS=63
CONFIG_PANIC_ON_OOPS=y
CONFIG_PANIC_TIMEOUT=-1
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_WQ_WATCHDOG=y
CONFIG_SCHEDSTATS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_HIST_TRIGGERS=y
CONFIG_UNWINDER_FRAME_POINTER=y

View File

@ -23,25 +23,28 @@
/**
* enum prio_policy - I/O priority class policy.
* @POLICY_NO_CHANGE: (default) do not modify the I/O priority class.
* @POLICY_NONE_TO_RT: modify IOPRIO_CLASS_NONE into IOPRIO_CLASS_RT.
* @POLICY_PROMOTE_TO_RT: modify no-IOPRIO_CLASS_RT to IOPRIO_CLASS_RT.
* @POLICY_RESTRICT_TO_BE: modify IOPRIO_CLASS_NONE and IOPRIO_CLASS_RT into
* IOPRIO_CLASS_BE.
* @POLICY_ALL_TO_IDLE: change the I/O priority class into IOPRIO_CLASS_IDLE.
* @POLICY_NONE_TO_RT: an alias for POLICY_PROMOTE_TO_RT.
*
* See also <linux/ioprio.h>.
*/
enum prio_policy {
POLICY_NO_CHANGE = 0,
POLICY_NONE_TO_RT = 1,
POLICY_PROMOTE_TO_RT = 1,
POLICY_RESTRICT_TO_BE = 2,
POLICY_ALL_TO_IDLE = 3,
POLICY_NONE_TO_RT = 4,
};
static const char *policy_name[] = {
[POLICY_NO_CHANGE] = "no-change",
[POLICY_NONE_TO_RT] = "none-to-rt",
[POLICY_PROMOTE_TO_RT] = "promote-to-rt",
[POLICY_RESTRICT_TO_BE] = "restrict-to-be",
[POLICY_ALL_TO_IDLE] = "idle",
[POLICY_NONE_TO_RT] = "none-to-rt",
};
static struct blkcg_policy ioprio_policy;
@ -189,6 +192,20 @@ void blkcg_set_ioprio(struct bio *bio)
if (!blkcg || blkcg->prio_policy == POLICY_NO_CHANGE)
return;
if (blkcg->prio_policy == POLICY_PROMOTE_TO_RT ||
blkcg->prio_policy == POLICY_NONE_TO_RT) {
/*
* For RT threads, the default priority level is 4 because
* task_nice is 0. By promoting non-RT io-priority to RT-class
* and default level 4, those requests that are already
* RT-class but need a higher io-priority can use ioprio_set()
* to achieve this.
*/
if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) != IOPRIO_CLASS_RT)
bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4);
return;
}
/*
* Except for IOPRIO_CLASS_NONE, higher I/O priority numbers
* correspond to a lower priority. Hence, the max_t() below selects

1
build.config.crashdump Normal file
View File

@ -0,0 +1 @@
DEFCONFIG=crashdump_defconfig

View File

@ -0,0 +1,7 @@
KERNEL_DIR=common
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.crashdump
# No modules
IN_KERNEL_MODULES=

View File

@ -0,0 +1,7 @@
KERNEL_DIR=common
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.crashdump
# No modules
IN_KERNEL_MODULES=

1
build.config.microdroid Normal file
View File

@ -0,0 +1 @@
DEFCONFIG=microdroid_defconfig

View File

@ -0,0 +1,7 @@
KERNEL_DIR=common
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.microdroid
# No modules
IN_KERNEL_MODULES=

View File

@ -0,0 +1,7 @@
KERNEL_DIR=common
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.microdroid
# No modules
IN_KERNEL_MODULES=

15
build.config.rockchip Normal file
View File

@ -0,0 +1,15 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
BUILD_INITRAMFS=1
LZ4_RAMDISK=1
DEFCONFIG=rockchip_gki_defconfig
FRAGMENT_CONFIG=${KERNEL_DIR}/arch/arm64/configs/rockchip_gki.fragment
PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/${KERNEL_DIR}/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${FRAGMENT_CONFIG}"
POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}"
DTC_INCLUDE=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/boot/dts/rockchip
FILES="${FILES}
arch/arm64/boot/dts/rockchip/rk3588*.dtb
"

View File

@ -1761,6 +1761,7 @@ static void binder_free_transaction(struct binder_transaction *t)
{
struct binder_proc *target_proc = t->to_proc;
trace_android_vh_free_oem_binder_struct(t);
if (target_proc) {
binder_inner_proc_lock(target_proc);
target_proc->outstanding_txns--;
@ -2945,6 +2946,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
bool pending_async = false;
struct binder_transaction *t_outdated = NULL;
bool skip = false;
bool enqueue_task = true;
BUG_ON(!node);
binder_node_lock(node);
@ -2984,7 +2986,10 @@ static int binder_proc_transaction(struct binder_transaction *t,
binder_transaction_priority(thread, t, node);
binder_enqueue_thread_work_ilocked(thread, &t->work);
} else if (!pending_async) {
binder_enqueue_work_ilocked(&t->work, &proc->todo);
trace_android_vh_binder_special_task(t, proc, thread,
&t->work, &proc->todo, !oneway, &enqueue_task);
if (enqueue_task)
binder_enqueue_work_ilocked(&t->work, &proc->todo);
} else {
if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) {
t_outdated = binder_find_outdated_transaction_ilocked(t,
@ -2997,7 +3002,10 @@ static int binder_proc_transaction(struct binder_transaction *t,
proc->outstanding_txns--;
}
}
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
trace_android_vh_binder_special_task(t, proc, thread,
&t->work, &node->async_todo, !oneway, &enqueue_task);
if (enqueue_task)
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
}
trace_android_vh_binder_proc_transaction_finish(proc, t,
@ -3474,6 +3482,7 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer->target_node = target_node;
t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
trace_binder_transaction_alloc_buf(t->buffer);
trace_android_vh_alloc_oem_binder_struct(tr, t, target_proc);
if (binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
@ -3978,6 +3987,9 @@ binder_free_buf(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_buffer *buffer, bool is_failure)
{
bool enqueue_task = true;
trace_android_vh_binder_free_buf(proc, thread, buffer);
binder_inner_proc_lock(proc);
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
@ -3997,8 +4009,10 @@ binder_free_buf(struct binder_proc *proc,
if (!w) {
buf_node->has_async_transaction = false;
} else {
binder_enqueue_work_ilocked(
w, &proc->todo);
trace_android_vh_binder_special_task(NULL, proc, thread, w,
&proc->todo, false, &enqueue_task);
if (enqueue_task)
binder_enqueue_work_ilocked(w, &proc->todo);
binder_wakeup_proc_ilocked(proc);
}
binder_node_inner_unlock(buf_node);
@ -4940,6 +4954,7 @@ static int binder_thread_read(struct binder_proc *proc,
ptr += trsize;
trace_binder_transaction_received(t);
trace_android_vh_binder_transaction_received(t, proc, thread, cmd);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",

View File

@ -67,7 +67,7 @@
#include <trace/hooks/psi.h>
#include <trace/hooks/bl_hib.h>
#include <trace/hooks/regmap.h>
#include <trace/hooks/compaction.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
* associated with them) to allow external modules to probe them.
@ -182,6 +182,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_mutex_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rtmutex_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rwsem_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_pcpu_rwsem_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_percpu_rwsem_wq_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_module_core_rw_nx);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_module_init_rw_nx);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_module_permit_before_init);
@ -329,3 +330,17 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_check);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freeze_whether_wake);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_use_amu_fie);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_scan_abort_check_wmarks);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_oem_binder_struct);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_received);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_oem_binder_struct);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_special_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_free_buf);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_compaction_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_compaction_try_to_compact_pages_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_alloc_pages_direct_reclaim_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_alloc_pages_direct_reclaim_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_alloc_pages_may_oom_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_vmscan_kswapd_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_compaction_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_compaction_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_bus_iommu_probe);

View File

@ -754,6 +754,29 @@ void wait_for_device_probe(void)
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
/**
* flush_deferred_probe_now
*
* This function should be used sparingly. It's meant for when we need to flush
* the deferred probe list at earlier initcall levels. Really meant only for KVM
* needs. This function should never be exported because it makes no sense for
* modules to call this.
*/
void flush_deferred_probe_now(void)
{
/*
* Really shouldn't using this if deferred probe has already been
* enabled
*/
if (WARN_ON(driver_deferred_probe_enable))
return;
driver_deferred_probe_enable = true;
driver_deferred_probe_trigger();
wait_for_device_probe();
driver_deferred_probe_enable = false;
}
static int __driver_probe_device(struct device_driver *drv, struct device *dev)
{
int ret = 0;

View File

@ -2923,10 +2923,10 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
err = of_property_read_u32(state_node, "min-residency-us", &residency);
if (!err)
genpd_state->residency_ns = 1000 * residency;
genpd_state->residency_ns = 1000LL * residency;
genpd_state->power_on_latency_ns = 1000 * exit_latency;
genpd_state->power_off_latency_ns = 1000 * entry_latency;
genpd_state->power_on_latency_ns = 1000LL * exit_latency;
genpd_state->power_off_latency_ns = 1000LL * entry_latency;
genpd_state->fwnode = &state_node->fwnode;
return 0;

View File

@ -126,6 +126,7 @@ void clk_fractional_divider_general_approximation(struct clk_hw *hw,
GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
m, n);
}
EXPORT_SYMBOL_GPL(clk_fractional_divider_general_approximation);
static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)

View File

@ -2055,10 +2055,6 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
int mode = DEFAULT_PGTABLE_LEVEL;
int ret;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
/*
* Force IOMMU v1 page table when iommu=pt and
* when allocating domain for pass-through devices.
@ -2074,6 +2070,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
return NULL;
}
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
switch (pgtable) {
case AMD_IOMMU_V1:
ret = protection_domain_init_v1(domain, mode);

View File

@ -23,6 +23,7 @@
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
@ -392,6 +393,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
iort_iommu_get_resv_regions(dev, list);
if (dev->of_node)
of_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

View File

@ -30,6 +30,7 @@
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
#include <trace/hooks/iommu.h>
#include "dma-iommu.h"
@ -223,7 +224,8 @@ int iommu_device_register(struct iommu_device *iommu,
* already the de-facto behaviour, since any possible combination of
* existing drivers would compete for at least the PCI or platform bus.
*/
if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops
&& !trace_android_vh_bus_iommu_probe_enabled())
return -EBUSY;
iommu->ops = ops;
@ -235,6 +237,11 @@ int iommu_device_register(struct iommu_device *iommu,
spin_unlock(&iommu_device_lock);
for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
bool skip = false;
trace_android_vh_bus_iommu_probe(iommu, iommu_buses[i], &skip);
if (skip)
continue;
iommu_buses[i]->iommu_ops = ops;
err = bus_iommu_probe(iommu_buses[i]);
}

View File

@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@ -172,3 +173,98 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
return ops;
}
static enum iommu_resv_type __maybe_unused
iommu_resv_region_get_type(struct device *dev,
struct resource *phys,
phys_addr_t start, size_t length)
{
phys_addr_t end = start + length - 1;
/*
* IOMMU regions without an associated physical region cannot be
* mapped and are simply reservations.
*/
if (phys->start >= phys->end)
return IOMMU_RESV_RESERVED;
/* may be IOMMU_RESV_DIRECT_RELAXABLE for certain cases */
if (start == phys->start && end == phys->end)
return IOMMU_RESV_DIRECT;
dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", &phys,
&start, &end);
return IOMMU_RESV_RESERVED;
}
/**
* of_iommu_get_resv_regions - reserved region driver helper for device tree
* @dev: device for which to get reserved regions
* @list: reserved region list
*
* IOMMU drivers can use this to implement their .get_resv_regions() callback
* for memory regions attached to a device tree node. See the reserved-memory
* device tree bindings on how to use these:
*
* Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
*/
void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
#if IS_ENABLED(CONFIG_OF_ADDRESS)
struct of_phandle_iterator it;
int err;
of_for_each_phandle(&it, err, dev->of_node, "memory-region", NULL, 0) {
const __be32 *maps, *end;
struct resource phys;
int size;
memset(&phys, 0, sizeof(phys));
/*
* The "reg" property is optional and can be omitted by reserved-memory regions
* that represent reservations in the IOVA space, which are regions that should
* not be mapped.
*/
if (of_find_property(it.node, "reg", NULL)) {
err = of_address_to_resource(it.node, 0, &phys);
if (err < 0) {
dev_err(dev, "failed to parse memory region %pOF: %d\n",
it.node, err);
continue;
}
}
maps = of_get_property(it.node, "iommu-addresses", &size);
if (!maps)
continue;
end = maps + size / sizeof(__be32);
while (maps < end) {
struct device_node *np;
u32 phandle;
phandle = be32_to_cpup(maps++);
np = of_find_node_by_phandle(phandle);
if (np == dev->of_node) {
int prot = IOMMU_READ | IOMMU_WRITE;
struct iommu_resv_region *region;
enum iommu_resv_type type;
phys_addr_t iova;
size_t length;
maps = of_translate_dma_region(np, maps, &iova, &length);
type = iommu_resv_region_get_type(dev, &phys, iova, length);
region = iommu_alloc_resv_region(iova, length, prot, type,
GFP_KERNEL);
if (region)
list_add_tail(&region->list, list);
}
}
}
#endif
}
EXPORT_SYMBOL(of_iommu_get_resv_regions);

View File

@ -23,17 +23,20 @@
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rtmutex.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/spinlock_types.h>
#define UID_HASH_BITS 10
#define UID_HASH_NUMS (1 << UID_HASH_BITS)
DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
/*
* uid_lock[bkt] ensure consistency of hash_table[bkt]
*/
spinlock_t uid_lock[UID_HASH_NUMS];
static DEFINE_RT_MUTEX(uid_lock);
static struct proc_dir_entry *cpu_parent;
static struct proc_dir_entry *io_parent;
static struct proc_dir_entry *proc_parent;
@ -78,6 +81,32 @@ struct uid_entry {
#endif
};
static inline int trylock_uid(uid_t uid)
{
return spin_trylock(
&uid_lock[hash_min(uid, HASH_BITS(hash_table))]);
}
static inline void lock_uid(uid_t uid)
{
spin_lock(&uid_lock[hash_min(uid, HASH_BITS(hash_table))]);
}
static inline void unlock_uid(uid_t uid)
{
spin_unlock(&uid_lock[hash_min(uid, HASH_BITS(hash_table))]);
}
static inline void lock_uid_by_bkt(u32 bkt)
{
spin_lock(&uid_lock[bkt]);
}
static inline void unlock_uid_by_bkt(u32 bkt)
{
spin_unlock(&uid_lock[bkt]);
}
static u64 compute_write_bytes(struct task_io_accounting *ioac)
{
if (ioac->write_bytes <= ioac->cancelled_write_bytes)
@ -333,24 +362,29 @@ static int uid_cputime_show(struct seq_file *m, void *v)
struct user_namespace *user_ns = current_user_ns();
u64 utime;
u64 stime;
unsigned long bkt;
u32 bkt;
uid_t uid;
rt_mutex_lock(&uid_lock);
hash_for_each(hash_table, bkt, uid_entry, hash) {
uid_entry->active_stime = 0;
uid_entry->active_utime = 0;
for (bkt = 0, uid_entry = NULL; uid_entry == NULL &&
bkt < HASH_SIZE(hash_table); bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
uid_entry->active_stime = 0;
uid_entry->active_utime = 0;
}
unlock_uid_by_bkt(bkt);
}
rcu_read_lock();
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
lock_uid(uid);
if (!uid_entry || uid_entry->uid != uid)
uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
rcu_read_unlock();
rt_mutex_unlock(&uid_lock);
unlock_uid(uid);
pr_err("%s: failed to find the uid_entry for uid %d\n",
__func__, uid);
return -ENOMEM;
@ -361,19 +395,24 @@ static int uid_cputime_show(struct seq_file *m, void *v)
uid_entry->active_utime += utime;
uid_entry->active_stime += stime;
}
unlock_uid(uid);
} while_each_thread(temp, task);
rcu_read_unlock();
hash_for_each(hash_table, bkt, uid_entry, hash) {
u64 total_utime = uid_entry->utime +
uid_entry->active_utime;
u64 total_stime = uid_entry->stime +
uid_entry->active_stime;
seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
ktime_to_us(total_utime), ktime_to_us(total_stime));
for (bkt = 0, uid_entry = NULL; uid_entry == NULL &&
bkt < HASH_SIZE(hash_table); bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
u64 total_utime = uid_entry->utime +
uid_entry->active_utime;
u64 total_stime = uid_entry->stime +
uid_entry->active_stime;
seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
ktime_to_us(total_utime), ktime_to_us(total_stime));
}
unlock_uid_by_bkt(bkt);
}
rt_mutex_unlock(&uid_lock);
return 0;
}
@ -421,9 +460,8 @@ static ssize_t uid_remove_write(struct file *file,
return -EINVAL;
}
rt_mutex_lock(&uid_lock);
for (; uid_start <= uid_end; uid_start++) {
lock_uid(uid_start);
hash_for_each_possible_safe(hash_table, uid_entry, tmp,
hash, (uid_t)uid_start) {
if (uid_start == uid_entry->uid) {
@ -432,9 +470,9 @@ static ssize_t uid_remove_write(struct file *file,
kfree(uid_entry);
}
}
unlock_uid(uid_start);
}
rt_mutex_unlock(&uid_lock);
return count;
}
@ -472,41 +510,59 @@ static void add_uid_io_stats(struct uid_entry *uid_entry,
__add_uid_io_stats(uid_entry, &task->ioac, slot);
}
static void update_io_stats_all_locked(void)
static void update_io_stats_all(void)
{
struct uid_entry *uid_entry = NULL;
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
unsigned long bkt;
u32 bkt;
uid_t uid;
hash_for_each(hash_table, bkt, uid_entry, hash) {
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
set_io_uid_tasks_zero(uid_entry);
for (bkt = 0, uid_entry = NULL; uid_entry == NULL && bkt < HASH_SIZE(hash_table);
bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
set_io_uid_tasks_zero(uid_entry);
}
unlock_uid_by_bkt(bkt);
}
rcu_read_lock();
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
lock_uid(uid);
if (!uid_entry || uid_entry->uid != uid)
uid_entry = find_or_register_uid(uid);
if (!uid_entry)
if (!uid_entry) {
unlock_uid(uid);
continue;
}
add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
unlock_uid(uid);
} while_each_thread(temp, task);
rcu_read_unlock();
hash_for_each(hash_table, bkt, uid_entry, hash) {
compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
&uid_entry->io[UID_STATE_TOTAL_CURR],
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
compute_io_uid_tasks(uid_entry);
for (bkt = 0, uid_entry = NULL; uid_entry == NULL && bkt < HASH_SIZE(hash_table);
bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
&uid_entry->io[UID_STATE_TOTAL_CURR],
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
compute_io_uid_tasks(uid_entry);
}
unlock_uid_by_bkt(bkt);
}
}
#ifndef CONFIG_UID_SYS_STATS_DEBUG
static void update_io_stats_uid(struct uid_entry *uid_entry)
#else
static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
#endif
{
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
@ -534,14 +590,15 @@ static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
static int uid_io_show(struct seq_file *m, void *v)
{
struct uid_entry *uid_entry;
unsigned long bkt;
u32 bkt;
rt_mutex_lock(&uid_lock);
update_io_stats_all();
for (bkt = 0, uid_entry = NULL; uid_entry == NULL && bkt < HASH_SIZE(hash_table);
bkt++) {
update_io_stats_all_locked();
hash_for_each(hash_table, bkt, uid_entry, hash) {
seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
uid_entry->uid,
uid_entry->io[UID_STATE_FOREGROUND].rchar,
uid_entry->io[UID_STATE_FOREGROUND].wchar,
@ -554,10 +611,11 @@ static int uid_io_show(struct seq_file *m, void *v)
uid_entry->io[UID_STATE_FOREGROUND].fsync,
uid_entry->io[UID_STATE_BACKGROUND].fsync);
show_io_uid_tasks(m, uid_entry);
show_io_uid_tasks(m, uid_entry);
}
unlock_uid_by_bkt(bkt);
}
rt_mutex_unlock(&uid_lock);
return 0;
}
@ -585,6 +643,9 @@ static ssize_t uid_procstat_write(struct file *file,
uid_t uid;
int argc, state;
char input[128];
#ifndef CONFIG_UID_SYS_STATS_DEBUG
struct uid_entry uid_entry_tmp;
#endif
if (count >= sizeof(input))
return -EINVAL;
@ -601,24 +662,51 @@ static ssize_t uid_procstat_write(struct file *file,
if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
return -EINVAL;
rt_mutex_lock(&uid_lock);
lock_uid(uid);
uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
rt_mutex_unlock(&uid_lock);
unlock_uid(uid);
return -EINVAL;
}
if (uid_entry->state == state) {
rt_mutex_unlock(&uid_lock);
unlock_uid(uid);
return count;
}
#ifndef CONFIG_UID_SYS_STATS_DEBUG
/*
* Update_io_stats_uid_locked would take a long lock-time of uid_lock
* due to call do_each_thread to compute uid_entry->io, which would
* cause to lock competition sometime.
*
* Using uid_entry_tmp to get the result of Update_io_stats_uid,
* so that we can unlock_uid during update_io_stats_uid, in order
* to avoid the unnecessary lock-time of uid_lock.
*/
uid_entry_tmp.uid = uid_entry->uid;
memcpy(uid_entry_tmp.io, uid_entry->io,
sizeof(struct io_stats) * UID_STATE_SIZE);
unlock_uid(uid);
update_io_stats_uid(&uid_entry_tmp);
lock_uid(uid);
hlist_for_each_entry(uid_entry, &hash_table[hash_min(uid, HASH_BITS(hash_table))], hash) {
if (uid_entry->uid == uid_entry_tmp.uid) {
memcpy(uid_entry->io, uid_entry_tmp.io,
sizeof(struct io_stats) * UID_STATE_SIZE);
uid_entry->state = state;
break;
}
}
unlock_uid(uid);
#else
update_io_stats_uid_locked(uid_entry);
uid_entry->state = state;
rt_mutex_unlock(&uid_lock);
unlock_uid(uid);
#endif
return count;
}
@ -649,10 +737,9 @@ static void update_stats_workfn(struct work_struct *work)
struct task_entry *task_entry __maybe_unused;
struct llist_node *node;
rt_mutex_lock(&uid_lock);
node = llist_del_all(&work_usw);
llist_for_each_entry_safe(usw, t, node, node) {
lock_uid(usw->uid);
uid_entry = find_uid_entry(usw->uid);
if (!uid_entry)
goto next;
@ -669,12 +756,13 @@ static void update_stats_workfn(struct work_struct *work)
#endif
__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
next:
unlock_uid(usw->uid);
#ifdef CONFIG_UID_SYS_STATS_DEBUG
put_task_struct(usw->task);
#endif
kfree(usw);
}
rt_mutex_unlock(&uid_lock);
}
static DECLARE_WORK(update_stats_work, update_stats_workfn);
@ -690,7 +778,7 @@ static int process_notifier(struct notifier_block *self,
return NOTIFY_OK;
uid = from_kuid_munged(current_user_ns(), task_uid(task));
if (!rt_mutex_trylock(&uid_lock)) {
if (!trylock_uid(uid)) {
struct update_stats_work *usw;
usw = kmalloc(sizeof(struct update_stats_work), GFP_KERNEL);
@ -724,7 +812,7 @@ static int process_notifier(struct notifier_block *self,
add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);
exit:
rt_mutex_unlock(&uid_lock);
unlock_uid(uid);
return NOTIFY_OK;
}
@ -732,9 +820,18 @@ static struct notifier_block process_notifier_block = {
.notifier_call = process_notifier,
};
static void init_hash_table_and_lock(void)
{
int i;
hash_init(hash_table);
for (i = 0; i < UID_HASH_NUMS; i++)
spin_lock_init(&uid_lock[i]);
}
static int __init proc_uid_sys_stats_init(void)
{
hash_init(hash_table);
init_hash_table_and_lock();
cpu_parent = proc_mkdir("uid_cputime", NULL);
if (!cpu_parent) {

View File

@ -626,6 +626,47 @@ u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
}
EXPORT_SYMBOL(of_translate_dma_address);
/**
* of_translate_dma_region - Translate device tree address and size tuple
* @dev: device tree node for which to translate
* @prop: pointer into array of cells
* @start: return value for the start of the DMA range
* @length: return value for the length of the DMA range
*
* Returns a pointer to the cell immediately following the translated DMA region.
*/
const __be32 *of_translate_dma_region(struct device_node *dev, const __be32 *prop,
phys_addr_t *start, size_t *length)
{
struct device_node *parent;
u64 address, size;
int na, ns;
parent = __of_get_dma_parent(dev);
if (!parent)
return NULL;
na = of_bus_n_addr_cells(parent);
ns = of_bus_n_size_cells(parent);
of_node_put(parent);
address = of_translate_dma_address(dev, prop);
if (address == OF_BAD_ADDR)
return NULL;
size = of_read_number(prop + na, ns);
if (start)
*start = address;
if (length)
*length = size;
return prop + na + ns;
}
EXPORT_SYMBOL(of_translate_dma_region);
const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
u64 *size, unsigned int *flags)
{

View File

@ -2481,12 +2481,13 @@ static void gsm_error(struct gsm_mux *gsm)
static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
{
int i;
struct gsm_dlci *dlci = gsm->dlci[0];
struct gsm_dlci *dlci;
struct gsm_msg *txq, *ntxq;
gsm->dead = true;
mutex_lock(&gsm->mutex);
dlci = gsm->dlci[0];
if (dlci) {
if (disc && dlci->state != DLCI_CLOSED) {
gsm_dlci_begin_close(dlci);

View File

@ -7856,6 +7856,20 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
hba = shost_priv(cmd->device->host);
/*
* If runtime pm send SSU and got timeout, scsi_error_handler
* stuck at this function to wait for flush_work(&hba->eh_work).
* And ufshcd_err_handler(eh_work) stuck at wait for runtime pm active.
* Do ufshcd_link_recovery instead schedule eh_work can prevent
* dead lock to happen.
*/
if (hba->pm_op_in_progress) {
if (ufshcd_link_recovery(hba))
err = FAILED;
return err;
}
spin_lock_irqsave(hba->host->host_lock, flags);
hba->force_reset = true;
ufshcd_schedule_eh_work(hba);

View File

@ -3098,6 +3098,48 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
return status;
}
/*
* hub_port_stop_enumerate - stop USB enumeration or ignore port events
* @hub: target hub
* @port1: port num of the port
* @retries: port retries number of hub_port_init()
*
* Return:
* true: ignore port actions/events or give up connection attempts.
* false: keep original behavior.
*
* This function will be based on retries to check whether the port which is
* marked with early_stop attribute would stop enumeration or ignore events.
*
* Note:
* This function didn't change anything if early_stop is not set, and it will
* prevent all connection attempts when early_stop is set and the attempts of
* the port are more than 1.
*/
static bool hub_port_stop_enumerate(struct usb_hub *hub, int port1, int retries)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
if (port_dev->early_stop) {
if (port_dev->ignore_event)
return true;
/*
* We want unsuccessful attempts to fail quickly.
* Since some devices may need one failure during
* port initialization, we allow two tries but no
* more.
*/
if (retries < 2)
return false;
port_dev->ignore_event = 1;
} else
port_dev->ignore_event = 0;
return port_dev->ignore_event;
}
/* Check if a port is power on */
int usb_port_is_power_on(struct usb_hub *hub, unsigned int portstatus)
{
@ -4813,6 +4855,11 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
do_new_scheme = use_new_scheme(udev, retry_counter, port_dev);
for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
if (hub_port_stop_enumerate(hub, port1, retries)) {
retval = -ENODEV;
break;
}
if (do_new_scheme) {
struct usb_device_descriptor *buf;
int r = 0;
@ -5263,6 +5310,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
status = 0;
for (i = 0; i < PORT_INIT_TRIES; i++) {
if (hub_port_stop_enumerate(hub, port1, i)) {
status = -ENODEV;
break;
}
usb_lock_port(port_dev);
mutex_lock(hcd->address0_mutex);
retry_locked = true;
@ -5631,6 +5683,10 @@ static void port_event(struct usb_hub *hub, int port1)
if (!pm_runtime_active(&port_dev->dev))
return;
/* skip port actions if ignore_event and early_stop are true */
if (port_dev->ignore_event && port_dev->early_stop)
return;
if (hub_handle_remote_wakeup(hub, port1, portstatus, portchange))
connect_change = 1;
@ -5954,6 +6010,10 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
mutex_lock(hcd->address0_mutex);
for (i = 0; i < PORT_INIT_TRIES; ++i) {
if (hub_port_stop_enumerate(parent_hub, port1, i)) {
ret = -ENODEV;
break;
}
/* ep0 maxpacket size may change; let the HCD know about it.
* Other endpoints will be handled by re-enumeration. */

View File

@ -92,6 +92,8 @@ struct usb_hub {
* @is_superspeed cache super-speed status
* @usb3_lpm_u1_permit: whether USB3 U1 LPM is permitted.
* @usb3_lpm_u2_permit: whether USB3 U2 LPM is permitted.
* @early_stop: whether port initialization will be stopped earlier.
* @ignore_event: whether events of the port are ignored.
*/
struct usb_port {
struct usb_device *child;
@ -107,6 +109,8 @@ struct usb_port {
u32 over_current_count;
u8 portnum;
u32 quirks;
unsigned int early_stop:1;
unsigned int ignore_event:1;
unsigned int is_superspeed:1;
unsigned int usb3_lpm_u1_permit:1;
unsigned int usb3_lpm_u2_permit:1;

View File

@ -17,6 +17,32 @@ static int usb_port_block_power_off;
static const struct attribute_group *port_dev_group[];
static ssize_t early_stop_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
return sysfs_emit(buf, "%s\n", port_dev->early_stop ? "yes" : "no");
}
static ssize_t early_stop_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_port *port_dev = to_usb_port(dev);
bool value;
if (kstrtobool(buf, &value))
return -EINVAL;
if (value)
port_dev->early_stop = 1;
else
port_dev->early_stop = 0;
return count;
}
static DEVICE_ATTR_RW(early_stop);
static ssize_t disable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@ -247,6 +273,7 @@ static struct attribute *port_dev_attrs[] = {
&dev_attr_quirks.attr,
&dev_attr_over_current_count.attr,
&dev_attr_disable.attr,
&dev_attr_early_stop.attr,
NULL,
};

View File

@ -538,16 +538,20 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
static int gs_start_io(struct gs_port *port)
{
struct list_head *head = &port->read_pool;
struct usb_ep *ep = port->port_usb->out;
struct usb_ep *ep;
int status;
unsigned started;
if (!port->port_usb || !port->port.tty)
return -EIO;
/* Allocate RX and TX I/O buffers. We can't easily do this much
* earlier (with GFP_KERNEL) because the requests are coupled to
* endpoints, as are the packet sizes we'll be using. Different
* configurations may use different endpoints with a given port;
* and high speed vs full speed changes packet sizes too.
*/
ep = port->port_usb->out;
status = gs_alloc_requests(ep, head, gs_read_complete,
&port->read_allocated);
if (status)
@ -1419,10 +1423,19 @@ EXPORT_SYMBOL_GPL(gserial_disconnect);
void gserial_suspend(struct gserial *gser)
{
struct gs_port *port = gser->ioport;
struct gs_port *port;
unsigned long flags;
spin_lock_irqsave(&port->port_lock, flags);
spin_lock_irqsave(&serial_port_lock, flags);
port = gser->ioport;
if (!port) {
spin_unlock_irqrestore(&serial_port_lock, flags);
return;
}
spin_lock(&port->port_lock);
spin_unlock(&serial_port_lock);
port->suspended = true;
spin_unlock_irqrestore(&port->port_lock, flags);
}

View File

@ -156,7 +156,20 @@ EXPORT_SYMBOL_GPL(typec_altmode_exit);
*/
void typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
{
struct typec_altmode *pdev = &to_altmode(adev)->partner->adev;
struct altmode *partner = to_altmode(adev)->partner;
struct typec_altmode *pdev;
/*
* If partner is NULL then a NULL pointer error occurs when
* dereferencing pdev and its operations. The original upstream commit
* changes the return type so the tcpm can log when this occurs, but
* due to KMI restrictions we can only silently prevent the error for
* now.
*/
if (!partner)
return;
pdev = &partner->adev;
if (pdev->ops && pdev->ops->attention)
pdev->ops->attention(pdev, vdo);

View File

@ -785,6 +785,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (ret < 0) {
dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
__func__, ret);
clear_bit(EVENT_PENDING, &con->ucsi->flags);
goto out_unlock;
}

View File

@ -567,10 +567,7 @@ static int gh_rm_send_request(struct gh_rm *rm, u32 message_id,
hdr_template.seq = cpu_to_le16(connection->reply.seq);
hdr_template.msg_id = cpu_to_le32(message_id);
ret = mutex_lock_interruptible(&rm->send_lock);
if (ret)
return ret;
mutex_lock(&rm->send_lock);
do {
msg = kmem_cache_zalloc(rm->cache, GFP_KERNEL);
if (!msg) {

View File

@ -1479,6 +1479,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
cifs_parse_mount_err:
kfree_sensitive(ctx->password);
ctx->password = NULL;
return -EINVAL;
}

View File

@ -29,11 +29,15 @@ void erofs_put_metabuf(struct erofs_buf *buf)
buf->page = NULL;
}
/*
* Derive the block size from inode->i_blkbits to make compatible with
* anonymous inode in fscache mode.
*/
void *erofs_bread(struct erofs_buf *buf, struct inode *inode,
erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
struct address_space *const mapping = inode->i_mapping;
erofs_off_t offset = blknr_to_addr(blkaddr);
pgoff_t index = offset >> PAGE_SHIFT;
struct page *page = buf->page;
struct folio *folio;
@ -84,36 +88,32 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
erofs_blk_t nblocks, lastblk;
u64 offset = map->m_la;
struct erofs_inode *vi = EROFS_I(inode);
struct super_block *sb = inode->i_sb;
bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
nblocks = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
nblocks = erofs_iblks(inode);
lastblk = nblocks - tailendpacking;
/* there is no hole in flatmode */
map->m_flags = EROFS_MAP_MAPPED;
if (offset < blknr_to_addr(lastblk)) {
map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
map->m_plen = blknr_to_addr(lastblk) - offset;
if (offset < erofs_pos(sb, lastblk)) {
map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
map->m_plen = erofs_pos(sb, lastblk) - offset;
} else if (tailendpacking) {
/* 2 - inode inline B: inode, [xattrs], inline last blk... */
struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
vi->xattr_isize + erofs_blkoff(map->m_la);
map->m_pa = erofs_iloc(inode) + vi->inode_isize +
vi->xattr_isize + erofs_blkoff(sb, offset);
map->m_plen = inode->i_size - offset;
/* inline data should be located in the same meta block */
if (erofs_blkoff(map->m_pa) + map->m_plen > EROFS_BLKSIZ) {
erofs_err(inode->i_sb,
"inline data cross block boundary @ nid %llu",
if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
erofs_err(sb, "inline data cross block boundary @ nid %llu",
vi->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
map->m_flags |= EROFS_MAP_META;
} else {
erofs_err(inode->i_sb,
"internal error @ nid: %llu (size %llu), m_la 0x%llx",
erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
vi->nid, inode->i_size, map->m_la);
DBG_BUGON(1);
return -EIO;
@ -154,32 +154,32 @@ int erofs_map_blocks(struct inode *inode,
unit = EROFS_BLOCK_MAP_ENTRY_SIZE; /* block map */
chunknr = map->m_la >> vi->chunkbits;
pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
vi->xattr_isize, unit) + unit * chunknr;
kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
if (IS_ERR(kaddr)) {
err = PTR_ERR(kaddr);
goto out;
}
map->m_la = chunknr << vi->chunkbits;
map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));
round_up(inode->i_size - map->m_la, sb->s_blocksize));
/* handle block map */
if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
__le32 *blkaddr = kaddr + erofs_blkoff(pos);
__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);
if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
map->m_flags = 0;
} else {
map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
map->m_flags = EROFS_MAP_MAPPED;
}
goto out_unlock;
}
/* parse chunk indexes */
idx = kaddr + erofs_blkoff(pos);
idx = kaddr + erofs_blkoff(sb, pos);
switch (le32_to_cpu(idx->blkaddr)) {
case EROFS_NULL_ADDR:
map->m_flags = 0;
@ -187,7 +187,7 @@ int erofs_map_blocks(struct inode *inode,
default:
map->m_deviceid = le16_to_cpu(idx->device_id) &
EROFS_SB(sb)->device_id_mask;
map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
map->m_flags = EROFS_MAP_MAPPED;
break;
}
@ -231,8 +231,8 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
if (!dif->mapped_blkaddr)
continue;
startoff = blknr_to_addr(dif->mapped_blkaddr);
length = blknr_to_addr(dif->blocks);
startoff = erofs_pos(sb, dif->mapped_blkaddr);
length = erofs_pos(sb, dif->blocks);
if (map->m_pa >= startoff &&
map->m_pa < startoff + length) {
@ -253,6 +253,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
int ret;
struct super_block *sb = inode->i_sb;
struct erofs_map_blocks map;
struct erofs_map_dev mdev;
@ -267,7 +268,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
.m_deviceid = map.m_deviceid,
.m_pa = map.m_pa,
};
ret = erofs_map_dev(inode->i_sb, &mdev);
ret = erofs_map_dev(sb, &mdev);
if (ret)
return ret;
@ -293,11 +294,11 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
iomap->type = IOMAP_INLINE;
ptr = erofs_read_metabuf(&buf, inode->i_sb,
erofs_blknr(mdev.m_pa), EROFS_KMAP);
ptr = erofs_read_metabuf(&buf, sb,
erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
iomap->inline_data = ptr + erofs_blkoff(mdev.m_pa);
iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
iomap->private = buf.base;
} else {
iomap->type = IOMAP_MAPPED;

View File

@ -42,7 +42,7 @@ int z_erofs_load_lz4_config(struct super_block *sb,
if (!sbi->lz4.max_pclusterblks) {
sbi->lz4.max_pclusterblks = 1; /* reserved case */
} else if (sbi->lz4.max_pclusterblks >
Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
erofs_err(sb, "too large lz4 pclusterblks %u",
sbi->lz4.max_pclusterblks);
return -EINVAL;
@ -221,13 +221,13 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
support_0padding = true;
ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
min_t(unsigned int, rq->inputsize,
EROFS_BLKSIZ - rq->pageofs_in));
rq->sb->s_blocksize - rq->pageofs_in));
if (ret) {
kunmap_atomic(headpage);
return ret;
}
may_inplace = !((rq->pageofs_in + rq->inputsize) &
(EROFS_BLKSIZ - 1));
(rq->sb->s_blocksize - 1));
}
inputmargin = rq->pageofs_in;

View File

@ -166,8 +166,8 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
/* 1. get the exact LZMA compressed size */
kin = kmap(*rq->in);
err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
min_t(unsigned int, rq->inputsize,
EROFS_BLKSIZ - rq->pageofs_in));
min_t(unsigned int, rq->inputsize,
rq->sb->s_blocksize - rq->pageofs_in));
if (err) {
kunmap(*rq->in);
return err;

View File

@ -67,9 +67,11 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
{
struct inode *dir = file_inode(f);
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct super_block *sb = dir->i_sb;
unsigned long bsz = sb->s_blocksize;
const size_t dirsize = i_size_read(dir);
unsigned int i = ctx->pos / EROFS_BLKSIZ;
unsigned int ofs = ctx->pos % EROFS_BLKSIZ;
unsigned int i = erofs_blknr(sb, ctx->pos);
unsigned int ofs = erofs_blkoff(sb, ctx->pos);
int err = 0;
bool initial = true;
@ -79,32 +81,28 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
de = erofs_bread(&buf, dir, i, EROFS_KMAP);
if (IS_ERR(de)) {
erofs_err(dir->i_sb,
"fail to readdir of logical block %u of nid %llu",
erofs_err(sb, "fail to readdir of logical block %u of nid %llu",
i, EROFS_I(dir)->nid);
err = PTR_ERR(de);
break;
}
nameoff = le16_to_cpu(de->nameoff);
if (nameoff < sizeof(struct erofs_dirent) ||
nameoff >= EROFS_BLKSIZ) {
erofs_err(dir->i_sb,
"invalid de[0].nameoff %u @ nid %llu",
if (nameoff < sizeof(struct erofs_dirent) || nameoff >= bsz) {
erofs_err(sb, "invalid de[0].nameoff %u @ nid %llu",
nameoff, EROFS_I(dir)->nid);
err = -EFSCORRUPTED;
break;
}
maxsize = min_t(unsigned int,
dirsize - ctx->pos + ofs, EROFS_BLKSIZ);
maxsize = min_t(unsigned int, dirsize - ctx->pos + ofs, bsz);
/* search dirents at the arbitrary position */
if (initial) {
initial = false;
ofs = roundup(ofs, sizeof(struct erofs_dirent));
ctx->pos = blknr_to_addr(i) + ofs;
ctx->pos = erofs_pos(sb, i) + ofs;
if (ofs >= nameoff)
goto skip_this;
}
@ -114,7 +112,7 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
if (err)
break;
skip_this:
ctx->pos = blknr_to_addr(i) + maxsize;
ctx->pos = erofs_pos(sb, i) + maxsize;
++i;
ofs = 0;
}

View File

@ -53,7 +53,7 @@ struct erofs_super_block {
__le32 magic; /* file system magic number */
__le32 checksum; /* crc32c(super_block) */
__le32 feature_compat;
__u8 blkszbits; /* support block_size == PAGE_SIZE only */
__u8 blkszbits; /* filesystem block size in bit shift */
__u8 sb_extslots; /* superblock size = 128 + sb_extslots * 16 */
__le16 root_nid; /* nid of root directory */
@ -75,7 +75,8 @@ struct erofs_super_block {
} __packed u1;
__le16 extra_devices; /* # of devices besides the primary device */
__le16 devt_slotoff; /* startoff = devt_slotoff * devt_slotsize */
__u8 reserved[6];
__u8 dirblkbits; /* directory block size in bit shift */
__u8 reserved[5];
__le64 packed_nid; /* nid of the special packed inode */
__u8 reserved2[24];
};

View File

@ -282,8 +282,8 @@ static int erofs_fscache_data_read(struct address_space *mapping,
void *src;
/* For tail packing layout, the offset may be non-zero. */
offset = erofs_blkoff(map.m_pa);
blknr = erofs_blknr(map.m_pa);
offset = erofs_blkoff(sb, map.m_pa);
blknr = erofs_blknr(sb, map.m_pa);
size = map.m_llen;
src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
@ -333,8 +333,6 @@ static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
bool unlock;
int ret;
DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);
ret = erofs_fscache_data_read(folio_mapping(folio), folio_pos(folio),
folio_size(folio), &unlock);
if (unlock) {
@ -530,6 +528,7 @@ struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb,
inode->i_size = OFFSET_MAX;
inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
inode->i_blkbits = EROFS_SB(sb)->blkszbits;
ctx->inode = inode;
}

View File

@ -14,7 +14,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
struct super_block *sb = inode->i_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_inode *vi = EROFS_I(inode);
const erofs_off_t inode_loc = iloc(sbi, vi->nid);
const erofs_off_t inode_loc = erofs_iloc(inode);
erofs_blk_t blkaddr, nblks = 0;
void *kaddr;
@ -23,8 +23,8 @@ static void *erofs_read_inode(struct erofs_buf *buf,
unsigned int ifmt;
int err;
blkaddr = erofs_blknr(inode_loc);
*ofs = erofs_blkoff(inode_loc);
blkaddr = erofs_blknr(sb, inode_loc);
*ofs = erofs_blkoff(sb, inode_loc);
erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
__func__, vi->nid, *ofs, blkaddr);
@ -58,11 +58,11 @@ static void *erofs_read_inode(struct erofs_buf *buf,
case EROFS_INODE_LAYOUT_EXTENDED:
vi->inode_isize = sizeof(struct erofs_inode_extended);
/* check if the extended inode acrosses block boundary */
if (*ofs + vi->inode_isize <= EROFS_BLKSIZ) {
if (*ofs + vi->inode_isize <= sb->s_blocksize) {
*ofs += vi->inode_isize;
die = (struct erofs_inode_extended *)dic;
} else {
const unsigned int gotten = EROFS_BLKSIZ - *ofs;
const unsigned int gotten = sb->s_blocksize - *ofs;
copied = kmalloc(vi->inode_isize, GFP_NOFS);
if (!copied) {
@ -176,7 +176,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
err = -EOPNOTSUPP;
goto err_out;
}
vi->chunkbits = LOG_BLOCK_SIZE +
vi->chunkbits = sb->s_blocksize_bits +
(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
}
inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
@ -188,11 +188,12 @@ static void *erofs_read_inode(struct erofs_buf *buf,
if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
vi->datalayout == EROFS_INODE_FLAT_PLAIN)
inode->i_flags |= S_DAX;
if (!nblks)
/* measure inode.i_blocks as generic filesystems */
inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
else
inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
return kaddr;
bogusimode:
@ -210,11 +211,12 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr,
unsigned int m_pofs)
{
struct erofs_inode *vi = EROFS_I(inode);
unsigned int bsz = i_blocksize(inode);
char *lnk;
/* if it cannot be handled with fast symlink scheme */
if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
inode->i_size >= EROFS_BLKSIZ || inode->i_size < 0) {
inode->i_size >= bsz || inode->i_size < 0) {
inode->i_op = &erofs_symlink_iops;
return 0;
}
@ -225,7 +227,7 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr,
m_pofs += vi->xattr_isize;
/* inline symlink data shouldn't cross block boundary */
if (m_pofs + inode->i_size > EROFS_BLKSIZ) {
if (m_pofs + inode->i_size > bsz) {
kfree(lnk);
erofs_err(inode->i_sb,
"inline data cross block boundary @ nid %llu",
@ -288,7 +290,8 @@ static int erofs_fill_inode(struct inode *inode)
}
if (erofs_inode_is_data_compressed(vi->datalayout)) {
if (!erofs_is_fscache_mode(inode->i_sb))
if (!erofs_is_fscache_mode(inode->i_sb) &&
inode->i_sb->s_blocksize_bits == PAGE_SHIFT)
err = z_erofs_fill_inode(inode);
else
err = -EOPNOTSUPP;

View File

@ -145,8 +145,8 @@ struct erofs_sb_info {
#endif
u16 device_id_mask; /* valid bits of device id to be used */
/* inode slot unit size in bit shift */
unsigned char islotbits;
unsigned char islotbits; /* inode slot unit size in bit shift */
unsigned char blkszbits; /* filesystem block size in bit shift */
u32 sb_size; /* total superblock size */
u32 build_time_nsec;
@ -239,21 +239,6 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
VAL != EROFS_LOCKED_MAGIC);
}
/* we strictly follow PAGE_SIZE and no buffer head yet */
#define LOG_BLOCK_SIZE PAGE_SHIFT
#undef LOG_SECTORS_PER_BLOCK
#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9)
#undef SECTORS_PER_BLOCK
#define SECTORS_PER_BLOCK (1 << SECTORS_PER_BLOCK)
#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE)
#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
#error erofs cannot be used in this platform
#endif
enum erofs_kmap_type {
EROFS_NO_KMAP, /* don't map the buffer */
EROFS_KMAP, /* use kmap() to map the buffer */
@ -269,14 +254,10 @@ struct erofs_buf {
#define ROOT_NID(sb) ((sb)->root_nid)
#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ)
#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ)
#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ)
static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
{
return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}
#define erofs_blknr(sb, addr) ((addr) >> (sb)->s_blocksize_bits)
#define erofs_blkoff(sb, addr) ((addr) & ((sb)->s_blocksize - 1))
#define erofs_pos(sb, blk) ((erofs_off_t)(blk) << (sb)->s_blocksize_bits)
#define erofs_iblks(i) (round_up((i)->i_size, i_blocksize(i)) >> (i)->i_blkbits)
#define EROFS_FEATURE_FUNCS(name, compat, feature) \
static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
@ -342,13 +323,14 @@ struct erofs_inode {
struct inode vfs_inode;
};
#define EROFS_I(ptr) \
container_of(ptr, struct erofs_inode, vfs_inode)
#define EROFS_I(ptr) container_of(ptr, struct erofs_inode, vfs_inode)
static inline unsigned long erofs_inode_datablocks(struct inode *inode)
static inline erofs_off_t erofs_iloc(struct inode *inode)
{
/* since i_size cannot be changed */
return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
struct erofs_sb_info *sbi = EROFS_I_SB(inode);
return erofs_pos(inode->i_sb, sbi->meta_blkaddr) +
(EROFS_I(inode)->nid << sbi->islotbits);
}
static inline unsigned int erofs_bitrange(unsigned int value, unsigned int bit,

View File

@ -5,7 +5,6 @@
* Copyright (C) 2022, Alibaba Cloud
*/
#include "xattr.h"
#include <trace/events/erofs.h>
struct erofs_qstr {
@ -87,19 +86,14 @@ static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
return ERR_PTR(-ENOENT);
}
static void *find_target_block_classic(struct erofs_buf *target,
struct inode *dir,
struct erofs_qstr *name,
int *_ndirents)
static void *erofs_find_target_block(struct erofs_buf *target,
struct inode *dir, struct erofs_qstr *name, int *_ndirents)
{
unsigned int startprfx, endprfx;
int head, back;
unsigned int bsz = i_blocksize(dir);
int head = 0, back = erofs_iblks(dir) - 1;
unsigned int startprfx = 0, endprfx = 0;
void *candidate = ERR_PTR(-ENOENT);
startprfx = endprfx = 0;
head = 0;
back = erofs_inode_datablocks(dir) - 1;
while (head <= back) {
const int mid = head + (back - head) / 2;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
@ -107,8 +101,7 @@ static void *find_target_block_classic(struct erofs_buf *target,
de = erofs_bread(&buf, dir, mid, EROFS_KMAP);
if (!IS_ERR(de)) {
const int nameoff = nameoff_from_disk(de->nameoff,
EROFS_BLKSIZ);
const int nameoff = nameoff_from_disk(de->nameoff, bsz);
const int ndirents = nameoff / sizeof(*de);
int diff;
unsigned int matched;
@ -128,11 +121,10 @@ static void *find_target_block_classic(struct erofs_buf *target,
dname.name = (u8 *)de + nameoff;
if (ndirents == 1)
dname.end = (u8 *)de + EROFS_BLKSIZ;
dname.end = (u8 *)de + bsz;
else
dname.end = (u8 *)de +
nameoff_from_disk(de[1].nameoff,
EROFS_BLKSIZ);
nameoff_from_disk(de[1].nameoff, bsz);
/* string comparison without already matched prefix */
diff = erofs_dirnamecmp(name, &dname, &matched);
@ -180,13 +172,13 @@ int erofs_namei(struct inode *dir, const struct qstr *name, erofs_nid_t *nid,
qn.end = name->name + name->len;
ndirents = 0;
de = find_target_block_classic(&buf, dir, &qn, &ndirents);
de = erofs_find_target_block(&buf, dir, &qn, &ndirents);
if (IS_ERR(de))
return PTR_ERR(de);
if (ndirents)
de = find_target_dirent(&qn, (u8 *)de, EROFS_BLKSIZ, ndirents);
de = find_target_dirent(&qn, (u8 *)de, i_blocksize(dir),
ndirents);
if (!IS_ERR(de)) {
*nid = le64_to_cpu(de->nid);

View File

@ -53,18 +53,21 @@ void _erofs_info(struct super_block *sb, const char *function,
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
size_t len = 1 << EROFS_SB(sb)->blkszbits;
struct erofs_super_block *dsb;
u32 expected_crc, crc;
dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
if (len > EROFS_SUPER_OFFSET)
len -= EROFS_SUPER_OFFSET;
dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
if (!dsb)
return -ENOMEM;
expected_crc = le32_to_cpu(dsb->checksum);
dsb->checksum = 0;
/* to allow for x86 boot sectors and other oddities. */
crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
crc = crc32c(~0, dsb, len);
kfree(dsb);
if (crc != expected_crc) {
@ -133,11 +136,11 @@ static void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
int len, i, cnt;
*offset = round_up(*offset, 4);
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*offset), EROFS_KMAP);
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *offset), EROFS_KMAP);
if (IS_ERR(ptr))
return ptr;
len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]);
len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]);
if (!len)
len = U16_MAX + 1;
buffer = kmalloc(len, GFP_KERNEL);
@ -147,14 +150,15 @@ static void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
*lengthp = len;
for (i = 0; i < len; i += cnt) {
cnt = min(EROFS_BLKSIZ - (int)erofs_blkoff(*offset), len - i);
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*offset),
cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
len - i);
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *offset),
EROFS_KMAP);
if (IS_ERR(ptr)) {
kfree(buffer);
return ptr;
}
memcpy(buffer + i, ptr + erofs_blkoff(*offset), cnt);
memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt);
*offset += cnt;
}
return buffer;
@ -229,10 +233,10 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
struct block_device *bdev;
void *ptr;
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*pos), EROFS_KMAP);
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
dis = ptr + erofs_blkoff(*pos);
dis = ptr + erofs_blkoff(sb, *pos);
if (!dif->path) {
if (!dis->tag[0]) {
@ -330,7 +334,6 @@ static int erofs_read_superblock(struct super_block *sb)
struct erofs_sb_info *sbi;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct erofs_super_block *dsb;
unsigned int blkszbits;
void *data;
int ret;
@ -349,6 +352,16 @@ static int erofs_read_superblock(struct super_block *sb)
goto out;
}
sbi->blkszbits = dsb->blkszbits;
if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
goto out;
}
if (dsb->dirblkbits) {
erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
goto out;
}
sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
if (erofs_sb_has_sb_chksum(sbi)) {
ret = erofs_superblock_csum_verify(sb, data);
@ -357,19 +370,11 @@ static int erofs_read_superblock(struct super_block *sb)
}
ret = -EINVAL;
blkszbits = dsb->blkszbits;
/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
if (blkszbits != LOG_BLOCK_SIZE) {
erofs_err(sb, "blkszbits %u isn't supported on this platform",
blkszbits);
goto out;
}
if (!check_layout_compatibility(sb, dsb))
goto out;
sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
if (sbi->sb_size > EROFS_BLKSIZ) {
if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
sbi->sb_size);
goto out;
@ -724,9 +729,10 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
sbi->domain_id = ctx->domain_id;
ctx->domain_id = NULL;
sbi->blkszbits = PAGE_SHIFT;
if (erofs_is_fscache_mode(sb)) {
sb->s_blocksize = EROFS_BLKSIZ;
sb->s_blocksize_bits = LOG_BLOCK_SIZE;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
err = erofs_fscache_register_fs(sb);
if (err)
@ -736,8 +742,8 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
return err;
} else {
if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
erofs_err(sb, "failed to set erofs blksize");
if (!sb_set_blocksize(sb, PAGE_SIZE)) {
errorfc(fc, "failed to set initial blksize");
return -EINVAL;
}
@ -750,12 +756,24 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
return err;
if (test_opt(&sbi->opt, DAX_ALWAYS)) {
BUILD_BUG_ON(EROFS_BLKSIZ != PAGE_SIZE);
if (sb->s_blocksize_bits != sbi->blkszbits) {
if (erofs_is_fscache_mode(sb)) {
errorfc(fc, "unsupported blksize for fscache mode");
return -EINVAL;
}
if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
errorfc(fc, "failed to set erofs blksize");
return -EINVAL;
}
}
if (test_opt(&sbi->opt, DAX_ALWAYS)) {
if (!sbi->dax_dev) {
errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
clear_opt(&sbi->opt, DAX_ALWAYS);
} else if (sbi->blkszbits != PAGE_SHIFT) {
errorfc(fc, "unsupported blocksize for DAX");
clear_opt(&sbi->opt, DAX_ALWAYS);
}
}
@ -1059,7 +1077,7 @@ static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
id = huge_encode_dev(sb->s_bdev->bd_dev);
buf->f_type = sb->s_magic;
buf->f_bsize = EROFS_BLKSIZ;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = sbi->total_blocks;
buf->f_bfree = buf->f_bavail = 0;

View File

@ -22,8 +22,7 @@ static int init_inode_xattrs(struct inode *inode)
struct xattr_iter it;
unsigned int i;
struct erofs_xattr_ibody_header *ih;
struct super_block *sb;
struct erofs_sb_info *sbi;
struct super_block *sb = inode->i_sb;
int ret = 0;
/* the most case is that xattrs of this inode are initialized. */
@ -52,15 +51,14 @@ static int init_inode_xattrs(struct inode *inode)
* undefined right now (maybe use later with some new sb feature).
*/
if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
erofs_err(inode->i_sb,
erofs_err(sb,
"xattr_isize %d of nid %llu is not supported yet",
vi->xattr_isize, vi->nid);
ret = -EOPNOTSUPP;
goto out_unlock;
} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
if (vi->xattr_isize) {
erofs_err(inode->i_sb,
"bogus xattr ibody @ nid %llu", vi->nid);
erofs_err(sb, "bogus xattr ibody @ nid %llu", vi->nid);
DBG_BUGON(1);
ret = -EFSCORRUPTED;
goto out_unlock; /* xattr ondisk layout error */
@ -69,11 +67,9 @@ static int init_inode_xattrs(struct inode *inode)
goto out_unlock;
}
sb = inode->i_sb;
sbi = EROFS_SB(sb);
it.buf = __EROFS_BUF_INITIALIZER;
it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
it.blkaddr = erofs_blknr(sb, erofs_iloc(inode) + vi->inode_isize);
it.ofs = erofs_blkoff(sb, erofs_iloc(inode) + vi->inode_isize);
/* read in shared xattr array (non-atomic, see kmalloc below) */
it.kaddr = erofs_read_metabuf(&it.buf, sb, it.blkaddr, EROFS_KMAP);
@ -96,9 +92,9 @@ static int init_inode_xattrs(struct inode *inode)
it.ofs += sizeof(struct erofs_xattr_ibody_header);
for (i = 0; i < vi->xattr_shared_count; ++i) {
if (it.ofs >= EROFS_BLKSIZ) {
if (it.ofs >= sb->s_blocksize) {
/* cannot be unaligned */
DBG_BUGON(it.ofs != EROFS_BLKSIZ);
DBG_BUGON(it.ofs != sb->s_blocksize);
it.kaddr = erofs_read_metabuf(&it.buf, sb, ++it.blkaddr,
EROFS_KMAP);
@ -143,15 +139,15 @@ struct xattr_iter_handlers {
static inline int xattr_iter_fixup(struct xattr_iter *it)
{
if (it->ofs < EROFS_BLKSIZ)
if (it->ofs < it->sb->s_blocksize)
return 0;
it->blkaddr += erofs_blknr(it->ofs);
it->blkaddr += erofs_blknr(it->sb, it->ofs);
it->kaddr = erofs_read_metabuf(&it->buf, it->sb, it->blkaddr,
EROFS_KMAP_ATOMIC);
if (IS_ERR(it->kaddr))
return PTR_ERR(it->kaddr);
it->ofs = erofs_blkoff(it->ofs);
it->ofs = erofs_blkoff(it->sb, it->ofs);
return 0;
}
@ -159,7 +155,6 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
struct inode *inode)
{
struct erofs_inode *const vi = EROFS_I(inode);
struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
unsigned int xattr_header_sz, inline_xattr_ofs;
xattr_header_sz = inlinexattr_header_size(inode);
@ -170,9 +165,8 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
it->blkaddr = erofs_blknr(it->sb, erofs_iloc(inode) + inline_xattr_ofs);
it->ofs = erofs_blkoff(it->sb, erofs_iloc(inode) + inline_xattr_ofs);
it->kaddr = erofs_read_metabuf(&it->buf, inode->i_sb, it->blkaddr,
EROFS_KMAP_ATOMIC);
if (IS_ERR(it->kaddr))
@ -228,8 +222,8 @@ static int xattr_foreach(struct xattr_iter *it,
processed = 0;
while (processed < entry.e_name_len) {
if (it->ofs >= EROFS_BLKSIZ) {
DBG_BUGON(it->ofs > EROFS_BLKSIZ);
if (it->ofs >= it->sb->s_blocksize) {
DBG_BUGON(it->ofs > it->sb->s_blocksize);
err = xattr_iter_fixup(it);
if (err)
@ -237,7 +231,7 @@ static int xattr_foreach(struct xattr_iter *it,
it->ofs = 0;
}
slice = min_t(unsigned int, EROFS_BLKSIZ - it->ofs,
slice = min_t(unsigned int, it->sb->s_blocksize - it->ofs,
entry.e_name_len - processed);
/* handle name */
@ -263,8 +257,8 @@ static int xattr_foreach(struct xattr_iter *it,
}
while (processed < value_sz) {
if (it->ofs >= EROFS_BLKSIZ) {
DBG_BUGON(it->ofs > EROFS_BLKSIZ);
if (it->ofs >= it->sb->s_blocksize) {
DBG_BUGON(it->ofs > it->sb->s_blocksize);
err = xattr_iter_fixup(it);
if (err)
@ -272,7 +266,7 @@ static int xattr_foreach(struct xattr_iter *it,
it->ofs = 0;
}
slice = min_t(unsigned int, EROFS_BLKSIZ - it->ofs,
slice = min_t(unsigned int, it->sb->s_blocksize - it->ofs,
value_sz - processed);
op->value(it, processed, it->kaddr + it->ofs, slice);
it->ofs += slice;
@ -358,15 +352,14 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
struct erofs_inode *const vi = EROFS_I(inode);
struct super_block *const sb = inode->i_sb;
struct erofs_sb_info *const sbi = EROFS_SB(sb);
unsigned int i;
int ret = -ENOATTR;
for (i = 0; i < vi->xattr_shared_count; ++i) {
erofs_blk_t blkaddr =
xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
xattrblock_addr(sb, vi->xattr_shared_xattrs[i]);
it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
it->it.ofs = xattrblock_offset(sb, vi->xattr_shared_xattrs[i]);
it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb, blkaddr,
EROFS_KMAP_ATOMIC);
if (IS_ERR(it->it.kaddr))
@ -570,15 +563,14 @@ static int shared_listxattr(struct listxattr_iter *it)
struct inode *const inode = d_inode(it->dentry);
struct erofs_inode *const vi = EROFS_I(inode);
struct super_block *const sb = inode->i_sb;
struct erofs_sb_info *const sbi = EROFS_SB(sb);
unsigned int i;
int ret = 0;
for (i = 0; i < vi->xattr_shared_count; ++i) {
erofs_blk_t blkaddr =
xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
xattrblock_addr(sb, vi->xattr_shared_xattrs[i]);
it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
it->it.ofs = xattrblock_offset(sb, vi->xattr_shared_xattrs[i]);
it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb, blkaddr,
EROFS_KMAP_ATOMIC);
if (IS_ERR(it->it.kaddr))

View File

@ -19,21 +19,21 @@ static inline unsigned int inlinexattr_header_size(struct inode *inode)
sizeof(u32) * EROFS_I(inode)->xattr_shared_count;
}
static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi,
static inline erofs_blk_t xattrblock_addr(struct super_block *sb,
unsigned int xattr_id)
{
#ifdef CONFIG_EROFS_FS_XATTR
return sbi->xattr_blkaddr +
xattr_id * sizeof(__u32) / EROFS_BLKSIZ;
return EROFS_SB(sb)->xattr_blkaddr +
xattr_id * sizeof(__u32) / sb->s_blocksize;
#else
return 0;
#endif
}
static inline unsigned int xattrblock_offset(struct erofs_sb_info *sbi,
static inline unsigned int xattrblock_offset(struct super_block *sb,
unsigned int xattr_id)
{
return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ;
return (xattr_id * sizeof(__u32)) % sb->s_blocksize;
}
#ifdef CONFIG_EROFS_FS_XATTR

View File

@ -618,7 +618,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
if (ztailpacking) {
pcl->obj.index = 0; /* which indicates ztailpacking */
pcl->pageofs_in = erofs_blkoff(map->m_pa);
pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa);
pcl->tailpacking_size = map->m_plen;
} else {
pcl->obj.index = map->m_pa >> PAGE_SHIFT;
@ -747,6 +747,7 @@ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
struct page *page, unsigned int pageofs,
unsigned int len)
{
struct super_block *sb = inode->i_sb;
struct inode *packed_inode = EROFS_I_SB(inode)->packed_inode;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
u8 *src, *dst;
@ -758,16 +759,16 @@ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
pos += EROFS_I(inode)->z_fragmentoff;
for (i = 0; i < len; i += cnt) {
cnt = min_t(unsigned int, len - i,
EROFS_BLKSIZ - erofs_blkoff(pos));
sb->s_blocksize - erofs_blkoff(sb, pos));
src = erofs_bread(&buf, packed_inode,
erofs_blknr(pos), EROFS_KMAP);
erofs_blknr(sb, pos), EROFS_KMAP);
if (IS_ERR(src)) {
erofs_put_metabuf(&buf);
return PTR_ERR(src);
}
dst = kmap_local_page(page);
memcpy(dst + pageofs + i, src + erofs_blkoff(pos), cnt);
memcpy(dst + pageofs + i, src + erofs_blkoff(sb, pos), cnt);
kunmap_local(dst);
pos += cnt;
}
@ -825,7 +826,8 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
void *mp;
mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
erofs_blknr(map->m_pa), EROFS_NO_KMAP);
erofs_blknr(inode->i_sb, map->m_pa),
EROFS_NO_KMAP);
if (IS_ERR(mp)) {
err = PTR_ERR(mp);
erofs_err(inode->i_sb,
@ -1555,11 +1557,11 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
/* no device id here, thus it will always succeed */
mdev = (struct erofs_map_dev) {
.m_pa = blknr_to_addr(pcl->obj.index),
.m_pa = erofs_pos(sb, pcl->obj.index),
};
(void)erofs_map_dev(sb, &mdev);
cur = erofs_blknr(mdev.m_pa);
cur = erofs_blknr(sb, mdev.m_pa);
end = cur + pcl->pclusterpages;
do {
@ -1593,7 +1595,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
last_bdev = mdev.m_bdev;
bio->bi_iter.bi_sector = (sector_t)cur <<
LOG_SECTORS_PER_BLOCK;
(sb->s_blocksize_bits - 9);
bio->bi_private = bi_private;
if (f->readahead)
bio->bi_opf |= REQ_RAHEAD;

View File

@ -7,10 +7,6 @@
#include <asm/unaligned.h>
#include <trace/events/erofs.h>
static int z_erofs_do_map_blocks(struct inode *inode,
struct erofs_map_blocks *map,
int flags);
int z_erofs_fill_inode(struct inode *inode)
{
struct erofs_inode *const vi = EROFS_I(inode);
@ -22,133 +18,13 @@ int z_erofs_fill_inode(struct inode *inode)
vi->z_advise = 0;
vi->z_algorithmtype[0] = 0;
vi->z_algorithmtype[1] = 0;
vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
vi->z_logical_clusterbits = inode->i_sb->s_blocksize_bits;
set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
}
inode->i_mapping->a_ops = &z_erofs_aops;
return 0;
}
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
struct erofs_inode *const vi = EROFS_I(inode);
struct super_block *const sb = inode->i_sb;
int err, headnr;
erofs_off_t pos;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
void *kaddr;
struct z_erofs_map_header *h;
if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
/*
* paired with smp_mb() at the end of the function to ensure
* fields will only be observed after the bit is set.
*/
smp_mb();
return 0;
}
if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
return -ERESTARTSYS;
err = 0;
if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
goto out_unlock;
pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
vi->xattr_isize, 8);
kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
if (IS_ERR(kaddr)) {
err = PTR_ERR(kaddr);
goto out_unlock;
}
h = kaddr + erofs_blkoff(pos);
/*
* if the highest bit of the 8-byte map header is set, the whole file
* is stored in the packed inode. The rest bits keeps z_fragmentoff.
*/
if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
vi->z_tailextent_headlcn = 0;
goto done;
}
vi->z_advise = le16_to_cpu(h->h_advise);
vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
headnr = 0;
if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
err = -EOPNOTSUPP;
goto out_put_metabuf;
}
vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
vi->nid);
err = -EFSCORRUPTED;
goto out_put_metabuf;
}
if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
vi->nid);
err = -EFSCORRUPTED;
goto out_put_metabuf;
}
if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
struct erofs_map_blocks map = {
.buf = __EROFS_BUF_INITIALIZER
};
vi->z_idata_size = le16_to_cpu(h->h_idata_size);
err = z_erofs_do_map_blocks(inode, &map,
EROFS_GET_BLOCKS_FINDTAIL);
erofs_put_metabuf(&map.buf);
if (!map.m_plen ||
erofs_blkoff(map.m_pa) + map.m_plen > EROFS_BLKSIZ) {
erofs_err(sb, "invalid tail-packing pclustersize %llu",
map.m_plen);
err = -EFSCORRUPTED;
}
if (err < 0)
goto out_put_metabuf;
}
if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
!(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
struct erofs_map_blocks map = {
.buf = __EROFS_BUF_INITIALIZER
};
vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
err = z_erofs_do_map_blocks(inode, &map,
EROFS_GET_BLOCKS_FINDTAIL);
erofs_put_metabuf(&map.buf);
if (err < 0)
goto out_put_metabuf;
}
done:
/* paired with smp_mb() at the beginning of the function */
smp_mb();
set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_put_metabuf:
erofs_put_metabuf(&buf);
out_unlock:
clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
return err;
}
struct z_erofs_maprecorder {
struct inode *inode;
struct erofs_map_blocks *map;
@ -169,22 +45,21 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
{
struct inode *const inode = m->inode;
struct erofs_inode *const vi = EROFS_I(inode);
const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
const erofs_off_t pos =
Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
vi->xattr_isize) +
Z_EROFS_VLE_LEGACY_INDEX_ALIGN(erofs_iloc(inode) +
vi->inode_isize + vi->xattr_isize) +
lcn * sizeof(struct z_erofs_vle_decompressed_index);
struct z_erofs_vle_decompressed_index *di;
unsigned int advise, type;
m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
erofs_blknr(pos), EROFS_KMAP_ATOMIC);
erofs_blknr(inode->i_sb, pos), EROFS_KMAP_ATOMIC);
if (IS_ERR(m->kaddr))
return PTR_ERR(m->kaddr);
m->nextpackoff = pos + sizeof(struct z_erofs_vle_decompressed_index);
m->lcn = lcn;
di = m->kaddr + erofs_blkoff(pos);
di = m->kaddr + erofs_blkoff(inode->i_sb, pos);
advise = le16_to_cpu(di->di_advise);
type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
@ -285,7 +160,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
(vcnt << amortizedshift);
big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
eofs = erofs_blkoff(pos);
eofs = erofs_blkoff(m->inode->i_sb, pos);
base = round_down(eofs, vcnt << amortizedshift);
in = m->kaddr + base;
@ -375,10 +250,9 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
{
struct inode *const inode = m->inode;
struct erofs_inode *const vi = EROFS_I(inode);
const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
vi->inode_isize + vi->xattr_isize, 8) +
sizeof(struct z_erofs_map_header);
const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
unsigned int totalidx = erofs_iblks(inode);
unsigned int compacted_4b_initial, compacted_2b;
unsigned int amortizedshift;
erofs_off_t pos;
@ -416,7 +290,7 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
out:
pos += lcn * (1 << amortizedshift);
m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
erofs_blknr(pos), EROFS_KMAP_ATOMIC);
erofs_blknr(inode->i_sb, pos), EROFS_KMAP_ATOMIC);
if (IS_ERR(m->kaddr))
return PTR_ERR(m->kaddr);
return unpack_compacted_index(m, amortizedshift, pos, lookahead);
@ -486,6 +360,7 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
unsigned int initial_lcn)
{
struct super_block *sb = m->inode->i_sb;
struct erofs_inode *const vi = EROFS_I(m->inode);
struct erofs_map_blocks *const map = m->map;
const unsigned int lclusterbits = vi->z_logical_clusterbits;
@ -532,7 +407,7 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
* if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
* rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
*/
m->compressedblks = 1 << (lclusterbits - LOG_BLOCK_SIZE);
m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
break;
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
if (m->delta[0] != 1)
@ -548,7 +423,7 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
return -EFSCORRUPTED;
}
out:
map->m_plen = (u64)m->compressedblks << LOG_BLOCK_SIZE;
map->m_plen = erofs_pos(sb, m->compressedblks);
return 0;
err_bonus_cblkcnt:
erofs_err(m->inode->i_sb,
@ -691,7 +566,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
map->m_flags |= EROFS_MAP_FRAGMENT;
} else {
map->m_pa = blknr_to_addr(m.pblk);
map->m_pa = erofs_pos(inode->i_sb, m.pblk);
err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
if (err)
goto unmap_out;
@ -718,7 +593,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
((flags & EROFS_GET_BLOCKS_READMORE) &&
map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
map->m_llen >= EROFS_BLKSIZ)) {
map->m_llen >= i_blocksize(inode))) {
err = z_erofs_get_extent_decompressedlen(&m);
if (!err)
map->m_flags |= EROFS_MAP_FULL_MAPPED;
@ -732,6 +607,125 @@ static int z_erofs_do_map_blocks(struct inode *inode,
return err;
}
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
struct erofs_inode *const vi = EROFS_I(inode);
struct super_block *const sb = inode->i_sb;
int err, headnr;
erofs_off_t pos;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
void *kaddr;
struct z_erofs_map_header *h;
if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
/*
* paired with smp_mb() at the end of the function to ensure
* fields will only be observed after the bit is set.
*/
smp_mb();
return 0;
}
if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
return -ERESTARTSYS;
err = 0;
if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
goto out_unlock;
pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
if (IS_ERR(kaddr)) {
err = PTR_ERR(kaddr);
goto out_unlock;
}
h = kaddr + erofs_blkoff(sb, pos);
/*
* if the highest bit of the 8-byte map header is set, the whole file
* is stored in the packed inode. The rest bits keeps z_fragmentoff.
*/
if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
vi->z_tailextent_headlcn = 0;
goto done;
}
vi->z_advise = le16_to_cpu(h->h_advise);
vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
headnr = 0;
if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
err = -EOPNOTSUPP;
goto out_put_metabuf;
}
vi->z_logical_clusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 7);
if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
vi->nid);
err = -EFSCORRUPTED;
goto out_put_metabuf;
}
if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
vi->nid);
err = -EFSCORRUPTED;
goto out_put_metabuf;
}
if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
struct erofs_map_blocks map = {
.buf = __EROFS_BUF_INITIALIZER
};
vi->z_idata_size = le16_to_cpu(h->h_idata_size);
err = z_erofs_do_map_blocks(inode, &map,
EROFS_GET_BLOCKS_FINDTAIL);
erofs_put_metabuf(&map.buf);
if (!map.m_plen ||
erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
erofs_err(sb, "invalid tail-packing pclustersize %llu",
map.m_plen);
err = -EFSCORRUPTED;
}
if (err < 0)
goto out_put_metabuf;
}
if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
!(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
struct erofs_map_blocks map = {
.buf = __EROFS_BUF_INITIALIZER
};
vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
err = z_erofs_do_map_blocks(inode, &map,
EROFS_GET_BLOCKS_FINDTAIL);
erofs_put_metabuf(&map.buf);
if (err < 0)
goto out_put_metabuf;
}
done:
/* paired with smp_mb() at the beginning of the function */
smp_mb();
set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_put_metabuf:
erofs_put_metabuf(&buf);
out_unlock:
clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
return err;
}
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
int flags)
{

View File

@ -134,6 +134,7 @@ extern struct device_driver *driver_find(const char *name,
struct bus_type *bus);
extern int driver_probe_done(void);
extern void wait_for_device_probe(void);
extern void flush_deferred_probe_now(void);
void __init wait_for_init_devices_probe(void);
/* sysfs interface for exporting driver attributes */

View File

@ -38,6 +38,8 @@ struct of_pci_range {
/* Translate a DMA address from device space to CPU space */
extern u64 of_translate_dma_address(struct device_node *dev,
const __be32 *in_addr);
extern const __be32 *of_translate_dma_region(struct device_node *dev, const __be32 *addr,
phys_addr_t *start, size_t *length);
#ifdef CONFIG_OF_ADDRESS
extern u64 of_translate_address(struct device_node *np, const __be32 *addr);

View File

@ -12,6 +12,9 @@ extern const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np,
const u32 *id);
extern void of_iommu_get_resv_regions(struct device *dev,
struct list_head *list);
#else
static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
@ -21,6 +24,11 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
return NULL;
}
static inline void of_iommu_get_resv_regions(struct device *dev,
struct list_head *list)
{
}
#endif /* CONFIG_OF_IOMMU */
#endif /* __OF_IOMMU_H */

View File

@ -66,8 +66,8 @@ TRACE_EVENT(erofs_fill_inode,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->nid = EROFS_I(inode)->nid;
__entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid));
__entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid));
__entry->blkaddr = erofs_blknr(inode->i_sb, erofs_iloc(inode));
__entry->ofs = erofs_blkoff(inode->i_sb, erofs_iloc(inode));
),
TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u",

View File

@ -11,7 +11,11 @@
* mechanism for vendor modules to hook and extend functionality
*/
struct binder_transaction;
struct binder_transaction_data;
struct task_struct;
struct binder_work;
struct binder_buffer;
DECLARE_HOOK(android_vh_binder_transaction_init,
TP_PROTO(struct binder_transaction *t),
TP_ARGS(t));
@ -93,6 +97,26 @@ DECLARE_HOOK(android_vh_binder_new_ref,
DECLARE_HOOK(android_vh_binder_del_ref,
TP_PROTO(struct task_struct *proc, uint32_t ref_desc),
TP_ARGS(proc, ref_desc));
DECLARE_HOOK(android_vh_alloc_oem_binder_struct,
TP_PROTO(struct binder_transaction_data *tr, struct binder_transaction *t,
struct binder_proc *proc),
TP_ARGS(tr, t, proc));
DECLARE_HOOK(android_vh_binder_transaction_received,
TP_PROTO(struct binder_transaction *t, struct binder_proc *proc,
struct binder_thread *thread, uint32_t cmd),
TP_ARGS(t, proc, thread, cmd));
DECLARE_HOOK(android_vh_free_oem_binder_struct,
TP_PROTO(struct binder_transaction *t),
TP_ARGS(t));
DECLARE_HOOK(android_vh_binder_special_task,
TP_PROTO(struct binder_transaction *t, struct binder_proc *proc,
struct binder_thread *thread, struct binder_work *w,
struct list_head *head, bool sync, bool *special_task),
TP_ARGS(t, proc, thread, w, head, sync, special_task));
DECLARE_HOOK(android_vh_binder_free_buf,
TP_PROTO(struct binder_proc *proc, struct binder_thread *thread,
struct binder_buffer *buffer),
TP_ARGS(proc, thread, buffer));
#endif /* _TRACE_HOOK_BINDER_H */
/* This part must be outside protection */

View File

@ -0,0 +1,20 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM compaction
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_COMPACTION_H
#include <trace/hooks/vendor_hooks.h>
DECLARE_HOOK(android_vh_compaction_exit,
TP_PROTO(int node_id, int order, const int highest_zoneidx),
TP_ARGS(node_id, order, highest_zoneidx));
enum compact_result;
DECLARE_HOOK(android_vh_compaction_try_to_compact_pages_exit,
TP_PROTO(enum compact_result *compact_result),
TP_ARGS(compact_result));
#endif /* _TRACE_HOOK_COMPACTION_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@ -84,6 +84,9 @@ DECLARE_HOOK(android_vh_record_pcpu_rwsem_starttime,
DECLARE_HOOK(android_vh_record_pcpu_rwsem_time_early,
TP_PROTO(unsigned long settime_jiffies, struct percpu_rw_semaphore *sem),
TP_ARGS(settime_jiffies, sem));
DECLARE_HOOK(android_vh_percpu_rwsem_wq_add,
TP_PROTO(struct percpu_rw_semaphore *sem, bool reader),
TP_ARGS(sem, reader));
struct mutex_waiter;
DECLARE_HOOK(android_vh_alter_mutex_list_add,

View File

@ -15,6 +15,7 @@ DECLARE_RESTRICTED_HOOK(android_rvh_iommu_setup_dma_ops,
struct iova_domain;
struct iova;
struct iommu_device;
DECLARE_RESTRICTED_HOOK(android_rvh_iommu_alloc_insert_iova,
TP_PROTO(struct iova_domain *iovad, unsigned long size,
@ -40,6 +41,10 @@ DECLARE_RESTRICTED_HOOK(android_rvh_iommu_limit_align_shift,
unsigned long *shift),
TP_ARGS(iovad, size, shift), 1);
DECLARE_HOOK(android_vh_bus_iommu_probe,
TP_PROTO(struct iommu_device *iommu, struct bus_type *bus, bool *skip),
TP_ARGS(iommu, bus, skip));
#endif /* _TRACE_HOOK_IOMMU_H */
/* This part must be outside protection */

View File

@ -12,6 +12,7 @@
struct shmem_inode_info;
struct folio;
struct page_vma_mapped_walk;
struct compact_control;
DECLARE_RESTRICTED_HOOK(android_rvh_shmem_get_folio,
TP_PROTO(struct shmem_inode_info *info, struct folio **folio),
@ -106,6 +107,12 @@ DECLARE_HOOK(android_vh_madvise_cold_pageout_skip,
TP_PROTO(struct vm_area_struct *vma, struct page *page, bool pageout, bool *need_skip),
TP_ARGS(vma, page, pageout, need_skip));
DECLARE_HOOK(android_vh_mm_compaction_begin,
TP_PROTO(struct compact_control *cc, long *vendor_ret),
TP_ARGS(cc, vendor_ret));
DECLARE_HOOK(android_vh_mm_compaction_end,
TP_PROTO(struct compact_control *cc, long vendor_ret),
TP_ARGS(cc, vendor_ret));
struct mem_cgroup;
DECLARE_HOOK(android_vh_mem_cgroup_alloc,
TP_PROTO(struct mem_cgroup *memcg),
@ -141,6 +148,16 @@ DECLARE_HOOK(android_vh_look_around,
struct vm_area_struct *vma, int *referenced),
TP_ARGS(pvmw, folio, vma, referenced));
DECLARE_HOOK(android_vh_mm_alloc_pages_direct_reclaim_enter,
TP_PROTO(unsigned int order),
TP_ARGS(order));
DECLARE_HOOK(android_vh_mm_alloc_pages_direct_reclaim_exit,
TP_PROTO(unsigned long did_some_progress, int retry_times),
TP_ARGS(did_some_progress, retry_times));
struct oom_control;
DECLARE_HOOK(android_vh_mm_alloc_pages_may_oom_exit,
TP_PROTO(struct oom_control *oc, unsigned long did_some_progress),
TP_ARGS(oc, did_some_progress));
#endif /* _TRACE_HOOK_MM_H */
/* This part must be outside protection */

View File

@ -49,6 +49,10 @@ DECLARE_HOOK(android_vh_tune_swappiness,
DECLARE_HOOK(android_vh_scan_abort_check_wmarks,
TP_PROTO(bool *check_wmarks),
TP_ARGS(check_wmarks));
DECLARE_HOOK(android_vh_vmscan_kswapd_done,
TP_PROTO(int node_id, unsigned int highest_zoneidx, unsigned int alloc_order,
unsigned int reclaim_order),
TP_ARGS(node_id, highest_zoneidx, alloc_order, reclaim_order));
#endif /* _TRACE_HOOK_VMSCAN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@ -181,6 +181,7 @@ static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
if (wait) {
wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
__add_wait_queue_entry_tail(&sem->waiters, &wq_entry);
trace_android_vh_percpu_rwsem_wq_add(sem, reader);
}
spin_unlock_irq(&sem->waiters.lock);

View File

@ -2246,6 +2246,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
!cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
task_on_cpu(rq, task) ||
!dl_task(task) ||
is_migration_disabled(task) ||
!task_on_rq_queued(task))) {
double_unlock_balance(rq, later_rq);
later_rq = NULL;

View File

@ -2098,11 +2098,15 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
* the mean time, task could have
* migrated already or had its affinity changed.
* Also make sure that it wasn't scheduled on its rq.
* It is possible the task was scheduled, set
* "migrate_disabled" and then got preempted, so we must
* check the task migration disable flag here too.
*/
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
task_on_cpu(rq, task) ||
!rt_task(task) ||
is_migration_disabled(task) ||
!task_on_rq_queued(task))) {
double_unlock_balance(rq, lowest_rq);

View File

@ -49,6 +49,9 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/compaction.h>
#include <trace/hooks/mm.h>
#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
@ -2321,6 +2324,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
const bool sync = cc->mode != MIGRATE_ASYNC;
bool update_cached;
unsigned int nr_succeeded = 0;
long vendor_ret;
/*
* These counters track activities during zone compaction. Initialize
@ -2391,6 +2395,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync);
trace_android_vh_mm_compaction_begin(cc, &vendor_ret);
/* lru_add_drain_all could be expensive with involving other CPUs */
lru_add_drain();
@ -2516,6 +2521,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
trace_android_vh_mm_compaction_end(cc, vendor_ret);
trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret);
return ret;
@ -2655,7 +2661,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
|| fatal_signal_pending(current))
break;
}
trace_android_vh_compaction_try_to_compact_pages_exit(&rc);
return rc;
}
@ -2902,6 +2908,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
VM_BUG_ON(!list_empty(&cc.freepages));
VM_BUG_ON(!list_empty(&cc.migratepages));
}
trace_android_vh_compaction_exit(pgdat->node_id, cc.order, cc.highest_zoneidx);
/*
* Regardless of success, we are done until woken up next. But remember

View File

@ -78,6 +78,7 @@ struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);
struct mem_cgroup *root_mem_cgroup __read_mostly;
EXPORT_SYMBOL_GPL(root_mem_cgroup);
/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
@ -827,6 +828,7 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
if (!mem_cgroup_disabled())
__mod_memcg_lruvec_state(lruvec, idx, val);
}
EXPORT_SYMBOL_GPL(__mod_lruvec_state);
void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
int val)
@ -1392,6 +1394,7 @@ void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
if (nr_pages > 0)
*lru_size += nr_pages;
}
EXPORT_SYMBOL_GPL(mem_cgroup_update_lru_size);
/**
* mem_cgroup_margin - calculate chargeable space of a memory cgroup

View File

@ -3500,6 +3500,12 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
return wp_page_shared(vmf);
}
copy:
if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma->anon_vma) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
vma_end_read(vmf->vma);
return VM_FAULT_RETRY;
}
/*
* Ok, we need to copy. Oh, well..
*/

View File

@ -4503,6 +4503,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
if (!mutex_trylock(&oom_lock)) {
*did_some_progress = 1;
schedule_timeout_uninterruptible(1);
trace_android_vh_mm_alloc_pages_may_oom_exit(&oc, *did_some_progress);
return NULL;
}
@ -4565,6 +4566,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
}
out:
mutex_unlock(&oom_lock);
trace_android_vh_mm_alloc_pages_may_oom_exit(&oc, *did_some_progress);
return page;
}
@ -4868,10 +4870,12 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
unsigned int alloc_flags, const struct alloc_context *ac,
unsigned long *did_some_progress)
{
int retry_times = 0;
struct page *page = NULL;
unsigned long pflags;
bool drained = false;
trace_android_vh_mm_alloc_pages_direct_reclaim_enter(order);
psi_memstall_enter(&pflags);
*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
if (unlikely(!(*did_some_progress)))
@ -4889,11 +4893,12 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
unreserve_highatomic_pageblock(ac, false);
drain_all_pages(NULL);
drained = true;
++retry_times;
goto retry;
}
out:
psi_memstall_leave(&pflags);
trace_android_vh_mm_alloc_pages_direct_reclaim_exit(*did_some_progress, retry_times);
return page;
}

View File

@ -936,6 +936,7 @@ int folio_referenced(struct folio *folio, int is_locked,
return rwc.contended ? -1 : pra.referenced;
}
EXPORT_SYMBOL_GPL(folio_referenced);
static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
{

View File

@ -805,14 +805,16 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
XA_STATE(xas, &mapping->i_pages, start);
struct page *page;
unsigned long swapped = 0;
unsigned long max = end - 1;
rcu_read_lock();
xas_for_each(&xas, page, end - 1) {
xas_for_each(&xas, page, max) {
if (xas_retry(&xas, page))
continue;
if (xa_is_value(page))
swapped++;
if (xas.xa_index == max)
break;
if (need_resched()) {
xas_pause(&xas);
cond_resched_rcu();

View File

@ -4702,10 +4702,11 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
{
int seg;
int old, new;
unsigned long flags;
int bin = get_random_u32_below(MEMCG_NR_BINS);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
spin_lock(&pgdat->memcg_lru.lock);
spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);
VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
@ -4740,7 +4741,7 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
spin_unlock(&pgdat->memcg_lru.lock);
spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
}
void lru_gen_online_memcg(struct mem_cgroup *memcg)
@ -4753,7 +4754,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)
struct pglist_data *pgdat = NODE_DATA(nid);
struct lruvec *lruvec = get_lruvec(memcg, nid);
spin_lock(&pgdat->memcg_lru.lock);
spin_lock_irq(&pgdat->memcg_lru.lock);
VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
@ -4764,7 +4765,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)
lruvec->lrugen.gen = gen;
spin_unlock(&pgdat->memcg_lru.lock);
spin_unlock_irq(&pgdat->memcg_lru.lock);
}
}
@ -4788,7 +4789,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
struct pglist_data *pgdat = NODE_DATA(nid);
struct lruvec *lruvec = get_lruvec(memcg, nid);
spin_lock(&pgdat->memcg_lru.lock);
spin_lock_irq(&pgdat->memcg_lru.lock);
VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
@ -4800,7 +4801,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
spin_unlock(&pgdat->memcg_lru.lock);
spin_unlock_irq(&pgdat->memcg_lru.lock);
}
}
@ -7788,6 +7789,8 @@ int kswapd(void *p)
alloc_order);
reclaim_order = balance_pgdat(pgdat, alloc_order,
highest_zoneidx);
trace_android_vh_vmscan_kswapd_done(pgdat->node_id, highest_zoneidx,
alloc_order, reclaim_order);
if (reclaim_order < alloc_order)
goto kswapd_try_sleep;
}

View File

@ -414,8 +414,9 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
{
struct sock *sk;
int err = 0;
unsigned long flags;
raw_spin_lock_bh(&stab->lock);
raw_spin_lock_irqsave(&stab->lock, flags);
sk = *psk;
if (!sk_test || sk_test == sk)
sk = xchg(psk, NULL);
@ -425,7 +426,7 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
else
err = -EINVAL;
raw_spin_unlock_bh(&stab->lock);
raw_spin_unlock_irqrestore(&stab->lock, flags);
return err;
}
@ -923,11 +924,12 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
struct bpf_shtab_bucket *bucket;
struct bpf_shtab_elem *elem;
int ret = -ENOENT;
unsigned long flags;
hash = sock_hash_bucket_hash(key, key_size);
bucket = sock_hash_select_bucket(htab, hash);
raw_spin_lock_bh(&bucket->lock);
raw_spin_lock_irqsave(&bucket->lock, flags);
elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
if (elem) {
hlist_del_rcu(&elem->node);
@ -935,7 +937,7 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
sock_hash_free_elem(htab, elem);
ret = 0;
}
raw_spin_unlock_bh(&bucket->lock);
raw_spin_unlock_irqrestore(&bucket->lock, flags);
return ret;
}

View File

@ -1213,6 +1213,7 @@ EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
struct net_device *dev;
struct ip_options opt;
int res;
@ -1230,7 +1231,8 @@ static void ipv4_send_dest_unreach(struct sk_buff *skb)
opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
rcu_read_lock();
res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
rcu_read_unlock();
if (res)

View File

@ -1322,7 +1322,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
if (!nft_is_active_next(ctx->net, chain))
continue;
if (nft_chain_is_bound(chain))
if (nft_chain_binding(chain))
continue;
ctx->chain = chain;
@ -1367,7 +1367,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
if (!nft_is_active_next(ctx->net, chain))
continue;
if (nft_chain_is_bound(chain))
if (nft_chain_binding(chain))
continue;
ctx->chain = chain;
@ -2684,6 +2684,9 @@ static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
return PTR_ERR(chain);
}
if (nft_chain_binding(chain))
return -EOPNOTSUPP;
if (info->nlh->nlmsg_flags & NLM_F_NONREC &&
chain->use > 0)
return -EBUSY;
@ -3666,6 +3669,11 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
}
if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
if (nft_chain_binding(chain)) {
err = -EOPNOTSUPP;
goto err_destroy_flow_rule;
}
err = nft_delrule(&ctx, old_rule);
if (err < 0)
goto err_destroy_flow_rule;
@ -3771,7 +3779,7 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
}
if (nft_chain_is_bound(chain))
if (nft_chain_binding(chain))
return -EOPNOTSUPP;
}
@ -3801,7 +3809,7 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
list_for_each_entry(chain, &table->chains, list) {
if (!nft_is_active_next(net, chain))
continue;
if (nft_chain_is_bound(chain))
if (nft_chain_binding(chain))
continue;
ctx.chain = chain;

View File

@ -315,6 +315,14 @@ static int nfnl_osf_add_callback(struct sk_buff *skb,
f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
if (f->opt_num > ARRAY_SIZE(f->opt))
return -EINVAL;
if (!memchr(f->genre, 0, MAXGENRELEN) ||
!memchr(f->subtype, 0, MAXGENRELEN) ||
!memchr(f->version, 0, MAXGENRELEN))
return -EINVAL;
kf = kmalloc(sizeof(struct nf_osf_finger), GFP_KERNEL);
if (!kf)
return -ENOMEM;

Some files were not shown because too many files have changed in this diff Show More