Merge branch android14-6.1 into android14-6.1-lts

Sync up with the android14-6.1 branch due to the latest ABI update that
happened on it.  This includes the following commits:

08f3189337 ANDROID: ABI: Update oplus symbol list
3efffff553 ANDROID: Allow vendor module to reclaim a memcg
f627d47d36 ANDROID: Export memcg functions to allow module to add new files
032458b9cb ANDROID: vendor_hooks: add hooks in mem_cgroup subsystem
ed1e17fc4b ANDROID: GKI: Update symbols to symbol list
0ab6144add ANDROID: block: export tracepoints
57750518de ANDROID: vendor_hook: Avoid clearing protect-flag before waking waiters
aacbded3ac ANDROID: KVM: arm64: Deprecate late pKVM module loading
0fbbb18ab0 ANDROID: ABI: Update symbol for Exynos SoC
a1c9b5c53b ANDROID: 6/7/2023 KMI update
e98206b0af ANDROID: db845c: Update symbol list
9f9c67c073 FROMLIST: ufs: core: Add error handling for MCQ mode
4f319f6367 FROMLIST: ufs: mcq: Use ufshcd_mcq_poll_cqe_lock() in mcq mode
e3d62e38a9 FROMLIST: ufs: mcq: Added ufshcd_mcq_abort()
d61719a08e FROMLIST: ufs: mcq: Add support for clean up mcq resources
2bbead6020 FROMLIST: ufs: mcq: Add supporting functions for mcq abort
1cb86cc92e FROMLIST: ufs: core: Update the ufshcd_clear_cmds() functionality
0866848447 FROMLIST: ufs: core: Combine 32-bit command_desc_base_addr_lo/hi
8326170bb6 ANDROID: GKI: Add OEM data to mutex/rwsem
b0375cb69c BACKPORT: mm: Multi-gen LRU: remove wait_event_killable()
5d26262b76 FROMGIT: Binder: Add async from to transaction record
f00a145958 BACKPORT: FROMGIT: Binder: Add timestamp to transaction record
d7b3d8d1e5 ANDROID: GKI: add ANDROID_OEM_DATA() in struct request_queue
643c727c8e ANDROID: GKI: Add ANDROID_OEM_DATA() in struct ufs_hba
7c76ad4d21 ANDROID: GKI: add ANDROID_OEM_DATA in struct bio
fcfa42b67b ANDROID: GKI: Add ANDROID_OEM_DATA() in struct queue_limits
72515e57f0 ANDROID: GKI: Add ANDROID_OEM_DATA in struct ufs_dev_info
a10d9e3fd6 ANDROID: sched: Add oem data in struct rq
87881c4dd1 ANDROID: nl80211: reserve UAPI attributes to support backporting after KMI freeze
10cb62e6ca ANDROID: GKI: mm: Add Android ABI padding to MGLRU structures
4081f368be ANDROID: abi_gki_aarch64_qcom: Add secure hibernation APIs
3cd0cc9e1b ANDROID: abi_gki_aarch64_qcom: Add hibernation APIs
154b4b9f1a ANDROID: vendor hooks: Encrypt snapshot for bootloader based hibernation
defdc4d1dd UPSTREAM: tty: serial: qcom-geni-serial: Add support for Hibernation feature
c81f4841b4 ANDROID: abi_gki_aarch64_qcom: Add clk_restore_context and clk_save_context
d7e1f4f021 ANDROID: vendor hooks: Add hooks to support bootloader based hibernation
d57acebb45 ANDROID: gki_defconfig: Enable CONFIG_HIBERNATION flag
62db17973a ANDROID: vendor hooks: Export symbols for bootloader based hibernation
17537ba480 ANDROID: gki_defconfig: enable CONFIG_LED_TRIGGER_PHY
640d3b737d ANDROID: add vendor fields to swap_info_struct to record swap stats
672eca4c06 ANDROID: add vendor fields to swap_slots_cache to support multiple swap devices
3cea2a67dc ANDROID: add vendor fields to lruvec to record refault stats
a80f228e59 ANDROID: vendor_hooks: add a field in mem_cgroup
ddb14b6f45 ANDROID: vendor_hooks: add a field in pglist_data
9920a720cc ANDROID: GKI: Update db845c symbol list for down_write
ed10529c6d ANDROID: fix atomisp driver missing in 6.4 kernel
a264d8efcb BACKPORT: mm: do not increment pgfault stats when page fault handler retries
3fd1d7d4f2 ANDROID: GKI: enable per-vma lock stats
78c6875e2f UPSTREAM: mm: change per-VMA lock statistics to be disabled by default
bfef68d9a6 UPSTREAM: s390/mm: try VMA lock-based page fault handling first
200745f46a UPSTREAM: mm: separate vma->lock from vm_area_struct
23fcd3167e UPSTREAM: mm/mmap: free vm_area_struct without call_rcu in exit_mmap
e04309717e UPSTREAM: powerc/mm: try VMA lock-based page fault handling first
e6bf4cded6 UPSTREAM: arm64/mm: try VMA lock-based page fault handling first
51b88c16a3 UPSTREAM: x86/mm: try VMA lock-based page fault handling first
ebbbcdfeaf UPSTREAM: mm: introduce per-VMA lock statistics
4e4c6989ae UPSTREAM: mm: prevent userfaults to be handled under per-vma lock
6e306e82ac UPSTREAM: mm: prevent do_swap_page from handling page faults under VMA lock
318bf22529 UPSTREAM: mm: add FAULT_FLAG_VMA_LOCK flag
c06661eab5 UPSTREAM: mm: fall back to mmap_lock if vma->anon_vma is not yet set
5949b78f6c UPSTREAM: mm: introduce lock_vma_under_rcu to be used from arch-specific code
35ffa4830e BACKPORT: mm: introduce vma detached flag
3c6748cd51 UPSTREAM: mm/mmap: prevent pagefault handler from racing with mmu_notifier registration
3d18d86ea9 UPSTREAM: kernel/fork: assert no VMA readers during its destruction
9cc64c7fb9 UPSTREAM: mm: conditionally write-lock VMA in free_pgtables
5f1e1ab919 UPSTREAM: mm: write-lock VMAs before removing them from VMA tree
24ecdbc5e2 UPSTREAM: mm/mremap: write-lock VMA while remapping it to a new address range
2554cb4775 FROMLIST: mm/mmap: write-lock VMAs affected by VMA expansion
57b3f8a5ab FROMLIST: mm/mmap: write-lock VMAs in vma_adjust
998ec9f54d FROMLIST: mm/mmap: write-lock VMAs before merging, splitting or expanding them
d73ebe031c UPSTREAM: mm/khugepaged: write-lock VMA while collapsing a huge page
3771808d64 FROMLIST: mm/mmap: move VMA locking before vma_adjust_trans_huge call
6764412400 UPSTREAM: mm: mark VMA as being written when changing vm_flags
a9ea3113d4 UPSTREAM: mm: add per-VMA lock and helper functions to control it
882c3512ec UPSTREAM: mm: move mmap_lock assert function definitions
e28a0a0e5c UPSTREAM: mm: rcu safe VMA freeing
04f73ad5b4 UPSTREAM: mm: introduce CONFIG_PER_VMA_LOCK
ef8351241d UPSTREAM: mm: introduce vm_flags_reset_once to replace WRITE_ONCE vm_flags updates
75977e5919 UPSTREAM: mm: export dump_mm()
2ff3b23c7f UPSTREAM: mm: introduce __vm_flags_mod and use it in untrack_pfn
6f5e28fd18 UPSTREAM: mm: replace vma->vm_flags indirect modification in ksm_madvise
5dd0547a3e UPSTREAM: mm: replace vma->vm_flags direct modifications with modifier calls
bf16383ebd UPSTREAM: mm: replace VM_LOCKED_CLEAR_MASK with VM_LOCKED_MASK
6e7028dadd UPSTREAM: mm: introduce vma->vm_flags wrapper functions
db341ea4da UPSTREAM: kernel/fork: convert vma assignment to a memcpy
d8e7ccc003 ANDROID: GKI: dentry: add Android ABI padding to some structures
05a42c34b2 ANDROID: GKI: sched: add Android ABI padding to some structures
1c5cd2e4ad ANDROID: GKI: elevator: add Android ABI padding to some structures
5fe1040d35 ANDROID: GKI: phy: add Android ABI padding to some structures
8b849a574b ANDROID: GKI: workqueue.h: add Android ABI padding to some structures
86db2d740e ANDROID: GKI: scsi: add Android ABI padding to some structures
f456f5cc1c ANDROID: GKI: device.h: add Android ABI padding to some structures
61963aff03 ANDROID: GKI: timer.h: add Android ABI padding to a structure
f12cb09789 ANDROID: GKI: USB: add Android ABI padding to some structures
f79a11f8bc ANDROID: GKI: sched.h: add Android ABI padding to some structures
801c9e6233 ANDROID: GKI: module.h: add Android ABI padding to some structures
15e9bd7cf2 ANDROID: GKI: sock.h: add Android ABI padding to some structures
41afe8d0bf ANDROID: GKI: sched: add Android ABI padding to some structures
c425034c95 ANDROID: GKI: mount.h: add Android ABI padding to some structures
1276e53f17 ANDROID: GKI: mm: add Android ABI padding to some structures
d58a77ead7 ANDROID: GKI: add some padding to some driver core structures
854f9f2e6c ANDROID: GKI: add padding to struct HID structures
bf55b78067 ANDROID: GKI: USB: XHCI: add Android ABI padding to lots of xhci structures
807752459e ANDROID: GKI: dwc3: add Android ABI padding to some structures
faf9904e25 ANDROID: GKI: sound/usb/card.h: add Android ABI padding to struct snd_usb_endpoint
cb27de5e20 ANDROID: GKI: user_namespace.h: add Android ABI padding to a structure
ad192a1c1c ANDROID: GKI: quota.h: add Android ABI padding to some structures
7be2a2a4c3 ANDROID: GKI: mmu_notifier.h: add Android ABI padding to some structures
4298070a54 ANDROID: GKI: mm.h: add Android ABI padding to a structure
ec2d1e2e62 ANDROID: GKI: kobject.h: add Android ABI padding to some structures
e69be4de29 ANDROID: GKI: kernfs.h: add Android ABI padding to some structures
2baf3e23bb ANDROID: GKI: irqdomain.h: add Android ABI padding to a structure
064d2c58f2 ANDROID: GKI: ioport.h: add Android ABI padding to a structure
d1cd3e8b1b ANDROID: GKI: iomap.h: add Android ABI padding to a structure
c2c04df929 ANDROID: GKI: hrtimer.h: add Android ABI padding to a structure
caf18df2e2 ANDROID: GKI: genhd.h: add Android ABI padding to some structures
0205308795 ANDROID: GKI: ethtool.h: add Android ABI padding to a structure
e319ef95b6 ANDROID: GKI: dma-mapping.h: add Android ABI padding to a structure
608ed5cfb3 ANDROID: GKI: networking: add Android ABI padding to a lot of networking structures
8d09c29ee1 ANDROID: GKI: blk_types.h: add Android ABI padding to a structure
e7a017ab91 ANDROID: GKI: scsi.h: add Android ABI padding to a structure
48da4dd64b ANDROID: GKI: pci: add Android ABI padding to some structures
eba9ceccd8 ANDROID: GKI: add Android ABI padding to struct nf_conn
97752aa71c ANDROID: GKI: add kabi padding for structures for the android14 release
1d39b94f8c UPSTREAM: ipv{4,6}/raw: fix output xfrm lookup wrt protocol
9713594a2b UPSTREAM: inet: Add IP_LOCAL_PORT_RANGE socket option
5376c2a57f UPSTREAM: sched/fair: Fixes for capacity inversion detection
7c8500869b UPSTREAM: sched/fair: Consider capacity inversion in util_fits_cpu()
b9346f206f UPSTREAM: sched/fair: Detect capacity inversion
8691a0dae0 ANDROID: KVM: arm64: Initialize hfgwtr_el2 correctly
fd04f99bfa UPSTREAM: net: cdc_ncm: Deal with too low values of dwNtbOutMaxSize
70586aaa7c ANDROID: GKI: Add symbols and update symbol list for Unisoc(2nd)
94e4512c31 ANDROID: ABI: update symbol list related to meminfo for galaxy
a390414140 ANDROID: vendor_hooks: add hooks for extra memory
c3ee18fefa BACKPORT: FROMLIST: ovl: get_acl: Fix null pointer dereference at realinode in rcu-walk mode
8d59a7676c BACKPORT: FROMLIST: ovl: ovl_permission: Fix null pointer dereference at realinode in rcu-walk mode
2de1ee6dd8 BACKPORT: FROMLIST: ovl: Let helper ovl_i_path_real() return the realinode
963667856e ANDROID: GKI: Update symbol list for xiaomi
508ca06639 ANDROID: vendor_hooks:vendor hook for control memory dirty rate
c959a3fc91 ANDROID: f2fs: enable cleancache
2ea053d317 FROMGIT: userfaultfd: fix regression in userfaultfd_unmap_prep()
2f5f352e6a FROMGIT: BACKPORT: mm: avoid rewalk in mmap_region
86d9de0898 FROMGIT: maple_tree: clear up index and last setting in single entry tree
9288f53a31 FROMGIT: maple_tree: add mas_prev_range() and mas_find_range_rev interface
c4f5714056 FROMGIT: maple_tree: introduce mas_prev_slot() interface
dbc16cc86a FROMGIT: maple_tree: relocate mas_rewalk() and mas_rewalk_if_dead()
b83cfb2dc4 FROMGIT: maple_tree: add mas_next_range() and mas_find_range() interfaces
84719794e0 FROMGIT: maple_tree: introduce mas_next_slot() interface
c5c507cfec FROMGIT: maple_tree: try harder to keep active node with mas_prev()
cb6d9fa6ad FROMGIT: maple_tree: try harder to keep active node after mas_next()
5ff9438fe1 FROMGIT: BACKPORT: mm/mmap: change do_vmi_align_munmap() for maple tree iterator changes
133fbad5bd FROMLIST: BACKPORT: maple_tree: Refine mas_preallocate() node calculations
ce9ebd83aa ANDROID: maple_tree: Move mas_wr_modify node size calculation to mas_wr_node_size()
b6734cb2ce FROMLIST: BACKPORT: maple_tree: Move mas_wr_end_piv() below mas_wr_extend_null()
aede79b81e ANDROID: mm: Fix __vma_adjust() writes for the maple tree
b802573f44 FROMLIST: BACKPORT: mm: Set up vma iterator for vma_iter_prealloc() calls
c3118993c9 FROMGIT: maple_tree: avoid unnecessary ascending
e9fdabfc2a FROMLIST: BACKPORT: mm: Change do_vmi_align_munmap() side tree index
25bed2fdbc UPSTREAM: mm/mmap: remove preallocation from do_mas_align_munmap()
312dfb3b7e ANDROID: abi_gki_aarch64_qcom: Update QCOM symbol list
6b3daa3bba ANDROID: GKI: Update symbol list for xiaomi
ec196511bf ANDROID: vendor_hooks:vendor hook for mmput
571c04e945 ANDROID: ABI: update symbol list for galaxy
847b3f6c96 ANDROID: task_mmu: add vendor hook for swap entry
aee36dd530 ANDROID: mm: add vendor hooks in madvise for swap entry
c0cfeeaa88 BACKPORT: FROMLIST: arm64: mte: Simplify swap tag restoration logic
131714e34b FROMLIST: mm: Call arch_swap_restore() from unuse_pte()
3805b879f5 FROMLIST: mm: Call arch_swap_restore() from do_swap_page()
098028adf7 ANDROID: abi_gki_aarch64_qcom: Update symbol list
71844b8ed9 ANDROID: GKI: Update symbol list for xiaomi
025b5a487b ANDROID: vendor_hooks:vendor hook for __alloc_pages_slowpath.
60b0f85e24 ANDROID: ABI: Update honor symbol list
3e2dc32f59 ANDROID: mm: create vendor hooks for memory reclaim
8e6a28c815 UPSTREAM: kcsan: Avoid READ_ONCE() in read_instrumented_memory()
675bc3a00e ANDROID: abi_gki_aarch64_qcom: update symbol list
7b14897460 ANDROID: Update ABI as part of crash fix
9e2fa0a396 ANDROID: Fix incorrect hook declaration
e57fe10b5a ANDROID: abi_gki_aarch64_qcom: Add GIC and hibernation APIs
227d23d61d ANDROID: gic-v3: Export gic_v3_resume() for vendor GIC enhancements
275c8705e5 ANDROID: irqchip/irq-gic-v3: Add vendor hook for gic suspend
c9539979a9 ANDROID: abi_gki_aarch64_qcom: Update abi_gki_aarch64_qcom for DMA
6da02f9101 ANDROID: GKI: Update symbols to abi_gki_aarch64_oplus for extend copy & fbarrier feature
87b384408e ANDROID: abi_gki_aarch64_qcom: Update symbol list
a3a743e67f ANDROID: mmc: core: Export core functions for kernel modules usage
631a2db5a3 ANDROID: vendor_hooks: Define new hooks in _mmc_suspend/resume
e82e89e170 ANDROID: update symbol for unisoc vendor_hooks
b7a7fd15ed ANDROID: vendor_hooks: psci: add hook to check if cpu is allowed to power off
3be7d118e7 ANDROID: Add vendor hook to the effective_cpu_util
0c2142745d ANDROID: Update symbol list for mtk
6f7dc871a6 ANDROID: ABI: update symbol list for galaxy
b0a752c3aa ANDROID: update symbol for unisoc vendor_hooks
ce7ceff8c8 ANDROID: thermal: Add vendor hook to check power range
7191b6a759 ANDROID: thermal: Add vendor hook to get thermal zone device
1fe511720a ANDROID: thermal: Add hook for cpufreq thermal
78fe8913d1 UPSTREAM: mm,kfence: decouple kfence from page granularity mapping judgement

Change-Id: If850e76826ace89e477988751792ea6055c8879c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit da95c44967
Author: Greg Kroah-Hartman <gregkh@google.com>
Date: 2023-06-08 11:18:41 +00:00

381 changed files with 37289 additions and 26492 deletions

File diff suppressed because it is too large.


@ -1057,6 +1057,7 @@
dma_buf_unmap_attachment
dma_get_sgtable_attrs
down_read
down_write
find_vma
__mmap_lock_do_trace_acquire_returned
__mmap_lock_do_trace_released
@ -1068,6 +1069,7 @@
__tracepoint_mmap_lock_released
__tracepoint_mmap_lock_start_locking
up_read
up_write
# required by gpi.ko
krealloc
@ -2043,7 +2045,7 @@
ufshcd_init_pwr_dev_param
ufshcd_mcq_config_esi
ufshcd_mcq_enable_esi
ufshcd_mcq_poll_cqe_nolock
ufshcd_mcq_poll_cqe_lock
ufshcd_mcq_write_cqis
ufshcd_pltfrm_init
ufshcd_pltfrm_shutdown
@ -2101,3 +2103,4 @@
fwnode_get_name
icc_provider_add
icc_provider_del
ufshcd_mcq_poll_cqe_nolock


@ -107,9 +107,6 @@
crc32_le
crypto_alloc_shash
crypto_destroy_tfm
crypto_shash_digest
crypto_shash_update
csum_partial
_ctype
debugfs_create_bool
debugfs_create_dir
@ -123,9 +120,6 @@
del_timer
del_timer_sync
destroy_workqueue
dev_addr_mod
dev_alloc_name
dev_close
dev_driver_string
_dev_emerg
_dev_err
@ -201,8 +195,6 @@
dev_pm_qos_read_value
dev_pm_qos_remove_request
dev_pm_qos_update_request
_dev_printk
__dev_queue_xmit
devres_add
__devres_alloc_node
dev_set_name
@ -328,7 +320,6 @@
drm_vblank_init
dump_stack
enable_irq
ether_setup
eth_type_trans
fd_install
_find_first_bit
@ -348,7 +339,6 @@
free_netdev
__free_pages
free_pages
free_percpu
freq_qos_update_request
gcd
generic_file_llseek
@ -415,10 +405,6 @@
ida_alloc_range
ida_free
idr_alloc
idr_destroy
idr_find
idr_for_each
idr_get_next
idr_remove
init_dummy_netdev
init_net
@ -468,7 +454,6 @@
__kfifo_to_user
kfree
kfree_const
kfree_sensitive
kfree_skb_reason
kimage_voffset
__kmalloc
@ -521,7 +506,6 @@
__list_add_valid
__list_del_entry_valid
list_sort
__local_bh_enable_ip
log_post_read_mmio
log_post_write_mmio
log_read_mmio
@ -561,12 +545,9 @@
__napi_schedule
napi_schedule_prep
__netdev_alloc_skb
netif_carrier_off
netif_carrier_on
netif_napi_add_weight
netif_receive_skb
netif_rx
netif_tx_stop_all_queues
netif_tx_wake_queue
nla_memcpy
nla_put
@ -703,7 +684,6 @@
proc_remove
proc_set_size
proc_set_user
__pskb_pull_tail
put_device
__put_task_struct
put_unused_fd
@ -740,10 +720,7 @@
__register_chrdev
register_chrdev_region
register_die_notifier
register_inet6addr_notifier
register_inetaddr_notifier
register_netdevice
register_netdevice_notifier
register_pm_notifier
register_reboot_notifier
register_restart_handler
@ -810,8 +787,6 @@
simple_write_to_buffer
single_open
single_release
skb_clone
skb_copy
skb_copy_bits
skb_copy_expand
skb_dequeue
@ -884,7 +859,6 @@
strsep
strstr
subsys_system_register
__sw_hweight16
__sw_hweight32
__sw_hweight64
__sw_hweight8
@ -904,7 +878,6 @@
sysfs_remove_file_ns
sysfs_remove_group
sysfs_remove_link
system_freezable_wq
system_highpri_wq
system_long_wq
system_state
@ -915,7 +888,6 @@
tasklet_init
tasklet_kill
__tasklet_schedule
tasklet_setup
thermal_of_cooling_device_register
thermal_zone_device_update
thermal_zone_get_temp
@ -952,9 +924,6 @@
unpin_user_page
__unregister_chrdev
unregister_chrdev_region
unregister_inet6addr_notifier
unregister_inetaddr_notifier
unregister_netdevice_notifier
unregister_netdevice_queue
unregister_pm_notifier
unregister_reboot_notifier
@ -1044,8 +1013,10 @@
# required by cfg80211.ko
bpf_trace_run10
csum_partial
debugfs_rename
__dev_change_net_namespace
dev_close
__dev_get_by_index
dev_get_by_index
device_add
@ -1059,6 +1030,7 @@
key_create_or_update
key_put
keyring_alloc
kfree_sensitive
ktime_get_coarse_with_offset
netdev_err
netlink_broadcast
@ -1070,7 +1042,9 @@
__nla_parse
nla_put_64bit
__nla_validate
__pskb_pull_tail
__put_net
register_netdevice_notifier
register_pernet_device
rfkill_alloc
rfkill_blocked
@ -1082,7 +1056,9 @@
skb_add_rx_frag
__sock_create
sock_release
__sw_hweight16
system_power_efficient_wq
unregister_netdevice_notifier
unregister_pernet_device
verify_pkcs7_signature
wireless_nlevent_flush
@ -1113,6 +1089,7 @@
regmap_raw_write_async
# required by drm_display_helper.ko
_dev_printk
drm_atomic_get_new_private_obj_state
drm_atomic_get_old_private_obj_state
drm_crtc_commit_wait
@ -1654,8 +1631,12 @@
# required by hardlockup-watchdog.ko
cpus_read_lock
cpus_read_unlock
free_percpu
smp_call_on_cpu
# required by hdcp2.ko
crypto_shash_digest
# required by hook.ko
__traceiter_android_vh_do_wake_up_sync
__traceiter_android_vh_set_wake_flags
@ -1732,63 +1713,9 @@
led_classdev_register_ext
led_classdev_unregister
# required by mac80211.ko
__alloc_percpu_gfp
arc4_crypt
arc4_setkey
crc32_be
crypto_aead_decrypt
crypto_aead_encrypt
crypto_aead_setauthsize
crypto_aead_setkey
crypto_alloc_aead
crypto_alloc_skcipher
__crypto_memneq
crypto_shash_finup
crypto_shash_setkey
crypto_skcipher_decrypt
crypto_skcipher_encrypt
crypto_skcipher_setkey
__crypto_xor
dev_fetch_sw_netstats
eth_mac_addr
ethtool_op_get_link
get_random_u16
__hw_addr_init
__hw_addr_sync
__hw_addr_unsync
kernel_param_lock
kernel_param_unlock
kfree_skb_list_reason
ktime_get_seconds
memmove
netdev_info
netdev_set_default_ethtool_ops
netif_receive_skb_list
net_ratelimit
pskb_expand_head
___pskb_trim
rhashtable_free_and_destroy
rhashtable_insert_slow
rhltable_init
__rht_bucket_nested
rht_bucket_nested
rht_bucket_nested_insert
round_jiffies
round_jiffies_relative
round_jiffies_up
sg_init_one
skb_checksum_help
skb_clone_sk
skb_complete_wifi_ack
skb_ensure_writable
__skb_get_hash
__skb_gso_segment
tasklet_unlock_wait
unregister_netdevice_many
# required by mcDrvModule.ko
crypto_shash_final
crypto_shash_update
down_read
freezer_active
freezing_slow_path
@ -1810,7 +1737,6 @@
# required by mpam_arch.ko
bitmap_alloc
__cpu_present_mask
kobj_sysfs_ops
kstrtou16
on_each_cpu_cond_mask
@ -1850,6 +1776,7 @@
kstrdup_const
llist_add_batch
rb_first
tasklet_setup
# required by pablo-self-tests.ko
debugfs_create_blob
@ -1957,6 +1884,7 @@
devm_rtc_device_register
rtc_update_irq
rtc_valid_tm
system_freezable_wq
# required by s3c2410_wdt.ko
watchdog_init_timeout
@ -2070,22 +1998,30 @@
# required by scsc_wlan.ko
arp_tbl
dev_addr_mod
dev_alloc_name
__dev_queue_xmit
down_trylock
dql_completed
dql_reset
dst_release
ether_setup
for_each_kernel_tracepoint
in4_pton
in6_pton
ip_route_output_flow
ip_send_check
linkwatch_fire_event
__local_bh_enable_ip
napi_disable
napi_gro_flush
neigh_destroy
neigh_lookup
netif_carrier_off
netif_carrier_on
__netif_napi_del
netif_schedule_queue
netif_tx_stop_all_queues
nla_put_nohdr
_raw_read_lock_bh
_raw_read_unlock_bh
@ -2093,8 +2029,14 @@
_raw_write_lock_bh
_raw_write_unlock
_raw_write_unlock_bh
register_inet6addr_notifier
register_inetaddr_notifier
register_kretprobe
skb_clone
skb_copy
skb_realloc_headroom
unregister_inet6addr_notifier
unregister_inetaddr_notifier
unregister_kretprobe
# required by scsi_srpmb.ko
@ -2214,11 +2156,17 @@
handle_simple_irq
i2c_bit_add_bus
ida_destroy
idr_destroy
idr_find
idr_for_each
idr_get_next
idr_replace
jiffies64_to_msecs
memremap
memunmap
mmu_notifier_synchronize
page_pinner_inited
__page_pinner_put_page
param_ops_hexint
pci_assign_unassigned_bus_resources
pci_bus_resource_n


@ -146,6 +146,7 @@
kernel_cpustat
kernfs_find_and_get_ns
kfree
__kfree_skb
kimage_vaddr
kimage_voffset
__kmalloc
@ -266,6 +267,7 @@
simple_strtol
single_open
single_release
skb_copy_ubufs
snd_soc_find_dai
snd_soc_info_volsw_sx
snd_soc_put_volsw_sx
@ -307,24 +309,34 @@
__traceiter_android_rvh_report_bug
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_is_fpsimd_save
__traceiter_android_vh_madvise_pageout_swap_entry
__traceiter_android_vh_madvise_swapin_walk_pmd_entry
__traceiter_android_vh_mutex_wait_finish
__traceiter_android_vh_mutex_wait_start
__traceiter_android_vh_ptype_head
__traceiter_android_vh_rtmutex_wait_finish
__traceiter_android_vh_rtmutex_wait_start
__traceiter_android_vh_rwsem_read_wait_finish
__traceiter_android_vh_rwsem_read_wait_start
__traceiter_android_vh_rwsem_write_wait_finish
__traceiter_android_vh_rwsem_write_wait_start
__traceiter_android_vh_process_madvise_end
__traceiter_android_vh_sched_show_task
__traceiter_android_vh_show_smap
__traceiter_android_vh_smaps_pte_entry
__traceiter_android_vh_try_to_freeze_todo
__traceiter_android_vh_try_to_freeze_todo_unfrozen
__traceiter_android_vh_watchdog_timer_softlockup
__traceiter_android_vh_meminfo_cache_adjust
__traceiter_android_vh_si_mem_available_adjust
__traceiter_android_vh_si_meminfo_adjust
__traceiter_block_rq_insert
__traceiter_console
__traceiter_hrtimer_expire_entry
__traceiter_hrtimer_expire_exit
__traceiter_irq_handler_entry
__traceiter_irq_handler_exit
__traceiter_kfree_skb
__traceiter_sched_switch
__traceiter_workqueue_execute_start
__tracepoint_android_rvh_arm64_serror_panic
@ -338,24 +350,34 @@
__tracepoint_android_rvh_report_bug
__tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_is_fpsimd_save
__tracepoint_android_vh_madvise_pageout_swap_entry
__tracepoint_android_vh_madvise_swapin_walk_pmd_entry
__tracepoint_android_vh_mutex_wait_finish
__tracepoint_android_vh_mutex_wait_start
__tracepoint_android_vh_ptype_head
__tracepoint_android_vh_rtmutex_wait_finish
__tracepoint_android_vh_rtmutex_wait_start
__tracepoint_android_vh_rwsem_read_wait_finish
__tracepoint_android_vh_rwsem_read_wait_start
__tracepoint_android_vh_rwsem_write_wait_finish
__tracepoint_android_vh_rwsem_write_wait_start
__tracepoint_android_vh_process_madvise_end
__tracepoint_android_vh_sched_show_task
__tracepoint_android_vh_show_smap
__tracepoint_android_vh_smaps_pte_entry
__tracepoint_android_vh_try_to_freeze_todo
__tracepoint_android_vh_try_to_freeze_todo_unfrozen
__tracepoint_android_vh_watchdog_timer_softlockup
__tracepoint_android_vh_meminfo_cache_adjust
__tracepoint_android_vh_si_mem_available_adjust
__tracepoint_android_vh_si_meminfo_adjust
__tracepoint_block_rq_insert
__tracepoint_console
__tracepoint_hrtimer_expire_entry
__tracepoint_hrtimer_expire_exit
__tracepoint_irq_handler_entry
__tracepoint_irq_handler_exit
__tracepoint_kfree_skb
tracepoint_probe_register
tracepoint_probe_unregister
__tracepoint_sched_switch
@ -366,6 +388,7 @@
unregister_restart_handler
unregister_syscore_ops
up_write
usb_alloc_dev
usb_gstrings_attach
usbnet_get_endpoints
usbnet_link_change


@ -3,9 +3,15 @@
__tracepoint_android_vh_page_add_new_anon_rmap
__tracepoint_android_vh_do_shrink_slab
__tracepoint_android_vh_shrink_slab_bypass
__tracepoint_android_vh_mutex_init
__tracepoint_android_vh_file_is_tiny_bypass
__tracepoint_android_vh_modify_scan_control
__tracepoint_android_vh_should_continue_reclaim
__traceiter_android_vh_get_page_wmark
__traceiter_android_vh_page_add_new_anon_rmap
__traceiter_android_vh_do_shrink_slab
__traceiter_android_vh_shrink_slab_bypass
__traceiter_android_vh_mutex_init
__tracepoint_android_vh_mutex_init
__traceiter_android_vh_file_is_tiny_bypass
__traceiter_android_vh_modify_scan_control
__traceiter_android_vh_should_continue_reclaim


@ -186,6 +186,8 @@
clk_unprepare
clockevents_config_and_register
clocks_calc_mult_shift
clocksource_mmio_init
clocksource_mmio_readl_up
close_fd
cma_alloc
cma_release
@ -246,7 +248,9 @@
cpu_hwcaps
cpuidle_driver_state_disabled
cpuidle_get_driver
cpuidle_governor_latency_req
cpuidle_pause_and_lock
cpuidle_register_governor
cpuidle_resume_and_unlock
cpu_latency_qos_add_request
cpu_latency_qos_remove_request
@ -302,6 +306,7 @@
crypto_skcipher_setkey
__crypto_xor
css_next_child
csum_ipv6_magic
csum_partial
ct_idle_enter
ct_idle_exit
@ -378,6 +383,7 @@
dev_get_flags
dev_get_regmap
dev_get_stats
dev_get_tstats64
device_add
device_add_disk
device_attach
@ -413,6 +419,7 @@
__device_reset
device_set_of_node_from_dev
device_set_wakeup_capable
device_set_wakeup_enable
device_show_bool
device_store_bool
device_unregister
@ -545,6 +552,8 @@
dev_pm_opp_put_opp_table
dev_pm_opp_remove_all_dynamic
dev_pm_opp_set_config
dev_pm_qos_add_notifier
dev_pm_qos_remove_notifier
dev_pm_qos_update_request
dev_pm_set_dedicated_wake_irq_reverse
_dev_printk
@ -583,6 +592,10 @@
dma_buf_get_each
dma_buf_map_attachment
dma_buf_mmap
dmabuf_page_pool_alloc
dmabuf_page_pool_create
dmabuf_page_pool_destroy
dmabuf_page_pool_free
dma_buf_put
dma_buf_unmap_attachment
dma_buf_vmap
@ -760,6 +773,7 @@
drm_gem_prime_fd_to_handle
drm_gem_prime_handle_to_fd
drm_gem_prime_import
drm_gem_prime_import_dev
drm_gem_private_object_init
drm_gem_vm_close
drm_gem_vm_open
@ -836,6 +850,9 @@
eth_header_cache_update
eth_header_parse
eth_mac_addr
eth_platform_get_mac_address
ethtool_convert_legacy_u32_to_link_mode
ethtool_convert_link_mode_to_legacy_u32
__ethtool_get_link_ksettings
ethtool_op_get_link
ethtool_op_get_ts_info
@ -855,6 +872,7 @@
fget
file_path
filp_close
filp_open_block
_find_first_bit
_find_first_zero_bit
find_get_pid
@ -933,6 +951,7 @@
genlmsg_put
genl_register_family
genl_unregister_family
genphy_resume
gen_pool_add_owner
gen_pool_alloc_algo_owner
gen_pool_avail
@ -1012,6 +1031,7 @@
handle_simple_irq
handle_sysrq
have_governor_per_policy
hex2bin
hex_asc
hex_asc_upper
hex_dump_to_buffer
@ -1029,6 +1049,7 @@
hrtimer_active
hrtimer_cancel
hrtimer_forward
__hrtimer_get_remaining
hrtimer_init
hrtimer_start_range_ns
hrtimer_try_to_cancel
@ -1136,8 +1157,10 @@
input_unregister_device
input_unregister_handle
input_unregister_handler
int_sqrt
iomem_resource
iommu_alloc_resv_region
iommu_dev_disable_feature
iommu_device_register
iommu_device_sysfs_add
iommu_device_sysfs_remove
@ -1208,6 +1231,7 @@
irq_work_queue_on
irq_work_run
irq_work_sync
is_dma_buf_file
is_vmalloc_addr
iterate_dir
iterate_fd
@ -1256,6 +1280,7 @@
__kmalloc
kmalloc_caches
kmalloc_large
kmalloc_large_node
__kmalloc_node
kmalloc_node_trace
__kmalloc_node_track_caller
@ -1366,6 +1391,11 @@
mbox_free_channel
mbox_request_channel
mbox_send_message
mdiobus_alloc_size
mdiobus_free
mdiobus_get_phy
__mdiobus_register
mdiobus_unregister
media_create_ancillary_link
media_create_intf_link
media_create_pad_link
@ -1471,6 +1501,7 @@
mutex_lock_killable
mutex_trylock
mutex_unlock
__napi_alloc_skb
napi_complete_done
napi_disable
napi_enable
@ -1488,6 +1519,7 @@
netdev_err
netdev_info
netdev_name_in_use
netdev_notice
netdev_printk
netdev_set_default_ethtool_ops
netdev_update_features
@ -1504,8 +1536,11 @@
netif_receive_skb
netif_receive_skb_list
netif_rx
netif_set_tso_max_size
netif_stacked_transfer_operstate
netif_tx_lock
netif_tx_stop_all_queues
netif_tx_unlock
netif_tx_wake_queue
netlink_broadcast
netlink_capable
@ -1519,6 +1554,9 @@
net_namespace_list
net_ns_type_operations
net_ratelimit
net_selftest
net_selftest_get_count
net_selftest_get_strings
nf_conntrack_destroy
nf_register_net_hooks
nf_unregister_net_hooks
@ -1617,6 +1655,7 @@
of_n_addr_cells
of_n_size_cells
__of_parse_phandle_with_args
of_pci_get_max_link_speed
of_phandle_iterator_init
of_phandle_iterator_next
of_phy_simple_xlate
@ -1726,13 +1765,35 @@
perf_trace_buf_alloc
perf_trace_run_bpf_submit
pfn_is_map_memory
phy_attached_info
phy_connect
phy_disconnect
phy_do_ioctl_running
phy_ethtool_get_link_ksettings
phy_ethtool_nway_reset
phy_ethtool_set_link_ksettings
phy_exit
phy_get
phy_init
phylink_connect_phy
phylink_create
phylink_destroy
phylink_disconnect_phy
phylink_ethtool_get_pauseparam
phylink_ethtool_set_pauseparam
phylink_generic_validate
phylink_resume
phylink_start
phylink_stop
phylink_suspend
phy_power_off
phy_power_on
phy_print_status
phy_put
phy_set_mode_ext
phy_start
phy_stop
phy_suspend
pick_migrate_task
pid_task
pinconf_generic_parse_dt_config
@ -1823,6 +1884,7 @@
print_hex_dump
_printk
_printk_deferred
__printk_ratelimit
proc_create
proc_create_data
proc_create_net_data
@ -2062,6 +2124,7 @@
rtnl_unregister_all
runqueues
sched_clock
sched_clock_register
sched_feat_keys
sched_setattr
sched_setattr_nocheck
@ -2335,6 +2398,7 @@
spi_controller_suspend
spi_delay_to_ns
spi_finalize_current_transfer
spi_mem_default_supports_op
__spi_register_driver
spi_setup
spi_sync
@ -2444,12 +2508,15 @@
thermal_zone_get_temp
thermal_zone_get_zone_by_name
thread_group_cputime_adjusted
tick_nohz_get_sleep_length
timecounter_init
timecounter_read
timer_of_init
timer_unstable_counter_workaround
topology_clear_scale_freq_source
topology_update_thermal_pressure
_totalram_pages
_trace_android_vh_record_pcpu_rwsem_starttime
__trace_bprintk
trace_event_buffer_commit
trace_event_buffer_reserve
@ -2524,6 +2591,8 @@
__traceiter_android_vh_check_file_open
__traceiter_android_vh_check_mmap_file
__traceiter_android_vh_cpufreq_fast_switch
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_iommu_iovad_alloc_iova
__traceiter_android_vh_iommu_iovad_free_iova
__traceiter_android_vh_is_fpsimd_save
@ -2539,6 +2608,8 @@
__traceiter_android_vh_ufs_clock_scaling
__traceiter_cpu_frequency
__traceiter_gpu_mem_total
__traceiter_ipi_entry
__traceiter_ipi_raise
__traceiter_mmap_lock_acquire_returned
__traceiter_mmap_lock_released
__traceiter_mmap_lock_start_locking
@ -2614,6 +2685,8 @@
__tracepoint_android_vh_check_file_open
__tracepoint_android_vh_check_mmap_file
__tracepoint_android_vh_cpufreq_fast_switch
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_iommu_iovad_alloc_iova
__tracepoint_android_vh_iommu_iovad_free_iova
__tracepoint_android_vh_is_fpsimd_save
@ -2629,12 +2702,15 @@
__tracepoint_android_vh_ufs_clock_scaling
__tracepoint_cpu_frequency
__tracepoint_gpu_mem_total
__tracepoint_ipi_entry
__tracepoint_ipi_raise
__tracepoint_mmap_lock_acquire_returned
__tracepoint_mmap_lock_released
__tracepoint_mmap_lock_start_locking
__tracepoint_pelt_rt_tp
__tracepoint_pelt_se_tp
tracepoint_probe_register
tracepoint_probe_register_prio
tracepoint_probe_unregister
__tracepoint_sched_update_nr_running_tp
tracepoint_srcu
@ -2791,6 +2867,7 @@
usb_add_hcd
usb_alloc_coherent
usb_alloc_urb
usb_altnum_to_altsetting
usb_anchor_urb
usb_assign_descriptors
usb_autopm_get_interface
@ -2803,6 +2880,7 @@
usb_composite_unregister
usb_control_msg
usb_control_msg_recv
usb_control_msg_send
usb_copy_descriptors
usb_create_hcd
usb_create_shared_hcd
@ -2812,6 +2890,8 @@
usb_disabled
usb_driver_claim_interface
usb_driver_release_interface
usb_driver_set_configuration
usb_enable_lpm
usb_ep_alloc_request
usb_ep_autoconfig
usb_ep_dequeue
@ -2844,6 +2924,7 @@
usb_get_intf
usb_get_maximum_speed
usb_get_role_switch_default_mode
usb_get_urb
usb_gstrings_attach
usb_hcd_check_unlink_urb
usb_hcd_end_port_resume
@ -2871,9 +2952,12 @@
usb_put_function_instance
usb_put_hcd
usb_put_intf
usb_queue_reset_device
usb_register_driver
usb_remove_function
usb_remove_hcd
usb_reset_configuration
usb_reset_device
usb_role_string
usb_role_switch_get
usb_role_switch_get_drvdata
@ -2881,11 +2965,14 @@
usb_role_switch_set_role
usb_role_switch_unregister
usb_root_hub_lost_power
usb_set_interface
usb_show_dynids
usb_speed_string
usb_store_new_id
usb_string
usb_string_id
usb_submit_urb
usb_unlink_urb
usb_unpoison_urb
usb_wakeup_notification
__usecs_to_jiffies
@ -3053,6 +3140,7 @@
wait_woken
__wake_up
wake_up_bit
wake_up_if_idle
__wake_up_locked
wake_up_process
wakeup_source_add
@ -3060,6 +3148,10 @@
wakeup_source_destroy
wakeup_source_register
wakeup_source_remove
wakeup_sources_read_lock
wakeup_sources_read_unlock
wakeup_sources_walk_next
wakeup_sources_walk_start
wakeup_source_unregister
__wake_up_sync_key
__warn_printk


@ -1,9 +1,14 @@
[abi_symbol_list]
address_space_init_once
bio_add_page
bio_associate_blkg
bio_put
__blkdev_issue_discard
blk_finish_plug
blk_rq_map_user
blk_rq_map_user_iov
blk_start_plug
cgroup_add_legacy_cftypes
console_printk
cpufreq_frequency_table_get_index
cpufreq_policy_transition_delay_us
@ -35,6 +40,7 @@
ktime_get_coarse_real_ts64
memory_cgrp_subsys
memory_cgrp_subsys_enabled_key
mem_cgroup_from_id
mipi_dsi_generic_write
mmc_wait_for_cmd
nf_ct_attach
@ -42,10 +48,14 @@
nf_register_net_hooks
nf_unregister_net_hooks
of_css
__page_file_index
__page_mapcount
pm_get_active_wakeup_sources
__printk_ratelimit
prepare_to_wait_exclusive
proc_symlink
radix_tree_lookup_slot
radix_tree_replace_slot
_raw_write_trylock
reciprocal_value
register_tcf_proto_ops
@ -111,11 +121,16 @@
__traceiter_android_vh_cgroup_set_task
__traceiter_android_vh_dup_task_struct
__traceiter_android_vh_exit_signal
__traceiter_android_vh_mem_cgroup_id_remove
__traceiter_android_vh_mem_cgroup_css_offline
__traceiter_android_vh_mem_cgroup_css_online
__traceiter_android_vh_mem_cgroup_free
__traceiter_android_vh_mem_cgroup_alloc
__traceiter_android_vh_irqtime_account_process_tick
__traceiter_android_vh_mutex_can_spin_on_owner
__traceiter_android_vh_mutex_opt_spin_finish
__traceiter_android_vh_mutex_opt_spin_start
__traceiter_android_vh_cpufreq_acct_update_power
__traceiter_android_vh_cpufreq_acct_update_power
__traceiter_android_vh_cleanup_old_buffers_bypass
__traceiter_android_vh_dm_bufio_shrink_scan_bypass
__traceiter_android_vh_mutex_unlock_slowpath
@ -126,6 +141,12 @@
__traceiter_android_vh_sched_stat_runtime_rt
__traceiter_android_vh_shrink_node_memcgs
__traceiter_android_vh_sync_txn_recvd
__traceiter_block_bio_queue
__traceiter_block_getrq
__traceiter_block_rq_complete
__traceiter_block_rq_issue
__traceiter_block_rq_merge
__traceiter_block_rq_requeue
__traceiter_sched_stat_blocked
__traceiter_sched_stat_iowait
__traceiter_sched_stat_runtime
@ -156,6 +177,11 @@
__tracepoint_android_vh_do_futex
__tracepoint_android_vh_dup_task_struct
__tracepoint_android_vh_exit_signal
__tracepoint_android_vh_mem_cgroup_id_remove
__tracepoint_android_vh_mem_cgroup_css_offline
__tracepoint_android_vh_mem_cgroup_css_online
__tracepoint_android_vh_mem_cgroup_free
__tracepoint_android_vh_mem_cgroup_alloc
__tracepoint_android_vh_futex_sleep_start
__tracepoint_android_vh_futex_wait_end
__tracepoint_android_vh_futex_wait_start
@ -177,6 +203,12 @@
__tracepoint_android_vh_sched_stat_runtime_rt
__tracepoint_android_vh_shrink_node_memcgs
__tracepoint_android_vh_sync_txn_recvd
__tracepoint_block_bio_queue
__tracepoint_block_getrq
__tracepoint_block_rq_complete
__tracepoint_block_rq_issue
__tracepoint_block_rq_merge
__tracepoint_block_rq_requeue
__tracepoint_sched_stat_blocked
__tracepoint_sched_stat_iowait
__tracepoint_sched_stat_runtime
@ -184,6 +216,7 @@
__tracepoint_sched_stat_wait
__tracepoint_sched_waking
__tracepoint_task_rename
try_to_free_mem_cgroup_pages
typec_mux_get_drvdata
unregister_memory_notifier
unregister_tcf_proto_ops


@ -31,6 +31,7 @@
__alloc_percpu_gfp
__alloc_skb
alloc_skb_with_frags
alloc_swapdev_block
alloc_workqueue
alt_cb_patch_nops
amba_bustype
@ -141,10 +142,10 @@
bpf_prog_put
bpf_prog_sub
bpf_stats_enabled_key
bpf_trace_run1
bpf_trace_run10
bpf_trace_run11
bpf_trace_run12
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
@ -299,6 +300,7 @@
clk_register
clk_restore_context
clk_round_rate
clk_save_context
clk_set_parent
clk_set_rate
clk_sync_state
@ -694,6 +696,7 @@
dev_pm_opp_of_register_em
dev_pm_opp_of_remove_table
dev_pm_opp_put
dev_pm_opp_remove
dev_pm_opp_remove_all_dynamic
dev_pm_opp_set_config
dev_pm_opp_set_opp
@ -731,6 +734,7 @@
divider_ro_round_rate_parent
divider_round_rate_parent
dma_alloc_attrs
dma_alloc_noncontiguous
dma_alloc_pages
dma_async_device_register
dma_async_device_unregister
@ -769,6 +773,7 @@
dma_fence_signal_timestamp_locked
dma_fence_wait_timeout
dma_free_attrs
dma_free_noncontiguous
dma_free_pages
dma_get_sgtable_attrs
dma_get_slave_channel
@ -803,6 +808,8 @@
dma_unmap_page_attrs
dma_unmap_resource
dma_unmap_sg_attrs
dma_vmap_noncontiguous
dma_vunmap_noncontiguous
do_trace_netlink_extack
do_trace_rcu_torture_read
double_rq_lock
@ -1227,6 +1234,9 @@
gh_rm_notifier_unregister
gh_rm_register_platform_ops
gh_rm_unregister_platform_ops
gic_v3_cpu_init
gic_v3_dist_init
gic_v3_dist_wait_for_rwp
gic_nonsecure_priorities
gov_attr_set_init
gov_attr_set_put
@ -1873,8 +1883,8 @@
memremap_pages
memscan
mem_section
memset64
memset
memset64
__memset_io
memstart_addr
memunmap
@ -1892,12 +1902,37 @@
__mmap_lock_do_trace_acquire_returned
__mmap_lock_do_trace_released
__mmap_lock_do_trace_start_locking
__mmc_claim_host
mmc_cqe_request_done
mmc_execute_tuning
mmc_get_card
mmc_get_ext_csd
mmc_hs200_tuning
mmc_issue_type
mmc_of_parse
__mmc_poll_for_busy
mmc_prepare_busy_cmd
mmc_put_card
mmc_regulator_get_supply
mmc_regulator_set_ocr
mmc_regulator_set_vqmmc
mmc_release_host
mmc_retune_hold
mmc_retune_release
mmc_select_bus_width
mmc_select_card
mmc_select_hs
mmc_select_hs400
mmc_select_hs_ddr
mmc_select_timing
mmc_send_status
mmc_send_tuning
mmc_set_bus_mode
mmc_set_bus_width
mmc_set_clock
mmc_set_initial_state
mmc_set_timing
mmc_wait_for_cmd
mmput
mod_delayed_work_on
mod_node_page_state
@ -2017,10 +2052,10 @@
nla_find
nla_memcpy
__nla_parse
nla_put_64bit
nla_put
nla_reserve_64bit
nla_put_64bit
nla_reserve
nla_reserve_64bit
nla_strscpy
__nla_validate
__nlmsg_put
@ -2159,6 +2194,8 @@
page_ext_put
page_is_ram
page_mapping
page_pinner_inited
__page_pinner_put_page
page_pool_alloc_pages
page_pool_create
page_pool_destroy
@ -2772,6 +2809,7 @@
sdhci_cqe_disable
sdhci_cqe_enable
sdhci_cqe_irq
sdhci_dumpregs
sdhci_enable_clk
sdhci_get_property
sdhci_pltfm_free
@ -2932,6 +2970,7 @@
smp_call_function
smp_call_function_single
smp_call_function_single_async
snapshot_get_image_size
snd_ctl_remove
snd_hwdep_new
snd_info_create_card_entry
@ -3202,6 +3241,7 @@
topology_update_done
topology_update_thermal_pressure
_totalram_pages
_trace_android_vh_record_pcpu_rwsem_starttime
trace_array_put
__trace_bprintk
trace_clock_local
@ -3246,6 +3286,8 @@
__traceiter_android_rvh_iommu_setup_dma_ops
__traceiter_android_rvh_is_cpu_allowed
__traceiter_android_rvh_migrate_queued_task
__traceiter_android_rvh_mmc_resume
__traceiter_android_rvh_mmc_suspend
__traceiter_android_rvh_new_task_stats
__traceiter_android_rvh_pick_next_entity
__traceiter_android_rvh_place_entity
@ -3298,6 +3340,7 @@
__traceiter_android_vh_cpufreq_fast_switch
__traceiter_android_vh_cpufreq_resolve_freq
__traceiter_android_vh_cpufreq_target
__traceiter_android_vh_check_hibernation_swap
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_cpuidle_psci_enter
@ -3306,22 +3349,30 @@
__traceiter_android_vh_do_wake_up_sync
__traceiter_android_vh_dump_throttled_rt_tasks
__traceiter_android_vh_free_task
__traceiter_android_vh_encrypt_page
__traceiter_android_vh_ftrace_dump_buffer
__traceiter_android_vh_ftrace_format_check
__traceiter_android_vh_ftrace_oops_enter
__traceiter_android_vh_ftrace_oops_exit
__traceiter_android_vh_ftrace_size_check
__traceiter_android_vh_ignore_dmabuf_vmap_bounds
__traceiter_android_vh_gic_v3_suspend
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_jiffies_update
__traceiter_android_vh_kswapd_per_node
__traceiter_android_vh_mpam_set
__traceiter_android_vh_init_aes_encrypt
__traceiter_android_vh_post_image_save
__traceiter_android_vh_printk_hotplug
__traceiter_android_vh_rproc_recovery
__traceiter_android_vh_rproc_recovery_set
__traceiter_android_vh_save_cpu_resume
__traceiter_android_vh_save_hib_resume_bdev
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_setscheduler_uclamp
__traceiter_android_vh_show_resume_epoch_val
__traceiter_android_vh_show_suspend_epoch_val
__traceiter_android_vh_skip_swap_map_write
__traceiter_android_vh_timer_calc_index
__traceiter_android_vh_ufs_check_int_errors
__traceiter_android_vh_ufs_compl_command
@ -3378,6 +3429,8 @@
__tracepoint_android_rvh_iommu_setup_dma_ops
__tracepoint_android_rvh_is_cpu_allowed
__tracepoint_android_rvh_migrate_queued_task
__tracepoint_android_rvh_mmc_resume
__tracepoint_android_rvh_mmc_suspend
__tracepoint_android_rvh_new_task_stats
__tracepoint_android_rvh_pick_next_entity
__tracepoint_android_rvh_place_entity
@ -3430,6 +3483,7 @@
__tracepoint_android_vh_cpufreq_fast_switch
__tracepoint_android_vh_cpufreq_resolve_freq
__tracepoint_android_vh_cpufreq_target
__tracepoint_android_vh_check_hibernation_swap
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_cpuidle_psci_enter
@ -3438,22 +3492,30 @@
__tracepoint_android_vh_do_wake_up_sync
__tracepoint_android_vh_dump_throttled_rt_tasks
__tracepoint_android_vh_free_task
__tracepoint_android_vh_encrypt_page
__tracepoint_android_vh_ftrace_dump_buffer
__tracepoint_android_vh_ftrace_format_check
__tracepoint_android_vh_ftrace_oops_enter
__tracepoint_android_vh_ftrace_oops_exit
__tracepoint_android_vh_ftrace_size_check
__tracepoint_android_vh_ignore_dmabuf_vmap_bounds
__tracepoint_android_vh_gic_v3_suspend
__tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_jiffies_update
__tracepoint_android_vh_kswapd_per_node
__tracepoint_android_vh_mpam_set
__tracepoint_android_vh_init_aes_encrypt
__tracepoint_android_vh_post_image_save
__tracepoint_android_vh_printk_hotplug
__tracepoint_android_vh_rproc_recovery
__tracepoint_android_vh_rproc_recovery_set
__tracepoint_android_vh_save_cpu_resume
__tracepoint_android_vh_save_hib_resume_bdev
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_setscheduler_uclamp
__tracepoint_android_vh_show_resume_epoch_val
__tracepoint_android_vh_show_suspend_epoch_val
__tracepoint_android_vh_skip_swap_map_write
__tracepoint_android_vh_timer_calc_index
__tracepoint_android_vh_ufs_check_int_errors
__tracepoint_android_vh_ufs_compl_command

File diff suppressed because it is too large.


@ -223,8 +223,20 @@
#required by io_limit.ko
__traceiter_android_vh_psi_event
__traceiter_android_vh_psi_group
__traceiter_android_rvh_ctl_dirty_rate
__tracepoint_android_vh_psi_event
__tracepoint_android_vh_psi_group
__tracepoint_android_rvh_ctl_dirty_rate
#required by touch module
power_supply_is_system_supplied
#required by mi_mempool.ko
__traceiter_android_vh_alloc_pages_reclaim_bypass
__traceiter_android_vh_alloc_pages_failure_bypass
__tracepoint_android_vh_alloc_pages_reclaim_bypass
__tracepoint_android_vh_alloc_pages_failure_bypass
#required by mi_mempool.ko
__traceiter_android_vh_mmput
__tracepoint_android_vh_mmput


@ -316,7 +316,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
gate_vma.vm_start = 0xffff0000;
gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
vm_flags_init(&gate_vma, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC);
return 0;
}
arch_initcall(gate_vma_init);
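
The gate_vma hunk above is one instance of the vma->vm_flags conversion pulled in by the "mm: introduce vma->vm_flags wrapper functions" series in the commit list. A minimal sketch of that wrapper API for reference, assuming only the upstream <linux/mm.h> interface; this block is illustrative and not part of the merged diff:

#include <linux/mm.h>

/* Illustrative sketch, not part of this merge: the wrappers replace direct
 * writes to vma->vm_flags so the mm core can assert mmap locking and mark
 * the VMA as being written. */
static void __maybe_unused vm_flags_wrapper_sketch(struct vm_area_struct *vma)
{
	/* Old style, no longer possible once vm_flags is const-qualified:
	 *	vma->vm_flags |= VM_DONTCOPY;
	 */

	/* VMA not yet visible to other users (e.g. during initialisation). */
	vm_flags_init(vma, VM_READ | VM_MAYREAD);

	/* VMA already in the tree: these require the mmap write lock. */
	vm_flags_set(vma, VM_DONTCOPY);
	vm_flags_clear(vma, VM_LOCKED_MASK);

	/* Combined set/clear in a single call. */
	vm_flags_mod(vma, VM_LOCKED, VM_LOCKONFAULT);
}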


@ -96,6 +96,7 @@ config ARM64
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_BPF_JIT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT


@ -62,6 +62,7 @@ CONFIG_UNWIND_PATCH_PAC_INTO_SCS=y
CONFIG_CMDLINE="console=ttynull stack_depot_disable=on cgroup_disable=pressure kasan.page_alloc.sample=10 kasan.stacktrace=off kvm-arm.mode=protected bootconfig ioremap_guard"
CONFIG_CMDLINE_EXTEND=y
# CONFIG_DMI is not set
CONFIG_HIBERNATION=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
@ -326,6 +327,7 @@ CONFIG_IFB=y
CONFIG_MACSEC=y
CONFIG_TUN=y
CONFIG_VETH=y
CONFIG_LED_TRIGGER_PHY=y
CONFIG_AX88796B_PHY=y
CONFIG_CAN_VCAN=m
CONFIG_CAN_SLCAN=m
@ -704,6 +706,7 @@ CONFIG_UBSAN_LOCAL_BOUNDS=y
# CONFIG_UBSAN_ENUM is not set
CONFIG_PAGE_OWNER=y
CONFIG_PAGE_PINNER=y
CONFIG_PER_VMA_LOCK_STATS=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_KASAN=y


@ -19,4 +19,14 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
return true;
}
#ifdef CONFIG_KFENCE
extern bool kfence_early_init;
static inline bool arm64_kfence_can_set_direct_map(void)
{
return !kfence_early_init;
}
#else /* CONFIG_KFENCE */
static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
#endif /* CONFIG_KFENCE */
#endif /* __ASM_KFENCE_H */
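
arm64_kfence_can_set_direct_map() above is the predicate added by "UPSTREAM: mm,kfence: decouple kfence from page granularity mapping judgement" (the last commit in the list) so that an early-allocated KFENCE pool no longer forces the entire linear map to page granularity. A hedged sketch of the consumer in arch/arm64/mm/pageattr.c, which is not excerpted here; rodata_full and debug_pagealloc_enabled() are the pre-existing arm64 checks, and the exact upstream hunk may differ:

/* Sketch of the (non-excerpted) consumer in arch/arm64/mm/pageattr.c. */
bool can_set_direct_map(void)
{
	/*
	 * rodata_full and DEBUG_PAGEALLOC require the linear map to use page
	 * granularity so single pages can be protected/unprotected.  A KFENCE
	 * pool only adds that requirement when it may initialise late
	 * (kfence_early_init unset); an early pool is already mapped at page
	 * granularity by arm64_kfence_map_pool().
	 */
	return rodata_full || debug_pagealloc_enabled() ||
	       arm64_kfence_can_set_direct_map();
}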


@ -63,17 +63,11 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
/*
* __pkvm_alloc_module_va may temporarily serve as the privileged hcall
* limit when module loading is enabled, see early_pkvm_enable_modules().
*/
__KVM_HOST_SMCCC_FUNC___pkvm_alloc_module_va,
__KVM_HOST_SMCCC_FUNC___pkvm_map_module_page,
__KVM_HOST_SMCCC_FUNC___pkvm_unmap_module_page,
__KVM_HOST_SMCCC_FUNC___pkvm_init_module,
__KVM_HOST_SMCCC_FUNC___pkvm_register_hcall,
__KVM_HOST_SMCCC_FUNC___pkvm_close_module_registration,
__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
/* Hypercalls available after pKVM finalisation */


@ -25,7 +25,7 @@ unsigned long mte_copy_tags_to_user(void __user *to, void *from,
unsigned long n);
int mte_save_tags(struct page *page);
void mte_save_page_tags(const void *page_addr, void *tag_storage);
bool mte_restore_tags(swp_entry_t entry, struct page *page);
void mte_restore_tags(swp_entry_t entry, struct page *page);
void mte_restore_page_tags(void *page_addr, const void *tag_storage);
void mte_invalidate_tags(int type, pgoff_t offset);
void mte_invalidate_tags_area(int type);
@ -61,7 +61,7 @@ static inline bool page_mte_tagged(struct page *page)
}
void mte_zero_clear_page_tags(void *addr);
void mte_sync_tags(pte_t old_pte, pte_t pte);
void mte_sync_tags(pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
@ -89,7 +89,7 @@ static inline bool page_mte_tagged(struct page *page)
static inline void mte_zero_clear_page_tags(void *addr)
{
}
static inline void mte_sync_tags(pte_t old_pte, pte_t pte)
static inline void mte_sync_tags(pte_t pte)
{
}
static inline void mte_copy_page_tags(void *kto, const void *kfrom)


@ -335,18 +335,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
* don't expose tags (instruction fetches don't check tags).
*/
if (system_supports_mte() && pte_access_permitted(pte, false) &&
!pte_special(pte)) {
pte_t old_pte = READ_ONCE(*ptep);
/*
* We only need to synchronise if the new PTE has tags enabled
* or if swapping in (in which case another mapping may have
* set tags in the past even if this PTE isn't tagged).
* (!pte_none() && !pte_present()) is an open coded version of
* is_swap_pte()
*/
if (pte_tagged(pte) || (!pte_none(old_pte) && !pte_present(old_pte)))
mte_sync_tags(old_pte, pte);
}
!pte_special(pte) && pte_tagged(pte))
mte_sync_tags(pte);
__check_racy_pte_update(mm, ptep, pte);
@ -1066,8 +1056,8 @@ static inline void arch_swap_invalidate_area(int type)
#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
if (system_supports_mte() && mte_restore_tags(entry, &folio->page))
set_page_mte_tagged(&folio->page);
if (system_supports_mte())
mte_restore_tags(entry, &folio->page);
}
#endif /* CONFIG_ARM64_MTE */


@ -33,6 +33,7 @@
#include <asm/sysreg.h>
#include <asm/trans_pgd.h>
#include <asm/virt.h>
#include <trace/hooks/bl_hib.h>
/*
* Hibernate core relies on this value being 0 on resume, and marks it
@ -80,6 +81,8 @@ static struct arch_hibernate_hdr {
phys_addr_t __hyp_stub_vectors;
u64 sleep_cpu_mpidr;
ANDROID_VENDOR_DATA(1);
} resume_hdr;
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
@ -117,6 +120,9 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir);
hdr->reenter_kernel = _cpu_resume;
trace_android_vh_save_cpu_resume(&hdr->android_vendor_data1,
__pa(cpu_resume));
/* We can't use __hyp_get_vectors() because kvm may still be loaded */
if (el2_reset_needed())
hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);


@ -35,47 +35,18 @@ DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
#endif
static void mte_sync_page_tags(struct page *page, pte_t old_pte,
bool check_swap, bool pte_is_tagged)
{
if (check_swap && is_swap_pte(old_pte)) {
swp_entry_t entry = pte_to_swp_entry(old_pte);
if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) {
set_page_mte_tagged(page);
return;
}
}
if (!pte_is_tagged)
return;
/*
* Test PG_mte_tagged again in case it was racing with another
* set_pte_at().
*/
if (!page_mte_tagged(page)) {
mte_clear_page_tags(page_address(page));
set_page_mte_tagged(page);
}
}
void mte_sync_tags(pte_t old_pte, pte_t pte)
void mte_sync_tags(pte_t pte)
{
struct page *page = pte_page(pte);
long i, nr_pages = compound_nr(page);
bool check_swap = nr_pages == 1;
bool pte_is_tagged = pte_tagged(pte);
/* Early out if there's nothing to do */
if (!check_swap && !pte_is_tagged)
return;
/* if PG_mte_tagged is set, tags have already been initialised */
for (i = 0; i < nr_pages; i++, page++)
if (!page_mte_tagged(page))
mte_sync_page_tags(page, old_pte, check_swap,
pte_is_tagged);
for (i = 0; i < nr_pages; i++, page++) {
if (!page_mte_tagged(page)) {
mte_clear_page_tags(page_address(page));
set_page_mte_tagged(page);
}
}
/* ensure the tags are visible before the PTE is set */
smp_wmb();


@ -1687,6 +1687,7 @@ static void cpu_prepare_hyp_mode(int cpu)
else
params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
params->vttbr = params->vtcr = 0;
params->hfgwtr_el2 = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
/*
* Flush the init params from the data cache because the struct will


@ -11,13 +11,10 @@ int __pkvm_register_hyp_panic_notifier(void (*cb)(struct kvm_cpu_context *));
enum pkvm_psci_notification;
int __pkvm_register_psci_notifier(void (*cb)(enum pkvm_psci_notification, struct kvm_cpu_context *));
int reset_pkvm_priv_hcall_limit(void);
#ifdef CONFIG_MODULES
int __pkvm_init_module(void *module_init);
int __pkvm_register_hcall(unsigned long hfn_hyp_va);
int handle_host_dynamic_hcall(struct kvm_cpu_context *host_ctxt);
int __pkvm_close_late_module_registration(void);
void __pkvm_close_module_registration(void);
#else
static inline int __pkvm_init_module(void *module_init) { return -EOPNOTSUPP; }
@ -27,6 +24,5 @@ static inline int handle_host_dynamic_hcall(struct kvm_cpu_context *host_ctxt)
{
return HCALL_UNHANDLED;
}
static inline int __pkvm_close_late_module_registration(void) { return -EOPNOTSUPP; }
static inline void __pkvm_close_module_registration(void) { }
#endif


@ -1212,12 +1212,6 @@ static void handle___pkvm_register_hcall(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_register_hcall(hfn_hyp_va);
}
static void
handle___pkvm_close_module_registration(struct kvm_cpu_context *host_ctxt)
{
cpu_reg(host_ctxt, 1) = __pkvm_close_late_module_registration();
}
static void handle___pkvm_load_tracing(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned long, pack_hva, host_ctxt, 1);
@ -1290,13 +1284,11 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__pkvm_alloc_module_va),
HANDLE_FUNC(__pkvm_map_module_page),
HANDLE_FUNC(__pkvm_unmap_module_page),
HANDLE_FUNC(__pkvm_init_module),
HANDLE_FUNC(__pkvm_register_hcall),
HANDLE_FUNC(__pkvm_close_module_registration),
HANDLE_FUNC(__pkvm_prot_finalize),
HANDLE_FUNC(__pkvm_host_share_hyp),
@ -1330,22 +1322,6 @@ static const hcall_t host_hcall[] = {
#endif
};
unsigned long pkvm_priv_hcall_limit __ro_after_init = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
int reset_pkvm_priv_hcall_limit(void)
{
unsigned long *addr;
if (pkvm_priv_hcall_limit == __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize)
return -EACCES;
addr = hyp_fixmap_map(__hyp_pa(&pkvm_priv_hcall_limit));
*addr = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
hyp_fixmap_unmap();
return 0;
}
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned long, id, host_ctxt, 0);
@ -1365,7 +1341,7 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
* returns -EPERM after the first call for a given CPU.
*/
if (static_branch_unlikely(&kvm_protected_mode_initialized))
hcall_min = pkvm_priv_hcall_limit;
hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
id -= KVM_HOST_SMCCC_ID(0);


@ -470,8 +470,6 @@ int __pkvm_iommu_finalize(int err)
if (!ret && err)
pkvm_handle_system_misconfiguration(NO_DMA_ISOLATION);
__pkvm_close_late_module_registration();
return ret;
}


@ -77,15 +77,6 @@ void __pkvm_close_module_registration(void)
*/
}
int __pkvm_close_late_module_registration(void)
{
__pkvm_close_module_registration();
return reset_pkvm_priv_hcall_limit();
/* The fuse is blown! No way back until reset */
}
const struct pkvm_module_ops module_ops = {
.create_private_mapping = __pkvm_create_private_mapping,
.alloc_module_va = __pkvm_alloc_module_va,


@ -578,26 +578,14 @@ int pkvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
#ifdef CONFIG_MODULES
static char early_pkvm_modules[COMMAND_LINE_SIZE] __initdata;
static int __init pkvm_enable_module_late_loading(void)
{
extern unsigned long kvm_nvhe_sym(pkvm_priv_hcall_limit);
WARN(1, "Loading pKVM modules with kvm-arm.protected_modules is deprecated\n"
"Use kvm-arm.protected_modules=<module1>,<module2>");
/*
* Move the limit to allow module loading HVCs. It will be moved back to
* its original position in __pkvm_close_module_registration().
*/
kvm_nvhe_sym(pkvm_priv_hcall_limit) = __KVM_HOST_SMCCC_FUNC___pkvm_alloc_module_va;
return 0;
}
static int __init early_pkvm_modules_cfg(char *arg)
{
/*
* Loading pKVM modules with kvm-arm.protected_modules is deprecated
* Use kvm-arm.protected_modules=<module1>,<module2>
*/
if (!arg)
return pkvm_enable_module_late_loading();
return -EINVAL;
strscpy(early_pkvm_modules, arg, COMMAND_LINE_SIZE);
@ -800,7 +788,8 @@ int __pkvm_load_el2_module(struct module *this, unsigned long *token)
int ret, i, secs_first;
size_t offset, size;
if (!is_protected_kvm_enabled())
/* The pKVM hyp only allows loading before it is fully initialized */
if (!is_protected_kvm_enabled() || is_pkvm_initialized())
return -EOPNOTSUPP;
for (i = 0; i < ARRAY_SIZE(secs_map); i++) {


@ -554,6 +554,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
unsigned long vm_flags;
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
unsigned long addr = untagged_addr(far);
#ifdef CONFIG_PER_VMA_LOCK
struct vm_area_struct *vma;
#endif
if (kprobe_page_fault(regs, esr))
return 0;
@ -611,6 +614,36 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
#ifdef CONFIG_PER_VMA_LOCK
if (!(mm_flags & FAULT_FLAG_USER))
goto lock_mmap;
vma = lock_vma_under_rcu(mm, addr);
if (!vma)
goto lock_mmap;
if (!(vma->vm_flags & vm_flags)) {
vma_end_read(vma);
goto lock_mmap;
}
fault = handle_mm_fault(vma, addr & PAGE_MASK,
mm_flags | FAULT_FLAG_VMA_LOCK, regs);
vma_end_read(vma);
if (!(fault & VM_FAULT_RETRY)) {
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
goto no_context;
return 0;
}
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */
/*
* As per x86, we may deadlock here. However, since the kernel only
* validly references user space from well defined areas of the code,
@ -654,6 +687,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
}
mmap_read_unlock(mm);
#ifdef CONFIG_PER_VMA_LOCK
done:
#endif
/*
* Handle the "normal" (no error) case first.
*/
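
The arm64 hunk above, and the matching powerpc, s390 and x86 hunks further down, all add the same per-VMA lock fast path in front of the existing mmap_lock slow path. A condensed sketch of the shape of that path, using the simplified names from this hunk rather than any one architecture's exact handler:

    #ifdef CONFIG_PER_VMA_LOCK
    	vma = lock_vma_under_rcu(mm, addr);	/* RCU walk + per-VMA read lock */
    	if (!vma)
    		goto lock_mmap;			/* fall back to mmap_read_lock() */
    	if (!(vma->vm_flags & vm_flags)) {	/* arch-specific access check */
    		vma_end_read(vma);
    		goto lock_mmap;
    	}
    	fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
    	vma_end_read(vma);
    	if (!(fault & VM_FAULT_RETRY)) {
    		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
    		goto done;			/* handled without taking mmap_lock */
    	}
    	count_vm_vma_lock_event(VMA_LOCK_RETRY);
    	/* signal check, then fall through to the locked path */
    lock_mmap:
    #endif

Only when the VMA cannot be found, the access check fails, or the fault must retry does the handler take the traditional mmap_read_lock() path that follows.
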

View File

@ -24,6 +24,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/kfence.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@ -38,6 +39,7 @@
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
@ -521,12 +523,67 @@ static int __init enable_crash_mem_map(char *arg)
}
early_param("crashkernel", enable_crash_mem_map);
#ifdef CONFIG_KFENCE
bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
/* early_param() will be parsed before map_mem() below. */
static int __init parse_kfence_early_init(char *arg)
{
int val;
if (get_option(&arg, &val))
kfence_early_init = !!val;
return 0;
}
early_param("kfence.sample_interval", parse_kfence_early_init);
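
Because kfence_early_init defaults to CONFIG_KFENCE_SAMPLE_INTERVAL and this early_param() is parsed before map_mem(), the boot command line now decides whether the KFENCE pool is carved out of memblock at boot. For example (the sample interval is in milliseconds):

    kfence.sample_interval=100    reserve the KFENCE pool early and sample roughly every 100 ms
    kfence.sample_interval=0      skip the early memblock reservation for this boot
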
static phys_addr_t __init arm64_kfence_alloc_pool(void)
{
phys_addr_t kfence_pool;
if (!kfence_early_init)
return 0;
kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
if (!kfence_pool) {
pr_err("failed to allocate kfence pool\n");
kfence_early_init = false;
return 0;
}
/* Temporarily mark as NOMAP. */
memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
return kfence_pool;
}
static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
{
if (!kfence_pool)
return;
/* KFENCE pool needs page-level mapping. */
__map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
pgprot_tagged(PAGE_KERNEL),
NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
__kfence_pool = phys_to_virt(kfence_pool);
}
#else /* CONFIG_KFENCE */
static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
#endif /* CONFIG_KFENCE */
static void __init map_mem(pgd_t *pgdp)
{
static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
phys_addr_t kernel_start = __pa_symbol(_stext);
phys_addr_t kernel_end = __pa_symbol(__init_begin);
phys_addr_t start, end;
phys_addr_t early_kfence_pool;
int flags = NO_EXEC_MAPPINGS;
u64 i;
@ -539,6 +596,8 @@ static void __init map_mem(pgd_t *pgdp)
*/
BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
early_kfence_pool = arm64_kfence_alloc_pool();
if (can_set_direct_map())
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
@ -604,6 +663,8 @@ static void __init map_mem(pgd_t *pgdp)
}
}
#endif
arm64_kfence_map_pool(early_kfence_pool, pgdp);
}
void mark_rodata_ro(void)

View File

@ -46,21 +46,19 @@ int mte_save_tags(struct page *page)
return 0;
}
bool mte_restore_tags(swp_entry_t entry, struct page *page)
void mte_restore_tags(swp_entry_t entry, struct page *page)
{
void *tags = xa_load(&mte_pages, entry.val);
if (!tags)
return false;
return;
/*
* Test PG_mte_tagged again in case it was racing with another
* set_pte_at().
* Test PG_mte_tagged in case the tags were restored before
* (e.g. CoW pages).
*/
if (!test_and_set_bit(PG_mte_tagged, &page->flags))
mte_restore_page_tags(page_address(page), tags);
return true;
}
void mte_invalidate_tags(int type, pgoff_t offset)

View File

@ -11,6 +11,7 @@
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/kfence.h>
struct page_change_data {
pgprot_t set_mask;
@ -22,12 +23,14 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
bool can_set_direct_map(void)
{
/*
* rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
* rodata_full and DEBUG_PAGEALLOC require linear map to be
* mapped at page granularity, so that it is possible to
* protect/unprotect single pages.
*
* KFENCE pool requires page-granular mapping if initialized late.
*/
return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
IS_ENABLED(CONFIG_KFENCE);
arm64_kfence_can_set_direct_map();
}
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)

View File

@ -109,7 +109,7 @@ ia64_init_addr_space (void)
vma_set_anonymous(vma);
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
vm_flags_init(vma, VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
@ -127,8 +127,8 @@ ia64_init_addr_space (void)
vma_set_anonymous(vma);
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_init(vma, VM_READ | VM_MAYREAD | VM_IO |
VM_DONTEXPAND | VM_DONTDUMP);
mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
mmap_write_unlock(current->mm);
@ -272,7 +272,7 @@ static int __init gate_vma_init(void)
vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
vm_flags_init(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
return 0;
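
This ia64 hunk is the first of many conversions in this merge from open-coded vma->vm_flags writes to the vm_flags_*() helpers, which exist so that flag updates can be audited against the locking rules that per-VMA locks introduce. A rough mapping, based on the call sites converted below; the example_mmap() callback is hypothetical:

    /* vm_flags_init(vma, f)   - vma->vm_flags = f, for a VMA not yet visible to others */
    /* vm_flags_reset(vma, f)  - vma->vm_flags = f, for an already-installed VMA        */
    /* vm_flags_set(vma, f)    - vma->vm_flags |= f                                     */
    /* vm_flags_clear(vma, f)  - vma->vm_flags &= ~f                                    */
    /* vm_flags_mod(vma, s, c) - set s and clear c in one call                          */

    static int example_mmap(struct file *filp, struct vm_area_struct *vma)
    {
    	/* same shape as the converted driver ->mmap() callbacks later in this merge */
    	vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE);
    	return 0;
    }
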

View File

@ -149,7 +149,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
struct vm_area_struct vma;
vma.vm_mm = tlb->mm;
vma.vm_flags = 0;
vm_flags_init(&vma, 0);
if (tlb->fullmm) {
flush_tlb_mm(tlb->mm);
return;

View File

@ -393,6 +393,7 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
{
unsigned long gfn = memslot->base_gfn;
unsigned long end, start = gfn_to_hva(kvm, gfn);
unsigned long vm_flags;
int ret = 0;
struct vm_area_struct *vma;
int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
@ -409,12 +410,15 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
ret = H_STATE;
break;
}
/* Copy vm_flags to avoid partial modifications in ksm_madvise */
vm_flags = vma->vm_flags;
ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
merge_flag, &vma->vm_flags);
merge_flag, &vm_flags);
if (ret) {
ret = H_STATE;
break;
}
vm_flags_reset(vma, vm_flags);
start = vma->vm_end;
} while (end > vma->vm_end);

View File

@ -325,7 +325,7 @@ static int kvmppc_xive_native_mmap(struct kvm_device *dev,
return -EINVAL;
}
vma->vm_flags |= VM_IO | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
/*

View File

@ -156,7 +156,7 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
* VM_NOHUGEPAGE and split them.
*/
for_each_vma_range(vmi, vma, addr + len) {
vma->vm_flags |= VM_NOHUGEPAGE;
vm_flags_set(vma, VM_NOHUGEPAGE);
walk_page_vma(vma, &subpage_walk_ops, NULL);
}
}

View File

@ -474,6 +474,40 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
if (is_exec)
flags |= FAULT_FLAG_INSTRUCTION;
#ifdef CONFIG_PER_VMA_LOCK
if (!(flags & FAULT_FLAG_USER))
goto lock_mmap;
vma = lock_vma_under_rcu(mm, address);
if (!vma)
goto lock_mmap;
if (unlikely(access_pkey_error(is_write, is_exec,
(error_code & DSISR_KEYFAULT), vma))) {
vma_end_read(vma);
goto lock_mmap;
}
if (unlikely(access_error(is_write, is_exec, vma))) {
vma_end_read(vma);
goto lock_mmap;
}
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
vma_end_read(vma);
if (!(fault & VM_FAULT_RETRY)) {
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
if (fault_signal_pending(fault, regs))
return user_mode(regs) ? 0 : SIGBUS;
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
@ -550,6 +584,9 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
mmap_read_unlock(current->mm);
#ifdef CONFIG_PER_VMA_LOCK
done:
#endif
if (unlikely(fault & VM_FAULT_ERROR))
return mm_fault_error(regs, address, fault);

View File

@ -525,7 +525,7 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
pfn = paste_addr >> PAGE_SHIFT;
/* flags, page_prot from cxl_mmap(), except we want cachable */
vma->vm_flags |= VM_IO | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);

View File

@ -291,7 +291,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
vma->vm_ops = &spufs_mem_mmap_vmops;
@ -381,7 +381,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_cntl_mmap_vmops;
@ -1043,7 +1043,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_signal1_mmap_vmops;
@ -1179,7 +1179,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_signal2_mmap_vmops;
@ -1302,7 +1302,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_mss_mmap_vmops;
@ -1364,7 +1364,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_psmap_mmap_vmops;
@ -1424,7 +1424,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_mfc_mmap_vmops;

View File

@ -16,6 +16,7 @@ config PPC_POWERNV
select PPC_DOORBELL
select MMU_NOTIFIER
select FORCE_SMP
select ARCH_SUPPORTS_PER_VMA_LOCK
default y
config OPAL_PRD

View File

@ -21,6 +21,7 @@ config PPC_PSERIES
select HOTPLUG_CPU
select FORCE_SMP
select SWIOTLB
select ARCH_SUPPORTS_PER_VMA_LOCK
default y
config PARAVIRT

View File

@ -115,6 +115,7 @@ config S390
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT

View File

@ -403,6 +403,30 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
access = VM_WRITE;
if (access == VM_WRITE)
flags |= FAULT_FLAG_WRITE;
#ifdef CONFIG_PER_VMA_LOCK
if (!(flags & FAULT_FLAG_USER))
goto lock_mmap;
vma = lock_vma_under_rcu(mm, address);
if (!vma)
goto lock_mmap;
if (!(vma->vm_flags & access)) {
vma_end_read(vma);
goto lock_mmap;
}
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
vma_end_read(vma);
if (!(fault & VM_FAULT_RETRY)) {
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
goto out;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
fault = VM_FAULT_SIGNAL;
goto out;
}
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */
mmap_read_lock(mm);
gmap = NULL;

View File

@ -2518,8 +2518,7 @@ static inline void thp_split_mm(struct mm_struct *mm)
VMA_ITERATOR(vmi, mm, 0);
for_each_vma(vmi, vma) {
vma->vm_flags &= ~VM_HUGEPAGE;
vma->vm_flags |= VM_NOHUGEPAGE;
vm_flags_mod(vma, VM_NOHUGEPAGE, VM_HUGEPAGE);
walk_page_vma(vma, &thp_split_walk_ops, NULL);
}
mm->def_flags |= VM_NOHUGEPAGE;
@ -2584,14 +2583,18 @@ int gmap_mark_unmergeable(void)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long vm_flags;
int ret;
VMA_ITERATOR(vmi, mm, 0);
for_each_vma(vmi, vma) {
/* Copy vm_flags to avoid partial modifications in ksm_madvise */
vm_flags = vma->vm_flags;
ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
MADV_UNMERGEABLE, &vma->vm_flags);
MADV_UNMERGEABLE, &vm_flags);
if (ret)
return ret;
vm_flags_reset(vma, vm_flags);
}
mm->def_flags &= ~VM_MERGEABLE;
return 0;

View File

@ -27,6 +27,7 @@ config X86_64
# Options that are inherently 64-bit kernel only:
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA

View File

@ -61,6 +61,7 @@ CONFIG_NR_CPUS=32
CONFIG_EFI=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttynull stack_depot_disable=on cgroup_disable=pressure bootconfig"
CONFIG_HIBERNATION=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
@ -312,6 +313,7 @@ CONFIG_IFB=y
CONFIG_MACSEC=y
CONFIG_TUN=y
CONFIG_VETH=y
CONFIG_LED_TRIGGER_PHY=y
CONFIG_AX88796B_PHY=y
CONFIG_CAN_VCAN=m
CONFIG_CAN_SLCAN=m
@ -646,6 +648,7 @@ CONFIG_UBSAN_LOCAL_BOUNDS=y
# CONFIG_UBSAN_BOOL is not set
# CONFIG_UBSAN_ENUM is not set
CONFIG_PAGE_OWNER=y
CONFIG_PER_VMA_LOCK_STATS=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_KFENCE=y

View File

@ -391,7 +391,7 @@ void __init map_vsyscall(void)
}
if (vsyscall_mode == XONLY)
gate_vma.vm_flags = VM_EXEC;
vm_flags_init(&gate_vma, VM_EXEC);
BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
(unsigned long)VSYSCALL_ADDR);

View File

@ -95,7 +95,7 @@ static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
vma->vm_ops = &sgx_vm_ops;
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
vma->vm_private_data = encl;
return 0;

View File

@ -105,7 +105,7 @@ static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_ops = &sgx_vepc_vm_ops;
/* Don't copy VMA in fork() */
vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY;
vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY);
vma->vm_private_data = vepc;
return 0;

View File

@ -19,6 +19,7 @@
#include <linux/uaccess.h> /* faulthandler_disabled() */
#include <linux/efi.h> /* efi_crash_gracefully_on_page_fault()*/
#include <linux/mm_types.h>
#include <linux/mm.h> /* find_and_lock_vma() */
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
#include <asm/traps.h> /* dotraplinkage, ... */
@ -1354,6 +1355,38 @@ void do_user_addr_fault(struct pt_regs *regs,
}
#endif
#ifdef CONFIG_PER_VMA_LOCK
if (!(flags & FAULT_FLAG_USER))
goto lock_mmap;
vma = lock_vma_under_rcu(mm, address);
if (!vma)
goto lock_mmap;
if (unlikely(access_error(error_code, vma))) {
vma_end_read(vma);
goto lock_mmap;
}
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
vma_end_read(vma);
if (!(fault & VM_FAULT_RETRY)) {
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
kernelmode_fixup_or_oops(regs, error_code, address,
SIGBUS, BUS_ADRERR,
ARCH_DEFAULT_PKEY);
return;
}
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */
/*
* Kernel-mode access to the user address space should only occur
* on well-defined single instructions listed in the exception
@ -1454,6 +1487,9 @@ void do_user_addr_fault(struct pt_regs *regs,
}
mmap_read_unlock(mm);
#ifdef CONFIG_PER_VMA_LOCK
done:
#endif
if (likely(!(fault & VM_FAULT_ERROR)))
return;

View File

@ -1047,7 +1047,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
ret = reserve_pfn_range(paddr, size, prot, 0);
if (ret == 0 && vma)
vma->vm_flags |= VM_PAT;
vm_flags_set(vma, VM_PAT);
return ret;
}
@ -1093,7 +1093,7 @@ void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
* can be for the entire vma (in which case pfn, size are zero).
*/
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size)
unsigned long size, bool mm_wr_locked)
{
resource_size_t paddr;
unsigned long prot;
@ -1112,8 +1112,12 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
size = vma->vm_end - vma->vm_start;
}
free_pfn_range(paddr, size);
if (vma)
vma->vm_flags &= ~VM_PAT;
if (vma) {
if (mm_wr_locked)
vm_flags_clear(vma, VM_PAT);
else
__vm_flags_mod(vma, 0, VM_PAT);
}
}
/*
@ -1123,7 +1127,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
*/
void untrack_pfn_moved(struct vm_area_struct *vma)
{
vma->vm_flags &= ~VM_PAT;
vm_flags_clear(vma, VM_PAT);
}
pgprot_t pgprot_writecombine(pgprot_t prot)

View File

@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
vm_flags_init(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
gate_vma.vm_page_prot = PAGE_READONLY;
return 0;

View File

@ -58,6 +58,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_getrq);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_issue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_merge);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_requeue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_complete);
DEFINE_IDA(blk_queue_ida);
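
Exporting these tracepoints lets a GKI vendor module attach probes to the block layer without adding new hooks. A minimal module sketch, assuming the 6.1 prototype of block_rq_issue is TP_PROTO(struct request *rq) (probe callbacks take an extra leading context pointer):

    #include <linux/module.h>
    #include <linux/blk-mq.h>
    #include <trace/events/block.h>

    static void probe_rq_issue(void *ignore, struct request *rq)
    {
    	pr_debug("rq issue: %u sectors\n", blk_rq_sectors(rq));
    }

    static int __init rq_probe_init(void)
    {
    	return register_trace_block_rq_issue(probe_rq_issue, NULL);
    }

    static void __exit rq_probe_exit(void)
    {
    	unregister_trace_block_rq_issue(probe_rq_issue, NULL);
    	tracepoint_synchronize_unregister();
    }

    module_init(rq_probe_init);
    module_exit(rq_probe_exit);
    MODULE_LICENSE("GPL");	/* required, the tracepoints are exported _GPL */
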

View File

@ -4,6 +4,7 @@
#include <linux/percpu.h>
#include <linux/hashtable.h>
#include <linux/android_kabi.h>
struct io_cq;
struct elevator_type;
@ -46,6 +47,11 @@ struct elevator_mq_ops {
struct request *(*next_request)(struct request_queue *, struct request *);
void (*init_icq)(struct io_cq *);
void (*exit_icq)(struct io_cq *);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
#define ELV_NAME_MAX (16)
@ -82,6 +88,9 @@ struct elevator_type
/* managed by elevator core */
char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
struct list_head list;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
#define ELV_HASH_BITS 6
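
The ANDROID_KABI_RESERVE() slots are padding burned into these structs before the KMI freeze so that later fixes can add members without changing struct size or layout. A sketch of the usual lifecycle, assuming the macros from include/linux/android_kabi.h and a hypothetical struct:

    #include <linux/android_kabi.h>

    struct bio;

    struct example_ops {			/* hypothetical, for illustration */
    	void (*submit)(struct bio *bio);
    	ANDROID_KABI_RESERVE(1);		/* roughly: u64 android_kabi_reserved1; */
    	ANDROID_KABI_RESERVE(2);
    };

    /*
     * After the freeze, a KMI-compatible fix can repurpose a slot instead of
     * growing the struct:
     *	ANDROID_KABI_USE(1, void (*poll)(struct bio *bio));
     * which overlays the new member on android_kabi_reserved1 with a size check.
     */
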

View File

@ -1,6 +1,6 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.constants
KMI_GENERATION=7
KMI_GENERATION=8
LLVM=1
DEPMOD=depmod

View File

@ -310,7 +310,7 @@ pfrt_log_mmap(struct file *file, struct vm_area_struct *vma)
return -EROFS;
/* changing from read to write with mprotect is not allowed */
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
pfrt_log_dev = to_pfrt_log_dev(file);

View File

@ -66,6 +66,7 @@
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>
#include <linux/android_vendor.h>
#include <uapi/linux/sched/types.h>
@ -3113,6 +3114,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
ktime_t t_start_time = ktime_get();
char *secctx = NULL;
u32 secctx_sz = 0;
struct list_head sgc_head;
@ -3358,6 +3360,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
t->debug_id = t_debug_id;
t->start_time = t_start_time;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
@ -3382,6 +3385,8 @@ static void binder_transaction(struct binder_proc *proc,
t->from = thread;
else
t->from = NULL;
t->from_pid = proc->pid;
t->from_tid = thread->pid;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
@ -5823,8 +5828,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
return -EPERM;
}
vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
@ -6181,18 +6185,20 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
{
struct binder_proc *to_proc;
struct binder_buffer *buffer = t->buffer;
ktime_t current_time = ktime_get();
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
"%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
"%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d elapsed %lldms",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
t->from_pid,
t->from_tid,
to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
t->code, t->flags, t->priority.sched_policy,
t->priority.prio, t->need_reply);
t->priority.prio, t->need_reply,
ktime_ms_delta(current_time, t->start_time));
spin_unlock(&t->lock);
if (proc != to_proc) {

View File

@ -552,6 +552,8 @@ struct binder_transaction {
int debug_id;
struct binder_work work;
struct binder_thread *from;
pid_t from_pid;
pid_t from_tid;
struct binder_transaction *from_parent;
struct binder_proc *to_proc;
struct binder_thread *to_thread;
@ -567,6 +569,7 @@ struct binder_transaction {
bool set_priority_called;
bool is_nested;
kuid_t sender_euid;
ktime_t start_time;
struct list_head fd_fixups;
binder_uintptr_t security_ctx;
/**

View File

@ -29,6 +29,7 @@
#include <trace/hooks/preemptirq.h>
#include <trace/hooks/ftrace_dump.h>
#include <trace/hooks/ufshcd.h>
#include <trace/hooks/mmc.h>
#include <trace/hooks/cgroup.h>
#include <trace/hooks/sys.h>
#include <trace/hooks/iommu.h>
@ -62,7 +63,9 @@
#include <trace/hooks/typec.h>
#include <trace/hooks/user.h>
#include <trace/hooks/signal.h>
#include <trace/hooks/psci.h>
#include <trace/hooks/psi.h>
#include <trace/hooks/bl_hib.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
@ -116,6 +119,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_disable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_enable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_attach);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_online);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_meminfo_cache_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_mem_available_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_meminfo_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_oops_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_oops_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_size_check);
@ -225,6 +231,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_panic_unhandled);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_arm64_serror_panic);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_disable_thermal_cooling_stats);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_v3_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_connect);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_audio_usb_offload_disconnect);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kswapd_per_node);
@ -248,6 +255,11 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_read_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_preset);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_id_remove);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_offline);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_online);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_alloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_alloc_new_buf_locked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_reply);
@ -258,9 +270,16 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_slab_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_shrink_slab);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_page_wmark);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_add_new_anon_rmap);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_psci_tos_resident_on);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_psci_cpu_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_group);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_acct_update_power);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_modify_scan_control);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_should_continue_reclaim);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_file_is_tiny_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_mmc_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_mmc_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_signal);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node_memcgs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_modify_thermal_request_freq);
@ -268,3 +287,22 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_modify_thermal_target_freq);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_new_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_del_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_thermal_register);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_thermal_unregister);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_thermal_zone_device);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_thermal_power_cap);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_reclaim_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_failure_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_pageout_swap_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_swapin_walk_pmd_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_process_madvise_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_smaps_pte_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_smap);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ctl_dirty_rate);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_hibernation_swap);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_cpu_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_hib_resume_bdev);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_encrypt_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_init_aes_encrypt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_skip_swap_map_write);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_post_image_save);

View File

@ -206,7 +206,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
refcount_set(&vdata->refcnt, 1);
vma->vm_private_data = vdata;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
if (vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;

View File

@ -32,6 +32,7 @@
#include <linux/units.h>
#include <trace/events/power.h>
#include <trace/hooks/cpufreq.h>
#include <trace/hooks/thermal.h>
static LIST_HEAD(cpufreq_policy_list);
@ -1537,8 +1538,10 @@ static int cpufreq_online(unsigned int cpu)
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
if (cpufreq_thermal_control_enabled(cpufreq_driver))
if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
policy->cdev = of_cpufreq_cooling_register(policy);
trace_android_vh_thermal_register(policy);
}
pr_debug("initialization complete\n");
@ -1623,6 +1626,7 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
cpufreq_cooling_unregister(policy->cdev);
trace_android_vh_thermal_unregister(policy);
policy->cdev = NULL;
}

View File

@ -3333,7 +3333,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
return -EINVAL;
}
vma->vm_flags |= VM_IO;
vm_flags_set(vma, VM_IO);
return remap_pfn_range(vma, vma->vm_start,
phys_base >> PAGE_SHIFT,

View File

@ -308,7 +308,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
return rc;
vma->vm_ops = &dax_vm_ops;
vma->vm_flags |= VM_HUGEPAGE;
vm_flags_set(vma, VM_HUGEPAGE);
return 0;
}

View File

@ -201,7 +201,7 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
if (rc < 0)
return rc;
vma->vm_flags |= VM_DONTCOPY;
vm_flags_set(vma, VM_DONTCOPY);
pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

View File

@ -28,6 +28,7 @@
#include <asm/system_misc.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <trace/hooks/psci.h>
/*
* While a 64-bit OS can make calls with SMC32 calling conventions, for some
@ -53,6 +54,12 @@ static enum arm_smccc_conduit psci_conduit = SMCCC_CONDUIT_NONE;
bool psci_tos_resident_on(int cpu)
{
bool resident = false;
trace_android_rvh_psci_tos_resident_on(cpu, &resident);
if (resident)
return resident;
return cpu == resident_cpu;
}
@ -175,6 +182,11 @@ static __always_inline int
__psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
{
int err;
bool deny = false;
trace_android_rvh_psci_cpu_suspend(state, &deny);
if (deny)
return -EPERM;
err = invoke_psci_fn(fn, state, entry_point, 0);
return psci_to_linux_errno(err);
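
These restricted vendor hooks let a vendor module observe or veto PSCI decisions: the TOS-resident query and CPU suspend entry. A minimal registration sketch, assuming the hook prototypes mirror the call sites above ((int cpu, bool *resident) and (u32 state, bool *deny)); restricted hooks are meant to be registered once and not unregistered:

    #include <linux/module.h>
    #include <trace/hooks/psci.h>

    /* Called from __psci_cpu_suspend(); setting *deny makes it return -EPERM
     * without invoking the SMC. */
    static void vendor_psci_cpu_suspend(void *unused, u32 state, bool *deny)
    {
    	/* vendor policy goes here, e.g. block deep states during hibernation save */
    	*deny = false;
    }

    static int __init vendor_psci_hooks_init(void)
    {
    	return register_trace_android_rvh_psci_cpu_suspend(vendor_psci_cpu_suspend, NULL);
    }
    module_init(vendor_psci_hooks_init);
    MODULE_LICENSE("GPL");
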

View File

@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/rwsem.h>
#include <linux/android_kabi.h>
#define GPIOCHIP_NAME "gpiochip"
@ -75,6 +76,7 @@ struct gpio_device {
*/
struct list_head pin_ranges;
#endif
ANDROID_KABI_RESERVE(1);
};
/* gpio suffixes used for ACPI and device tree lookup */
@ -100,6 +102,7 @@ struct gpio_array {
struct gpio_chip *chip;
unsigned long *get_mask;
unsigned long *set_mask;
ANDROID_KABI_RESERVE(1);
unsigned long invert_mask[];
};
@ -179,6 +182,7 @@ struct gpio_desc {
/* debounce period in microseconds */
unsigned int debounce_period_us;
#endif
ANDROID_KABI_RESERVE(1);
};
#define gpiod_not_found(desc) (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)

View File

@ -256,7 +256,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
*/
if (is_cow_mapping(vma->vm_flags) &&
!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
return drm_gem_ttm_mmap(obj, vma);
}

View File

@ -2888,8 +2888,8 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
address = dev->adev->rmmio_remap.bus_addr;
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

View File

@ -159,8 +159,8 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
address = kfd_get_process_doorbells(pdd);
if (!address)
return -ENOMEM;
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

View File

@ -1049,8 +1049,8 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
pfn = __pa(page->kernel_address);
pfn >>= PAGE_SHIFT;
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
| VM_DONTDUMP | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
| VM_DONTDUMP | VM_PFNMAP);
pr_debug("Mapping signal page\n");
pr_debug(" start user address == 0x%08lx\n", vma->vm_start);

View File

@ -2031,8 +2031,8 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
return -ENOMEM;
}
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
/* Mapping pages to user process */
return remap_pfn_range(vma, vma->vm_start,
PFN_DOWN(__pa(qpd->cwsr_kaddr)),

View File

@ -1047,7 +1047,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
goto err_drm_gem_object_put;
}
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
}

View File

@ -530,8 +530,7 @@ int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *
* the whole buffer.
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_DONTEXPAND;
vm_flags_mod(vma, VM_DONTEXPAND, VM_PFNMAP);
if (dma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

View File

@ -636,7 +636,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
if (ret)
return ret;
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

View File

@ -476,7 +476,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
@ -492,7 +492,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_dma_ops;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
@ -560,7 +560,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
@ -628,7 +628,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
default:
return -EINVAL; /* This should never happen. */
}
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;

View File

@ -130,7 +130,7 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
{
pgprot_t vm_page_prot;
vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
vm_page_prot = vm_get_page_prot(vma->vm_flags);

View File

@ -274,7 +274,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
unsigned long vm_size;
int ret;
vma->vm_flags &= ~VM_PFNMAP;
vm_flags_clear(vma, VM_PFNMAP);
vma->vm_pgoff = 0;
vm_size = vma->vm_end - vma->vm_start;
@ -368,7 +368,7 @@ static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct
if (obj->import_attach)
return dma_buf_mmap(obj->dma_buf, vma, 0);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
exynos_gem->flags);

View File

@ -139,7 +139,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
*/
vma->vm_ops = &psbfb_vm_ops;
vma->vm_private_data = (void *)fb;
vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}

View File

@ -102,7 +102,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
buf = dev_priv->mmap_buffer;
buf_priv = buf->dev_private;
vma->vm_flags |= VM_DONTCOPY;
vm_flags_set(vma, VM_DONTCOPY);
buf_priv->currently_mapped = I810_BUF_MAPPED;

View File

@ -979,7 +979,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
i915_gem_object_put(obj);
return -EINVAL;
}
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
}
anon = mmap_singleton(to_i915(dev));
@ -988,7 +988,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return PTR_ERR(anon);
}
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
/*
* We keep the ref on mmo->obj, not vm_file, but we require

View File

@ -158,7 +158,7 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
* dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

View File

@ -1012,7 +1012,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;

View File

@ -543,8 +543,7 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
if (omap_obj->flags & OMAP_BO_WC) {
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

View File

@ -250,8 +250,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
* We allocated a struct page table for rk_obj, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_flags &= ~VM_PFNMAP;
vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

View File

@ -573,7 +573,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
* and set the vm_pgoff (used as a fake buffer offset by DRM)
* to 0 as we want to map the whole buffer.
*/
vma->vm_flags &= ~VM_PFNMAP;
vm_flags_clear(vma, VM_PFNMAP);
vma->vm_pgoff = 0;
err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
@ -587,8 +587,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
} else {
pgprot_t prot = vm_get_page_prot(vma->vm_flags);
vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags &= ~VM_PFNMAP;
vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
vma->vm_page_prot = pgprot_writecombine(prot);
}

View File

@ -468,8 +468,7 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
vma->vm_private_data = bo;
vma->vm_flags |= VM_PFNMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);

View File

@ -46,7 +46,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
return -EINVAL;
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_ops = &virtio_gpu_vram_vm_ops;

View File

@ -97,7 +97,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
/* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
if (!is_cow_mapping(vma->vm_flags))
vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
vm_flags_mod(vma, VM_PFNMAP, VM_MIXEDMAP);
ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */

View File

@ -70,8 +70,7 @@ static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
* vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
* the whole buffer.
*/
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
vma->vm_pgoff = 0;
/*

View File

@ -1264,7 +1264,7 @@ static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
if (vma_pages(vma) != 1)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
vm_flags_set(vma, VM_IO | VM_DONTDUMP | VM_DONTEXPAND);
vma->vm_ops = &cs_char_vm_ops;
vma->vm_private_data = file->private_data;

View File

@ -1659,7 +1659,7 @@ static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
atomic_dec(&msc->user_count);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY);
vma->vm_ops = &msc_mmap_ops;
return ret;
}

View File

@ -715,7 +715,7 @@ static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
pm_runtime_get_sync(&stm->dev);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &stm_mmap_vmops;
vm_iomap_memory(vma, phys, size);

View File

@ -403,7 +403,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
ret = -EPERM;
goto done;
}
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
addr = vma->vm_start;
for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
memlen = uctxt->egrbufs.buffers[i].len;
@ -528,7 +528,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
goto done;
}
vma->vm_flags = flags;
vm_flags_reset(vma, flags);
hfi1_cdbg(PROC,
"%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
ctxt, subctxt, type, mapio, vmf, memaddr, memlen,

View File

@ -2091,7 +2091,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
if (vma->vm_flags & (VM_WRITE | VM_EXEC))
return -EPERM;
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
if (!dev->mdev->clock_info)
return -EOPNOTSUPP;
@ -2315,7 +2315,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
if (vma->vm_flags & VM_WRITE)
return -EPERM;
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
/* Don't expose to user-space information it shouldn't have */
if (PAGE_SIZE > 4096)

View File

@ -733,7 +733,7 @@ static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
}
/* don't allow them to later change with mprotect */
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
}
pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
@ -769,7 +769,7 @@ static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
phys = dd->physaddr + ureg;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
ret = io_remap_pfn_range(vma, vma->vm_start,
phys >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
@ -810,8 +810,7 @@ static int mmap_piobufs(struct vm_area_struct *vma,
* don't allow them to later change to readable with mprotect (for when
* not initially mapped readable, as is normally the case)
*/
vma->vm_flags &= ~VM_MAYREAD;
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND, VM_MAYREAD);
/* We used PAT if wc_cookie == 0 */
if (!dd->wc_cookie)
@ -852,7 +851,7 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
goto bail;
}
/* don't allow them to later change to writable with mprotect */
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
start = vma->vm_start;
@ -944,7 +943,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
* Don't allow permission to later change to writable
* with mprotect.
*/
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
} else
goto bail;
len = vma->vm_end - vma->vm_start;
@ -955,7 +954,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
vma->vm_ops = &qib_file_vm_ops;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
ret = 1;
bail:

View File

@ -672,7 +672,7 @@ int usnic_ib_mmap(struct ib_ucontext *context,
usnic_dbg("\n");
us_ibdev = to_usdev(context->device);
vma->vm_flags |= VM_IO;
vm_flags_set(vma, VM_IO);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vfid = vma->vm_pgoff;
usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",

View File

@ -408,7 +408,7 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
}
/* Map UAR to kernel space, VM_LOCKED? */
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
vma->vm_page_prot))

View File

@ -211,10 +211,11 @@ static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
}
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
void gic_v3_dist_wait_for_rwp(void)
{
gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}
EXPORT_SYMBOL_GPL(gic_v3_dist_wait_for_rwp);
/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
@ -361,7 +362,7 @@ static void gic_mask_irq(struct irq_data *d)
if (gic_irq_in_rdist(d))
gic_redist_wait_for_rwp();
else
gic_dist_wait_for_rwp();
gic_v3_dist_wait_for_rwp();
}
static void gic_eoimode1_mask_irq(struct irq_data *d)
@ -817,7 +818,7 @@ static bool gic_has_group0(void)
return val != 0;
}
static void __init gic_dist_init(void)
void gic_v3_dist_init(void)
{
unsigned int i;
u64 affinity;
@ -826,7 +827,7 @@ static void __init gic_dist_init(void)
/* Disable the distributor */
writel_relaxed(0, base + GICD_CTLR);
gic_dist_wait_for_rwp();
gic_v3_dist_wait_for_rwp();
/*
* Configure SPIs as non-secure Group-1. This will only matter
@ -863,7 +864,7 @@ static void __init gic_dist_init(void)
/* Enable distributor with ARE, Group1, and wait for it to drain */
writel_relaxed(val, base + GICD_CTLR);
gic_dist_wait_for_rwp();
gic_v3_dist_wait_for_rwp();
/*
* Set all global interrupts to the boot CPU only. ARE must be
@ -880,6 +881,7 @@ static void __init gic_dist_init(void)
gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}
}
EXPORT_SYMBOL_GPL(gic_v3_dist_init);
static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
@ -1175,7 +1177,7 @@ static int gic_dist_supports_lpis(void)
!gicv3_nolpi);
}
static void gic_cpu_init(void)
void gic_v3_cpu_init(void)
{
void __iomem *rbase;
int i;
@ -1202,6 +1204,7 @@ static void gic_cpu_init(void)
/* initialise system registers */
gic_cpu_sys_reg_init();
}
EXPORT_SYMBOL_GPL(gic_v3_cpu_init);
#ifdef CONFIG_SMP
@ -1210,7 +1213,7 @@ static void gic_cpu_init(void)
static int gic_starting_cpu(unsigned int cpu)
{
gic_cpu_init();
gic_v3_cpu_init();
if (gic_dist_supports_lpis())
its_cpu_init();
@ -1396,13 +1399,21 @@ static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
#ifdef CONFIG_PM
static void gic_resume(void)
void gic_v3_resume(void)
{
trace_android_vh_gic_resume(&gic_data);
}
EXPORT_SYMBOL_GPL(gic_v3_resume);
static int gic_v3_suspend(void)
{
trace_android_vh_gic_v3_suspend(&gic_data);
return 0;
}
static struct syscore_ops gic_syscore_ops = {
.resume = gic_resume,
.resume = gic_v3_resume,
.suspend = gic_v3_suspend,
};
static void gic_syscore_init(void)
@ -1412,6 +1423,8 @@ static void gic_syscore_init(void)
#else
static inline void gic_syscore_init(void) { }
void gic_v3_resume(void) { }
static int gic_v3_suspend(void) { return 0; }
#endif
@ -1899,8 +1912,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_update_rdist_properties();
gic_dist_init();
gic_cpu_init();
gic_v3_dist_init();
gic_v3_cpu_init();
gic_smp_init();
gic_cpu_pm_init();
gic_syscore_init();

View File

@ -292,7 +292,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
return ret;
}
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_private_data = &buf->handler;
vma->vm_ops = &vb2_common_vm_ops;

View File

@ -182,7 +182,7 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
/*
* Make sure that vm_areas for 2 buffers won't be merged together
*/
vma->vm_flags |= VM_DONTEXPAND;
vm_flags_set(vma, VM_DONTEXPAND);
/*
* Use common vm_area operations to track buffer refcount.

View File

@ -314,7 +314,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
}
vma->vm_ops = &videobuf_vm_ops;
vma->vm_flags |= VM_DONTEXPAND;
vm_flags_set(vma, VM_DONTEXPAND);
vma->vm_private_data = map;
dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",

View File

@ -634,8 +634,8 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
map->count = 1;
map->q = q;
vma->vm_ops = &videobuf_vm_ops;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
/* using shared anonymous pages */
vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
vma->vm_private_data = map;
dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);

Some files were not shown because too many files have changed in this diff.