Merge "Merge keystone/android14-6.1-keystone-qcom-release.6.1.25 (8823053
) into qcom-6.1"
This commit is contained in:
commit
bf01a58857
@ -1 +1 @@
|
||||
6da02f91017480cf77c492c9de0fa145e4f13728
|
||||
1c4d2aa0c7120ab303d98f5fdacefd13a4e8ced2
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1057,6 +1057,7 @@
|
||||
dma_buf_unmap_attachment
|
||||
dma_get_sgtable_attrs
|
||||
down_read
|
||||
down_write
|
||||
find_vma
|
||||
__mmap_lock_do_trace_acquire_returned
|
||||
__mmap_lock_do_trace_released
|
||||
@ -1068,6 +1069,7 @@
|
||||
__tracepoint_mmap_lock_released
|
||||
__tracepoint_mmap_lock_start_locking
|
||||
up_read
|
||||
up_write
|
||||
|
||||
# required by gpi.ko
|
||||
krealloc
|
||||
@ -2043,7 +2045,7 @@
|
||||
ufshcd_init_pwr_dev_param
|
||||
ufshcd_mcq_config_esi
|
||||
ufshcd_mcq_enable_esi
|
||||
ufshcd_mcq_poll_cqe_nolock
|
||||
ufshcd_mcq_poll_cqe_lock
|
||||
ufshcd_mcq_write_cqis
|
||||
ufshcd_pltfrm_init
|
||||
ufshcd_pltfrm_shutdown
|
||||
@ -2101,3 +2103,4 @@
|
||||
fwnode_get_name
|
||||
icc_provider_add
|
||||
icc_provider_del
|
||||
ufshcd_mcq_poll_cqe_nolock
|
||||
|
@ -107,9 +107,6 @@
|
||||
crc32_le
|
||||
crypto_alloc_shash
|
||||
crypto_destroy_tfm
|
||||
crypto_shash_digest
|
||||
crypto_shash_update
|
||||
csum_partial
|
||||
_ctype
|
||||
debugfs_create_bool
|
||||
debugfs_create_dir
|
||||
@ -123,9 +120,6 @@
|
||||
del_timer
|
||||
del_timer_sync
|
||||
destroy_workqueue
|
||||
dev_addr_mod
|
||||
dev_alloc_name
|
||||
dev_close
|
||||
dev_driver_string
|
||||
_dev_emerg
|
||||
_dev_err
|
||||
@ -201,8 +195,6 @@
|
||||
dev_pm_qos_read_value
|
||||
dev_pm_qos_remove_request
|
||||
dev_pm_qos_update_request
|
||||
_dev_printk
|
||||
__dev_queue_xmit
|
||||
devres_add
|
||||
__devres_alloc_node
|
||||
dev_set_name
|
||||
@ -328,7 +320,6 @@
|
||||
drm_vblank_init
|
||||
dump_stack
|
||||
enable_irq
|
||||
ether_setup
|
||||
eth_type_trans
|
||||
fd_install
|
||||
_find_first_bit
|
||||
@ -348,7 +339,6 @@
|
||||
free_netdev
|
||||
__free_pages
|
||||
free_pages
|
||||
free_percpu
|
||||
freq_qos_update_request
|
||||
gcd
|
||||
generic_file_llseek
|
||||
@ -415,10 +405,6 @@
|
||||
ida_alloc_range
|
||||
ida_free
|
||||
idr_alloc
|
||||
idr_destroy
|
||||
idr_find
|
||||
idr_for_each
|
||||
idr_get_next
|
||||
idr_remove
|
||||
init_dummy_netdev
|
||||
init_net
|
||||
@ -468,7 +454,6 @@
|
||||
__kfifo_to_user
|
||||
kfree
|
||||
kfree_const
|
||||
kfree_sensitive
|
||||
kfree_skb_reason
|
||||
kimage_voffset
|
||||
__kmalloc
|
||||
@ -521,7 +506,6 @@
|
||||
__list_add_valid
|
||||
__list_del_entry_valid
|
||||
list_sort
|
||||
__local_bh_enable_ip
|
||||
log_post_read_mmio
|
||||
log_post_write_mmio
|
||||
log_read_mmio
|
||||
@ -561,12 +545,9 @@
|
||||
__napi_schedule
|
||||
napi_schedule_prep
|
||||
__netdev_alloc_skb
|
||||
netif_carrier_off
|
||||
netif_carrier_on
|
||||
netif_napi_add_weight
|
||||
netif_receive_skb
|
||||
netif_rx
|
||||
netif_tx_stop_all_queues
|
||||
netif_tx_wake_queue
|
||||
nla_memcpy
|
||||
nla_put
|
||||
@ -703,7 +684,6 @@
|
||||
proc_remove
|
||||
proc_set_size
|
||||
proc_set_user
|
||||
__pskb_pull_tail
|
||||
put_device
|
||||
__put_task_struct
|
||||
put_unused_fd
|
||||
@ -740,10 +720,7 @@
|
||||
__register_chrdev
|
||||
register_chrdev_region
|
||||
register_die_notifier
|
||||
register_inet6addr_notifier
|
||||
register_inetaddr_notifier
|
||||
register_netdevice
|
||||
register_netdevice_notifier
|
||||
register_pm_notifier
|
||||
register_reboot_notifier
|
||||
register_restart_handler
|
||||
@ -810,8 +787,6 @@
|
||||
simple_write_to_buffer
|
||||
single_open
|
||||
single_release
|
||||
skb_clone
|
||||
skb_copy
|
||||
skb_copy_bits
|
||||
skb_copy_expand
|
||||
skb_dequeue
|
||||
@ -884,7 +859,6 @@
|
||||
strsep
|
||||
strstr
|
||||
subsys_system_register
|
||||
__sw_hweight16
|
||||
__sw_hweight32
|
||||
__sw_hweight64
|
||||
__sw_hweight8
|
||||
@ -904,7 +878,6 @@
|
||||
sysfs_remove_file_ns
|
||||
sysfs_remove_group
|
||||
sysfs_remove_link
|
||||
system_freezable_wq
|
||||
system_highpri_wq
|
||||
system_long_wq
|
||||
system_state
|
||||
@ -915,7 +888,6 @@
|
||||
tasklet_init
|
||||
tasklet_kill
|
||||
__tasklet_schedule
|
||||
tasklet_setup
|
||||
thermal_of_cooling_device_register
|
||||
thermal_zone_device_update
|
||||
thermal_zone_get_temp
|
||||
@ -932,6 +904,7 @@
|
||||
__traceiter_android_vh_cpu_idle_exit
|
||||
__traceiter_android_vh_cpuidle_psci_enter
|
||||
__traceiter_android_vh_cpuidle_psci_exit
|
||||
__traceiter_android_vh_show_mem
|
||||
__traceiter_device_pm_callback_end
|
||||
__traceiter_device_pm_callback_start
|
||||
__traceiter_suspend_resume
|
||||
@ -939,6 +912,7 @@
|
||||
__tracepoint_android_vh_cpu_idle_exit
|
||||
__tracepoint_android_vh_cpuidle_psci_enter
|
||||
__tracepoint_android_vh_cpuidle_psci_exit
|
||||
__tracepoint_android_vh_show_mem
|
||||
__tracepoint_device_pm_callback_end
|
||||
__tracepoint_device_pm_callback_start
|
||||
tracepoint_probe_register
|
||||
@ -952,9 +926,6 @@
|
||||
unpin_user_page
|
||||
__unregister_chrdev
|
||||
unregister_chrdev_region
|
||||
unregister_inet6addr_notifier
|
||||
unregister_inetaddr_notifier
|
||||
unregister_netdevice_notifier
|
||||
unregister_netdevice_queue
|
||||
unregister_pm_notifier
|
||||
unregister_reboot_notifier
|
||||
@ -1044,8 +1015,10 @@
|
||||
|
||||
# required by cfg80211.ko
|
||||
bpf_trace_run10
|
||||
csum_partial
|
||||
debugfs_rename
|
||||
__dev_change_net_namespace
|
||||
dev_close
|
||||
__dev_get_by_index
|
||||
dev_get_by_index
|
||||
device_add
|
||||
@ -1059,6 +1032,7 @@
|
||||
key_create_or_update
|
||||
key_put
|
||||
keyring_alloc
|
||||
kfree_sensitive
|
||||
ktime_get_coarse_with_offset
|
||||
netdev_err
|
||||
netlink_broadcast
|
||||
@ -1070,7 +1044,9 @@
|
||||
__nla_parse
|
||||
nla_put_64bit
|
||||
__nla_validate
|
||||
__pskb_pull_tail
|
||||
__put_net
|
||||
register_netdevice_notifier
|
||||
register_pernet_device
|
||||
rfkill_alloc
|
||||
rfkill_blocked
|
||||
@ -1082,7 +1058,9 @@
|
||||
skb_add_rx_frag
|
||||
__sock_create
|
||||
sock_release
|
||||
__sw_hweight16
|
||||
system_power_efficient_wq
|
||||
unregister_netdevice_notifier
|
||||
unregister_pernet_device
|
||||
verify_pkcs7_signature
|
||||
wireless_nlevent_flush
|
||||
@ -1113,6 +1091,7 @@
|
||||
regmap_raw_write_async
|
||||
|
||||
# required by drm_display_helper.ko
|
||||
_dev_printk
|
||||
drm_atomic_get_new_private_obj_state
|
||||
drm_atomic_get_old_private_obj_state
|
||||
drm_crtc_commit_wait
|
||||
@ -1487,6 +1466,9 @@
|
||||
# required by exynos-reboot.ko
|
||||
pm_power_off
|
||||
|
||||
# required by exynos-s2i.ko
|
||||
smp_call_function_many
|
||||
|
||||
# required by exynos-ssld.ko
|
||||
driver_register
|
||||
|
||||
@ -1654,8 +1636,12 @@
|
||||
# required by hardlockup-watchdog.ko
|
||||
cpus_read_lock
|
||||
cpus_read_unlock
|
||||
free_percpu
|
||||
smp_call_on_cpu
|
||||
|
||||
# required by hdcp2.ko
|
||||
crypto_shash_digest
|
||||
|
||||
# required by hook.ko
|
||||
__traceiter_android_vh_do_wake_up_sync
|
||||
__traceiter_android_vh_set_wake_flags
|
||||
@ -1732,63 +1718,9 @@
|
||||
led_classdev_register_ext
|
||||
led_classdev_unregister
|
||||
|
||||
# required by mac80211.ko
|
||||
__alloc_percpu_gfp
|
||||
arc4_crypt
|
||||
arc4_setkey
|
||||
crc32_be
|
||||
crypto_aead_decrypt
|
||||
crypto_aead_encrypt
|
||||
crypto_aead_setauthsize
|
||||
crypto_aead_setkey
|
||||
crypto_alloc_aead
|
||||
crypto_alloc_skcipher
|
||||
__crypto_memneq
|
||||
crypto_shash_finup
|
||||
crypto_shash_setkey
|
||||
crypto_skcipher_decrypt
|
||||
crypto_skcipher_encrypt
|
||||
crypto_skcipher_setkey
|
||||
__crypto_xor
|
||||
dev_fetch_sw_netstats
|
||||
eth_mac_addr
|
||||
ethtool_op_get_link
|
||||
get_random_u16
|
||||
__hw_addr_init
|
||||
__hw_addr_sync
|
||||
__hw_addr_unsync
|
||||
kernel_param_lock
|
||||
kernel_param_unlock
|
||||
kfree_skb_list_reason
|
||||
ktime_get_seconds
|
||||
memmove
|
||||
netdev_info
|
||||
netdev_set_default_ethtool_ops
|
||||
netif_receive_skb_list
|
||||
net_ratelimit
|
||||
pskb_expand_head
|
||||
___pskb_trim
|
||||
rhashtable_free_and_destroy
|
||||
rhashtable_insert_slow
|
||||
rhltable_init
|
||||
__rht_bucket_nested
|
||||
rht_bucket_nested
|
||||
rht_bucket_nested_insert
|
||||
round_jiffies
|
||||
round_jiffies_relative
|
||||
round_jiffies_up
|
||||
sg_init_one
|
||||
skb_checksum_help
|
||||
skb_clone_sk
|
||||
skb_complete_wifi_ack
|
||||
skb_ensure_writable
|
||||
__skb_get_hash
|
||||
__skb_gso_segment
|
||||
tasklet_unlock_wait
|
||||
unregister_netdevice_many
|
||||
|
||||
# required by mcDrvModule.ko
|
||||
crypto_shash_final
|
||||
crypto_shash_update
|
||||
down_read
|
||||
freezer_active
|
||||
freezing_slow_path
|
||||
@ -1810,7 +1742,6 @@
|
||||
|
||||
# required by mpam_arch.ko
|
||||
bitmap_alloc
|
||||
__cpu_present_mask
|
||||
kobj_sysfs_ops
|
||||
kstrtou16
|
||||
on_each_cpu_cond_mask
|
||||
@ -1850,6 +1781,7 @@
|
||||
kstrdup_const
|
||||
llist_add_batch
|
||||
rb_first
|
||||
tasklet_setup
|
||||
|
||||
# required by pablo-self-tests.ko
|
||||
debugfs_create_blob
|
||||
@ -1957,6 +1889,7 @@
|
||||
devm_rtc_device_register
|
||||
rtc_update_irq
|
||||
rtc_valid_tm
|
||||
system_freezable_wq
|
||||
|
||||
# required by s3c2410_wdt.ko
|
||||
watchdog_init_timeout
|
||||
@ -2002,8 +1935,6 @@
|
||||
dma_heap_get_name
|
||||
is_dma_buf_file
|
||||
iterate_fd
|
||||
__traceiter_android_vh_show_mem
|
||||
__tracepoint_android_vh_show_mem
|
||||
|
||||
# required by samsung_iommu_v9.ko
|
||||
device_link_add
|
||||
@ -2070,22 +2001,30 @@
|
||||
|
||||
# required by scsc_wlan.ko
|
||||
arp_tbl
|
||||
dev_addr_mod
|
||||
dev_alloc_name
|
||||
__dev_queue_xmit
|
||||
down_trylock
|
||||
dql_completed
|
||||
dql_reset
|
||||
dst_release
|
||||
ether_setup
|
||||
for_each_kernel_tracepoint
|
||||
in4_pton
|
||||
in6_pton
|
||||
ip_route_output_flow
|
||||
ip_send_check
|
||||
linkwatch_fire_event
|
||||
__local_bh_enable_ip
|
||||
napi_disable
|
||||
napi_gro_flush
|
||||
neigh_destroy
|
||||
neigh_lookup
|
||||
netif_carrier_off
|
||||
netif_carrier_on
|
||||
__netif_napi_del
|
||||
netif_schedule_queue
|
||||
netif_tx_stop_all_queues
|
||||
nla_put_nohdr
|
||||
_raw_read_lock_bh
|
||||
_raw_read_unlock_bh
|
||||
@ -2093,8 +2032,14 @@
|
||||
_raw_write_lock_bh
|
||||
_raw_write_unlock
|
||||
_raw_write_unlock_bh
|
||||
register_inet6addr_notifier
|
||||
register_inetaddr_notifier
|
||||
register_kretprobe
|
||||
skb_clone
|
||||
skb_copy
|
||||
skb_realloc_headroom
|
||||
unregister_inet6addr_notifier
|
||||
unregister_inetaddr_notifier
|
||||
unregister_kretprobe
|
||||
|
||||
# required by scsi_srpmb.ko
|
||||
@ -2214,11 +2159,17 @@
|
||||
handle_simple_irq
|
||||
i2c_bit_add_bus
|
||||
ida_destroy
|
||||
idr_destroy
|
||||
idr_find
|
||||
idr_for_each
|
||||
idr_get_next
|
||||
idr_replace
|
||||
jiffies64_to_msecs
|
||||
memremap
|
||||
memunmap
|
||||
mmu_notifier_synchronize
|
||||
page_pinner_inited
|
||||
__page_pinner_put_page
|
||||
param_ops_hexint
|
||||
pci_assign_unassigned_bus_resources
|
||||
pci_bus_resource_n
|
||||
|
@ -309,6 +309,8 @@
|
||||
__traceiter_android_rvh_report_bug
|
||||
__traceiter_android_vh_ipi_stop
|
||||
__traceiter_android_vh_is_fpsimd_save
|
||||
__traceiter_android_vh_madvise_pageout_swap_entry
|
||||
__traceiter_android_vh_madvise_swapin_walk_pmd_entry
|
||||
__traceiter_android_vh_mutex_wait_finish
|
||||
__traceiter_android_vh_mutex_wait_start
|
||||
__traceiter_android_vh_ptype_head
|
||||
@ -318,10 +320,16 @@
|
||||
__traceiter_android_vh_rwsem_read_wait_start
|
||||
__traceiter_android_vh_rwsem_write_wait_finish
|
||||
__traceiter_android_vh_rwsem_write_wait_start
|
||||
__traceiter_android_vh_process_madvise_end
|
||||
__traceiter_android_vh_sched_show_task
|
||||
__traceiter_android_vh_show_smap
|
||||
__traceiter_android_vh_smaps_pte_entry
|
||||
__traceiter_android_vh_try_to_freeze_todo
|
||||
__traceiter_android_vh_try_to_freeze_todo_unfrozen
|
||||
__traceiter_android_vh_watchdog_timer_softlockup
|
||||
__traceiter_android_vh_meminfo_cache_adjust
|
||||
__traceiter_android_vh_si_mem_available_adjust
|
||||
__traceiter_android_vh_si_meminfo_adjust
|
||||
__traceiter_block_rq_insert
|
||||
__traceiter_console
|
||||
__traceiter_hrtimer_expire_entry
|
||||
@ -342,6 +350,8 @@
|
||||
__tracepoint_android_rvh_report_bug
|
||||
__tracepoint_android_vh_ipi_stop
|
||||
__tracepoint_android_vh_is_fpsimd_save
|
||||
__tracepoint_android_vh_madvise_pageout_swap_entry
|
||||
__tracepoint_android_vh_madvise_swapin_walk_pmd_entry
|
||||
__tracepoint_android_vh_mutex_wait_finish
|
||||
__tracepoint_android_vh_mutex_wait_start
|
||||
__tracepoint_android_vh_ptype_head
|
||||
@ -351,10 +361,16 @@
|
||||
__tracepoint_android_vh_rwsem_read_wait_start
|
||||
__tracepoint_android_vh_rwsem_write_wait_finish
|
||||
__tracepoint_android_vh_rwsem_write_wait_start
|
||||
__tracepoint_android_vh_process_madvise_end
|
||||
__tracepoint_android_vh_sched_show_task
|
||||
__tracepoint_android_vh_show_smap
|
||||
__tracepoint_android_vh_smaps_pte_entry
|
||||
__tracepoint_android_vh_try_to_freeze_todo
|
||||
__tracepoint_android_vh_try_to_freeze_todo_unfrozen
|
||||
__tracepoint_android_vh_watchdog_timer_softlockup
|
||||
__tracepoint_android_vh_meminfo_cache_adjust
|
||||
__tracepoint_android_vh_si_mem_available_adjust
|
||||
__tracepoint_android_vh_si_meminfo_adjust
|
||||
__tracepoint_block_rq_insert
|
||||
__tracepoint_console
|
||||
__tracepoint_hrtimer_expire_entry
|
||||
|
@ -3,9 +3,15 @@
|
||||
__tracepoint_android_vh_page_add_new_anon_rmap
|
||||
__tracepoint_android_vh_do_shrink_slab
|
||||
__tracepoint_android_vh_shrink_slab_bypass
|
||||
__tracepoint_android_vh_mutex_init
|
||||
__tracepoint_android_vh_file_is_tiny_bypass
|
||||
__tracepoint_android_vh_modify_scan_control
|
||||
__tracepoint_android_vh_should_continue_reclaim
|
||||
__traceiter_android_vh_get_page_wmark
|
||||
__traceiter_android_vh_page_add_new_anon_rmap
|
||||
__traceiter_android_vh_do_shrink_slab
|
||||
__traceiter_android_vh_shrink_slab_bypass
|
||||
__traceiter_android_vh_mutex_init
|
||||
__tracepoint_android_vh_mutex_init
|
||||
__traceiter_android_vh_file_is_tiny_bypass
|
||||
__traceiter_android_vh_modify_scan_control
|
||||
__traceiter_android_vh_should_continue_reclaim
|
@ -8,6 +8,7 @@
|
||||
blk_rq_map_user
|
||||
blk_rq_map_user_iov
|
||||
blk_start_plug
|
||||
cgroup_add_legacy_cftypes
|
||||
console_printk
|
||||
cpufreq_frequency_table_get_index
|
||||
cpufreq_policy_transition_delay_us
|
||||
@ -39,6 +40,7 @@
|
||||
ktime_get_coarse_real_ts64
|
||||
memory_cgrp_subsys
|
||||
memory_cgrp_subsys_enabled_key
|
||||
mem_cgroup_from_id
|
||||
mipi_dsi_generic_write
|
||||
mmc_wait_for_cmd
|
||||
nf_ct_attach
|
||||
@ -119,11 +121,16 @@
|
||||
__traceiter_android_vh_cgroup_set_task
|
||||
__traceiter_android_vh_dup_task_struct
|
||||
__traceiter_android_vh_exit_signal
|
||||
__traceiter_android_vh_mem_cgroup_id_remove
|
||||
__traceiter_android_vh_mem_cgroup_css_offline
|
||||
__traceiter_android_vh_mem_cgroup_css_online
|
||||
__traceiter_android_vh_mem_cgroup_free
|
||||
__traceiter_android_vh_mem_cgroup_alloc
|
||||
__traceiter_android_vh_irqtime_account_process_tick
|
||||
__traceiter_android_vh_mutex_can_spin_on_owner
|
||||
__traceiter_android_vh_mutex_opt_spin_finish
|
||||
__traceiter_android_vh_mutex_opt_spin_start
|
||||
__traceiter_android_vh_cpufreq_acct_update_power
|
||||
__traceiter_android_vh_cpufreq_acct_update_power
|
||||
__traceiter_android_vh_cleanup_old_buffers_bypass
|
||||
__traceiter_android_vh_dm_bufio_shrink_scan_bypass
|
||||
__traceiter_android_vh_mutex_unlock_slowpath
|
||||
@ -134,6 +141,12 @@
|
||||
__traceiter_android_vh_sched_stat_runtime_rt
|
||||
__traceiter_android_vh_shrink_node_memcgs
|
||||
__traceiter_android_vh_sync_txn_recvd
|
||||
__traceiter_block_bio_queue
|
||||
__traceiter_block_getrq
|
||||
__traceiter_block_rq_complete
|
||||
__traceiter_block_rq_issue
|
||||
__traceiter_block_rq_merge
|
||||
__traceiter_block_rq_requeue
|
||||
__traceiter_sched_stat_blocked
|
||||
__traceiter_sched_stat_iowait
|
||||
__traceiter_sched_stat_runtime
|
||||
@ -164,6 +177,11 @@
|
||||
__tracepoint_android_vh_do_futex
|
||||
__tracepoint_android_vh_dup_task_struct
|
||||
__tracepoint_android_vh_exit_signal
|
||||
__tracepoint_android_vh_mem_cgroup_id_remove
|
||||
__tracepoint_android_vh_mem_cgroup_css_offline
|
||||
__tracepoint_android_vh_mem_cgroup_css_online
|
||||
__tracepoint_android_vh_mem_cgroup_free
|
||||
__tracepoint_android_vh_mem_cgroup_alloc
|
||||
__tracepoint_android_vh_futex_sleep_start
|
||||
__tracepoint_android_vh_futex_wait_end
|
||||
__tracepoint_android_vh_futex_wait_start
|
||||
@ -185,6 +203,12 @@
|
||||
__tracepoint_android_vh_sched_stat_runtime_rt
|
||||
__tracepoint_android_vh_shrink_node_memcgs
|
||||
__tracepoint_android_vh_sync_txn_recvd
|
||||
__tracepoint_block_bio_queue
|
||||
__tracepoint_block_getrq
|
||||
__tracepoint_block_rq_complete
|
||||
__tracepoint_block_rq_issue
|
||||
__tracepoint_block_rq_merge
|
||||
__tracepoint_block_rq_requeue
|
||||
__tracepoint_sched_stat_blocked
|
||||
__tracepoint_sched_stat_iowait
|
||||
__tracepoint_sched_stat_runtime
|
||||
@ -192,6 +216,7 @@
|
||||
__tracepoint_sched_stat_wait
|
||||
__tracepoint_sched_waking
|
||||
__tracepoint_task_rename
|
||||
try_to_free_mem_cgroup_pages
|
||||
typec_mux_get_drvdata
|
||||
unregister_memory_notifier
|
||||
unregister_tcf_proto_ops
|
||||
|
@ -957,7 +957,9 @@
|
||||
int_to_scsilun
|
||||
iomem_resource
|
||||
iommu_alloc_resv_region
|
||||
iommu_attach_device_pasid
|
||||
iommu_attach_group
|
||||
iommu_detach_device_pasid
|
||||
iommu_device_register
|
||||
iommu_device_sysfs_add
|
||||
iommu_device_sysfs_remove
|
||||
@ -968,6 +970,7 @@
|
||||
iommu_fwspec_add_ids
|
||||
iommu_fwspec_free
|
||||
iommu_get_domain_for_dev
|
||||
iommu_get_domain_for_dev_pasid
|
||||
iommu_group_alloc
|
||||
iommu_group_for_each_dev
|
||||
iommu_group_get
|
||||
|
@ -31,6 +31,7 @@
|
||||
__alloc_percpu_gfp
|
||||
__alloc_skb
|
||||
alloc_skb_with_frags
|
||||
alloc_swapdev_block
|
||||
alloc_workqueue
|
||||
alt_cb_patch_nops
|
||||
amba_bustype
|
||||
@ -299,6 +300,7 @@
|
||||
clk_register
|
||||
clk_restore_context
|
||||
clk_round_rate
|
||||
clk_save_context
|
||||
clk_set_parent
|
||||
clk_set_rate
|
||||
clk_sync_state
|
||||
@ -732,6 +734,7 @@
|
||||
divider_ro_round_rate_parent
|
||||
divider_round_rate_parent
|
||||
dma_alloc_attrs
|
||||
dma_alloc_noncontiguous
|
||||
dma_alloc_pages
|
||||
dma_async_device_register
|
||||
dma_async_device_unregister
|
||||
@ -770,6 +773,7 @@
|
||||
dma_fence_signal_timestamp_locked
|
||||
dma_fence_wait_timeout
|
||||
dma_free_attrs
|
||||
dma_free_noncontiguous
|
||||
dma_free_pages
|
||||
dma_get_sgtable_attrs
|
||||
dma_get_slave_channel
|
||||
@ -804,6 +808,8 @@
|
||||
dma_unmap_page_attrs
|
||||
dma_unmap_resource
|
||||
dma_unmap_sg_attrs
|
||||
dma_vmap_noncontiguous
|
||||
dma_vunmap_noncontiguous
|
||||
do_trace_netlink_extack
|
||||
do_trace_rcu_torture_read
|
||||
double_rq_lock
|
||||
@ -1229,6 +1235,9 @@
|
||||
gh_rm_register_platform_ops
|
||||
gh_rm_unregister_platform_ops
|
||||
gic_nonsecure_priorities
|
||||
gic_v3_cpu_init
|
||||
gic_v3_dist_init
|
||||
gic_v3_dist_wait_for_rwp
|
||||
gov_attr_set_init
|
||||
gov_attr_set_put
|
||||
governor_sysfs_ops
|
||||
@ -2800,6 +2809,7 @@
|
||||
sdhci_cqe_disable
|
||||
sdhci_cqe_enable
|
||||
sdhci_cqe_irq
|
||||
sdhci_dumpregs
|
||||
sdhci_enable_clk
|
||||
sdhci_get_property
|
||||
sdhci_pltfm_free
|
||||
@ -2960,6 +2970,7 @@
|
||||
smp_call_function
|
||||
smp_call_function_single
|
||||
smp_call_function_single_async
|
||||
snapshot_get_image_size
|
||||
snd_ctl_remove
|
||||
snd_hwdep_new
|
||||
snd_info_create_card_entry
|
||||
@ -3324,6 +3335,7 @@
|
||||
__traceiter_android_vh_binder_set_priority
|
||||
__traceiter_android_vh_binder_wakeup_ilocked
|
||||
__traceiter_android_vh_build_sched_domains
|
||||
__traceiter_android_vh_check_hibernation_swap
|
||||
__traceiter_android_vh_check_uninterrupt_tasks
|
||||
__traceiter_android_vh_check_uninterrupt_tasks_done
|
||||
__traceiter_android_vh_cpufreq_fast_switch
|
||||
@ -3336,23 +3348,31 @@
|
||||
__traceiter_android_vh_disable_thermal_cooling_stats
|
||||
__traceiter_android_vh_do_wake_up_sync
|
||||
__traceiter_android_vh_dump_throttled_rt_tasks
|
||||
__traceiter_android_vh_encrypt_page
|
||||
__traceiter_android_vh_free_task
|
||||
__traceiter_android_vh_ftrace_dump_buffer
|
||||
__traceiter_android_vh_ftrace_format_check
|
||||
__traceiter_android_vh_ftrace_oops_enter
|
||||
__traceiter_android_vh_ftrace_oops_exit
|
||||
__traceiter_android_vh_ftrace_size_check
|
||||
__traceiter_android_vh_gic_v3_suspend
|
||||
__traceiter_android_vh_ignore_dmabuf_vmap_bounds
|
||||
__traceiter_android_vh_init_aes_encrypt
|
||||
__traceiter_android_vh_ipi_stop
|
||||
__traceiter_android_vh_jiffies_update
|
||||
__traceiter_android_vh_kswapd_per_node
|
||||
__traceiter_android_vh_mpam_set
|
||||
__traceiter_android_vh_post_image_save
|
||||
__traceiter_android_vh_printk_hotplug
|
||||
__traceiter_android_vh_rproc_recovery
|
||||
__traceiter_android_vh_rproc_recovery_set
|
||||
__traceiter_android_vh_save_cpu_resume
|
||||
__traceiter_android_vh_save_hib_resume_bdev
|
||||
__traceiter_android_vh_scheduler_tick
|
||||
__traceiter_android_vh_setscheduler_uclamp
|
||||
__traceiter_android_vh_show_resume_epoch_val
|
||||
__traceiter_android_vh_show_suspend_epoch_val
|
||||
__traceiter_android_vh_skip_swap_map_write
|
||||
__traceiter_android_vh_timer_calc_index
|
||||
__traceiter_android_vh_ufs_check_int_errors
|
||||
__traceiter_android_vh_ufs_compl_command
|
||||
@ -3458,6 +3478,7 @@
|
||||
__tracepoint_android_vh_binder_set_priority
|
||||
__tracepoint_android_vh_binder_wakeup_ilocked
|
||||
__tracepoint_android_vh_build_sched_domains
|
||||
__tracepoint_android_vh_check_hibernation_swap
|
||||
__tracepoint_android_vh_check_uninterrupt_tasks
|
||||
__tracepoint_android_vh_check_uninterrupt_tasks_done
|
||||
__tracepoint_android_vh_cpufreq_fast_switch
|
||||
@ -3470,23 +3491,31 @@
|
||||
__tracepoint_android_vh_disable_thermal_cooling_stats
|
||||
__tracepoint_android_vh_do_wake_up_sync
|
||||
__tracepoint_android_vh_dump_throttled_rt_tasks
|
||||
__tracepoint_android_vh_encrypt_page
|
||||
__tracepoint_android_vh_free_task
|
||||
__tracepoint_android_vh_ftrace_dump_buffer
|
||||
__tracepoint_android_vh_ftrace_format_check
|
||||
__tracepoint_android_vh_ftrace_oops_enter
|
||||
__tracepoint_android_vh_ftrace_oops_exit
|
||||
__tracepoint_android_vh_ftrace_size_check
|
||||
__tracepoint_android_vh_gic_v3_suspend
|
||||
__tracepoint_android_vh_ignore_dmabuf_vmap_bounds
|
||||
__tracepoint_android_vh_init_aes_encrypt
|
||||
__tracepoint_android_vh_ipi_stop
|
||||
__tracepoint_android_vh_jiffies_update
|
||||
__tracepoint_android_vh_kswapd_per_node
|
||||
__tracepoint_android_vh_mpam_set
|
||||
__tracepoint_android_vh_post_image_save
|
||||
__tracepoint_android_vh_printk_hotplug
|
||||
__tracepoint_android_vh_rproc_recovery
|
||||
__tracepoint_android_vh_rproc_recovery_set
|
||||
__tracepoint_android_vh_save_cpu_resume
|
||||
__tracepoint_android_vh_save_hib_resume_bdev
|
||||
__tracepoint_android_vh_scheduler_tick
|
||||
__tracepoint_android_vh_setscheduler_uclamp
|
||||
__tracepoint_android_vh_show_resume_epoch_val
|
||||
__tracepoint_android_vh_show_suspend_epoch_val
|
||||
__tracepoint_android_vh_skip_swap_map_write
|
||||
__tracepoint_android_vh_timer_calc_index
|
||||
__tracepoint_android_vh_ufs_check_int_errors
|
||||
__tracepoint_android_vh_ufs_compl_command
|
||||
@ -3608,6 +3637,7 @@
|
||||
ufshcd_hold
|
||||
ufshcd_mcq_config_esi
|
||||
ufshcd_mcq_enable_esi
|
||||
ufshcd_mcq_poll_cqe_lock
|
||||
ufshcd_mcq_poll_cqe_nolock
|
||||
ufshcd_mcq_write_cqis
|
||||
ufshcd_pltfrm_init
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -223,8 +223,24 @@
|
||||
#required by io_limit.ko
|
||||
__traceiter_android_vh_psi_event
|
||||
__traceiter_android_vh_psi_group
|
||||
__traceiter_android_rvh_ctl_dirty_rate
|
||||
__tracepoint_android_vh_psi_event
|
||||
__tracepoint_android_vh_psi_group
|
||||
__tracepoint_android_rvh_ctl_dirty_rate
|
||||
|
||||
#required by touch module
|
||||
power_supply_is_system_supplied
|
||||
|
||||
#required by mi_mempool.ko
|
||||
__traceiter_android_vh_alloc_pages_reclaim_bypass
|
||||
__traceiter_android_vh_alloc_pages_failure_bypass
|
||||
__tracepoint_android_vh_alloc_pages_reclaim_bypass
|
||||
__tracepoint_android_vh_alloc_pages_failure_bypass
|
||||
|
||||
#required by mi_mempool.ko
|
||||
__traceiter_android_vh_mmput
|
||||
__tracepoint_android_vh_mmput
|
||||
|
||||
#required by mi_mempool.ko
|
||||
__traceiter_android_vh_madvise_cold_pageout_skip
|
||||
__tracepoint_android_vh_madvise_cold_pageout_skip
|
||||
|
@ -316,7 +316,7 @@ static int __init gate_vma_init(void)
|
||||
gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
|
||||
gate_vma.vm_start = 0xffff0000;
|
||||
gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
|
||||
gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
|
||||
vm_flags_init(&gate_vma, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC);
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(gate_vma_init);
|
||||
|
@ -96,6 +96,7 @@ config ARM64
|
||||
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
|
||||
select ARCH_SUPPORTS_NUMA_BALANCING
|
||||
select ARCH_SUPPORTS_PAGE_TABLE_CHECK
|
||||
select ARCH_SUPPORTS_PER_VMA_LOCK
|
||||
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
|
||||
select ARCH_WANT_DEFAULT_BPF_JIT
|
||||
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
|
||||
|
@ -17,7 +17,7 @@ CONFIG_RCU_BOOST=y
|
||||
CONFIG_RCU_NOCB_CPU=y
|
||||
CONFIG_IKCONFIG=y
|
||||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_IKHEADERS=y
|
||||
CONFIG_IKHEADERS=m
|
||||
CONFIG_UCLAMP_TASK=y
|
||||
CONFIG_UCLAMP_BUCKETS_COUNT=20
|
||||
CONFIG_CGROUPS=y
|
||||
@ -62,6 +62,7 @@ CONFIG_UNWIND_PATCH_PAC_INTO_SCS=y
|
||||
CONFIG_CMDLINE="console=ttynull stack_depot_disable=on cgroup_disable=pressure kasan.page_alloc.sample=10 kasan.stacktrace=off kvm-arm.mode=protected bootconfig ioremap_guard"
|
||||
CONFIG_CMDLINE_EXTEND=y
|
||||
# CONFIG_DMI is not set
|
||||
CONFIG_HIBERNATION=y
|
||||
CONFIG_PM_WAKELOCKS=y
|
||||
CONFIG_PM_WAKELOCKS_LIMIT=0
|
||||
# CONFIG_PM_WAKELOCKS_GC is not set
|
||||
@ -306,6 +307,7 @@ CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
|
||||
CONFIG_BLK_DEV_RAM=y
|
||||
CONFIG_BLK_DEV_RAM_SIZE=8192
|
||||
CONFIG_BLK_DEV_UBLK=y
|
||||
CONFIG_BLK_DEV_NVME=y
|
||||
CONFIG_SRAM=y
|
||||
CONFIG_UID_SYS_STATS=y
|
||||
CONFIG_SCSI=y
|
||||
@ -326,6 +328,7 @@ CONFIG_IFB=y
|
||||
CONFIG_MACSEC=y
|
||||
CONFIG_TUN=y
|
||||
CONFIG_VETH=y
|
||||
CONFIG_LED_TRIGGER_PHY=y
|
||||
CONFIG_AX88796B_PHY=y
|
||||
CONFIG_CAN_VCAN=m
|
||||
CONFIG_CAN_SLCAN=m
|
||||
@ -704,6 +707,7 @@ CONFIG_UBSAN_LOCAL_BOUNDS=y
|
||||
# CONFIG_UBSAN_ENUM is not set
|
||||
CONFIG_PAGE_OWNER=y
|
||||
CONFIG_PAGE_PINNER=y
|
||||
CONFIG_PER_VMA_LOCK_STATS=y
|
||||
CONFIG_DEBUG_STACK_USAGE=y
|
||||
CONFIG_DEBUG_MEMORY_INIT=y
|
||||
CONFIG_KASAN=y
|
||||
|
@ -63,17 +63,11 @@ enum __kvm_host_smccc_func {
|
||||
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
|
||||
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
|
||||
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
|
||||
|
||||
/*
|
||||
* __pkvm_alloc_module_va may temporarily serve as the privileged hcall
|
||||
* limit when module loading is enabled, see early_pkvm_enable_modules().
|
||||
*/
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_alloc_module_va,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_map_module_page,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_unmap_module_page,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_init_module,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_register_hcall,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_close_module_registration,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
|
||||
|
||||
/* Hypercalls available after pKVM finalisation */
|
||||
|
@ -406,6 +406,8 @@ int pkvm_iommu_resume(struct device *dev);
|
||||
*/
|
||||
int pkvm_iommu_finalize(int err);
|
||||
|
||||
bool pkvm_iommu_finalized(void);
|
||||
|
||||
struct vcpu_reset_state {
|
||||
unsigned long pc;
|
||||
unsigned long r0;
|
||||
|
@ -25,7 +25,7 @@ unsigned long mte_copy_tags_to_user(void __user *to, void *from,
|
||||
unsigned long n);
|
||||
int mte_save_tags(struct page *page);
|
||||
void mte_save_page_tags(const void *page_addr, void *tag_storage);
|
||||
bool mte_restore_tags(swp_entry_t entry, struct page *page);
|
||||
void mte_restore_tags(swp_entry_t entry, struct page *page);
|
||||
void mte_restore_page_tags(void *page_addr, const void *tag_storage);
|
||||
void mte_invalidate_tags(int type, pgoff_t offset);
|
||||
void mte_invalidate_tags_area(int type);
|
||||
@ -61,7 +61,7 @@ static inline bool page_mte_tagged(struct page *page)
|
||||
}
|
||||
|
||||
void mte_zero_clear_page_tags(void *addr);
|
||||
void mte_sync_tags(pte_t old_pte, pte_t pte);
|
||||
void mte_sync_tags(pte_t pte);
|
||||
void mte_copy_page_tags(void *kto, const void *kfrom);
|
||||
void mte_thread_init_user(void);
|
||||
void mte_thread_switch(struct task_struct *next);
|
||||
@ -89,7 +89,7 @@ static inline bool page_mte_tagged(struct page *page)
|
||||
static inline void mte_zero_clear_page_tags(void *addr)
|
||||
{
|
||||
}
|
||||
static inline void mte_sync_tags(pte_t old_pte, pte_t pte)
|
||||
static inline void mte_sync_tags(pte_t pte)
|
||||
{
|
||||
}
|
||||
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
|
||||
|
@ -335,18 +335,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
|
||||
* don't expose tags (instruction fetches don't check tags).
|
||||
*/
|
||||
if (system_supports_mte() && pte_access_permitted(pte, false) &&
|
||||
!pte_special(pte)) {
|
||||
pte_t old_pte = READ_ONCE(*ptep);
|
||||
/*
|
||||
* We only need to synchronise if the new PTE has tags enabled
|
||||
* or if swapping in (in which case another mapping may have
|
||||
* set tags in the past even if this PTE isn't tagged).
|
||||
* (!pte_none() && !pte_present()) is an open coded version of
|
||||
* is_swap_pte()
|
||||
*/
|
||||
if (pte_tagged(pte) || (!pte_none(old_pte) && !pte_present(old_pte)))
|
||||
mte_sync_tags(old_pte, pte);
|
||||
}
|
||||
!pte_special(pte) && pte_tagged(pte))
|
||||
mte_sync_tags(pte);
|
||||
|
||||
__check_racy_pte_update(mm, ptep, pte);
|
||||
|
||||
@ -1066,8 +1056,8 @@ static inline void arch_swap_invalidate_area(int type)
|
||||
#define __HAVE_ARCH_SWAP_RESTORE
|
||||
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
|
||||
{
|
||||
if (system_supports_mte() && mte_restore_tags(entry, &folio->page))
|
||||
set_page_mte_tagged(&folio->page);
|
||||
if (system_supports_mte())
|
||||
mte_restore_tags(entry, &folio->page);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_ARM64_MTE */
|
||||
|
@ -33,6 +33,7 @@
|
||||
#include <asm/sysreg.h>
|
||||
#include <asm/trans_pgd.h>
|
||||
#include <asm/virt.h>
|
||||
#include <trace/hooks/bl_hib.h>
|
||||
|
||||
/*
|
||||
* Hibernate core relies on this value being 0 on resume, and marks it
|
||||
@ -80,6 +81,8 @@ static struct arch_hibernate_hdr {
|
||||
phys_addr_t __hyp_stub_vectors;
|
||||
|
||||
u64 sleep_cpu_mpidr;
|
||||
|
||||
ANDROID_VENDOR_DATA(1);
|
||||
} resume_hdr;
|
||||
|
||||
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
|
||||
@ -117,6 +120,9 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
|
||||
hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir);
|
||||
hdr->reenter_kernel = _cpu_resume;
|
||||
|
||||
trace_android_vh_save_cpu_resume(&hdr->android_vendor_data1,
|
||||
__pa(cpu_resume));
|
||||
|
||||
/* We can't use __hyp_get_vectors() because kvm may still be loaded */
|
||||
if (el2_reset_needed())
|
||||
hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
|
||||
|
@ -35,47 +35,18 @@ DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
|
||||
EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
|
||||
#endif
|
||||
|
||||
static void mte_sync_page_tags(struct page *page, pte_t old_pte,
|
||||
bool check_swap, bool pte_is_tagged)
|
||||
{
|
||||
if (check_swap && is_swap_pte(old_pte)) {
|
||||
swp_entry_t entry = pte_to_swp_entry(old_pte);
|
||||
|
||||
if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) {
|
||||
set_page_mte_tagged(page);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (!pte_is_tagged)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Test PG_mte_tagged again in case it was racing with another
|
||||
* set_pte_at().
|
||||
*/
|
||||
if (!page_mte_tagged(page)) {
|
||||
mte_clear_page_tags(page_address(page));
|
||||
set_page_mte_tagged(page);
|
||||
}
|
||||
}
|
||||
|
||||
void mte_sync_tags(pte_t old_pte, pte_t pte)
|
||||
void mte_sync_tags(pte_t pte)
|
||||
{
|
||||
struct page *page = pte_page(pte);
|
||||
long i, nr_pages = compound_nr(page);
|
||||
bool check_swap = nr_pages == 1;
|
||||
bool pte_is_tagged = pte_tagged(pte);
|
||||
|
||||
/* Early out if there's nothing to do */
|
||||
if (!check_swap && !pte_is_tagged)
|
||||
return;
|
||||
|
||||
/* if PG_mte_tagged is set, tags have already been initialised */
|
||||
for (i = 0; i < nr_pages; i++, page++)
|
||||
if (!page_mte_tagged(page))
|
||||
mte_sync_page_tags(page, old_pte, check_swap,
|
||||
pte_is_tagged);
|
||||
for (i = 0; i < nr_pages; i++, page++) {
|
||||
if (!page_mte_tagged(page)) {
|
||||
mte_clear_page_tags(page_address(page));
|
||||
set_page_mte_tagged(page);
|
||||
}
|
||||
}
|
||||
|
||||
/* ensure the tags are visible before the PTE is set */
|
||||
smp_wmb();
|
||||
|
@ -1687,6 +1687,7 @@ static void cpu_prepare_hyp_mode(int cpu)
|
||||
else
|
||||
params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
|
||||
params->vttbr = params->vtcr = 0;
|
||||
params->hfgwtr_el2 = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
|
||||
|
||||
/*
|
||||
* Flush the init params from the data cache because the struct will
|
||||
|
@ -11,13 +11,10 @@ int __pkvm_register_hyp_panic_notifier(void (*cb)(struct kvm_cpu_context *));
|
||||
enum pkvm_psci_notification;
|
||||
int __pkvm_register_psci_notifier(void (*cb)(enum pkvm_psci_notification, struct kvm_cpu_context *));
|
||||
|
||||
int reset_pkvm_priv_hcall_limit(void);
|
||||
|
||||
#ifdef CONFIG_MODULES
|
||||
int __pkvm_init_module(void *module_init);
|
||||
int __pkvm_register_hcall(unsigned long hfn_hyp_va);
|
||||
int handle_host_dynamic_hcall(struct kvm_cpu_context *host_ctxt);
|
||||
int __pkvm_close_late_module_registration(void);
|
||||
void __pkvm_close_module_registration(void);
|
||||
#else
|
||||
static inline int __pkvm_init_module(void *module_init) { return -EOPNOTSUPP; }
|
||||
@ -27,6 +24,5 @@ static inline int handle_host_dynamic_hcall(struct kvm_cpu_context *host_ctxt)
|
||||
{
|
||||
return HCALL_UNHANDLED;
|
||||
}
|
||||
static inline int __pkvm_close_late_module_registration(void) { return -EOPNOTSUPP; }
|
||||
static inline void __pkvm_close_module_registration(void) { }
|
||||
#endif
|
||||
|
@ -162,9 +162,4 @@ int pkvm_load_pvmfw_pages(struct pkvm_hyp_vm *vm, u64 ipa, phys_addr_t phys,
|
||||
u64 size);
|
||||
void pkvm_poison_pvmfw_pages(void);
|
||||
|
||||
/*
|
||||
* Notify pKVM about events that can undermine pKVM security.
|
||||
*/
|
||||
void pkvm_handle_system_misconfiguration(enum pkvm_system_misconfiguration event);
|
||||
|
||||
#endif /* __ARM64_KVM_NVHE_PKVM_H__ */
|
||||
|
@ -1212,12 +1212,6 @@ static void handle___pkvm_register_hcall(struct kvm_cpu_context *host_ctxt)
|
||||
cpu_reg(host_ctxt, 1) = __pkvm_register_hcall(hfn_hyp_va);
|
||||
}
|
||||
|
||||
static void
|
||||
handle___pkvm_close_module_registration(struct kvm_cpu_context *host_ctxt)
|
||||
{
|
||||
cpu_reg(host_ctxt, 1) = __pkvm_close_late_module_registration();
|
||||
}
|
||||
|
||||
static void handle___pkvm_load_tracing(struct kvm_cpu_context *host_ctxt)
|
||||
{
|
||||
DECLARE_REG(unsigned long, pack_hva, host_ctxt, 1);
|
||||
@ -1290,13 +1284,11 @@ static const hcall_t host_hcall[] = {
|
||||
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
|
||||
HANDLE_FUNC(__kvm_tlb_flush_vmid),
|
||||
HANDLE_FUNC(__kvm_flush_cpu_context),
|
||||
|
||||
HANDLE_FUNC(__pkvm_alloc_module_va),
|
||||
HANDLE_FUNC(__pkvm_map_module_page),
|
||||
HANDLE_FUNC(__pkvm_unmap_module_page),
|
||||
HANDLE_FUNC(__pkvm_init_module),
|
||||
HANDLE_FUNC(__pkvm_register_hcall),
|
||||
HANDLE_FUNC(__pkvm_close_module_registration),
|
||||
HANDLE_FUNC(__pkvm_prot_finalize),
|
||||
|
||||
HANDLE_FUNC(__pkvm_host_share_hyp),
|
||||
@ -1330,22 +1322,6 @@ static const hcall_t host_hcall[] = {
|
||||
#endif
|
||||
};
|
||||
|
||||
unsigned long pkvm_priv_hcall_limit __ro_after_init = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
|
||||
|
||||
int reset_pkvm_priv_hcall_limit(void)
|
||||
{
|
||||
unsigned long *addr;
|
||||
|
||||
if (pkvm_priv_hcall_limit == __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize)
|
||||
return -EACCES;
|
||||
|
||||
addr = hyp_fixmap_map(__hyp_pa(&pkvm_priv_hcall_limit));
|
||||
*addr = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
|
||||
hyp_fixmap_unmap();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
|
||||
{
|
||||
DECLARE_REG(unsigned long, id, host_ctxt, 0);
|
||||
@ -1365,7 +1341,7 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
|
||||
* returns -EPERM after the first call for a given CPU.
|
||||
*/
|
||||
if (static_branch_unlikely(&kvm_protected_mode_initialized))
|
||||
hcall_min = pkvm_priv_hcall_limit;
|
||||
hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
|
||||
|
||||
id -= KVM_HOST_SMCCC_ID(0);
|
||||
|
||||
|
@ -456,6 +456,9 @@ int __pkvm_iommu_finalize(int err)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
/* Err is not currently used in EL2.*/
|
||||
WARN_ON(err);
|
||||
|
||||
hyp_spin_lock(&iommu_registration_lock);
|
||||
if (!iommu_finalized)
|
||||
iommu_finalized = true;
|
||||
@ -463,15 +466,6 @@ int __pkvm_iommu_finalize(int err)
|
||||
ret = -EPERM;
|
||||
hyp_spin_unlock(&iommu_registration_lock);
|
||||
|
||||
/*
|
||||
* If finalize failed in EL1 driver for any reason, this means we can't trust the DMA
|
||||
* isolation. So we have to inform pKVM to properly protect itself.
|
||||
*/
|
||||
if (!ret && err)
|
||||
pkvm_handle_system_misconfiguration(NO_DMA_ISOLATION);
|
||||
|
||||
__pkvm_close_late_module_registration();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -77,15 +77,6 @@ void __pkvm_close_module_registration(void)
|
||||
*/
|
||||
}
|
||||
|
||||
int __pkvm_close_late_module_registration(void)
|
||||
{
|
||||
__pkvm_close_module_registration();
|
||||
|
||||
return reset_pkvm_priv_hcall_limit();
|
||||
|
||||
/* The fuse is blown! No way back until reset */
|
||||
}
|
||||
|
||||
const struct pkvm_module_ops module_ops = {
|
||||
.create_private_mapping = __pkvm_create_private_mapping,
|
||||
.alloc_module_va = __pkvm_alloc_module_va,
|
||||
|
@ -1570,14 +1570,3 @@ bool kvm_hyp_handle_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Notify pKVM about events that can undermine pKVM security.
|
||||
*/
|
||||
void pkvm_handle_system_misconfiguration(enum pkvm_system_misconfiguration event)
|
||||
{
|
||||
if (event == NO_DMA_ISOLATION)
|
||||
pkvm_poison_pvmfw_pages();
|
||||
else
|
||||
BUG();
|
||||
}
|
||||
|
@ -6,6 +6,9 @@
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
/* Did all IOMMUs register as expected. */
|
||||
static bool finalised;
|
||||
|
||||
static unsigned long dev_to_id(struct device *dev)
|
||||
{
|
||||
/* Use the struct device pointer as a unique identifier. */
|
||||
@ -59,6 +62,12 @@ EXPORT_SYMBOL(pkvm_iommu_resume);
|
||||
|
||||
int pkvm_iommu_finalize(int err)
|
||||
{
|
||||
return kvm_call_hyp_nvhe(__pkvm_iommu_finalize, err);
|
||||
finalised = !err;
|
||||
return kvm_call_hyp_nvhe(__pkvm_iommu_finalize, 0);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pkvm_iommu_finalize);
|
||||
|
||||
bool pkvm_iommu_finalized(void)
|
||||
{
|
||||
return finalised;
|
||||
}
|
||||
EXPORT_SYMBOL(pkvm_iommu_finalize);
|
||||
|
@ -448,6 +448,9 @@ static int __init pkvm_firmware_rmem_clear(void)
|
||||
return -EINVAL;
|
||||
|
||||
memset(addr, 0, size);
|
||||
/* Clear so user space doesn't get stale info via IOCTL. */
|
||||
pkvm_firmware_mem = NULL;
|
||||
|
||||
dcache_clean_poc((unsigned long)addr, (unsigned long)addr + size);
|
||||
memunmap(addr);
|
||||
return 0;
|
||||
@ -501,6 +504,10 @@ static int __init finalize_pkvm(void)
|
||||
if (pkvm_load_early_modules())
|
||||
pkvm_firmware_rmem_clear();
|
||||
|
||||
/* If no DMA protection. */
|
||||
if (!pkvm_iommu_finalized())
|
||||
pkvm_firmware_rmem_clear();
|
||||
|
||||
/*
|
||||
* Exclude HYP sections from kmemleak so that they don't get peeked
|
||||
* at, which would end badly once inaccessible.
|
||||
@ -578,26 +585,14 @@ int pkvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
|
||||
#ifdef CONFIG_MODULES
|
||||
static char early_pkvm_modules[COMMAND_LINE_SIZE] __initdata;
|
||||
|
||||
static int __init pkvm_enable_module_late_loading(void)
|
||||
{
|
||||
extern unsigned long kvm_nvhe_sym(pkvm_priv_hcall_limit);
|
||||
|
||||
WARN(1, "Loading pKVM modules with kvm-arm.protected_modules is deprecated\n"
|
||||
"Use kvm-arm.protected_modules=<module1>,<module2>");
|
||||
|
||||
/*
|
||||
* Move the limit to allow module loading HVCs. It will be moved back to
|
||||
* its original position in __pkvm_close_module_registration().
|
||||
*/
|
||||
kvm_nvhe_sym(pkvm_priv_hcall_limit) = __KVM_HOST_SMCCC_FUNC___pkvm_alloc_module_va;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init early_pkvm_modules_cfg(char *arg)
|
||||
{
|
||||
/*
|
||||
* Loading pKVM modules with kvm-arm.protected_modules is deprecated
|
||||
* Use kvm-arm.protected_modules=<module1>,<module2>
|
||||
*/
|
||||
if (!arg)
|
||||
return pkvm_enable_module_late_loading();
|
||||
return -EINVAL;
|
||||
|
||||
strscpy(early_pkvm_modules, arg, COMMAND_LINE_SIZE);
|
||||
|
||||
@ -800,7 +795,8 @@ int __pkvm_load_el2_module(struct module *this, unsigned long *token)
|
||||
int ret, i, secs_first;
|
||||
size_t offset, size;
|
||||
|
||||
if (!is_protected_kvm_enabled())
|
||||
/* The pKVM hyp only allows loading before it is fully initialized */
|
||||
if (!is_protected_kvm_enabled() || is_pkvm_initialized())
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(secs_map); i++) {
|
||||
|
@ -554,6 +554,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
|
||||
unsigned long vm_flags;
|
||||
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
|
||||
unsigned long addr = untagged_addr(far);
|
||||
#ifdef CONFIG_PER_VMA_LOCK
|
||||
struct vm_area_struct *vma;
|
||||
#endif
|
||||
|
||||
if (kprobe_page_fault(regs, esr))
|
||||
return 0;
|
||||
@ -611,6 +614,36 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
|
||||
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
|
||||
|
||||
#ifdef CONFIG_PER_VMA_LOCK
|
||||
if (!(mm_flags & FAULT_FLAG_USER))
|
||||
goto lock_mmap;
|
||||
|
||||
vma = lock_vma_under_rcu(mm, addr);
|
||||
if (!vma)
|
||||
goto lock_mmap;
|
||||
|
||||
if (!(vma->vm_flags & vm_flags)) {
|
||||
vma_end_read(vma);
|
||||
goto lock_mmap;
|
||||
}
|
||||
fault = handle_mm_fault(vma, addr & PAGE_MASK,
|
||||
mm_flags | FAULT_FLAG_VMA_LOCK, regs);
|
||||
vma_end_read(vma);
|
||||
|
||||
if (!(fault & VM_FAULT_RETRY)) {
|
||||
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
|
||||
goto done;
|
||||
}
|
||||
count_vm_vma_lock_event(VMA_LOCK_RETRY);
|
||||
|
||||
/* Quick path to respond to signals */
|
||||
if (fault_signal_pending(fault, regs)) {
|
||||
if (!user_mode(regs))
|
||||
goto no_context;
|
||||
return 0;
|
||||
}
|
||||
lock_mmap:
|
||||
#endif /* CONFIG_PER_VMA_LOCK */
|
||||
/*
|
||||
* As per x86, we may deadlock here. However, since the kernel only
|
||||
* validly references user space from well defined areas of the code,
|
||||
@ -654,6 +687,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
|
||||
}
|
||||
mmap_read_unlock(mm);
|
||||
|
||||
#ifdef CONFIG_PER_VMA_LOCK
|
||||
done:
|
||||
#endif
|
||||
/*
|
||||
* Handle the "normal" (no error) case first.
|
||||
*/
|
||||
|
@ -46,21 +46,19 @@ int mte_save_tags(struct page *page)
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool mte_restore_tags(swp_entry_t entry, struct page *page)
|
||||
void mte_restore_tags(swp_entry_t entry, struct page *page)
|
||||
{
|
||||
void *tags = xa_load(&mte_pages, entry.val);
|
||||
|
||||
if (!tags)
|
||||
return false;
|
||||
return;
|
||||
|
||||
/*
|
||||
* Test PG_mte_tagged again in case it was racing with another
|
||||
* set_pte_at().
|
||||
* Test PG_mte_tagged in case the tags were restored before
|
||||
* (e.g. CoW pages).
|
||||
*/
|
||||
if (!test_and_set_bit(PG_mte_tagged, &page->flags))
|
||||
mte_restore_page_tags(page_address(page), tags);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void mte_invalidate_tags(int type, pgoff_t offset)
|
||||
|
@ -109,7 +109,7 @@ ia64_init_addr_space (void)
|
||||
vma_set_anonymous(vma);
|
||||
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
|
||||
vma->vm_end = vma->vm_start + PAGE_SIZE;
|
||||
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
|
||||
vm_flags_init(vma, VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT);
|
||||
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
|
||||
mmap_write_lock(current->mm);
|
||||
if (insert_vm_struct(current->mm, vma)) {
|
||||
@ -127,8 +127,8 @@ ia64_init_addr_space (void)
|
||||
vma_set_anonymous(vma);
|
||||
vma->vm_end = PAGE_SIZE;
|
||||
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
|
||||
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
|
||||
VM_DONTEXPAND | VM_DONTDUMP;
|
||||
vm_flags_init(vma, VM_READ | VM_MAYREAD | VM_IO |
|
||||
VM_DONTEXPAND | VM_DONTDUMP);
|
||||
mmap_write_lock(current->mm);
|
||||
if (insert_vm_struct(current->mm, vma)) {
|
||||
mmap_write_unlock(current->mm);
|
||||
@ -272,7 +272,7 @@ static int __init gate_vma_init(void)
|
||||
vma_init(&gate_vma, NULL);
|
||||
gate_vma.vm_start = FIXADDR_USER_START;
|
||||
gate_vma.vm_end = FIXADDR_USER_END;
|
||||
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
|
||||
vm_flags_init(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
|
||||
gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
|
||||
|
||||
return 0;
|
||||
|
@ -149,7 +149,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
|
||||
struct vm_area_struct vma;
|
||||
|
||||
vma.vm_mm = tlb->mm;
|
||||
vma.vm_flags = 0;
|
||||
vm_flags_init(&vma, 0);
|
||||
if (tlb->fullmm) {
|
||||
flush_tlb_mm(tlb->mm);
|
||||
return;
|
||||
|
@ -393,6 +393,7 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
|
||||
{
|
||||
unsigned long gfn = memslot->base_gfn;
|
||||
unsigned long end, start = gfn_to_hva(kvm, gfn);
|
||||
unsigned long vm_flags;
|
||||
int ret = 0;
|
||||
struct vm_area_struct *vma;
|
||||
int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
|
||||
@ -409,12 +410,15 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
|
||||
ret = H_STATE;
|
||||
break;
|
||||
}
|
||||
/* Copy vm_flags to avoid partial modifications in ksm_madvise */
|
||||
vm_flags = vma->vm_flags;
|
||||
ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
|
||||
merge_flag, &vma->vm_flags);
|
||||
merge_flag, &vm_flags);
|
||||
if (ret) {
|
||||
ret = H_STATE;
|
||||
break;
|
||||
}
|
||||
vm_flags_reset(vma, vm_flags);
|
||||
start = vma->vm_end;
|
||||
} while (end > vma->vm_end);
|
||||
|
||||
|
@ -325,7 +325,7 @@ static int kvmppc_xive_native_mmap(struct kvm_device *dev,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
vma->vm_flags |= VM_IO | VM_PFNMAP;
|
||||
vm_flags_set(vma, VM_IO | VM_PFNMAP);
|
||||
vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
|
||||
|
||||
/*
|
||||
|
@ -156,7 +156,7 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
|
||||
* VM_NOHUGEPAGE and split them.
|
||||
*/
|
||||
for_each_vma_range(vmi, vma, addr + len) {
|
||||
vma->vm_flags |= VM_NOHUGEPAGE;
|
||||
vm_flags_set(vma, VM_NOHUGEPAGE);
|
||||
walk_page_vma(vma, &subpage_walk_ops, NULL);
|
||||
}
|
||||
}
|
||||
|
@ -474,6 +474,40 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
|
||||
if (is_exec)
|
||||
flags |= FAULT_FLAG_INSTRUCTION;
|
||||
|
||||
#ifdef CONFIG_PER_VMA_LOCK
|
||||
if (!(flags & FAULT_FLAG_USER))
|
||||
goto lock_mmap;
|
||||
|
||||
vma = lock_vma_under_rcu(mm, address);
|
||||
if (!vma)
|
||||
goto lock_mmap;
|
||||
|
||||
if (unlikely(access_pkey_error(is_write, is_exec,
|
||||
(error_code & DSISR_KEYFAULT), vma))) {
|
||||
vma_end_read(vma);
|
||||
goto lock_mmap;
|
||||
}
|
||||
|
||||
if (unlikely(access_error(is_write, is_exec, vma))) {
|
||||
vma_end_read(vma);
|
||||
goto lock_mmap;
|
||||
}
|
||||
|
||||
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
|
||||
vma_end_read(vma);
|
||||
|
||||
if (!(fault & VM_FAULT_RETRY)) {
|
||||
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
|
||||
goto done;
|
||||
}
|
||||
count_vm_vma_lock_event(VMA_LOCK_RETRY);
|
||||
|
||||
if (fault_signal_pending(fault, regs))
|
||||
return user_mode(regs) ? 0 : SIGBUS;
|
||||
|
||||
lock_mmap:
|
||||
#endif /* CONFIG_PER_VMA_LOCK */
|
||||
|
||||
/* When running in the kernel we expect faults to occur only to
|
||||
* addresses in user space. All other faults represent errors in the
|
||||
* kernel and should generate an OOPS. Unfortunately, in the case of an
|
||||
@ -550,6 +584,9 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
|
||||
|
||||
mmap_read_unlock(current->mm);
|
||||
|
||||
#ifdef CONFIG_PER_VMA_LOCK
|
||||
done:
|
||||
#endif
|
||||
if (unlikely(fault & VM_FAULT_ERROR))
|
||||
return mm_fault_error(regs, address, fault);
|
||||
|
||||
|
@ -525,7 +525,7 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
|
||||
pfn = paste_addr >> PAGE_SHIFT;
|
||||
|
||||
/* flags, page_prot from cxl_mmap(), except we want cachable */
|
||||
vma->vm_flags |= VM_IO | VM_PFNMAP;
|
||||
vm_flags_set(vma, VM_IO | VM_PFNMAP);
|
||||
vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
|
||||
|
||||
prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
|
||||
|
@ -291,7 +291,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
if (!(vma->vm_flags & VM_SHARED))
|
||||
return -EINVAL;
|
||||
|
||||
vma->vm_flags |= VM_IO | VM_PFNMAP;
|
||||
vm_flags_set(vma, VM_IO | VM_PFNMAP);
|
||||
vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
|
||||
|
||||
vma->vm_ops = &spufs_mem_mmap_vmops;
|
||||
@ -381,7 +381,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
if (!(vma->vm_flags & VM_SHARED))
|
||||
return -EINVAL;
|
||||
|
||||
vma->vm_flags |= VM_IO | VM_PFNMAP;
|
||||
vm_flags_set(vma, VM_IO | VM_PFNMAP);
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
|
||||
vma->vm_ops = &spufs_cntl_mmap_vmops;
|
||||
@ -1043,7 +1043,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
if (!(vma->vm_flags & VM_SHARED))
|
||||
return -EINVAL;
|
||||
|
||||
vma->vm_flags |= VM_IO | VM_PFNMAP;
|
||||
vm_flags_set(vma, VM_IO | VM_PFNMAP);
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
|
||||
vma->vm_ops = &spufs_signal1_mmap_vmops;
|
||||
@ -1179,7 +1179,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
if (!(vma->vm_flags & VM_SHARED))
|
||||
return -EINVAL;
|
||||
|
||||
vma->vm_flags |= VM_IO | VM_PFNMAP;
|
||||
vm_flags_set(vma, VM_IO | VM_PFNMAP);
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
|
||||
vma->vm_ops = &spufs_signal2_mmap_vmops;
|
||||
@ -1302,7 +1302,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
if (!(vma->vm_flags & VM_SHARED))
|
||||
return -EINVAL;
|
||||
|
||||
vma->vm_flags |= VM_IO | VM_PFNMAP;
|
||||
vm_flags_set(vma, VM_IO | VM_PFNMAP);
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
|
||||
vma->vm_ops = &spufs_mss_mmap_vmops;
|
||||
@ -1364,7 +1364,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
if (!(vma->vm_flags & VM_SHARED))
|
||||
return -EINVAL;
|
||||
|
||||
vma->vm_flags |= VM_IO | VM_PFNMAP;
|
||||
vm_flags_set(vma, VM_IO | VM_PFNMAP);
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
|
||||
vma->vm_ops = &spufs_psmap_mmap_vmops;
|
||||
@ -1424,7 +1424,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
if (!(vma->vm_flags & VM_SHARED))
|
||||
return -EINVAL;
|
||||
|
||||
vma->vm_flags |= VM_IO | VM_PFNMAP;
|
||||
vm_flags_set(vma, VM_IO | VM_PFNMAP);
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
|
||||
vma->vm_ops = &spufs_mfc_mmap_vmops;
|
||||
|
@ -16,6 +16,7 @@ config PPC_POWERNV
|
||||
select PPC_DOORBELL
|
||||
select MMU_NOTIFIER
|
||||
select FORCE_SMP
|
||||
select ARCH_SUPPORTS_PER_VMA_LOCK
|
||||
default y
|
||||
|
||||
config OPAL_PRD
|
||||
|
@ -21,6 +21,7 @@ config PPC_PSERIES
|
||||
select HOTPLUG_CPU
|
||||
select FORCE_SMP
|
||||
select SWIOTLB
|
||||
select ARCH_SUPPORTS_PER_VMA_LOCK
|
||||
default y
|
||||
|
||||
config PARAVIRT
|
||||
|
@ -115,6 +115,7 @@ config S390
|
||||
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
|
||||
select ARCH_SUPPORTS_HUGETLBFS
|
||||
select ARCH_SUPPORTS_NUMA_BALANCING
|
||||
select ARCH_SUPPORTS_PER_VMA_LOCK
|
||||
select ARCH_USE_BUILTIN_BSWAP
|
||||
select ARCH_USE_CMPXCHG_LOCKREF
|
||||
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
|
||||
|
@ -403,6 +403,30 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
|
||||
access = VM_WRITE;
|
||||
if (access == VM_WRITE)
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
#ifdef CONFIG_PER_VMA_LOCK
|
||||
if (!(flags & FAULT_FLAG_USER))
|
||||
goto lock_mmap;
|
||||
vma = lock_vma_under_rcu(mm, address);
|
||||
if (!vma)
|
||||
goto lock_mmap;
|
||||
if (!(vma->vm_flags & access)) {
|
||||
vma_end_read(vma);
|
||||
goto lock_mmap;
|
||||
}
|
||||
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
|
||||
vma_end_read(vma);
|
||||
if (!(fault & VM_FAULT_RETRY)) {
|
||||
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
|
||||
goto out;
|
||||
}
|
||||
count_vm_vma_lock_event(VMA_LOCK_RETRY);
|
||||
/* Quick path to respond to signals */
|
||||
if (fault_signal_pending(fault, regs)) {
|
||||
fault = VM_FAULT_SIGNAL;
|
||||
goto out;
|
||||
}
|
||||
lock_mmap:
|
||||
#endif /* CONFIG_PER_VMA_LOCK */
|
||||
mmap_read_lock(mm);
|
||||
|
||||
gmap = NULL;
|
||||
|

@ -2518,8 +2518,7 @@ static inline void thp_split_mm(struct mm_struct *mm)
VMA_ITERATOR(vmi, mm, 0);

for_each_vma(vmi, vma) {
vma->vm_flags &= ~VM_HUGEPAGE;
vma->vm_flags |= VM_NOHUGEPAGE;
vm_flags_mod(vma, VM_NOHUGEPAGE, VM_HUGEPAGE);
walk_page_vma(vma, &thp_split_walk_ops, NULL);
}
mm->def_flags |= VM_NOHUGEPAGE;
@ -2584,14 +2583,18 @@ int gmap_mark_unmergeable(void)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long vm_flags;
int ret;
VMA_ITERATOR(vmi, mm, 0);

for_each_vma(vmi, vma) {
/* Copy vm_flags to avoid partial modifications in ksm_madvise */
vm_flags = vma->vm_flags;
ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
MADV_UNMERGEABLE, &vma->vm_flags);
MADV_UNMERGEABLE, &vm_flags);
if (ret)
return ret;
vm_flags_reset(vma, vm_flags);
}
mm->def_flags &= ~VM_MERGEABLE;
return 0;

@ -27,6 +27,7 @@ config X86_64
# Options that are inherently 64-bit kernel only:
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA

@ -19,7 +19,7 @@ CONFIG_RCU_BOOST=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_IKHEADERS=m
CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
@ -61,6 +61,7 @@ CONFIG_NR_CPUS=32
CONFIG_EFI=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttynull stack_depot_disable=on cgroup_disable=pressure bootconfig"
CONFIG_HIBERNATION=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
@ -292,6 +293,7 @@ CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_BLK_DEV_UBLK=y
CONFIG_BLK_DEV_NVME=y
CONFIG_SRAM=y
CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
@ -312,6 +314,7 @@ CONFIG_IFB=y
CONFIG_MACSEC=y
CONFIG_TUN=y
CONFIG_VETH=y
CONFIG_LED_TRIGGER_PHY=y
CONFIG_AX88796B_PHY=y
CONFIG_CAN_VCAN=m
CONFIG_CAN_SLCAN=m
@ -646,6 +649,7 @@ CONFIG_UBSAN_LOCAL_BOUNDS=y
# CONFIG_UBSAN_BOOL is not set
# CONFIG_UBSAN_ENUM is not set
CONFIG_PAGE_OWNER=y
CONFIG_PER_VMA_LOCK_STATS=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_KFENCE=y

@ -391,7 +391,7 @@ void __init map_vsyscall(void)
}

if (vsyscall_mode == XONLY)
gate_vma.vm_flags = VM_EXEC;
vm_flags_init(&gate_vma, VM_EXEC);

BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
(unsigned long)VSYSCALL_ADDR);

@ -95,7 +95,7 @@ static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
return ret;

vma->vm_ops = &sgx_vm_ops;
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
vma->vm_private_data = encl;

return 0;
@ -105,7 +105,7 @@ static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma)

vma->vm_ops = &sgx_vepc_vm_ops;
/* Don't copy VMA in fork() */
vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY;
vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY);
vma->vm_private_data = vepc;

return 0;

@ -19,6 +19,7 @@
#include <linux/uaccess.h> /* faulthandler_disabled() */
#include <linux/efi.h> /* efi_crash_gracefully_on_page_fault()*/
#include <linux/mm_types.h>
#include <linux/mm.h> /* find_and_lock_vma() */

#include <asm/cpufeature.h> /* boot_cpu_has, ... */
#include <asm/traps.h> /* dotraplinkage, ... */
@ -1354,6 +1355,38 @@ void do_user_addr_fault(struct pt_regs *regs,
}
#endif

#ifdef CONFIG_PER_VMA_LOCK
if (!(flags & FAULT_FLAG_USER))
goto lock_mmap;

vma = lock_vma_under_rcu(mm, address);
if (!vma)
goto lock_mmap;

if (unlikely(access_error(error_code, vma))) {
vma_end_read(vma);
goto lock_mmap;
}
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
vma_end_read(vma);

if (!(fault & VM_FAULT_RETRY)) {
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);

/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
kernelmode_fixup_or_oops(regs, error_code, address,
SIGBUS, BUS_ADRERR,
ARCH_DEFAULT_PKEY);
return;
}
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */

/*
* Kernel-mode access to the user address space should only occur
* on well-defined single instructions listed in the exception
@ -1454,6 +1487,9 @@ void do_user_addr_fault(struct pt_regs *regs,
}

mmap_read_unlock(mm);
#ifdef CONFIG_PER_VMA_LOCK
done:
#endif
if (likely(!(fault & VM_FAULT_ERROR)))
return;
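Illustrative sketch (not part of the diff): the s390 and x86 hunks above add the same per-VMA-lock fast path. A simplified, hypothetical helper showing the shape of that path, with the architecture-specific access checks and the VMA_LOCK statistics omitted, could look like this (assumes CONFIG_PER_VMA_LOCK and the generic lock_vma_under_rcu()/vma_end_read() API):

#include <linux/mm.h>

/*
 * Hypothetical helper, for illustration only: try to service a fault under
 * the per-VMA read lock, without taking mmap_lock. Returns VM_FAULT_RETRY
 * when the caller should fall back to the mmap_read_lock() slow path
 * (no VMA found, or the locked attempt itself asked for a retry).
 */
static vm_fault_t try_vma_locked_fault(struct mm_struct *mm, unsigned long address,
				       unsigned int flags, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		return VM_FAULT_RETRY;

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);
	return fault;
}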

@ -1047,7 +1047,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,

ret = reserve_pfn_range(paddr, size, prot, 0);
if (ret == 0 && vma)
vma->vm_flags |= VM_PAT;
vm_flags_set(vma, VM_PAT);
return ret;
}

@ -1093,7 +1093,7 @@ void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
* can be for the entire vma (in which case pfn, size are zero).
*/
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size)
unsigned long size, bool mm_wr_locked)
{
resource_size_t paddr;
unsigned long prot;
@ -1112,8 +1112,12 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
size = vma->vm_end - vma->vm_start;
}
free_pfn_range(paddr, size);
if (vma)
vma->vm_flags &= ~VM_PAT;
if (vma) {
if (mm_wr_locked)
vm_flags_clear(vma, VM_PAT);
else
__vm_flags_mod(vma, 0, VM_PAT);
}
}

/*
@ -1123,7 +1127,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
*/
void untrack_pfn_moved(struct vm_area_struct *vma)
{
vma->vm_flags &= ~VM_PAT;
vm_flags_clear(vma, VM_PAT);
}

pgprot_t pgprot_writecombine(pgprot_t prot)

@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
vm_flags_init(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
gate_vma.vm_page_prot = PAGE_READONLY;

return 0;
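Most of the remaining driver hunks in this diff are mechanical conversions from open-coded vma->vm_flags arithmetic to the vm_flags_set()/vm_flags_clear()/vm_flags_mod()/vm_flags_reset() helpers. A hypothetical mmap handler (sketch only, not taken from any file in this diff) shows the before/after pattern; the helpers expect the mmap lock to already be held for write, which is the case inside an mmap() callback:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver mmap callback, for illustration only. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/*
	 * Previously written as:
	 *	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	 *	vma->vm_flags &= ~VM_MAYWRITE;
	 * vm_flags_mod() sets the first mask and clears the second in one call.
	 */
	vm_flags_mod(vma, VM_IO | VM_DONTEXPAND, VM_MAYWRITE);
	return 0;
}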

@ -58,6 +58,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_getrq);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_issue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_merge);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_requeue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_complete);

DEFINE_IDA(blk_queue_ida);

@ -4,6 +4,7 @@

#include <linux/percpu.h>
#include <linux/hashtable.h>
#include <linux/android_kabi.h>

struct io_cq;
struct elevator_type;
@ -46,6 +47,11 @@ struct elevator_mq_ops {
struct request *(*next_request)(struct request_queue *, struct request *);
void (*init_icq)(struct io_cq *);
void (*exit_icq)(struct io_cq *);

ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};

#define ELV_NAME_MAX (16)
@ -82,6 +88,9 @@ struct elevator_type
/* managed by elevator core */
char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
struct list_head list;

ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};

#define ELV_HASH_BITS 6

@ -1,6 +1,6 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.constants

KMI_GENERATION=7
KMI_GENERATION=8

LLVM=1
DEPMOD=depmod

@ -310,7 +310,7 @@ pfrt_log_mmap(struct file *file, struct vm_area_struct *vma)
return -EROFS;

/* changing from read to write with mprotect is not allowed */
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);

pfrt_log_dev = to_pfrt_log_dev(file);

@ -66,6 +66,7 @@
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>
#include <linux/android_vendor.h>

#include <uapi/linux/sched/types.h>
@ -3113,6 +3114,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
ktime_t t_start_time = ktime_get();
char *secctx = NULL;
u32 secctx_sz = 0;
struct list_head sgc_head;
@ -3358,6 +3360,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

t->debug_id = t_debug_id;
t->start_time = t_start_time;

if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
@ -3382,6 +3385,8 @@ static void binder_transaction(struct binder_proc *proc,
t->from = thread;
else
t->from = NULL;
t->from_pid = proc->pid;
t->from_tid = thread->pid;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
@ -5823,8 +5828,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
return -EPERM;
}
vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
@ -6181,18 +6185,20 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
{
struct binder_proc *to_proc;
struct binder_buffer *buffer = t->buffer;
ktime_t current_time = ktime_get();

spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
"%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
"%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d elapsed %lldms",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
t->from_pid,
t->from_tid,
to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
t->code, t->flags, t->priority.sched_policy,
t->priority.prio, t->need_reply);
t->priority.prio, t->need_reply,
ktime_ms_delta(current_time, t->start_time));
spin_unlock(&t->lock);

if (proc != to_proc) {

@ -552,6 +552,8 @@ struct binder_transaction {
int debug_id;
struct binder_work work;
struct binder_thread *from;
pid_t from_pid;
pid_t from_tid;
struct binder_transaction *from_parent;
struct binder_proc *to_proc;
struct binder_thread *to_thread;
@ -567,6 +569,7 @@ struct binder_transaction {
bool set_priority_called;
bool is_nested;
kuid_t sender_euid;
ktime_t start_time;
struct list_head fd_fixups;
binder_uintptr_t security_ctx;
/**
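The binder hunks above stamp each transaction with a start time and print the elapsed milliseconds in the transaction dump. The timing itself is plain ktime arithmetic; as a standalone, hypothetical sketch (not binder code):

#include <linux/ktime.h>

/* Hypothetical helper: how long an operation started at @start has been pending, in ms. */
static s64 example_elapsed_ms(ktime_t start)
{
	return ktime_ms_delta(ktime_get(), start);
}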

@ -65,6 +65,7 @@
#include <trace/hooks/signal.h>
#include <trace/hooks/psci.h>
#include <trace/hooks/psi.h>
#include <trace/hooks/bl_hib.h>

/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
@ -118,6 +119,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_disable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_enable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_attach);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_online);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_meminfo_cache_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_mem_available_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_meminfo_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_oops_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_oops_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_size_check);
@ -227,6 +231,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_panic_unhandled);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_arm64_serror_panic);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_disable_thermal_cooling_stats);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_v3_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_connect);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_audio_usb_offload_disconnect);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kswapd_per_node);
@ -250,6 +255,11 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_read_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_preset);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_id_remove);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_offline);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_online);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_alloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_alloc_new_buf_locked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_reply);
@ -265,6 +275,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_psci_cpu_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_group);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_acct_update_power);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_modify_scan_control);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_should_continue_reclaim);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_file_is_tiny_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_mmc_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_mmc_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_signal);
@ -278,3 +291,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_thermal_register);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_thermal_unregister);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_thermal_zone_device);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_thermal_power_cap);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_reclaim_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_failure_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_pageout_swap_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_swapin_walk_pmd_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_process_madvise_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_smaps_pte_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_smap);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ctl_dirty_rate);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_hibernation_swap);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_cpu_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_hib_resume_bdev);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_encrypt_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_init_aes_encrypt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_skip_swap_map_write);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_post_image_save);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_pageout_skip);

@ -206,7 +206,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
refcount_set(&vdata->refcnt, 1);
vma->vm_private_data = vdata;

vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
if (vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;

@ -2148,6 +2148,7 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
arch_set_freq_scale(policy->related_cpus, freq,
policy->cpuinfo.max_freq);
cpufreq_stats_record_transition(policy, freq);
cpufreq_times_record_transition(policy, freq);
trace_android_rvh_cpufreq_transition(policy);

if (trace_cpu_frequency_enabled()) {

@ -3333,7 +3333,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
return -EINVAL;
}

vma->vm_flags |= VM_IO;
vm_flags_set(vma, VM_IO);

return remap_pfn_range(vma, vma->vm_start,
phys_base >> PAGE_SHIFT,

@ -308,7 +308,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
return rc;

vma->vm_ops = &dax_vm_ops;
vma->vm_flags |= VM_HUGEPAGE;
vm_flags_set(vma, VM_HUGEPAGE);
return 0;
}

@ -339,6 +339,16 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
return events;
}

static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
{
spin_lock(&dmabuf->name_lock);
kfree(dmabuf->name);
dmabuf->name = name;
spin_unlock(&dmabuf->name_lock);

return 0;
}

/**
* dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
* It could support changing the name of the dma-buf if the same
@ -352,19 +362,35 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
* devices, return -EBUSY.
*
*/
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
long dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
{
long ret = 0;
char *buf = kstrndup(name, DMA_BUF_NAME_LEN, GFP_KERNEL);

if (!buf)
return -ENOMEM;

ret = _dma_buf_set_name(dmabuf, buf);
if (ret)
kfree(buf);

return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_set_name);

static long dma_buf_set_name_user(struct dma_buf *dmabuf, const char __user *buf)
{
char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
long ret = 0;

if (IS_ERR(name))
return PTR_ERR(name);

spin_lock(&dmabuf->name_lock);
kfree(dmabuf->name);
dmabuf->name = name;
spin_unlock(&dmabuf->name_lock);
ret = _dma_buf_set_name(dmabuf, name);
if (ret)
kfree(name);

return 0;
return ret;
}

#if IS_ENABLED(CONFIG_SYNC_FILE)
@ -513,7 +539,7 @@ static long dma_buf_ioctl(struct file *file,

case DMA_BUF_SET_NAME_A:
case DMA_BUF_SET_NAME_B:
return dma_buf_set_name(dmabuf, (const char __user *)arg);
return dma_buf_set_name_user(dmabuf, (const char __user *)arg);

#if IS_ENABLED(CONFIG_SYNC_FILE)
case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
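The dma-buf hunks above split the SET_NAME ioctl path (dma_buf_set_name_user()) from a newly exported in-kernel dma_buf_set_name(). A minimal, hypothetical caller, assuming the signature shown in the hunk (the helper duplicates the string itself):

#include <linux/dma-buf.h>
#include <linux/printk.h>

/* Hypothetical exporter-side helper, for illustration only. */
static void example_label_dmabuf(struct dma_buf *dmabuf)
{
	long ret = dma_buf_set_name(dmabuf, "example-heap-buffer");

	if (ret)
		pr_warn("dma_buf_set_name failed: %ld\n", ret);
}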

@ -201,7 +201,7 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
if (rc < 0)
return rc;

vma->vm_flags |= VM_DONTCOPY;
vm_flags_set(vma, VM_DONTCOPY);
pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/rwsem.h>
#include <linux/android_kabi.h>

#define GPIOCHIP_NAME "gpiochip"

@ -75,6 +76,7 @@ struct gpio_device {
*/
struct list_head pin_ranges;
#endif
ANDROID_KABI_RESERVE(1);
};

/* gpio suffixes used for ACPI and device tree lookup */
@ -100,6 +102,7 @@ struct gpio_array {
struct gpio_chip *chip;
unsigned long *get_mask;
unsigned long *set_mask;
ANDROID_KABI_RESERVE(1);
unsigned long invert_mask[];
};

@ -179,6 +182,7 @@ struct gpio_desc {
/* debounce period in microseconds */
unsigned int debounce_period_us;
#endif
ANDROID_KABI_RESERVE(1);
};

#define gpiod_not_found(desc) (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)

@ -256,7 +256,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
*/
if (is_cow_mapping(vma->vm_flags) &&
!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);

return drm_gem_ttm_mmap(obj, vma);
}

@ -2888,8 +2888,8 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,

address = dev->adev->rmmio_remap.bus_addr;

vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP);

vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

@ -159,8 +159,8 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
address = kfd_get_process_doorbells(pdd);
if (!address)
return -ENOMEM;
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP);

vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

@ -1049,8 +1049,8 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
pfn = __pa(page->kernel_address);
pfn >>= PAGE_SHIFT;

vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
| VM_DONTDUMP | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
| VM_DONTDUMP | VM_PFNMAP);

pr_debug("Mapping signal page\n");
pr_debug(" start user address == 0x%08lx\n", vma->vm_start);

@ -2031,8 +2031,8 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
return -ENOMEM;
}

vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
/* Mapping pages to user process */
return remap_pfn_range(vma, vma->vm_start,
PFN_DOWN(__pa(qpd->cwsr_kaddr)),

@ -1047,7 +1047,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
goto err_drm_gem_object_put;
}

vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
}

@ -530,8 +530,7 @@ int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *
* the whole buffer.
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_DONTEXPAND;
vm_flags_mod(vma, VM_DONTEXPAND, VM_PFNMAP);

if (dma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

@ -636,7 +636,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
if (ret)
return ret;

vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

@ -476,7 +476,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)

if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
@ -492,7 +492,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)

vma->vm_ops = &drm_vm_dma_ops;

vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);

drm_vm_open_locked(dev, vma);
return 0;
@ -560,7 +560,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;

if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
@ -628,7 +628,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
default:
return -EINVAL; /* This should never happen. */
}
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);

drm_vm_open_locked(dev, vma);
return 0;

@ -130,7 +130,7 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
{
pgprot_t vm_page_prot;

vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);

vm_page_prot = vm_get_page_prot(vma->vm_flags);

@ -274,7 +274,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
unsigned long vm_size;
int ret;

vma->vm_flags &= ~VM_PFNMAP;
vm_flags_clear(vma, VM_PFNMAP);
vma->vm_pgoff = 0;

vm_size = vma->vm_end - vma->vm_start;
@ -368,7 +368,7 @@ static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct
if (obj->import_attach)
return dma_buf_mmap(obj->dma_buf, vma, 0);

vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);

DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
exynos_gem->flags);

@ -139,7 +139,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
*/
vma->vm_ops = &psbfb_vm_ops;
vma->vm_private_data = (void *)fb;
vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}

@ -102,7 +102,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
buf = dev_priv->mmap_buffer;
buf_priv = buf->dev_private;

vma->vm_flags |= VM_DONTCOPY;
vm_flags_set(vma, VM_DONTCOPY);

buf_priv->currently_mapped = I810_BUF_MAPPED;

@ -979,7 +979,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
i915_gem_object_put(obj);
return -EINVAL;
}
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
}

anon = mmap_singleton(to_i915(dev));
@ -988,7 +988,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return PTR_ERR(anon);
}

vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);

/*
* We keep the ref on mmo->obj, not vm_file, but we require

@ -158,7 +158,7 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
* dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

@ -1012,7 +1012,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);

vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

return 0;

@ -543,8 +543,7 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);

vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

if (omap_obj->flags & OMAP_BO_WC) {
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

@ -250,8 +250,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
* We allocated a struct page table for rk_obj, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_flags &= ~VM_PFNMAP;
vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);

vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

@ -573,7 +573,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
* and set the vm_pgoff (used as a fake buffer offset by DRM)
* to 0 as we want to map the whole buffer.
*/
vma->vm_flags &= ~VM_PFNMAP;
vm_flags_clear(vma, VM_PFNMAP);
vma->vm_pgoff = 0;

err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
@ -587,8 +587,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
} else {
pgprot_t prot = vm_get_page_prot(vma->vm_flags);

vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags &= ~VM_PFNMAP;
vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

vma->vm_page_prot = pgprot_writecombine(prot);
}

@ -468,8 +468,7 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)

vma->vm_private_data = bo;

vma->vm_flags |= VM_PFNMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);

@ -46,7 +46,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
return -EINVAL;

vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_ops = &virtio_gpu_vram_vm_ops;

@ -97,7 +97,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)

/* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
if (!is_cow_mapping(vma->vm_flags))
vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
vm_flags_mod(vma, VM_PFNMAP, VM_MIXEDMAP);

ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */

@ -70,8 +70,7 @@ static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
* vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
* the whole buffer.
*/
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
vma->vm_pgoff = 0;

/*

@ -32,6 +32,7 @@
#include <linux/hiddev.h>
#include <linux/hid-debug.h>
#include <linux/hidraw.h>
#include <linux/uhid.h>

#include "hid-ids.h"

@ -261,6 +262,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
{
struct hid_report *report;
struct hid_field *field;
unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
unsigned int usages;
unsigned int offset;
unsigned int i;
@ -291,8 +293,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;

if (parser->device->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;

/* Total size check: Allow for possible report index byte */
if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
if (report->size > (max_buffer_size - 1) << 3) {
hid_err(parser->device, "report is too long\n");
return -1;
}
@ -1966,6 +1971,7 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
struct hid_driver *hdrv;
int max_buffer_size = HID_MAX_BUFFER_SIZE;
u32 rsize, csize = size;
u8 *cdata = data;
int ret = 0;
@ -1981,10 +1987,13 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *

rsize = hid_compute_report_size(report);

if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
rsize = HID_MAX_BUFFER_SIZE - 1;
else if (rsize > HID_MAX_BUFFER_SIZE)
rsize = HID_MAX_BUFFER_SIZE;
if (hid->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;

if (report_enum->numbered && rsize >= max_buffer_size)
rsize = max_buffer_size - 1;
else if (rsize > max_buffer_size)
rsize = max_buffer_size;

if (csize < rsize) {
dbg_hid("report %d is too short, (%d < %d)\n", report->id,
@ -2387,7 +2396,12 @@ int hid_hw_raw_request(struct hid_device *hdev,
unsigned char reportnum, __u8 *buf,
size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
{
if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;

if (hdev->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;

if (len < 1 || len > max_buffer_size || !buf)
return -EINVAL;

return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
@ -2406,7 +2420,12 @@ EXPORT_SYMBOL_GPL(hid_hw_raw_request);
*/
int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
{
if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;

if (hdev->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;

if (len < 1 || len > max_buffer_size || !buf)
return -EINVAL;

if (hdev->ll_driver->output_report)

@ -1264,7 +1264,7 @@ static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
if (vma_pages(vma) != 1)
return -EINVAL;

vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
vm_flags_set(vma, VM_IO | VM_DONTDUMP | VM_DONTEXPAND);
vma->vm_ops = &cs_char_vm_ops;
vma->vm_private_data = file->private_data;

@ -1659,7 +1659,7 @@ static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
atomic_dec(&msc->user_count);

vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY);
vma->vm_ops = &msc_mmap_ops;
return ret;
}

@ -715,7 +715,7 @@ static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
pm_runtime_get_sync(&stm->dev);

vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &stm_mmap_vmops;
vm_iomap_memory(vma, phys, size);

@ -403,7 +403,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
ret = -EPERM;
goto done;
}
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
addr = vma->vm_start;
for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
memlen = uctxt->egrbufs.buffers[i].len;
@ -528,7 +528,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
goto done;
}

vma->vm_flags = flags;
vm_flags_reset(vma, flags);
hfi1_cdbg(PROC,
"%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
ctxt, subctxt, type, mapio, vmf, memaddr, memlen,

@ -2091,7 +2091,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,

if (vma->vm_flags & (VM_WRITE | VM_EXEC))
return -EPERM;
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);

if (!dev->mdev->clock_info)
return -EOPNOTSUPP;
@ -2315,7 +2315,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm

if (vma->vm_flags & VM_WRITE)
return -EPERM;
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);

/* Don't expose to user-space information it shouldn't have */
if (PAGE_SIZE > 4096)

@ -733,7 +733,7 @@ static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
}

/* don't allow them to later change with mprotect */
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
}

pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
@ -769,7 +769,7 @@ static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
phys = dd->physaddr + ureg;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
ret = io_remap_pfn_range(vma, vma->vm_start,
phys >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
@ -810,8 +810,7 @@ static int mmap_piobufs(struct vm_area_struct *vma,
* don't allow them to later change to readable with mprotect (for when
* not initially mapped readable, as is normally the case)
*/
vma->vm_flags &= ~VM_MAYREAD;
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND, VM_MAYREAD);

/* We used PAT if wc_cookie == 0 */
if (!dd->wc_cookie)
@ -852,7 +851,7 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
goto bail;
}
/* don't allow them to later change to writable with mprotect */
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);

start = vma->vm_start;

@ -944,7 +943,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
* Don't allow permission to later change to writable
* with mprotect.
*/
vma->vm_flags &= ~VM_MAYWRITE;
vm_flags_clear(vma, VM_MAYWRITE);
} else
goto bail;
len = vma->vm_end - vma->vm_start;
@ -955,7 +954,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,

vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
vma->vm_ops = &qib_file_vm_ops;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
ret = 1;

bail:

@ -672,7 +672,7 @@ int usnic_ib_mmap(struct ib_ucontext *context,
usnic_dbg("\n");

us_ibdev = to_usdev(context->device);
vma->vm_flags |= VM_IO;
vm_flags_set(vma, VM_IO);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vfid = vma->vm_pgoff;
usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",

@ -408,7 +408,7 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
}

/* Map UAR to kernel space, VM_LOCKED? */
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
vma->vm_page_prot))

@ -211,10 +211,11 @@ static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
void gic_v3_dist_wait_for_rwp(void)
{
gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}
EXPORT_SYMBOL_GPL(gic_v3_dist_wait_for_rwp);

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
@ -361,7 +362,7 @@ static void gic_mask_irq(struct irq_data *d)
if (gic_irq_in_rdist(d))
gic_redist_wait_for_rwp();
else
gic_dist_wait_for_rwp();
gic_v3_dist_wait_for_rwp();
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
@ -817,7 +818,7 @@ static bool gic_has_group0(void)
return val != 0;
}

static void __init gic_dist_init(void)
void gic_v3_dist_init(void)
{
unsigned int i;
u64 affinity;
@ -826,7 +827,7 @@ static void __init gic_dist_init(void)

/* Disable the distributor */
writel_relaxed(0, base + GICD_CTLR);
gic_dist_wait_for_rwp();
gic_v3_dist_wait_for_rwp();

/*
* Configure SPIs as non-secure Group-1. This will only matter
@ -863,7 +864,7 @@ static void __init gic_dist_init(void)

/* Enable distributor with ARE, Group1, and wait for it to drain */
writel_relaxed(val, base + GICD_CTLR);
gic_dist_wait_for_rwp();
gic_v3_dist_wait_for_rwp();

/*
* Set all global interrupts to the boot CPU only. ARE must be
@ -880,6 +881,7 @@ static void __init gic_dist_init(void)
gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}
}
EXPORT_SYMBOL_GPL(gic_v3_dist_init);

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
@ -1175,7 +1177,7 @@ static int gic_dist_supports_lpis(void)
!gicv3_nolpi);
}

static void gic_cpu_init(void)
void gic_v3_cpu_init(void)
{
void __iomem *rbase;
int i;
@ -1202,6 +1204,7 @@ static void gic_cpu_init(void)
/* initialise system registers */
gic_cpu_sys_reg_init();
}
EXPORT_SYMBOL_GPL(gic_v3_cpu_init);

#ifdef CONFIG_SMP

@ -1210,7 +1213,7 @@ static void gic_cpu_init(void)

static int gic_starting_cpu(unsigned int cpu)
{
gic_cpu_init();
gic_v3_cpu_init();

if (gic_dist_supports_lpis())
its_cpu_init();
@ -1396,13 +1399,21 @@ static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

#ifdef CONFIG_PM
static void gic_resume(void)
void gic_v3_resume(void)
{
trace_android_vh_gic_resume(&gic_data);
}
EXPORT_SYMBOL_GPL(gic_v3_resume);

static int gic_v3_suspend(void)
{
trace_android_vh_gic_v3_suspend(&gic_data);
return 0;
}

static struct syscore_ops gic_syscore_ops = {
.resume = gic_resume,
.resume = gic_v3_resume,
.suspend = gic_v3_suspend,
};

static void gic_syscore_init(void)
@ -1412,6 +1423,8 @@ static void gic_syscore_init(void)

#else
static inline void gic_syscore_init(void) { }
void gic_v3_resume(void) { }
static int gic_v3_suspend(void) { return 0; }
#endif

@ -1899,8 +1912,8 @@ static int __init gic_init_bases(void __iomem *dist_base,

gic_update_rdist_properties();

gic_dist_init();
gic_cpu_init();
gic_v3_dist_init();
gic_v3_cpu_init();
gic_smp_init();
gic_cpu_pm_init();
gic_syscore_init();

@ -165,8 +165,6 @@ int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client
if (ret)
goto err_tx_ghrsc;

enable_irq_wake(msgq->tx_ghrsc->irq);

tasklet_setup(&msgq->txdone_tasklet, gh_msgq_txdone_tasklet);
}

@ -177,8 +175,6 @@ int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client
IRQF_ONESHOT, "gh_msgq_rx", msgq);
if (ret)
goto err_tx_irq;

enable_irq_wake(msgq->rx_ghrsc->irq);
}

return 0;
Some files were not shown because too many files have changed in this diff.