Merge branch 'android14-6.1' into branch 'android14-6.1-lts'

This catches the -lts branch up with all of the recent changes that have
gone into the non-lts branch, INCLUDING the ABI update which we want
here to ensure that we do NOT break any newly added dependent symbols
(and to bring back in the reverts that were required before the ABI
break).

This includes the following commits:

88959a53f4 ANDROID: 6/16/2023 KMI update
21bc72f339 UPSTREAM: binder: fix UAF of alloc->vma in race with munmap()
62c6dbdccd UPSTREAM: binder: add lockless binder_alloc_(set|get)_vma()
3cac174682 UPSTREAM: Revert "android: binder: stop saving a pointer to the VMA"
dadb40b436 UPSTREAM: Revert "binder_alloc: add missing mmap_lock calls when using the VMA"
fcdbf469c5 UPSTREAM: tipc: check the bearer min mtu properly when setting it by netlink
e48a801737 UPSTREAM: tipc: do not update mtu if msg_max is too small in mtu negotiation
461038ba5c UPSTREAM: tipc: add tipc_bearer_min_mtu to calculate min mtu
d0be9e79ee UPSTREAM: Revert "usb: gadget: udc: core: Invoke usb_gadget_connect only when started"
66a5c03404 UPSTREAM: ASoC: fsl_micfil: Fix error handler with pm_runtime_enable
6e721f991f UPSTREAM: platform: Provide a remove callback that returns no value
07a8c09137 UPSTREAM: firmware: arm_sdei: Fix sleep from invalid context BUG
b065972b7b UPSTREAM: uapi/linux/const.h: prefer ISO-friendly __typeof__
aaf6ccb6f3 UPSTREAM: posix-cpu-timers: Implement the missing timer_wait_running callback
f3b712fcb5 ANDROID: GKI: reserve extra arm64 cpucaps for ABI preservation
d1c7974b1f ANDROID: arm64: errata: Add WORKAROUND_NXP_ERR050104 cpucaps
b489c53001 ANDROID: KVM: arm64: Allow setting {P,U}XN in stage-2 PTEs
b7aff5c603 ANDROID: KVM: arm64: Restrict host-to-hyp MMIO donations
f5f8c19f6c ANDROID: KVM: arm64: Allow state changes of MMIO pages
4ddb4ed818 ANDROID: KVM: arm64: Allow MMIO perm changes from modules
5d0225cdf0 ANDROID: KVM: arm64: Don't allocate from handle_host_mem_abort
5136a28ab6 ANDROID: KVM: arm64: Donate IOMMU regions to pKVM
23b62ec342 ANDROID: KVM: arm64: Map MMIO donation as device at EL2
adc78128b3 ANDROID: KVM: arm64: Don't recycle pages from host mem abort
452ef5ae7b ANDROID: KVM: arm64: Pin host stage-2 tables
a8bba661e3 ANDROID: KVM: arm64: Move kvm_pte_follow() to header
04ddc7eec0 ANDROID: KVM: arm64: Pre-populate host stage2
0b6736459a ANDROID: KVM: arm64: Fix the host ownership later
cf2d193d9b ANDROID: KVM: arm64: Don't recycle non-default PTEs
a701418f2f ANDROID: KVM: arm64: Introduce kvm_pgtable_stage2_reclaim_leaves
5224fbb5b8 ANDROID: GKI: enable CONFIG_BLK_CGROUP_IOCOST
fe10954309 BACKPORT: FROMGIT: usb: core: add sysfs entry for usb device state
251efd6587 ANDROID: GKI: Update symbols to symbol list
71761b36c3 ANDROID: HID; Over-ride default maximum buffer size when using UHID
c3f3dc31f9 UPSTREAM: maple_tree: make maple state reusable after mas_empty_area()
d31ddcdbb8 Revert "Revert "mm/mmap: regression fix for unmapped_area{_topdown}""
6852d5ccb9 FROMLIST: scsi: ufs: ufs-mediatek: Set UFSHCD_QUIRK_MCQ_BROKEN_RTC quirk
274d5965b8 FROMLIST: scsi: ufs: ufs-mediatek: Set UFSHCD_QUIRK_MCQ_BROKEN_INTR quirk
0171df9359 FROMLIST: scsi: ufs: core: Add host quirk UFSHCD_QUIRK_MCQ_BROKEN_RTC
27b569b568 FROMLIST: scsi: ufs: core: Add host quirk UFSHCD_QUIRK_MCQ_BROKEN_INTR
46554e08b3 ANDROID: GKI: Add symbols and update symbol list for Unisoc
e59544b857 UPSTREAM: mailbox: mailbox-test: fix a locking issue in mbox_test_message_write()
749386a02e UPSTREAM: mailbox: mailbox-test: Fix potential double-free in mbox_test_message_write()
9c6866c99b ANDROID: dma-buf: support users to change dma_buf.name
09e0f85096 ANDROID: set CONFIG_IKHEADERS=m for gki_defconfig.
7641ff0a30 ANDROID: mm: Avoid merging cma with others
9b16d612bf ANDROID: cpufreq: times: record fast switch frequency transitions
d645236cfd ANDROID: fix kernelci build failure in vmscan.c
8a609c5eb4 ANDROID: ABI: Update symbol list for Exynos SoC
25058fea51 ANDROID: gki_defconfig: enable NVME
e8f6ddbd4c ANDROID: ABI: Update symbols to unisoc whitelist for the scheduler
82a015300e UPSTREAM: usb: gadget: uvc: queue empty isoc requests if no video buffer is available
855f25e32c ANDROID: GKI: Update symbol list for xiaomi
87f8c82651 ANDROID: vendor_hooks:vendor hook for madvise_cold_or_pageout_pte_range.
f73aafc29b ANDROID: ABI: Update pixel symbol list
a0d46c1dd1 ANDROID: KVM: arm64: iommu: Erase pvmfw from EL1 if possible

Change-Id: I76a89d70290eb13ac671ccfdab80d80dad3030eb
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

@ -264,6 +264,16 @@ Description:
attached to the port will not be detected, initialized,
or enumerated.
What: /sys/bus/usb/devices/.../<hub_interface>/port<X>/state
Date: June 2023
Contact: Roy Luo <royluo@google.com>
Description:
Indicates current state of the USB device attached to the port.
Valid states are: 'not-attached', 'attached', 'powered',
'reconnecting', 'unauthenticated', 'default', 'addressed',
'configured', and 'suspended'. This file supports poll() to
monitor the state change from user space.
What: /sys/bus/usb/devices/.../power/usb2_lpm_l1_timeout
Date: May 2013
Contact: Mathias Nyman <mathias.nyman@linux.intel.com>
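
The new state attribute is consumed with the usual sysfs poll idiom: read once to arm the notification, wait for POLLPRI, then re-read from offset zero. A minimal user-space sketch, not part of the diff, with an illustrative device path:

    /* Watch a USB port's "state" attribute; the path is hypothetical. */
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[32];
            int fd = open("/sys/bus/usb/devices/usb1/1-0:1.0/port1/state", O_RDONLY);
            struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
            ssize_t n;

            if (fd < 0)
                    return 1;
            pread(fd, buf, sizeof(buf) - 1, 0);     /* arms the notification */
            while (poll(&pfd, 1, -1) > 0) {
                    n = pread(fd, buf, sizeof(buf) - 1, 0);
                    if (n > 0) {
                            buf[n] = '\0';
                            printf("port state: %s", buf);
                    }
            }
            close(fd);
            return 0;
    }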

File diff suppressed because it is too large.


@ -904,6 +904,7 @@
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_cpuidle_psci_enter
__traceiter_android_vh_cpuidle_psci_exit
__traceiter_android_vh_show_mem
__traceiter_device_pm_callback_end
__traceiter_device_pm_callback_start
__traceiter_suspend_resume
@ -911,6 +912,7 @@
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_cpuidle_psci_enter
__tracepoint_android_vh_cpuidle_psci_exit
__tracepoint_android_vh_show_mem
__tracepoint_device_pm_callback_end
__tracepoint_device_pm_callback_start
tracepoint_probe_register
@ -1464,6 +1466,9 @@
# required by exynos-reboot.ko
pm_power_off
# required by exynos-s2i.ko
smp_call_function_many
# required by exynos-ssld.ko
driver_register
@ -1930,8 +1935,6 @@
dma_heap_get_name
is_dma_buf_file
iterate_fd
__traceiter_android_vh_show_mem
__tracepoint_android_vh_show_mem
# required by samsung_iommu_v9.ko
device_link_add


@ -54,6 +54,7 @@
__printk_ratelimit
prepare_to_wait_exclusive
proc_symlink
public_key_verify_signature
radix_tree_lookup_slot
radix_tree_replace_slot
_raw_write_trylock


@ -957,7 +957,9 @@
int_to_scsilun
iomem_resource
iommu_alloc_resv_region
iommu_attach_device_pasid
iommu_attach_group
iommu_detach_device_pasid
iommu_device_register
iommu_device_sysfs_add
iommu_device_sysfs_remove
@ -968,6 +970,7 @@
iommu_fwspec_add_ids
iommu_fwspec_free
iommu_get_domain_for_dev
iommu_get_domain_for_dev_pasid
iommu_group_alloc
iommu_group_for_each_dev
iommu_group_get


@ -1,7 +1,9 @@
[abi_symbol_list]
# commonly used symbols
add_cpu
add_wait_queue
add_wait_queue_exclusive
activate_task
alloc_chrdev_region
alloc_etherdev_mqs
alloc_netdev_mqs
@ -12,6 +14,7 @@
__alloc_skb
alloc_workqueue
alt_cb_patch_nops
arch_freq_scale
__arch_copy_from_user
__arch_copy_to_user
arm64_use_ng_mappings
@ -19,7 +22,10 @@
atomic_notifier_call_chain
atomic_notifier_chain_register
atomic_notifier_chain_unregister
available_idle_cpu
balance_push_callback
bcmp
blockdev_superblock
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
@ -33,6 +39,7 @@
cdev_add
cdev_del
cdev_init
check_preempt_curr
__check_object_size
__class_create
class_destroy
@ -55,10 +62,21 @@
__cpuhp_setup_state
__cpuhp_state_add_instance
__cpuhp_state_remove_instance
__cpufreq_driver_target
cpufreq_disable_fast_switch
cpufreq_driver_fast_switch
cpufreq_driver_resolve_freq
cpufreq_enable_fast_switch
cpufreq_policy_transition_delay_us
cpufreq_this_cpu_can_update
cpufreq_register_governor
cpufreq_unregister_governor
cpu_hwcaps
cpu_number
__cpu_active_mask
__cpu_online_mask
__cpu_possible_mask
__cpu_present_mask
crc32_le
crypto_aead_decrypt
crypto_aead_encrypt
@ -68,7 +86,9 @@
crypto_alloc_shash
crypto_destroy_tfm
crypto_has_alg
css_next_child
datagram_poll
deactivate_task
debugfs_create_dir
debugfs_create_file
debugfs_create_u8
@ -150,6 +170,7 @@
dma_sync_single_for_device
dma_unmap_page_attrs
do_trace_netlink_extack
double_rq_lock
down_read
down_write
driver_unregister
@ -160,6 +181,7 @@
__dynamic_dev_dbg
__dynamic_netdev_dbg
__dynamic_pr_debug
init_task
enable_irq
eth_mac_addr
eth_platform_get_mac_address
@ -266,10 +288,12 @@
kmemdup
kobject_uevent
kobject_uevent_env
ksoftirqd
kstrtobool
kstrtoint
kstrtouint
kstrtoull
kthread_cancel_work_sync
kthread_create_on_node
kthread_should_stop
kthread_stop
@ -292,6 +316,7 @@
memmove
memset
memstart_addr
migrate_swap
misc_deregister
misc_register
mod_timer
@ -350,6 +375,7 @@
of_find_device_by_node
of_find_node_opts_by_path
of_find_property
of_get_cpu_node
of_get_next_child
of_get_parent
of_get_property
@ -367,6 +393,7 @@
param_ops_bool
param_ops_int
param_ops_uint
pcpu_nr_pages
__per_cpu_offset
perf_trace_buf_alloc
perf_trace_run_bpf_submit
@ -397,9 +424,11 @@
preempt_schedule_notrace
prepare_to_wait_event
_printk
_printk_deferred
proc_create
proc_create_net_data
proc_create_net_single
proc_dointvec_minmax
proc_doulongvec_minmax
_proc_mkdir
proc_mkdir
@ -431,6 +460,8 @@
_raw_write_lock_bh
_raw_write_unlock
_raw_write_unlock_bh
raw_spin_rq_lock_nested
raw_spin_rq_unlock
rcu_barrier
__rcu_read_lock
__rcu_read_unlock
@ -443,6 +474,7 @@
register_pernet_subsys
register_pm_notifier
register_syscore_ops
register_sysctl_table
regmap_read
regmap_update_bits_base
regmap_write
@ -464,29 +496,37 @@
request_firmware
__request_module
request_threaded_irq
return_address
root_task_group
rtc_time64_to_tm
rtnl_is_locked
rtnl_link_register
rtnl_link_unregister
rtnl_lock
rtnl_unlock
runqueues
sched_setscheduler
sched_uclamp_used
schedule
schedule_timeout
schedule_timeout_uninterruptible
scnprintf
security_sk_clone
security_sock_graft
seq_buf_printf
seq_lseek
seq_printf
seq_putc
seq_puts
seq_read
setup_udp_tunnel_sock
set_task_cpu
set_user_nice
sg_init_one
sg_init_table
sg_next
si_mem_available
si_swapinfo
simple_attr_open
simple_attr_release
simple_read_from_buffer
@ -542,7 +582,11 @@
__spi_alloc_controller
sprintf
sscanf
static_key_disable
__stack_chk_fail
stop_machine
stop_one_cpu
stop_one_cpu_nowait
strcmp
strcpy
strlcat
@ -556,9 +600,12 @@
synchronize_rcu
syscon_regmap_lookup_by_phandle
syscon_regmap_lookup_by_phandle_args
sysctl_vals
sysfs_create_files
sysfs_create_group
sysfs_create_groups
sysfs_notify
sysfs_remove_files
sysfs_remove_group
sysfs_remove_groups
sysfs_streq
@ -567,29 +614,172 @@
__tasklet_schedule
tasklet_setup
tasklet_unlock_wait
tasklist_lock
tick_nohz_get_idle_calls_cpu
topology_clear_scale_freq_source
topology_update_done
trace_event_buffer_commit
trace_event_buffer_reserve
trace_event_printf
trace_event_raw_init
trace_event_reg
trace_handle_return
__trace_bprintk
__traceiter_android_rvh_account_irq
__traceiter_android_rvh_after_dequeue_task
__traceiter_android_rvh_after_enqueue_task
__traceiter_android_rvh_build_perf_domains
__traceiter_android_rvh_can_migrate_task
__traceiter_android_rvh_check_preempt_tick
__traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_check_preempt_wakeup_ignore
__traceiter_android_rvh_cpu_cgroup_attach
__traceiter_android_rvh_cpu_cgroup_online
__traceiter_android_rvh_cpu_overutilized
__traceiter_android_rvh_dequeue_entity
__traceiter_android_rvh_dequeue_task
__traceiter_android_rvh_dequeue_task_fair
__traceiter_android_rvh_do_sched_yield
__traceiter_android_rvh_effective_cpu_util
__traceiter_android_rvh_enqueue_entity
__traceiter_android_rvh_enqueue_task
__traceiter_android_rvh_enqueue_task_fair
__traceiter_android_rvh_entity_tick
__traceiter_android_rvh_find_busiest_group
__traceiter_android_rvh_find_busiest_queue
__traceiter_android_rvh_find_lowest_rq
__traceiter_android_rvh_is_cpu_allowed
__traceiter_android_rvh_migrate_queued_task
__traceiter_android_rvh_new_task_stats
__traceiter_android_rvh_pick_next_entity
__traceiter_android_rvh_place_entity
__traceiter_android_rvh_replace_next_task_fair
__traceiter_android_rvh_rto_next_cpu
__traceiter_android_rvh_sched_balance_rt
__traceiter_android_rvh_sched_cpu_dying
__traceiter_android_rvh_sched_cpu_starting
__traceiter_android_rvh_sched_exec
__traceiter_android_rvh_sched_fork
__traceiter_android_rvh_sched_fork_init
__traceiter_android_rvh_sched_newidle_balance
__traceiter_android_rvh_sched_nohz_balancer_kick
__traceiter_android_rvh_sched_rebalance_domains
__traceiter_android_rvh_sched_setaffinity
__traceiter_android_rvh_schedule
__traceiter_android_rvh_select_fallback_rq
__traceiter_android_rvh_select_task_rq_fair
__traceiter_android_rvh_select_task_rq_rt
__traceiter_android_rvh_set_cpus_allowed_by_task
__traceiter_android_rvh_setscheduler
__traceiter_android_rvh_set_task_cpu
__traceiter_android_rvh_tick_entry
__traceiter_android_rvh_try_to_wake_up
__traceiter_android_rvh_try_to_wake_up_success
__traceiter_android_rvh_update_misfit_status
__traceiter_android_rvh_update_thermal_stats
__traceiter_android_rvh_wake_up_new_task
__traceiter_android_rvh_psci_cpu_suspend
__traceiter_android_rvh_psci_tos_resident_on
__traceiter_android_vh_build_sched_domains
__traceiter_android_vh_check_uninterrupt_tasks
__traceiter_android_vh_check_uninterrupt_tasks_done
__traceiter_android_vh_cpufreq_fast_switch
__traceiter_android_vh_cpufreq_resolve_freq
__traceiter_android_vh_cpufreq_target
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_get_thermal_zone_device
__traceiter_android_vh_modify_thermal_request_freq
__traceiter_android_vh_modify_thermal_target_freq
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_thermal_power_cap
__traceiter_android_vh_thermal_register
__traceiter_android_vh_thermal_unregister
__traceiter_android_vh_update_topology_flags_workfn
__traceiter_clock_set_rate
__traceiter_cpu_frequency
__traceiter_cpu_frequency_limits
__traceiter_sched_overutilized_tp
__traceiter_sched_switch
trace_output_call
__tracepoint_android_rvh_account_irq
__tracepoint_android_rvh_after_dequeue_task
__tracepoint_android_rvh_after_enqueue_task
__tracepoint_android_rvh_build_perf_domains
__tracepoint_android_rvh_can_migrate_task
__tracepoint_android_rvh_check_preempt_tick
__tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_check_preempt_wakeup_ignore
__tracepoint_android_rvh_cpu_cgroup_attach
__tracepoint_android_rvh_cpu_cgroup_online
__tracepoint_android_rvh_cpu_overutilized
__tracepoint_android_rvh_dequeue_entity
__tracepoint_android_rvh_dequeue_task
__tracepoint_android_rvh_dequeue_task_fair
__tracepoint_android_rvh_do_sched_yield
__tracepoint_android_rvh_effective_cpu_util
__tracepoint_android_rvh_enqueue_entity
__tracepoint_android_rvh_enqueue_task
__tracepoint_android_rvh_enqueue_task_fair
__tracepoint_android_rvh_entity_tick
__tracepoint_android_rvh_find_busiest_group
__tracepoint_android_rvh_find_busiest_queue
__tracepoint_android_rvh_find_lowest_rq
__tracepoint_android_rvh_is_cpu_allowed
__tracepoint_android_rvh_migrate_queued_task
__tracepoint_android_rvh_new_task_stats
__tracepoint_android_rvh_pick_next_entity
__tracepoint_android_rvh_place_entity
__tracepoint_android_rvh_replace_next_task_fair
__tracepoint_android_rvh_rto_next_cpu
__tracepoint_android_rvh_sched_balance_rt
__tracepoint_android_rvh_sched_cpu_dying
__tracepoint_android_rvh_sched_cpu_starting
__tracepoint_android_rvh_sched_exec
__tracepoint_android_rvh_sched_fork
__tracepoint_android_rvh_sched_fork_init
__tracepoint_android_rvh_sched_newidle_balance
__tracepoint_android_rvh_sched_nohz_balancer_kick
__tracepoint_android_rvh_sched_rebalance_domains
__tracepoint_android_rvh_sched_setaffinity
__tracepoint_android_rvh_schedule
__tracepoint_android_rvh_select_fallback_rq
__tracepoint_android_rvh_select_task_rq_fair
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_rvh_set_cpus_allowed_by_task
__tracepoint_android_rvh_setscheduler
__tracepoint_android_rvh_set_task_cpu
__tracepoint_android_rvh_tick_entry
__tracepoint_android_rvh_try_to_wake_up
__tracepoint_android_rvh_try_to_wake_up_success
__tracepoint_android_rvh_update_misfit_status
__tracepoint_android_rvh_update_thermal_stats
__tracepoint_android_rvh_wake_up_new_task
__tracepoint_android_rvh_psci_cpu_suspend
__tracepoint_android_rvh_psci_tos_resident_on
__tracepoint_android_vh_build_sched_domains
__tracepoint_android_vh_check_uninterrupt_tasks
__tracepoint_android_vh_check_uninterrupt_tasks_done
__tracepoint_android_vh_cpufreq_fast_switch
__tracepoint_android_vh_cpufreq_resolve_freq
__tracepoint_android_vh_cpufreq_target
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_get_thermal_zone_device
__tracepoint_android_vh_modify_thermal_request_freq
__tracepoint_android_vh_modify_thermal_target_freq
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_thermal_power_cap
__tracepoint_android_vh_thermal_register
__tracepoint_android_vh_thermal_unregister
__tracepoint_android_vh_update_topology_flags_workfn
__tracepoint_clock_set_rate
__tracepoint_cpu_frequency
__tracepoint_cpu_frequency_limits
__tracepoint_sched_overutilized_tp
__tracepoint_sched_switch
tracepoint_probe_register
tracepoint_probe_unregister
trace_print_symbols_seq
trace_raw_output_prep
__trace_trigger_soft_disabled
@ -631,6 +821,7 @@
unregister_pm_notifier
up_read
up_write
update_rq_clock
usb_alloc_urb
usb_anchor_urb
usb_autopm_get_interface
@ -665,7 +856,11 @@
usleep_range_state
vfree
vmalloc
vmalloc_nr_pages
vmap
vm_memory_committed
vm_node_stat
vm_zone_stat
vsnprintf
vunmap
vzalloc
@ -1009,6 +1204,16 @@
sock_common_recvmsg
sock_common_setsockopt
# required by kfifo_buf.ko
devres_add
__devres_alloc_node
devres_free
iio_buffer_init
iio_buffer_put
iio_device_attach_buffer
__kfifo_from_user
__kfifo_to_user
# required by l2tp_core.ko
idr_alloc_u32
idr_get_next_ul
@ -1275,6 +1480,25 @@
sdhci_set_bus_width
sdhci_setup_host
# required by sensorhub.ko
debugfs_create_symlink
iio_alloc_pollfunc
iio_dealloc_pollfunc
iio_device_alloc
iio_device_free
iio_device_id
iio_device_unregister
iio_pollfunc_store_time
iio_push_to_buffers
__iio_trigger_alloc
iio_trigger_free
iio_trigger_notify_done
iio_trigger_poll
iio_trigger_register
iio_trigger_unregister
pm_wakeup_ws_event
sysfs_create_link
# required by sipc-core.ko
mbox_free_channel
mbox_request_channel


@ -240,3 +240,7 @@
#required by mi_mempool.ko
__traceiter_android_vh_mmput
__tracepoint_android_vh_mmput
#required by mi_mempool.ko
__traceiter_android_vh_madvise_cold_pageout_skip
__tracepoint_android_vh_madvise_cold_pageout_skip


@ -17,7 +17,7 @@ CONFIG_RCU_BOOST=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_IKHEADERS=m
CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
@ -94,6 +94,7 @@ CONFIG_MODULE_SIG_PROTECT=y
CONFIG_MODPROBE_PATH="/system/bin/modprobe"
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_CGROUP_IOCOST=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_IOSCHED_BFQ=y
@ -307,6 +308,7 @@ CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_BLK_DEV_UBLK=y
CONFIG_BLK_DEV_NVME=y
CONFIG_SRAM=y
CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y


@ -406,6 +406,8 @@ int pkvm_iommu_resume(struct device *dev);
*/
int pkvm_iommu_finalize(int err);
bool pkvm_iommu_finalized(void);
struct vcpu_reset_state {
unsigned long pc;
unsigned long r0;


@ -72,7 +72,10 @@ typedef u64 kvm_pte_t;
#define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54)
#define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN 1
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_UXN 3
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_XN 2
#define KVM_PTE_LEAF_ATTR_HI_S2_XN GENMASK(54, 53)
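The two-bit field above replaces the old single XN bit at 54. As a hedged illustration (the helper names here are invented; the in-tree equivalents are the stage2_set_prot_attr()/kvm_pgtable_stage2_pte_prot() hunks further down), the encodings are written and read with the bitfield helpers:

    #include <linux/bitfield.h>

    /* Mark a stage-2 PTE privileged-execute-never (field at bits 54:53). */
    static kvm_pte_t pte_set_pxn(kvm_pte_t pte)
    {
            pte &= ~KVM_PTE_LEAF_ATTR_HI_S2_XN;
            return pte | FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN,
                                    KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN);
    }

    /* And the matching decode. */
    static bool pte_is_pxn(kvm_pte_t pte)
    {
            return FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, pte) ==
                   KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN;
    }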
static inline bool kvm_pte_valid(kvm_pte_t pte)
{
@ -167,6 +170,11 @@ struct kvm_pgtable_mm_ops {
void (*icache_inval_pou)(void *addr, size_t size);
};
static inline kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}
/**
* enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
* @KVM_PGTABLE_S2_NOFWB: Don't enforce Normal-WB even if the CPUs have
@ -184,7 +192,9 @@ enum kvm_pgtable_stage2_flags {
* @KVM_PGTABLE_PROT_W: Write permission.
* @KVM_PGTABLE_PROT_R: Read permission.
* @KVM_PGTABLE_PROT_DEVICE: Device attributes.
* @KVM_PGTABLE_PROT_NC: Normal non-cacheable attributes.
* @KVM_PGTABLE_PROT_NC: Normal non-cacheable attributes.
* @KVM_PGTABLE_PROT_PXN: Privileged execute-never.
* @KVM_PGTABLE_PROT_UXN: Unprivileged execute-never.
* @KVM_PGTABLE_PROT_SW0: Software bit 0.
* @KVM_PGTABLE_PROT_SW1: Software bit 1.
* @KVM_PGTABLE_PROT_SW2: Software bit 2.
@ -197,6 +207,8 @@ enum kvm_pgtable_prot {
KVM_PGTABLE_PROT_DEVICE = BIT(3),
KVM_PGTABLE_PROT_NC = BIT(4),
KVM_PGTABLE_PROT_PXN = BIT(5),
KVM_PGTABLE_PROT_UXN = BIT(6),
KVM_PGTABLE_PROT_SW0 = BIT(55),
KVM_PGTABLE_PROT_SW1 = BIT(56),
@ -490,6 +502,21 @@ int kvm_pgtable_stage2_annotate(struct kvm_pgtable *pgt, u64 addr, u64 size,
*/
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
/**
* kvm_pgtable_stage2_reclaim_leaves() - Attempt to reclaim leaf page-table
* pages by coalescing table entries into
* block mappings.
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address from which to reclaim leaves.
* @size: Size of the range.
*
* The offset of @addr within a page is ignored and @size is rounded-up to
* the next page boundary.
*
* Return: 0 on success, negative error code on failure.
*/
int kvm_pgtable_stage2_reclaim_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size);
/**
* kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
* without TLB invalidation.
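
A hedged usage sketch of the new reclaim API (the wrapper is hypothetical; host_stage2_unmap_reg_locked() later in this series is the real caller):

    /* Fold empty stage-2 table pages in [ipa, ipa + len) back into
     * block mappings; the page offset of ipa is ignored and len is
     * rounded up to a page boundary, per the kerneldoc above. */
    static int reclaim_range(struct kvm_pgtable *pgt, u64 ipa, u64 len)
    {
            return kvm_pgtable_stage2_reclaim_leaves(pgt, ipa, len);
    }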


@ -93,8 +93,6 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
int __pkvm_iommu_pm_notify(unsigned long dev_id,
enum pkvm_iommu_pm_event event);
int __pkvm_iommu_finalize(int err);
int pkvm_iommu_host_stage2_adjust_range(phys_addr_t addr, phys_addr_t *start,
phys_addr_t *end);
bool pkvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u32 esr,
phys_addr_t fault_pa);
void pkvm_iommu_host_stage2_idmap(phys_addr_t start, phys_addr_t end,


@ -72,6 +72,8 @@ int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
int __pkvm_host_reclaim_page(struct pkvm_hyp_vm *vm, u64 pfn, u64 ipa);
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int ___pkvm_host_donate_hyp(u64 pfn, u64 nr_pages, bool accept_mmio);
int __pkvm_host_donate_hyp_locked(u64 pfn, u64 nr_pages);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu);
int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu);


@ -162,9 +162,4 @@ int pkvm_load_pvmfw_pages(struct pkvm_hyp_vm *vm, u64 ipa, phys_addr_t phys,
u64 size);
void pkvm_poison_pvmfw_pages(void);
/*
* Notify pKVM about events that can undermine pKVM security.
*/
void pkvm_handle_system_misconfiguration(enum pkvm_system_misconfiguration event);
#endif /* __ARM64_KVM_NVHE_PKVM_H__ */


@ -392,6 +392,7 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
.id = dev_id,
.ops = drv->ops,
.pa = dev_pa,
.va = hyp_phys_to_virt(dev_pa),
.size = dev_size,
.flags = flags,
};
@ -421,22 +422,11 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
goto out_free;
}
/*
* Unmap the device's MMIO range from host stage-2. If registration
* is successful, future attempts to re-map will be blocked by
* pkvm_iommu_host_stage2_adjust_range.
*/
ret = host_stage2_unmap_reg_locked(dev_pa, dev_size);
ret = __pkvm_host_donate_hyp_locked(hyp_phys_to_pfn(dev_pa),
PAGE_ALIGN(dev_size) >> PAGE_SHIFT);
if (ret)
goto out_free;
/* Create EL2 mapping for the device. */
ret = __pkvm_create_private_mapping(dev_pa, dev_size,
PAGE_HYP_DEVICE, (unsigned long *)(&dev->va));
if (ret){
goto out_free;
}
/* Register device and prevent host from mapping the MMIO range. */
list_add_tail(&dev->list, &iommu_list);
if (dev->parent)
@ -456,6 +446,9 @@ int __pkvm_iommu_finalize(int err)
{
int ret = 0;
/* Err is not currently used in EL2.*/
WARN_ON(err);
hyp_spin_lock(&iommu_registration_lock);
if (!iommu_finalized)
iommu_finalized = true;
@ -463,13 +456,6 @@ int __pkvm_iommu_finalize(int err)
ret = -EPERM;
hyp_spin_unlock(&iommu_registration_lock);
/*
* If finalize failed in EL1 driver for any reason, this means we can't trust the DMA
* isolation. So we have to inform pKVM to properly protect itself.
*/
if (!ret && err)
pkvm_handle_system_misconfiguration(NO_DMA_ISOLATION);
return ret;
}
@ -499,39 +485,6 @@ int __pkvm_iommu_pm_notify(unsigned long dev_id, enum pkvm_iommu_pm_event event)
return ret;
}
/*
* Check host memory access against IOMMUs' MMIO regions.
* Returns -EPERM if the address is within the bounds of a registered device.
* Otherwise returns zero and adjusts boundaries of the new mapping to avoid
* MMIO regions of registered IOMMUs.
*/
int pkvm_iommu_host_stage2_adjust_range(phys_addr_t addr, phys_addr_t *start,
phys_addr_t *end)
{
struct pkvm_iommu *dev;
phys_addr_t new_start = *start;
phys_addr_t new_end = *end;
phys_addr_t dev_start, dev_end;
assert_host_component_locked();
list_for_each_entry(dev, &iommu_list, list) {
dev_start = dev->pa;
dev_end = dev_start + dev->size;
if (addr < dev_start)
new_end = min(new_end, dev_start);
else if (addr >= dev_end)
new_start = max(new_start, dev_end);
else
return -EPERM;
}
*start = new_start;
*end = new_end;
return 0;
}
bool pkvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u32 esr,
phys_addr_t pa)
{


@ -79,10 +79,35 @@ static void hyp_unlock_component(void)
hyp_spin_unlock(&pkvm_pgd_lock);
}
static void assert_host_can_alloc(void)
{
/* We can always get back to the host from guest context */
if (read_sysreg(vttbr_el2) != kvm_get_vttbr(&host_mmu.arch.mmu))
return;
/*
* An error code must be returned to EL1 to handle memory allocation
* failures cleanly. That's doable for explicit calls into higher
* ELs, but not so much for other EL2 entry reasons such as mem aborts.
* Thankfully we don't need memory allocation in these cases by
* construction, so let's enforce the invariant.
*/
switch (ESR_ELx_EC(read_sysreg(esr_el2))) {
case ESR_ELx_EC_HVC64:
case ESR_ELx_EC_SMC64:
break;
default:
WARN_ON(1);
}
}
static void *host_s2_zalloc_pages_exact(size_t size)
{
void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
void *addr;
assert_host_can_alloc();
addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
hyp_split_page(hyp_virt_to_page(addr));
/*
@ -97,6 +122,8 @@ static void *host_s2_zalloc_pages_exact(size_t size)
static void *host_s2_zalloc_page(void *pool)
{
assert_host_can_alloc();
return hyp_alloc_pages(pool, 0);
}
@ -146,6 +173,27 @@ static void prepare_host_vtcr(void)
id_aa64mmfr1_el1_sys_val, phys_shift);
}
static int prepopulate_host_stage2(void)
{
struct memblock_region *reg;
u64 addr = 0;
int i, ret;
for (i = 0; i < hyp_memblock_nr; i++) {
reg = &hyp_memory[i];
ret = host_stage2_idmap_locked(addr, reg->base - addr, PKVM_HOST_MMIO_PROT, false);
if (ret)
return ret;
ret = host_stage2_idmap_locked(reg->base, reg->size, PKVM_HOST_MEM_PROT, false);
if (ret)
return ret;
addr = reg->base + reg->size;
}
return host_stage2_idmap_locked(addr, BIT(host_mmu.pgt.ia_bits) - addr, PKVM_HOST_MMIO_PROT,
false);
}
int kvm_host_prepare_stage2(void *pgt_pool_base)
{
struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
@ -172,7 +220,7 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
mmu->pgt = &host_mmu.pgt;
atomic64_set(&mmu->vmid.id, 0);
return 0;
return prepopulate_host_stage2();
}
static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
@ -398,7 +446,7 @@ int host_stage2_unmap_reg_locked(phys_addr_t start, u64 size)
hyp_assert_lock_held(&host_mmu.lock);
ret = kvm_pgtable_stage2_unmap(&host_mmu.pgt, start, size);
ret = kvm_pgtable_stage2_reclaim_leaves(&host_mmu.pgt, start, size);
if (ret)
return ret;
@ -466,6 +514,11 @@ static enum kvm_pgtable_prot default_host_prot(bool is_memory)
return is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;
}
static enum kvm_pgtable_prot default_hyp_prot(phys_addr_t phys)
{
return addr_is_memory(phys) ? PAGE_HYP : PAGE_HYP_DEVICE;
}
bool addr_is_memory(phys_addr_t phys)
{
struct kvm_mem_range range;
@ -763,22 +816,15 @@ static int host_stage2_idmap(struct kvm_vcpu_fault_info *fault, u64 addr)
}
}
/*
* Adjust against IOMMU devices first. host_stage2_adjust_range() should
* be called last for proper alignment.
*/
if (!is_memory) {
ret = pkvm_iommu_host_stage2_adjust_range(addr, &range.start,
&range.end);
if (ret)
return ret;
}
ret = host_stage2_adjust_range(addr, &range, level);
if (ret)
return ret;
return host_stage2_idmap_locked(range.start, range.end - range.start, prot, false);
/*
* We're guaranteed not to require memory allocation by construction,
* no need to bother even trying to recycle pages.
*/
return __host_stage2_idmap(range.start, range.end, prot, false);
}
static void (*illegal_abt_notifier)(struct kvm_cpu_context *host_ctxt);
@ -972,7 +1018,7 @@ static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr)
if (is_memory && hyp_phys_to_page(addr)->flags & MODULE_OWNED_PAGE)
return PKVM_MODULE_DONT_TOUCH;
if (!addr_is_allowed_memory(addr))
if (is_memory && !addr_is_allowed_memory(addr))
return PKVM_NOPAGE;
if (!kvm_pte_valid(pte) && pte)
@ -1186,8 +1232,10 @@ static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
enum kvm_pgtable_prot perms)
{
u64 size = tx->nr_pages * PAGE_SIZE;
phys_addr_t phys = hyp_virt_to_phys((void *)addr);
enum kvm_pgtable_prot prot = default_hyp_prot(phys);
if (perms != PAGE_HYP)
if (!addr_is_memory(phys) || perms != prot)
return -EPERM;
if (__hyp_ack_skip_pgtable_check(tx))
@ -1242,8 +1290,10 @@ static int hyp_complete_donation(u64 addr,
const struct pkvm_mem_transition *tx)
{
void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
enum kvm_pgtable_prot prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
phys_addr_t phys = hyp_virt_to_phys(start);
enum kvm_pgtable_prot prot = default_hyp_prot(phys);
prot = pkvm_mkstate(prot, PKVM_PAGE_OWNED);
return pkvm_create_mappings_locked(start, end, prot);
}
@ -1280,7 +1330,7 @@ static int guest_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
{
u64 size = tx->nr_pages * PAGE_SIZE;
if (perms != KVM_PGTABLE_PROT_RWX)
if (!addr_is_memory(tx->completer.guest.phys) || perms != KVM_PGTABLE_PROT_RWX)
return -EPERM;
return __guest_check_page_state_range(tx->completer.guest.hyp_vcpu,
@ -1291,6 +1341,9 @@ static int guest_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
if (!addr_is_memory(tx->completer.guest.phys))
return -EPERM;
return __guest_check_page_state_range(tx->completer.guest.hyp_vcpu,
addr, size, PKVM_NOPAGE);
}
@ -1776,7 +1829,7 @@ int __pkvm_host_share_hyp(u64 pfn)
.id = PKVM_ID_HYP,
},
},
.completer_prot = PAGE_HYP,
.completer_prot = default_hyp_prot(host_addr),
};
host_lock_component();
@ -1873,7 +1926,7 @@ int __pkvm_host_unshare_hyp(u64 pfn)
.id = PKVM_ID_HYP,
},
},
.completer_prot = PAGE_HYP,
.completer_prot = default_hyp_prot(host_addr),
};
host_lock_component();
@ -1888,6 +1941,27 @@ int __pkvm_host_unshare_hyp(u64 pfn)
}
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
{
return ___pkvm_host_donate_hyp(pfn, nr_pages, false);
}
int ___pkvm_host_donate_hyp(u64 pfn, u64 nr_pages, bool accept_mmio)
{
phys_addr_t start = hyp_pfn_to_phys(pfn);
phys_addr_t end = start + (nr_pages << PAGE_SHIFT);
int ret;
if (!accept_mmio && !range_is_memory(start, end))
return -EPERM;
host_lock_component();
ret = __pkvm_host_donate_hyp_locked(pfn, nr_pages);
host_unlock_component();
return ret;
}
int __pkvm_host_donate_hyp_locked(u64 pfn, u64 nr_pages)
{
int ret;
u64 host_addr = hyp_pfn_to_phys(pfn);
@ -1908,13 +1982,12 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
},
};
host_lock_component();
hyp_assert_lock_held(&host_mmu.lock);
hyp_lock_component();
ret = do_donate(&donation);
hyp_unlock_component();
host_unlock_component();
return ret;
}
@ -1964,15 +2037,19 @@ static int restrict_host_page_perms(u64 addr, kvm_pte_t pte, u32 level, enum kvm
return ret;
}
#define MODULE_PROT_ALLOWLIST (KVM_PGTABLE_PROT_RWX | \
KVM_PGTABLE_PROT_NC | \
KVM_PGTABLE_PROT_PXN | \
KVM_PGTABLE_PROT_UXN)
int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
{
u64 addr = hyp_pfn_to_phys(pfn);
struct hyp_page *page;
struct hyp_page *page = NULL;
kvm_pte_t pte;
u32 level;
int ret;
if ((prot & KVM_PGTABLE_PROT_RWX) != prot || !addr_is_memory(addr))
if ((prot & MODULE_PROT_ALLOWLIST) != prot)
return -EINVAL;
host_lock_component();
@ -1980,6 +2057,14 @@ int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
if (ret)
goto unlock;
/*
* There is no hyp_vmemmap covering MMIO regions, which makes tracking
* of module-owned MMIO regions hard, so we trust the modules not to
* mess things up.
*/
if (!addr_is_memory(addr))
goto update;
ret = -EPERM;
page = hyp_phys_to_page(addr);
@ -1994,14 +2079,15 @@ int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
goto unlock;
}
if (prot == KVM_PGTABLE_PROT_RWX)
update:
if (prot == default_host_prot(!!page))
ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, PKVM_ID_HOST);
else if (!prot)
ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, PKVM_ID_PROTECTED);
else
ret = restrict_host_page_perms(addr, pte, level, prot);
if (ret)
if (ret || !page)
goto unlock;
if (prot != KVM_PGTABLE_PROT_RWX)


@ -77,6 +77,11 @@ void __pkvm_close_module_registration(void)
*/
}
static int __pkvm_module_host_donate_hyp(u64 pfn, u64 nr_pages)
{
return ___pkvm_host_donate_hyp(pfn, nr_pages, true);
}
const struct pkvm_module_ops module_ops = {
.create_private_mapping = __pkvm_create_private_mapping,
.alloc_module_va = __pkvm_alloc_module_va,
@ -99,7 +104,7 @@ const struct pkvm_module_ops module_ops = {
.register_illegal_abt_notifier = __pkvm_register_illegal_abt_notifier,
.register_psci_notifier = __pkvm_register_psci_notifier,
.register_hyp_panic_notifier = __pkvm_register_hyp_panic_notifier,
.host_donate_hyp = __pkvm_host_donate_hyp,
.host_donate_hyp = __pkvm_module_host_donate_hyp,
.hyp_donate_host = __pkvm_hyp_donate_host,
.host_share_hyp = __pkvm_host_share_hyp,
.host_unshare_hyp = __pkvm_host_unshare_hyp,


@ -1570,14 +1570,3 @@ bool kvm_hyp_handle_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
/*
* Notify pKVM about events that can undermine pKVM security.
*/
void pkvm_handle_system_misconfiguration(enum pkvm_system_misconfiguration event)
{
if (event == NO_DMA_ISOLATION)
pkvm_poison_pvmfw_pages();
else
BUG();
}


@ -277,6 +277,29 @@ static int fix_hyp_pgtable_refcnt_walker(u64 addr, u64 end, u32 level,
return 0;
}
static int pin_table_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag, void * const arg)
{
struct kvm_pgtable_mm_ops *mm_ops = arg;
kvm_pte_t pte = *ptep;
if (kvm_pte_valid(pte))
mm_ops->get_page(kvm_pte_follow(pte, mm_ops));
return 0;
}
static int pin_host_tables(void)
{
struct kvm_pgtable_walker walker = {
.cb = pin_table_walker,
.flags = KVM_PGTABLE_WALK_TABLE_POST,
.arg = &host_mmu.mm_ops,
};
return kvm_pgtable_walk(&host_mmu.pgt, 0, BIT(host_mmu.pgt.ia_bits), &walker);
}
static int fix_host_ownership(void)
{
struct kvm_pgtable_walker walker = {
@ -357,10 +380,6 @@ void __noreturn __pkvm_init_finalise(void)
};
pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
ret = fix_host_ownership();
if (ret)
goto out;
ret = fix_hyp_pgtable_refcnt();
if (ret)
goto out;
@ -369,10 +388,18 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
ret = fix_host_ownership();
if (ret)
goto out;
ret = unmap_protected_regions();
if (ret)
goto out;
ret = pin_host_tables();
if (ret)
goto out;
ret = hyp_ffa_init(ffa_proxy_pages);
if (ret)
goto out;


@ -76,11 +76,6 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}
static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}
static void kvm_clear_pte(kvm_pte_t *ptep)
{
WRITE_ONCE(*ptep, 0);
@ -281,7 +276,8 @@ static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
kvm_pte_t attr;
u32 mtype;
if (!(prot & KVM_PGTABLE_PROT_R) || (device && nc))
if (!(prot & KVM_PGTABLE_PROT_R) || (device && nc) ||
(prot & (KVM_PGTABLE_PROT_PXN | KVM_PGTABLE_PROT_UXN)))
return -EINVAL;
if (device)
@ -570,16 +566,15 @@ static bool stage2_has_fwb(struct kvm_pgtable *pgt)
#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
kvm_pte_t *ptep)
kvm_pte_t *ptep)
{
u64 exec_type = KVM_PTE_LEAF_ATTR_HI_S2_XN_XN;
bool device = prot & KVM_PGTABLE_PROT_DEVICE;
u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
bool nc = prot & KVM_PGTABLE_PROT_NC;
enum kvm_pgtable_prot exec_prot;
kvm_pte_t attr;
if (device && nc)
return -EINVAL;
if (device)
attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
else if (nc)
@ -587,11 +582,23 @@ static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot p
else
attr = KVM_S2_MEMATTR(pgt, NORMAL);
if (!(prot & KVM_PGTABLE_PROT_X))
attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
else if (device)
return -EINVAL;
exec_prot = prot & (KVM_PGTABLE_PROT_X | KVM_PGTABLE_PROT_PXN | KVM_PGTABLE_PROT_UXN);
switch(exec_prot) {
case KVM_PGTABLE_PROT_X:
goto set_ap;
case KVM_PGTABLE_PROT_PXN:
exec_type = KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN;
break;
case KVM_PGTABLE_PROT_UXN:
exec_type = KVM_PTE_LEAF_ATTR_HI_S2_XN_UXN;
break;
default:
if (exec_prot)
return -EINVAL;
}
attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, exec_type);
set_ap:
if (prot & KVM_PGTABLE_PROT_R)
attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
@ -617,8 +624,21 @@ enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
prot |= KVM_PGTABLE_PROT_R;
if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
prot |= KVM_PGTABLE_PROT_W;
if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
switch(FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, pte)) {
case 0:
prot |= KVM_PGTABLE_PROT_X;
break;
case KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN:
prot |= KVM_PGTABLE_PROT_PXN;
break;
case KVM_PTE_LEAF_ATTR_HI_S2_XN_UXN:
prot |= KVM_PGTABLE_PROT_UXN;
break;
case KVM_PTE_LEAF_ATTR_HI_S2_XN_XN:
break;
default:
WARN_ON(1);
}
return prot;
}
@ -660,7 +680,9 @@ static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
static bool stage2_pte_executable(kvm_pte_t pte)
{
return kvm_pte_valid(pte) && !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
kvm_pte_t xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, pte);
return kvm_pte_valid(pte) && xn != KVM_PTE_LEAF_ATTR_HI_S2_XN_XN;
}
static bool stage2_leaf_mapping_allowed(u64 addr, u64 end, u32 level,
@ -1017,6 +1039,30 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
return kvm_pgtable_walk(pgt, addr, size, &walker);
}
static int stage2_reclaim_leaf_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag, void * const arg)
{
stage2_coalesce_walk_table_post(addr, end, level, ptep, arg);
return 0;
}
int kvm_pgtable_stage2_reclaim_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
struct stage2_map_data map_data = {
.phys = KVM_PHYS_INVALID,
.mmu = pgt->mmu,
.mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_reclaim_leaf_walker,
.arg = &map_data,
.flags = KVM_PGTABLE_WALK_TABLE_POST,
};
return kvm_pgtable_walk(pgt, addr, size, &walker);
}
struct stage2_attr_data {
kvm_pte_t attr_set;
kvm_pte_t attr_clr;
@ -1135,7 +1181,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
u32 level;
kvm_pte_t set = 0, clr = 0;
if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
if (prot & !KVM_PGTABLE_PROT_RWX)
return -EINVAL;
if (prot & KVM_PGTABLE_PROT_R)


@ -6,6 +6,9 @@
#include <linux/kvm_host.h>
/* Did all IOMMUs register as expected. */
static bool finalised;
static unsigned long dev_to_id(struct device *dev)
{
/* Use the struct device pointer as a unique identifier. */
@ -59,6 +62,12 @@ EXPORT_SYMBOL(pkvm_iommu_resume);
int pkvm_iommu_finalize(int err)
{
return kvm_call_hyp_nvhe(__pkvm_iommu_finalize, err);
finalised = !err;
return kvm_call_hyp_nvhe(__pkvm_iommu_finalize, 0);
}
EXPORT_SYMBOL_GPL(pkvm_iommu_finalize);
bool pkvm_iommu_finalized(void)
{
return finalised;
}
EXPORT_SYMBOL(pkvm_iommu_finalize);


@ -448,6 +448,9 @@ static int __init pkvm_firmware_rmem_clear(void)
return -EINVAL;
memset(addr, 0, size);
/* Clear so user space doesn't get stale info via IOCTL. */
pkvm_firmware_mem = NULL;
dcache_clean_poc((unsigned long)addr, (unsigned long)addr + size);
memunmap(addr);
return 0;
@ -501,6 +504,10 @@ static int __init finalize_pkvm(void)
if (pkvm_load_early_modules())
pkvm_firmware_rmem_clear();
/* If no DMA protection. */
if (!pkvm_iommu_finalized())
pkvm_firmware_rmem_clear();
/*
* Exclude HYP sections from kmemleak so that they don't get peeked
* at, which would end badly once inaccessible.


@ -82,6 +82,27 @@ WORKAROUND_CAVIUM_TX2_219_TVM
WORKAROUND_CLEAN_CACHE
WORKAROUND_DEVICE_LOAD_ACQUIRE
WORKAROUND_NVIDIA_CARMEL_CNP
WORKAROUND_NXP_ERR050104
WORKAROUND_QCOM_FALKOR_E1003
WORKAROUND_REPEAT_TLBI
WORKAROUND_SPECULATIVE_AT
ANDROID_KABI_RESERVE_01
ANDROID_KABI_RESERVE_02
ANDROID_KABI_RESERVE_03
ANDROID_KABI_RESERVE_04
ANDROID_KABI_RESERVE_05
ANDROID_KABI_RESERVE_06
ANDROID_KABI_RESERVE_07
ANDROID_KABI_RESERVE_08
ANDROID_KABI_RESERVE_09
ANDROID_KABI_RESERVE_10
ANDROID_KABI_RESERVE_11
ANDROID_KABI_RESERVE_12
ANDROID_KABI_RESERVE_13
ANDROID_KABI_RESERVE_14
ANDROID_KABI_RESERVE_15
ANDROID_KABI_RESERVE_16
ANDROID_KABI_RESERVE_17
ANDROID_KABI_RESERVE_18
ANDROID_KABI_RESERVE_19
ANDROID_KABI_RESERVE_20


@ -19,7 +19,7 @@ CONFIG_RCU_BOOST=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_IKHEADERS=m
CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
@ -89,6 +89,7 @@ CONFIG_MODULE_SIG=y
CONFIG_MODULE_SIG_PROTECT=y
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_CGROUP_IOCOST=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_IOSCHED_BFQ=y
@ -293,6 +294,7 @@ CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_BLK_DEV_UBLK=y
CONFIG_BLK_DEV_NVME=y
CONFIG_SRAM=y
CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y


@ -1,6 +1,6 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.constants
KMI_GENERATION=8
KMI_GENERATION=9
LLVM=1
DEPMOD=depmod


@ -213,8 +213,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->mm;
if (mm) {
mmap_read_lock(mm);
vma = vma_lookup(mm, alloc->vma_addr);
mmap_write_lock(mm);
vma = alloc->vma;
}
if (!vma && need_mm) {
@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_alloc_page_end(alloc, index);
}
if (mm) {
mmap_read_unlock(mm);
mmap_write_unlock(mm);
mmput(mm);
}
return 0;
@ -304,21 +304,24 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
err_no_vma:
if (mm) {
mmap_read_unlock(mm);
mmap_write_unlock(mm);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
}
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
/* pairs with smp_load_acquire in binder_alloc_get_vma() */
smp_store_release(&alloc->vma, vma);
}
static inline struct vm_area_struct *binder_alloc_get_vma(
struct binder_alloc *alloc)
{
struct vm_area_struct *vma = NULL;
if (alloc->vma_addr)
vma = vma_lookup(alloc->mm, alloc->vma_addr);
return vma;
/* pairs with smp_store_release in binder_alloc_set_vma() */
return smp_load_acquire(&alloc->vma);
}
static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
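
The comments in the two helpers above name the pairing. In miniature, and outside any diff, the release/acquire publication idiom looks like this (identifiers are illustrative):

    #include <asm/barrier.h>

    static struct vm_area_struct *example_vma;

    static void example_publish(struct vm_area_struct *vma)
    {
            /* every prior write to *vma is visible to acquire readers */
            smp_store_release(&example_vma, vma);
    }

    static struct vm_area_struct *example_observe(void)
    {
            /* a non-NULL result implies *example_vma is fully initialised */
            return smp_load_acquire(&example_vma);
    }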
@ -381,15 +384,13 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
mmap_read_lock(alloc->mm);
/* Check binder_alloc is fully initialized */
if (!binder_alloc_get_vma(alloc)) {
mmap_read_unlock(alloc->mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
mmap_read_unlock(alloc->mm);
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@ -780,7 +781,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
alloc->vma_addr = vma->vm_start;
/* Signal binder_alloc is fully initialized */
binder_alloc_set_vma(alloc, vma);
return 0;
@ -810,8 +813,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers = 0;
mutex_lock(&alloc->mutex);
BUG_ON(alloc->vma_addr &&
vma_lookup(alloc->mm, alloc->vma_addr));
BUG_ON(alloc->vma);
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@ -918,25 +920,17 @@ void binder_alloc_print_pages(struct seq_file *m,
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
mmap_read_lock(alloc->mm);
if (binder_alloc_get_vma(alloc) == NULL) {
mmap_read_unlock(alloc->mm);
goto uninitialized;
if (binder_alloc_get_vma(alloc) != NULL) {
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
page = &alloc->pages[i];
if (!page->page_ptr)
free++;
else if (list_empty(&page->lru))
active++;
else
lru++;
}
}
mmap_read_unlock(alloc->mm);
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
page = &alloc->pages[i];
if (!page->page_ptr)
free++;
else if (list_empty(&page->lru))
active++;
else
lru++;
}
uninitialized:
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
@ -971,7 +965,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
alloc->vma_addr = 0;
binder_alloc_set_vma(alloc, NULL);
}
/**


@ -75,7 +75,7 @@ struct binder_lru_page {
/**
* struct binder_alloc - per-binder proc state for binder allocator
* @mutex: protects binder_alloc fields
* @vma_addr: vm_area_struct->vm_start passed to mmap_handler
* @vma: vm_area_struct passed to mmap_handler
* (invariant after mmap)
* @mm: copy of task->mm (invariant after open)
* @buffer: base of per-proc address space mapped via mmap
@ -99,7 +99,7 @@ struct binder_lru_page {
*/
struct binder_alloc {
struct mutex mutex;
unsigned long vma_addr;
struct vm_area_struct *vma;
struct mm_struct *mm;
void __user *buffer;
struct list_head buffers;


@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
if (!binder_selftest_run || !alloc->vma_addr)
if (!binder_selftest_run || !alloc->vma)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);


@ -306,3 +306,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_encrypt_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_init_aes_encrypt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_skip_swap_map_write);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_post_image_save);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_pageout_skip);


@ -1416,7 +1416,9 @@ static void platform_remove(struct device *_dev)
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
if (drv->remove) {
if (drv->remove_new) {
drv->remove_new(dev);
} else if (drv->remove) {
int ret = drv->remove(dev);
if (ret)

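From a driver's point of view, the new callback in the hunk above is simply a remove that cannot fail. A short sketch with hypothetical names:

    #include <linux/platform_device.h>

    static void exdrv_remove(struct platform_device *pdev)
    {
            /* teardown that cannot fail; nothing to return */
    }

    static struct platform_driver exdrv_driver = {
            .remove_new = exdrv_remove,
            .driver = { .name = "exdrv" },
    };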

@ -2148,6 +2148,7 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
arch_set_freq_scale(policy->related_cpus, freq,
policy->cpuinfo.max_freq);
cpufreq_stats_record_transition(policy, freq);
cpufreq_times_record_transition(policy, freq);
trace_android_rvh_cpufreq_transition(policy);
if (trace_cpu_frequency_enabled()) {


@ -339,6 +339,16 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
return events;
}
static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
{
spin_lock(&dmabuf->name_lock);
kfree(dmabuf->name);
dmabuf->name = name;
spin_unlock(&dmabuf->name_lock);
return 0;
}
/**
* dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
* It could support changing the name of the dma-buf if the same
@ -352,19 +362,35 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
* devices, return -EBUSY.
*
*/
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
long dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
{
long ret = 0;
char *buf = kstrndup(name, DMA_BUF_NAME_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = _dma_buf_set_name(dmabuf, buf);
if (ret)
kfree(buf);
return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_set_name);
static long dma_buf_set_name_user(struct dma_buf *dmabuf, const char __user *buf)
{
char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
long ret = 0;
if (IS_ERR(name))
return PTR_ERR(name);
spin_lock(&dmabuf->name_lock);
kfree(dmabuf->name);
dmabuf->name = name;
spin_unlock(&dmabuf->name_lock);
ret = _dma_buf_set_name(dmabuf, name);
if (ret)
kfree(name);
return 0;
return ret;
}
#if IS_ENABLED(CONFIG_SYNC_FILE)
@ -513,7 +539,7 @@ static long dma_buf_ioctl(struct file *file,
case DMA_BUF_SET_NAME_A:
case DMA_BUF_SET_NAME_B:
return dma_buf_set_name(dmabuf, (const char __user *)arg);
return dma_buf_set_name_user(dmabuf, (const char __user *)arg);
#if IS_ENABLED(CONFIG_SYNC_FILE)
case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
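
User space reaches this path through the existing DMA_BUF_SET_NAME ioctl, while the refactor above also exports dma_buf_set_name() to kernel users. A hedged user-space sketch (fd is assumed to be an exported dma-buf file descriptor):

    #include <sys/ioctl.h>
    #include <linux/dma-buf.h>

    static int dmabuf_set_name(int fd, const char *name)
    {
            /* the kernel duplicates the string via strndup_user() and
             * swaps it in under dmabuf->name_lock */
            return ioctl(fd, DMA_BUF_SET_NAME, name);
    }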


@ -43,6 +43,8 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;
static int sdei_hp_state;
struct sdei_event {
/* These three are protected by the sdei_list_lock */
struct list_head list;
@ -301,8 +303,6 @@ int sdei_mask_local_cpu(void)
{
int err;
WARN_ON_ONCE(preemptible());
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
if (err && err != -EIO) {
pr_warn_once("failed to mask CPU[%u]: %d\n",
@ -315,6 +315,7 @@ int sdei_mask_local_cpu(void)
static void _ipi_mask_cpu(void *ignored)
{
WARN_ON_ONCE(preemptible());
sdei_mask_local_cpu();
}
@ -322,8 +323,6 @@ int sdei_unmask_local_cpu(void)
{
int err;
WARN_ON_ONCE(preemptible());
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
if (err && err != -EIO) {
pr_warn_once("failed to unmask CPU[%u]: %d\n",
@ -336,6 +335,7 @@ int sdei_unmask_local_cpu(void)
static void _ipi_unmask_cpu(void *ignored)
{
WARN_ON_ONCE(preemptible());
sdei_unmask_local_cpu();
}
@ -343,6 +343,8 @@ static void _ipi_private_reset(void *ignored)
{
int err;
WARN_ON_ONCE(preemptible());
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
NULL);
if (err && err != -EIO)
@ -389,8 +391,6 @@ static void _local_event_enable(void *data)
int err;
struct sdei_crosscall_args *arg = data;
WARN_ON_ONCE(preemptible());
err = sdei_api_event_enable(arg->event->event_num);
sdei_cross_call_return(arg, err);
@ -479,8 +479,6 @@ static void _local_event_unregister(void *data)
int err;
struct sdei_crosscall_args *arg = data;
WARN_ON_ONCE(preemptible());
err = sdei_api_event_unregister(arg->event->event_num);
sdei_cross_call_return(arg, err);
@ -561,8 +559,6 @@ static void _local_event_register(void *data)
struct sdei_registered_event *reg;
struct sdei_crosscall_args *arg = data;
WARN_ON(preemptible());
reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
reg, 0, 0);
@ -717,6 +713,8 @@ static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
{
int rv;
WARN_ON_ONCE(preemptible());
switch (action) {
case CPU_PM_ENTER:
rv = sdei_mask_local_cpu();
@ -765,7 +763,7 @@ static int sdei_device_freeze(struct device *dev)
int err;
/* unregister private events */
cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
cpuhp_remove_state(sdei_entry_point);
err = sdei_unregister_shared();
if (err)
@ -786,12 +784,15 @@ static int sdei_device_thaw(struct device *dev)
return err;
}
err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
if (err)
if (err < 0) {
pr_warn("Failed to re-register CPU hotplug notifier...\n");
return err;
}
return err;
sdei_hp_state = err;
return 0;
}
static int sdei_device_restore(struct device *dev)
@ -823,7 +824,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
* We are going to reset the interface, after this there is no point
* doing work when we take CPUs offline.
*/
cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
cpuhp_remove_state(sdei_hp_state);
sdei_platform_reset();
@ -1003,13 +1004,15 @@ static int sdei_probe(struct platform_device *pdev)
goto remove_cpupm;
}
err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
if (err) {
if (err < 0) {
pr_warn("Failed to register CPU hotplug notifier...\n");
goto remove_reboot;
}
sdei_hp_state = err;
return 0;
remove_reboot:
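
Both hunks above replace the fixed CPUHP_AP_ARM_SDEI_STARTING state with the dynamic-state idiom, which is why the returned id is now stored in sdei_hp_state. The pattern in isolation (names hypothetical):

    #include <linux/cpuhotplug.h>

    static int example_hp_state;

    static int example_up(unsigned int cpu)   { return 0; }
    static int example_down(unsigned int cpu) { return 0; }

    static int example_register(void)
    {
            int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                        example_up, example_down);
            if (ret < 0)
                    return ret;             /* negative errno on failure */
            example_hp_state = ret;         /* the dynamically allocated id */
            return 0;
    }

    static void example_unregister(void)
    {
            cpuhp_remove_state(example_hp_state);
    }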


@ -32,6 +32,7 @@
#include <linux/hiddev.h>
#include <linux/hid-debug.h>
#include <linux/hidraw.h>
#include <linux/uhid.h>
#include "hid-ids.h"
@ -261,6 +262,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
{
struct hid_report *report;
struct hid_field *field;
unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
unsigned int usages;
unsigned int offset;
unsigned int i;
@ -291,8 +293,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;
if (parser->device->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;
/* Total size check: Allow for possible report index byte */
if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
if (report->size > (max_buffer_size - 1) << 3) {
hid_err(parser->device, "report is too long\n");
return -1;
}
@ -1966,6 +1971,7 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
struct hid_driver *hdrv;
int max_buffer_size = HID_MAX_BUFFER_SIZE;
u32 rsize, csize = size;
u8 *cdata = data;
int ret = 0;
@ -1981,10 +1987,13 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
rsize = hid_compute_report_size(report);
if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
rsize = HID_MAX_BUFFER_SIZE - 1;
else if (rsize > HID_MAX_BUFFER_SIZE)
rsize = HID_MAX_BUFFER_SIZE;
if (hid->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;
if (report_enum->numbered && rsize >= max_buffer_size)
rsize = max_buffer_size - 1;
else if (rsize > max_buffer_size)
rsize = max_buffer_size;
if (csize < rsize) {
dbg_hid("report %d is too short, (%d < %d)\n", report->id,
@ -2387,7 +2396,12 @@ int hid_hw_raw_request(struct hid_device *hdev,
unsigned char reportnum, __u8 *buf,
size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
{
if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
if (hdev->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;
if (len < 1 || len > max_buffer_size || !buf)
return -EINVAL;
return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
@ -2406,7 +2420,12 @@ EXPORT_SYMBOL_GPL(hid_hw_raw_request);
*/
int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
{
if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
if (hdev->ll_driver == &uhid_hid_driver)
max_buffer_size = UHID_DATA_MAX;
if (len < 1 || len > max_buffer_size || !buf)
return -EINVAL;
if (hdev->ll_driver->output_report)


@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
@ -38,6 +39,7 @@ struct mbox_test_device {
char *signal;
char *message;
spinlock_t lock;
struct mutex mutex;
wait_queue_head_t waitq;
struct fasync_struct *async_queue;
struct dentry *root_debugfs_dir;
@ -95,6 +97,7 @@ static ssize_t mbox_test_message_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
char *message;
void *data;
int ret;
@ -110,10 +113,13 @@ static ssize_t mbox_test_message_write(struct file *filp,
return -EINVAL;
}
tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
if (!tdev->message)
message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
if (!message)
return -ENOMEM;
mutex_lock(&tdev->mutex);
tdev->message = message;
ret = copy_from_user(tdev->message, userbuf, count);
if (ret) {
ret = -EFAULT;
@ -144,6 +150,8 @@ static ssize_t mbox_test_message_write(struct file *filp,
kfree(tdev->message);
tdev->signal = NULL;
mutex_unlock(&tdev->mutex);
return ret < 0 ? ret : count;
}
@ -392,6 +400,7 @@ static int mbox_test_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tdev);
spin_lock_init(&tdev->lock);
mutex_init(&tdev->mutex);
if (tdev->rx_channel) {
tdev->rx_buffer = devm_kzalloc(&pdev->dev,

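The fix above is an ordering discipline: allocate the buffer outside the lock (allocation may sleep), but publish it to tdev->message and consume it only under tdev->mutex, so a second writer can no longer free the same pointer twice or free it mid-copy. A pthread sketch of the same discipline, with illustrative names and sizes:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MBOX_MAX_MSG_LEN 128

struct test_device {
	pthread_mutex_t mutex;
	char *message;
};

static int device_write(struct test_device *tdev, const char *buf, size_t count)
{
	char *message;

	if (count >= MBOX_MAX_MSG_LEN)
		return -1;

	/* Allocate before taking the lock ... */
	message = calloc(1, MBOX_MAX_MSG_LEN);
	if (!message)
		return -1;

	/* ... but publish, fill and free it only while serialized. */
	pthread_mutex_lock(&tdev->mutex);
	tdev->message = message;
	memcpy(tdev->message, buf, count);
	/* ... hand the message to the mailbox channel here ... */
	free(tdev->message);
	tdev->message = NULL;
	pthread_mutex_unlock(&tdev->mutex);

	return (int)count;
}

int main(void)
{
	struct test_device tdev = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	printf("%d\n", device_write(&tdev, "ping", 5));	/* 5 */
	return 0;
}
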
drivers/ufs/core/ufs-mcq.c

@ -468,6 +468,9 @@ static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
u32 id = hwq->id, val;
int err;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
return -ETIMEDOUT;
writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
err = read_poll_timeout(readl, val, val & SQ_STS, 20,
@ -484,6 +487,9 @@ static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
u32 id = hwq->id, val;
int err;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
return -ETIMEDOUT;
writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
@ -511,6 +517,9 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
u32 nexus, id, val;
int err;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
return -ETIMEDOUT;
if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
if (!cmd)
return -EINVAL;
@ -593,6 +602,9 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
u64 addr, match;
u32 sq_head_slot;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
return true;
mutex_lock(&hwq->sq_mutex);
ufshcd_mcq_sq_stop(hba, hwq);

drivers/ufs/core/ufshcd.c

@ -8712,11 +8712,15 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
int ret;
u32 intrs;
ret = ufshcd_mcq_vops_config_esi(hba);
dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS);
intrs = UFSHCD_ENABLE_MCQ_INTRS;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
intrs &= ~MCQ_CQ_EVENT_STATUS;
ufshcd_enable_intr(hba, intrs);
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
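
Both UFS hunks lean on the same idiom: quirks are independent bits in hba->quirks, tested either as an early bail-out (the broken-SQRTC paths above return -ETIMEDOUT before touching the missing register) or as a mask adjustment before enabling interrupts. A sketch of the masking half; the quirk bit positions match the ufshcd.h hunk further down, but the interrupt-mask values are illustrative stand-ins, not the UFSHCI register layout:

#include <stdio.h>

#define UFSHCD_QUIRK_MCQ_BROKEN_INTR (1u << 20)
#define UFSHCD_QUIRK_MCQ_BROKEN_RTC (1u << 21)

/* Illustrative values only. */
#define UFSHCD_ENABLE_MCQ_INTRS 0x3fu
#define MCQ_CQ_EVENT_STATUS (1u << 4)

static unsigned int mcq_intrs(unsigned int quirks)
{
	unsigned int intrs = UFSHCD_ENABLE_MCQ_INTRS;

	/* Hosts that raise per-queue interrupts must not also enable CQES. */
	if (quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
		intrs &= ~MCQ_CQ_EVENT_STATUS;

	return intrs;
}

int main(void)
{
	printf("0x%x\n", mcq_intrs(0));					/* 0x3f */
	printf("0x%x\n", mcq_intrs(UFSHCD_QUIRK_MCQ_BROKEN_INTR));	/* 0x2f */
	return 0;
}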

drivers/ufs/host/ufs-mediatek.c

@ -901,6 +901,8 @@ static int ufs_mtk_init(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_CLK_SCALING;
hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)

drivers/usb/core/hub.c

@ -2018,6 +2018,19 @@ bool usb_device_is_owned(struct usb_device *udev)
return !!hub->ports[udev->portnum - 1]->port_owner;
}
static void update_port_device_state(struct usb_device *udev)
{
struct usb_hub *hub;
struct usb_port *port_dev;
if (udev->parent) {
hub = usb_hub_to_struct_hub(udev->parent);
port_dev = hub->ports[udev->portnum - 1];
WRITE_ONCE(port_dev->state, udev->state);
sysfs_notify_dirent(port_dev->state_kn);
}
}
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
@ -2030,6 +2043,7 @@ static void recursively_mark_NOTATTACHED(struct usb_device *udev)
if (udev->state == USB_STATE_SUSPENDED)
udev->active_duration -= jiffies;
udev->state = USB_STATE_NOTATTACHED;
update_port_device_state(udev);
}
/**
@ -2086,6 +2100,7 @@ void usb_set_device_state(struct usb_device *udev,
udev->state != USB_STATE_SUSPENDED)
udev->active_duration += jiffies;
udev->state = new_state;
update_port_device_state(udev);
} else
recursively_mark_NOTATTACHED(udev);
spin_unlock_irqrestore(&device_state_lock, flags);
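
update_port_device_state() is a lockless publish: the hub side stores the new state with WRITE_ONCE() and then pokes the sysfs node, while state_show() in the port.c hunk below reads it back with READ_ONCE(), so the attribute never needs the device lock. A C11 sketch of the same publish/consume pair, with the notification reduced to a comment:

#include <stdatomic.h>
#include <stdio.h>

/* Two of the usb_device_state values, numbered as in ch9.h. */
enum usb_device_state { USB_STATE_NOTATTACHED = 0, USB_STATE_CONFIGURED = 7 };

static _Atomic int port_state = USB_STATE_NOTATTACHED;

/* Hub side: WRITE_ONCE() analogue plus a sysfs_notify_dirent() stand-in. */
static void update_port_device_state(enum usb_device_state new_state)
{
	atomic_store_explicit(&port_state, new_state, memory_order_relaxed);
	/* ... wake poll() waiters on the "state" attribute here ... */
}

/* sysfs side: READ_ONCE() analogue; no lock taken. */
static int state_show(void)
{
	return atomic_load_explicit(&port_state, memory_order_relaxed);
}

int main(void)
{
	update_port_device_state(USB_STATE_CONFIGURED);
	printf("%d\n", state_show());	/* 7 */
	return 0;
}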

drivers/usb/core/hub.h

@ -84,6 +84,8 @@ struct usb_hub {
* @peer: related usb2 and usb3 ports (share the same connector)
* @req: default pm qos request for hubs without port power control
* @connect_type: port's connect type
* @state: device state of the usb device attached to the port
* @state_kn: kernfs_node of the sysfs attribute that accesses @state
* @location: opaque representation of platform connector location
* @status_lock: synchronize port_event() vs usb_port_{suspend|resume}
* @portnum: port index number, one-based
@ -98,6 +100,8 @@ struct usb_port {
struct usb_port *peer;
struct dev_pm_qos_request *req;
enum usb_port_connect_type connect_type;
enum usb_device_state state;
struct kernfs_node *state_kn;
usb_port_location_t location;
struct mutex status_lock;
u32 over_current_count;

drivers/usb/core/port.c

@ -133,6 +133,16 @@ static ssize_t connect_type_show(struct device *dev,
}
static DEVICE_ATTR_RO(connect_type);
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
enum usb_device_state state = READ_ONCE(port_dev->state);
return sysfs_emit(buf, "%s\n", usb_state_string(state));
}
static DEVICE_ATTR_RO(state);
static ssize_t over_current_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@ -232,6 +242,7 @@ static DEVICE_ATTR_RW(usb3_lpm_permit);
static struct attribute *port_dev_attrs[] = {
&dev_attr_connect_type.attr,
&dev_attr_state.attr,
&dev_attr_location.attr,
&dev_attr_quirks.attr,
&dev_attr_over_current_count.attr,
@ -677,19 +688,24 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
return retval;
}
port_dev->state_kn = sysfs_get_dirent(port_dev->dev.kobj.sd, "state");
if (!port_dev->state_kn) {
dev_err(&port_dev->dev, "failed to sysfs_get_dirent 'state'\n");
retval = -ENODEV;
goto err_unregister;
}
/* Set default policy of port-poweroff disabled. */
retval = dev_pm_qos_add_request(&port_dev->dev, port_dev->req,
DEV_PM_QOS_FLAGS, PM_QOS_FLAG_NO_POWER_OFF);
if (retval < 0) {
device_unregister(&port_dev->dev);
return retval;
goto err_put_kn;
}
retval = component_add(&port_dev->dev, &connector_ops);
if (retval) {
dev_warn(&port_dev->dev, "failed to add component\n");
device_unregister(&port_dev->dev);
return retval;
goto err_put_kn;
}
find_and_link_peer(hub, port1);
@ -726,6 +742,13 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
port_dev->req = NULL;
}
return 0;
err_put_kn:
sysfs_put(port_dev->state_kn);
err_unregister:
device_unregister(&port_dev->dev);
return retval;
}
void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
@ -737,5 +760,6 @@ void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
if (peer)
unlink_peers(port_dev, peer);
component_del(&port_dev->dev, &connector_ops);
sysfs_put(port_dev->state_kn);
device_unregister(&port_dev->dev);
}
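
The error-path rework above replaces two open-coded device_unregister() calls with fall-through labels, so a later failure first drops the kernfs reference and then unregisters, in exact reverse order of acquisition. The shape of that idiom, with placeholder resources:

#include <stdio.h>

static int grab_a(void) { puts("grab a"); return 0; }
static int grab_b(int fail) { return fail ? -1 : (puts("grab b"), 0); }
static int grab_c(int fail) { return fail ? -1 : (puts("grab c"), 0); }
static void drop_a(void) { puts("drop a"); }
static void drop_b(void) { puts("drop b"); }

static int create(int fail_b, int fail_c)
{
	int ret;

	ret = grab_a();			/* device_register() analogue */
	if (ret)
		return ret;

	ret = grab_b(fail_b);		/* sysfs_get_dirent() analogue */
	if (ret)
		goto err_drop_a;

	ret = grab_c(fail_c);		/* qos-request/component analogue */
	if (ret)
		goto err_drop_b;

	return 0;

err_drop_b:
	drop_b();	/* sysfs_put() analogue */
err_drop_a:	/* fall through: later labels undo earlier steps */
	drop_a();	/* device_unregister() analogue */
	return ret;
}

int main(void)
{
	return create(0, 1) ? 1 : 0;	/* fails at c: drops b, then a */
}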

drivers/usb/gadget/function/uvc_video.c

@ -386,6 +386,9 @@ static void uvcg_video_pump(struct work_struct *work)
struct uvc_buffer *buf;
unsigned long flags;
int ret;
bool buf_int;
/* video->max_payload_size is only set when using bulk transfer */
bool is_bulk = video->max_payload_size;
while (video->ep->enabled) {
/*
@ -408,20 +411,35 @@ static void uvcg_video_pump(struct work_struct *work)
*/
spin_lock_irqsave(&queue->irqlock, flags);
buf = uvcg_queue_head(queue);
if (buf == NULL) {
if (buf != NULL) {
video->encode(req, video, buf);
/* Always interrupt for the last request of a video buffer */
buf_int = buf->state == UVC_BUF_STATE_DONE;
} else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) {
/*
* No video buffer available; the queue is still connected and
* we're transferring over ISOC. Queue a 0 length request to
* prevent missed ISOC transfers.
*/
req->length = 0;
buf_int = false;
} else {
/*
* Either the queue has been disconnected or no video buffer is
* available for bulk transfer. Either way, stop processing
* further.
*/
spin_unlock_irqrestore(&queue->irqlock, flags);
break;
}
video->encode(req, video, buf);
/*
* With usb3 we have more requests. This will decrease the
* interrupt load to a quarter but also catches the corner
cases, which need to be handled.
*/
if (list_empty(&video->req_free) ||
buf->state == UVC_BUF_STATE_DONE ||
if (list_empty(&video->req_free) || buf_int ||
!(video->req_int_count %
DIV_ROUND_UP(video->uvc_num_requests, 4))) {
video->req_int_count = 0;
@ -441,8 +459,7 @@ static void uvcg_video_pump(struct work_struct *work)
/* Endpoint now owns the request */
req = NULL;
if (buf->state != UVC_BUF_STATE_DONE)
video->req_int_count++;
video->req_int_count++;
}
if (!req)
@ -527,4 +544,3 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
return 0;
}
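
The reworked loop body reduces to a three-way decision per iteration, which the rewrite makes explicit: encode when a buffer is queued, queue a zero-length request to keep the ISOC schedule alive when the queue is merely empty, and stop only on disconnect or when streaming over bulk. The same logic as a pure function, with illustrative types:

#include <stdbool.h>
#include <stdio.h>

enum pump_action { PUMP_ENCODE, PUMP_ZERO_LENGTH, PUMP_STOP };

static enum pump_action next_action(bool have_buf, bool disconnected, bool is_bulk)
{
	if (have_buf)
		return PUMP_ENCODE;
	if (!disconnected && !is_bulk)
		return PUMP_ZERO_LENGTH;	/* prevent missed ISOC transfers */
	return PUMP_STOP;
}

int main(void)
{
	printf("%d\n", next_action(false, false, false));	/* 1: ISOC keepalive */
	printf("%d\n", next_action(false, false, true));	/* 2: bulk stops */
	return 0;
}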

drivers/usb/gadget/udc/core.c

@ -37,10 +37,6 @@ static struct bus_type gadget_bus_type;
* @vbus: for udcs who care about vbus status, this value is real vbus status;
* for udcs who do not care about vbus status, this value is always true
* @started: the UDC's started state. True if the UDC had started.
* @connect_lock: protects udc->vbus, udc->started, gadget->connect, gadget->deactivate related
* functions. usb_gadget_connect_locked, usb_gadget_disconnect_locked,
* usb_udc_connect_control_locked, usb_gadget_udc_start_locked, usb_gadget_udc_stop_locked are
* called with this lock held.
*
* This represents the internal data structure which is used by the UDC-class
* to hold information about udc driver and gadget together.
@ -52,7 +48,6 @@ struct usb_udc {
struct list_head list;
bool vbus;
bool started;
struct mutex connect_lock;
};
static struct class *udc_class;
@ -665,9 +660,17 @@ int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
}
EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
/* Internal version of usb_gadget_connect needs to be called with connect_lock held. */
static int usb_gadget_connect_locked(struct usb_gadget *gadget)
__must_hold(&gadget->udc->connect_lock)
/**
* usb_gadget_connect - software-controlled connect to USB host
* @gadget:the peripheral being connected
*
* Enables the D+ (or potentially D-) pullup. The host will start
* enumerating this gadget when the pullup is active and a VBUS session
* is active (the link is powered).
*
* Returns zero on success, else negative errno.
*/
int usb_gadget_connect(struct usb_gadget *gadget)
{
int ret = 0;
@ -676,12 +679,10 @@ static int usb_gadget_connect_locked(struct usb_gadget *gadget)
goto out;
}
if (gadget->deactivated || !gadget->udc->started) {
if (gadget->deactivated) {
/*
* If gadget is deactivated we only save new state.
* Gadget will be connected automatically after activation.
*
* udc first needs to be started before gadget can be pulled up.
*/
gadget->connected = true;
goto out;
@ -696,69 +697,8 @@ static int usb_gadget_connect_locked(struct usb_gadget *gadget)
return ret;
}
/**
* usb_gadget_connect - software-controlled connect to USB host
* @gadget:the peripheral being connected
*
* Enables the D+ (or potentially D-) pullup. The host will start
* enumerating this gadget when the pullup is active and a VBUS session
* is active (the link is powered).
*
* Returns zero on success, else negative errno.
*/
int usb_gadget_connect(struct usb_gadget *gadget)
{
int ret;
mutex_lock(&gadget->udc->connect_lock);
ret = usb_gadget_connect_locked(gadget);
mutex_unlock(&gadget->udc->connect_lock);
return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_connect);
/* Internal version of usb_gadget_disconnect needs to be called with connect_lock held. */
static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
__must_hold(&gadget->udc->connect_lock)
{
int ret = 0;
if (!gadget->ops->pullup) {
ret = -EOPNOTSUPP;
goto out;
}
if (!gadget->connected)
goto out;
if (gadget->deactivated || !gadget->udc->started) {
/*
* If gadget is deactivated we only save new state.
* Gadget will stay disconnected after activation.
*
* udc should have been started before gadget being pulled down.
*/
gadget->connected = false;
goto out;
}
ret = gadget->ops->pullup(gadget, 0);
if (!ret)
gadget->connected = 0;
mutex_lock(&udc_lock);
if (gadget->udc->driver)
gadget->udc->driver->disconnect(gadget);
mutex_unlock(&udc_lock);
out:
trace_usb_gadget_disconnect(gadget, ret);
return ret;
}
/**
* usb_gadget_disconnect - software-controlled disconnect from USB host
* @gadget:the peripheral being disconnected
@ -774,11 +714,36 @@ static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
*/
int usb_gadget_disconnect(struct usb_gadget *gadget)
{
int ret;
int ret = 0;
mutex_lock(&gadget->udc->connect_lock);
ret = usb_gadget_disconnect_locked(gadget);
mutex_unlock(&gadget->udc->connect_lock);
if (!gadget->ops->pullup) {
ret = -EOPNOTSUPP;
goto out;
}
if (!gadget->connected)
goto out;
if (gadget->deactivated) {
/*
* If gadget is deactivated we only save new state.
* Gadget will stay disconnected after activation.
*/
gadget->connected = false;
goto out;
}
ret = gadget->ops->pullup(gadget, 0);
if (!ret)
gadget->connected = 0;
mutex_lock(&udc_lock);
if (gadget->udc->driver)
gadget->udc->driver->disconnect(gadget);
mutex_unlock(&udc_lock);
out:
trace_usb_gadget_disconnect(gadget, ret);
return ret;
}
@ -802,11 +767,10 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
if (gadget->deactivated)
goto out;
mutex_lock(&gadget->udc->connect_lock);
if (gadget->connected) {
ret = usb_gadget_disconnect_locked(gadget);
ret = usb_gadget_disconnect(gadget);
if (ret)
goto unlock;
goto out;
/*
* If gadget was being connected before deactivation, we want
@ -816,8 +780,6 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
}
gadget->deactivated = true;
unlock:
mutex_unlock(&gadget->udc->connect_lock);
out:
trace_usb_gadget_deactivate(gadget, ret);
@ -841,7 +803,6 @@ int usb_gadget_activate(struct usb_gadget *gadget)
if (!gadget->deactivated)
goto out;
mutex_lock(&gadget->udc->connect_lock);
gadget->deactivated = false;
/*
@ -849,8 +810,7 @@ int usb_gadget_activate(struct usb_gadget *gadget)
* while it was being deactivated, we call usb_gadget_connect().
*/
if (gadget->connected)
ret = usb_gadget_connect_locked(gadget);
mutex_unlock(&gadget->udc->connect_lock);
ret = usb_gadget_connect(gadget);
out:
trace_usb_gadget_activate(gadget, ret);
@ -1091,13 +1051,12 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);
/* ------------------------------------------------------------------------- */
/* Acquire connect_lock before calling this function. */
static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
static void usb_udc_connect_control(struct usb_udc *udc)
{
if (udc->vbus && udc->started)
usb_gadget_connect_locked(udc->gadget);
if (udc->vbus)
usb_gadget_connect(udc->gadget);
else
usb_gadget_disconnect_locked(udc->gadget);
usb_gadget_disconnect(udc->gadget);
}
/**
@ -1113,12 +1072,10 @@ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
{
struct usb_udc *udc = gadget->udc;
mutex_lock(&udc->connect_lock);
if (udc) {
udc->vbus = status;
usb_udc_connect_control_locked(udc);
usb_udc_connect_control(udc);
}
mutex_unlock(&udc->connect_lock);
}
EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
@ -1140,7 +1097,7 @@ void usb_gadget_udc_reset(struct usb_gadget *gadget,
EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
/**
* usb_gadget_udc_start_locked - tells usb device controller to start up
* usb_gadget_udc_start - tells usb device controller to start up
* @udc: The UDC to be started
*
* This call is issued by the UDC Class driver when it's about
@ -1151,11 +1108,8 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
* necessary to have it powered on.
*
* Returns zero on success, else negative errno.
*
* Caller should acquire connect_lock before invoking this function.
*/
static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
__must_hold(&udc->connect_lock)
static inline int usb_gadget_udc_start(struct usb_udc *udc)
{
int ret;
@ -1172,7 +1126,7 @@ static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
}
/**
* usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore
* usb_gadget_udc_stop - tells usb device controller we don't need it anymore
* @udc: The UDC to be stopped
*
* This call is issued by the UDC Class driver after calling
@ -1181,11 +1135,8 @@ static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
* The details are implementation specific, but it can go as
* far as powering off UDC completely and disable its data
* line pullups.
*
* Caller should acquire connect lock before invoking this function.
*/
static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc)
__must_hold(&udc->connect_lock)
static inline void usb_gadget_udc_stop(struct usb_udc *udc)
{
if (!udc->started) {
dev_err(&udc->dev, "UDC had already stopped\n");
@ -1344,7 +1295,6 @@ int usb_add_gadget(struct usb_gadget *gadget)
udc->gadget = gadget;
gadget->udc = udc;
mutex_init(&udc->connect_lock);
udc->started = false;
@ -1546,15 +1496,11 @@ static int gadget_bind_driver(struct device *dev)
if (ret)
goto err_bind;
mutex_lock(&udc->connect_lock);
ret = usb_gadget_udc_start_locked(udc);
if (ret) {
mutex_unlock(&udc->connect_lock);
ret = usb_gadget_udc_start(udc);
if (ret)
goto err_start;
}
usb_gadget_enable_async_callbacks(udc);
usb_udc_connect_control_locked(udc);
mutex_unlock(&udc->connect_lock);
usb_udc_connect_control(udc);
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
return 0;
@ -1585,14 +1531,12 @@ static void gadget_unbind_driver(struct device *dev)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
mutex_lock(&udc->connect_lock);
usb_gadget_disconnect_locked(gadget);
usb_gadget_disconnect(gadget);
usb_gadget_disable_async_callbacks(udc);
if (gadget->irq)
synchronize_irq(gadget->irq);
udc->driver->unbind(gadget);
usb_gadget_udc_stop_locked(udc);
mutex_unlock(&udc->connect_lock);
usb_gadget_udc_stop(udc);
mutex_lock(&udc_lock);
driver->is_bound = false;
@ -1678,15 +1622,11 @@ static ssize_t soft_connect_store(struct device *dev,
}
if (sysfs_streq(buf, "connect")) {
mutex_lock(&udc->connect_lock);
usb_gadget_udc_start_locked(udc);
usb_gadget_connect_locked(udc->gadget);
mutex_unlock(&udc->connect_lock);
usb_gadget_udc_start(udc);
usb_gadget_connect(udc->gadget);
} else if (sysfs_streq(buf, "disconnect")) {
mutex_lock(&udc->connect_lock);
usb_gadget_disconnect_locked(udc->gadget);
usb_gadget_udc_stop_locked(udc);
mutex_unlock(&udc->connect_lock);
usb_gadget_disconnect(udc->gadget);
usb_gadget_udc_stop(udc);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
ret = -EINVAL;

include/linux/cpuhotplug.h

@ -161,7 +161,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
CPUHP_AP_ARM_SDEI_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,

include/linux/dma-buf.h

@ -729,5 +729,6 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
long dma_buf_set_name(struct dma_buf *dmabuf, const char *name);
int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags);
#endif /* __DMA_BUF_H__ */

include/linux/mmzone.h

@ -94,7 +94,7 @@ static inline bool is_migrate_movable(int mt)
*/
static inline bool migratetype_is_mergeable(int mt)
{
return mt < MIGRATE_PCPTYPES;
return mt <= MIGRATE_RECLAIMABLE;
}
#define for_each_migratetype_order(order, type) \

include/linux/platform_device.h

@ -213,7 +213,18 @@ extern void platform_device_put(struct platform_device *pdev);
struct platform_driver {
int (*probe)(struct platform_device *);
/*
* Traditionally the remove callback returned an int which however is
* ignored by the driver core. This led to wrong expectations by driver
* authors who thought returning an error code was a valid error
* handling strategy. To convert to a callback returning void, new
* drivers should implement .remove_new() until the conversion is done
* that eventually makes .remove() return void.
*/
int (*remove)(struct platform_device *);
void (*remove_new)(struct platform_device *);
void (*shutdown)(struct platform_device *);
int (*suspend)(struct platform_device *, pm_message_t state);
int (*resume)(struct platform_device *);
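
A userspace analogue of the migration the comment above describes, assuming nothing beyond what it says: the core ignores the legacy callback's int return, so the void-returning variant exists to stop drivers from pretending removal can fail. All names here are illustrative:

#include <stdio.h>

struct driver {
	int (*remove)(void *dev);	/* legacy: return value is ignored */
	void (*remove_new)(void *dev);	/* preferred: cannot pretend to fail */
};

static void core_remove(const struct driver *drv, void *dev)
{
	if (drv->remove_new)
		drv->remove_new(dev);
	else if (drv->remove)
		drv->remove(dev);	/* result deliberately dropped */
}

static void my_remove(void *dev) { (void)dev; puts("cleaned up"); }

int main(void)
{
	struct driver drv = { .remove_new = my_remove };

	core_remove(&drv, NULL);
	return 0;
}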

include/linux/posix-timers.h

@ -4,6 +4,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/alarmtimer.h>
#include <linux/timerqueue.h>
@ -62,16 +63,18 @@ static inline int clockid_to_fd(const clockid_t clk)
* cpu_timer - Posix CPU timer representation for k_itimer
* @node: timerqueue node to queue in the task/sig
* @head: timerqueue head on which this timer is queued
* @task: Pointer to target task
* @pid: Pointer to target task PID
* @elist: List head for the expiry list
* @firing: Timer is currently firing
* @handling: Pointer to the task which handles expiry
*/
struct cpu_timer {
struct timerqueue_node node;
struct timerqueue_head *head;
struct pid *pid;
struct list_head elist;
int firing;
struct timerqueue_node node;
struct timerqueue_head *head;
struct pid *pid;
struct list_head elist;
int firing;
struct task_struct __rcu *handling;
};
static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
@ -135,10 +138,12 @@ struct posix_cputimers {
/**
* posix_cputimers_work - Container for task work based posix CPU timer expiry
* @work: The task work to be scheduled
* @mutex: Mutex held around expiry in context of this task work
* @scheduled: @work has been scheduled already, no further processing
*/
struct posix_cputimers_work {
struct callback_head work;
struct mutex mutex;
unsigned int scheduled;
};

include/trace/hooks/mm.h

@ -98,10 +98,12 @@ DECLARE_HOOK(android_vh_si_mem_available_adjust,
DECLARE_HOOK(android_vh_si_meminfo_adjust,
TP_PROTO(unsigned long *totalram, unsigned long *freeram),
TP_ARGS(totalram, freeram));
DECLARE_RESTRICTED_HOOK(android_rvh_ctl_dirty_rate,
TP_PROTO(void *unused),
TP_ARGS(unused), 1);
DECLARE_HOOK(android_vh_madvise_cold_pageout_skip,
TP_PROTO(struct vm_area_struct *vma, struct page *page, bool pageout, bool *need_skip),
TP_ARGS(vma, page, pageout, need_skip));
struct mem_cgroup;
DECLARE_HOOK(android_vh_mem_cgroup_alloc,

include/uapi/linux/const.h

@ -28,7 +28,7 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
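
The spelling change above matters for userspace builds of this uapi header: plain typeof is a GNU keyword that gcc and clang disable under strict ISO modes such as -std=c99, while the __typeof__ form is always accepted. A compile-and-run check of the macro itself (builds with cc -std=c99 align.c):

#include <stdio.h>

#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)

int main(void)
{
	unsigned long v = 4097;

	/* Rounds up to the next multiple of 4096. */
	printf("%lu\n", __ALIGN_KERNEL(v, 4096UL));	/* 8192 */
	return 0;
}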

include/ufs/ufshcd.h

@ -616,6 +616,19 @@ enum ufshcd_quirks {
* auto-hibernate capability but it's FASTAUTO only.
*/
UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18,
/*
* Some hosts raise a per-queue interrupt in addition to
* CQES (traditional) when ESI is disabled.
* Enabling this quirk disables CQES and uses the per-queue interrupt.
*/
UFSHCD_QUIRK_MCQ_BROKEN_INTR = 1 << 20,
/*
* Some hosts do not implement the SQ Run Time Command (SQRTC) register
* and thus need this quirk to skip the related flow.
*/
UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21,
};
enum ufshcd_android_quirks {

kernel/time/posix-cpu-timers.c

@ -847,6 +847,8 @@ static u64 collect_timerqueue(struct timerqueue_head *head,
return expires;
ctmr->firing = 1;
/* See posix_cpu_timer_wait_running() */
rcu_assign_pointer(ctmr->handling, current);
cpu_timer_dequeue(ctmr);
list_add_tail(&ctmr->elist, firing);
}
@ -1162,7 +1164,49 @@ static void handle_posix_cpu_timers(struct task_struct *tsk);
#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
static void posix_cpu_timers_work(struct callback_head *work)
{
struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
mutex_lock(&cw->mutex);
handle_posix_cpu_timers(current);
mutex_unlock(&cw->mutex);
}
/*
* Invoked from the posix-timer core when a cancel operation failed because
* the timer is marked firing. The caller holds rcu_read_lock(), which
* protects the timer and the task which is expiring it from being freed.
*/
static void posix_cpu_timer_wait_running(struct k_itimer *timr)
{
struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
/* Has the handling task completed expiry already? */
if (!tsk)
return;
/* Ensure that the task cannot go away */
get_task_struct(tsk);
/* Now drop the RCU protection so the mutex can be locked */
rcu_read_unlock();
/* Wait on the expiry mutex */
mutex_lock(&tsk->posix_cputimers_work.mutex);
/* Release it immediately again. */
mutex_unlock(&tsk->posix_cputimers_work.mutex);
/* Drop the task reference. */
put_task_struct(tsk);
/* Relock RCU so the callsite is balanced */
rcu_read_lock();
}
static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
{
/* Ensure that timr->it.cpu.handling task cannot go away */
rcu_read_lock();
spin_unlock_irq(&timr->it_lock);
posix_cpu_timer_wait_running(timr);
rcu_read_unlock();
/* @timr is on stack and is valid */
spin_lock_irq(&timr->it_lock);
}
/*
@ -1178,6 +1222,7 @@ void clear_posix_cputimers_work(struct task_struct *p)
sizeof(p->posix_cputimers_work.work));
init_task_work(&p->posix_cputimers_work.work,
posix_cpu_timers_work);
mutex_init(&p->posix_cputimers_work.mutex);
p->posix_cputimers_work.scheduled = false;
}
@ -1256,6 +1301,18 @@ static inline void __run_posix_cpu_timers(struct task_struct *tsk)
lockdep_posixtimer_exit();
}
static void posix_cpu_timer_wait_running(struct k_itimer *timr)
{
cpu_relax();
}
static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
{
spin_unlock_irq(&timr->it_lock);
cpu_relax();
spin_lock_irq(&timr->it_lock);
}
static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
{
return false;
@ -1364,6 +1421,8 @@ static void handle_posix_cpu_timers(struct task_struct *tsk)
*/
if (likely(cpu_firing >= 0))
cpu_timer_fire(timer);
/* See posix_cpu_timer_wait_running() */
rcu_assign_pointer(timer->it.cpu.handling, NULL);
spin_unlock(&timer->it_lock);
}
}
@ -1498,23 +1557,16 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
expires = cpu_timer_getexpires(&timer.it.cpu);
error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
if (!error) {
/*
* Timer is now unarmed, deletion can not fail.
*/
/* Timer is now unarmed, deletion can not fail. */
posix_cpu_timer_del(&timer);
} else {
while (error == TIMER_RETRY) {
posix_cpu_timer_wait_running_nsleep(&timer);
error = posix_cpu_timer_del(&timer);
}
}
spin_unlock_irq(&timer.it_lock);
while (error == TIMER_RETRY) {
/*
* We need to handle the case when the timer was or is in the
* middle of firing. In other cases we already freed
* resources.
*/
spin_lock_irq(&timer.it_lock);
error = posix_cpu_timer_del(&timer);
spin_unlock_irq(&timer.it_lock);
}
spin_unlock_irq(&timer.it_lock);
if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
/*
@ -1624,6 +1676,7 @@ const struct k_clock clock_posix_cpu = {
.timer_del = posix_cpu_timer_del,
.timer_get = posix_cpu_timer_get,
.timer_rearm = posix_cpu_timer_rearm,
.timer_wait_running = posix_cpu_timer_wait_running,
};
const struct k_clock clock_process = {

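The kernel's wait path above also re-checks the handling task under RCU before touching the mutex; stripped of that, the core synchronization is just "take the expiry mutex, release it immediately", which blocks a canceller exactly until the in-flight expiry finishes. A pthread sketch of that handshake (thread ordering is unconstrained here; the point is only that the waiter cannot return while expiry holds the mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t expiry_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *expire(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&expiry_mutex);
	puts("expiry running");	/* ... fire timers ... */
	pthread_mutex_unlock(&expiry_mutex);
	return NULL;
}

/* posix_cpu_timer_wait_running() analogue. */
static void wait_for_expiry(void)
{
	pthread_mutex_lock(&expiry_mutex);	/* blocks while expiry runs */
	pthread_mutex_unlock(&expiry_mutex);	/* release it immediately again */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, expire, NULL);
	wait_for_expiry();
	pthread_join(t, NULL);
	return 0;
}
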
kernel/time/posix-timers.c

@ -846,6 +846,10 @@ static struct k_itimer *timer_wait_running(struct k_itimer *timer,
rcu_read_lock();
unlock_timer(timer, *flags);
/*
* kc->timer_wait_running() might drop RCU lock. So @timer
* cannot be touched anymore after the function returns!
*/
if (!WARN_ON_ONCE(!kc->timer_wait_running))
kc->timer_wait_running(timer);

mm/madvise.c

@ -438,6 +438,8 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
flush_tlb_batched_pending(mm);
arch_enter_lazy_mmu_mode();
for (; addr < end; pte++, addr += PAGE_SIZE) {
bool need_skip = false;
ptent = *pte;
if (pte_none(ptent))
@ -454,6 +456,12 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
if (!page || is_zone_device_page(page))
continue;
trace_android_vh_madvise_cold_pageout_skip(vma, page, pageout,
&need_skip);
if (need_skip)
continue;
/*
* Creating a THP page is expensive so split it only if we
* are sure it's worth. Split it if we are only owner.

mm/mmap.c

@ -1607,7 +1607,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
*/
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
unsigned long length, gap;
unsigned long length, gap, low_limit;
struct vm_area_struct *tmp;
MA_STATE(mas, &current->mm->mm_mt, 0, 0);
@ -1616,12 +1617,29 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
if (length < info->length)
return -ENOMEM;
if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1,
length))
low_limit = info->low_limit;
retry:
if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length))
return -ENOMEM;
gap = mas.index;
gap += (info->align_offset - gap) & info->align_mask;
tmp = mas_next(&mas, ULONG_MAX);
if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
if (vm_start_gap(tmp) < gap + length - 1) {
low_limit = tmp->vm_end;
mas_reset(&mas);
goto retry;
}
} else {
tmp = mas_prev(&mas, 0);
if (tmp && vm_end_gap(tmp) > gap) {
low_limit = vm_end_gap(tmp);
mas_reset(&mas);
goto retry;
}
}
return gap;
}
@ -1637,7 +1655,8 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
*/
static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
unsigned long length, gap;
unsigned long length, gap, high_limit, gap_end;
struct vm_area_struct *tmp;
MA_STATE(mas, &current->mm->mm_mt, 0, 0);
/* Adjust search length to account for worst case alignment overhead */
@ -1645,12 +1664,31 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
if (length < info->length)
return -ENOMEM;
if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1,
high_limit = info->high_limit;
retry:
if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1,
length))
return -ENOMEM;
gap = mas.last + 1 - info->length;
gap -= (gap - info->align_offset) & info->align_mask;
gap_end = mas.last;
tmp = mas_next(&mas, ULONG_MAX);
if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
if (vm_start_gap(tmp) <= gap_end) {
high_limit = vm_start_gap(tmp);
mas_reset(&mas);
goto retry;
}
} else {
tmp = mas_prev(&mas, 0);
if (tmp && vm_end_gap(tmp) > gap) {
high_limit = tmp->vm_start;
mas_reset(&mas);
goto retry;
}
}
return gap;
}
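
Both search directions share the alignment step (gap += (info->align_offset - gap) & info->align_mask going up, the subtracting twin going down): it moves the candidate to the next address congruent to align_offset modulo (align_mask + 1). Worked in isolation:

#include <stdio.h>

int main(void)
{
	unsigned long align_mask = 0xfffful;	/* 64 KB alignment - 1 */
	unsigned long align_offset = 0x1000ul;	/* want addr % 64K == 0x1000 */
	unsigned long gap = 0x12345ul;

	gap += (align_offset - gap) & align_mask;
	printf("0x%lx\n", gap);	/* 0x21000: next address with low bits 0x1000 */
	return 0;
}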

mm/vmscan.c

@ -6359,8 +6359,10 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
#ifdef CONFIG_ANDROID_VENDOR_OEM_DATA
trace_android_vh_should_continue_reclaim(&sc->android_vendor_data1,
&sc->nr_to_reclaim, &sc->nr_reclaimed, &continue_reclaim);
#endif
if (!continue_reclaim)
return false;
@ -6722,9 +6724,11 @@ static void modify_scan_control(struct scan_control *sc)
{
bool file_is_tiny = false, may_writepage = true;
#ifdef CONFIG_ANDROID_VENDOR_OEM_DATA
trace_android_vh_modify_scan_control(&sc->android_vendor_data1,
&sc->nr_to_reclaim, sc->target_mem_cgroup, &file_is_tiny,
&may_writepage);
#endif
if (file_is_tiny)
sc->file_is_tiny = true;

modules.bzl

@ -37,6 +37,7 @@ COMMON_GKI_MODULES_LIST = [
"drivers/usb/class/cdc-acm.ko",
"drivers/usb/serial/ftdi_sio.ko",
"drivers/usb/serial/usbserial.ko",
"kernel/kheaders.ko",
"lib/crypto/libarc4.ko",
"mm/zsmalloc.ko",
"net/6lowpan/6lowpan.ko",

net/tipc/bearer.c

@ -541,6 +541,19 @@ int tipc_bearer_mtu(struct net *net, u32 bearer_id)
return mtu;
}
int tipc_bearer_min_mtu(struct net *net, u32 bearer_id)
{
int mtu = TIPC_MIN_BEARER_MTU;
struct tipc_bearer *b;
rcu_read_lock();
b = bearer_get(net, bearer_id);
if (b)
mtu += b->encap_hlen;
rcu_read_unlock();
return mtu;
}
/* tipc_bearer_xmit_skb - sends buffer to destination over bearer
*/
void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
@ -1138,8 +1151,8 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
#ifdef CONFIG_TIPC_MEDIA_UDP
if (tipc_udp_mtu_bad(nla_get_u32
(props[TIPC_NLA_PROP_MTU]))) {
if (nla_get_u32(props[TIPC_NLA_PROP_MTU]) <
b->encap_hlen + TIPC_MIN_BEARER_MTU) {
NL_SET_ERR_MSG(info->extack,
"MTU value is out-of-range");
return -EINVAL;

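The corrected bound in both hunks is the same quantity tipc_bearer_min_mtu() computes: the TIPC floor plus whatever UDP/IP encapsulation headroom the bearer consumes, so IPv6 bearers get a higher floor than IPv4 ones. A sketch of the check; the TIPC_MIN_BEARER_MTU value here is a stand-in, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define TIPC_MIN_BEARER_MTU 1500u	/* illustrative floor */

struct bearer { unsigned int encap_hlen; };

static bool mtu_ok(const struct bearer *b, unsigned int mtu)
{
	return mtu >= TIPC_MIN_BEARER_MTU + b->encap_hlen;
}

int main(void)
{
	struct bearer v4 = { .encap_hlen = 20 + 8 };	/* iphdr + udphdr */
	struct bearer v6 = { .encap_hlen = 40 + 8 };	/* ipv6hdr + udphdr */

	printf("%d %d\n", mtu_ok(&v4, 1540), mtu_ok(&v6, 1540));	/* 1 0 */
	return 0;
}
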
net/tipc/bearer.h

@ -146,6 +146,7 @@ struct tipc_media {
* @identity: array index of this bearer within TIPC bearer array
* @disc: ptr to link setup request
* @net_plane: network plane ('A' through 'H') currently associated with bearer
* @encap_hlen: encap headers length
* @up: bearer up flag (bit 0)
* @refcnt: tipc_bearer reference counter
*
@ -170,6 +171,7 @@ struct tipc_bearer {
u32 identity;
struct tipc_discoverer *disc;
char net_plane;
u16 encap_hlen;
unsigned long up;
refcount_t refcnt;
};
@ -232,6 +234,7 @@ int tipc_bearer_setup(void);
void tipc_bearer_cleanup(void);
void tipc_bearer_stop(struct net *net);
int tipc_bearer_mtu(struct net *net, u32 bearer_id);
int tipc_bearer_min_mtu(struct net *net, u32 bearer_id);
bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id);
void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
struct sk_buff *skb,

View File

@ -2200,7 +2200,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
struct tipc_msg *hdr = buf_msg(skb);
struct tipc_gap_ack_blks *ga = NULL;
bool reply = msg_probe(hdr), retransmitted = false;
u32 dlen = msg_data_sz(hdr), glen = 0;
u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
u16 peers_snd_nxt = msg_next_sent(hdr);
u16 peers_tol = msg_link_tolerance(hdr);
u16 peers_prio = msg_linkprio(hdr);
@ -2239,6 +2239,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
switch (mtyp) {
case RESET_MSG:
case ACTIVATE_MSG:
msg_max = msg_max_pkt(hdr);
if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
break;
/* Complete own link name with peer's interface name */
if_name = strrchr(l->name, ':') + 1;
if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
@ -2283,8 +2286,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
l->peer_session = msg_session(hdr);
l->in_session = true;
l->peer_bearer_id = msg_bearer_id(hdr);
if (l->mtu > msg_max_pkt(hdr))
l->mtu = msg_max_pkt(hdr);
if (l->mtu > msg_max)
l->mtu = msg_max;
break;
case STATE_MSG:

net/tipc/udp_media.c

@ -738,8 +738,8 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
udp_conf.local_ip.s_addr = local.ipv4.s_addr;
udp_conf.use_udp_checksums = false;
ub->ifindex = dev->ifindex;
if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
sizeof(struct udphdr))) {
b->encap_hlen = sizeof(struct iphdr) + sizeof(struct udphdr);
if (tipc_mtu_bad(dev, b->encap_hlen)) {
err = -EINVAL;
goto err;
}
@ -760,6 +760,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
else
udp_conf.local_ip6 = local.ipv6;
ub->ifindex = dev->ifindex;
b->encap_hlen = sizeof(struct ipv6hdr) + sizeof(struct udphdr);
b->mtu = 1280;
#endif
} else {

sound/soc/fsl/fsl_micfil.c

@ -712,7 +712,7 @@ static int fsl_micfil_probe(struct platform_device *pdev)
ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
if (ret) {
dev_err(&pdev->dev, "failed to pcm register\n");
return ret;
goto err_pm_disable;
}
fsl_micfil_dai.capture.formats = micfil->soc->formats;
@ -722,9 +722,20 @@ static int fsl_micfil_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to register component %s\n",
fsl_micfil_component.name);
goto err_pm_disable;
}
return ret;
err_pm_disable:
pm_runtime_disable(&pdev->dev);
return ret;
}
static void fsl_micfil_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
}
static int __maybe_unused fsl_micfil_runtime_suspend(struct device *dev)
@ -785,6 +796,7 @@ static const struct dev_pm_ops fsl_micfil_pm_ops = {
static struct platform_driver fsl_micfil_driver = {
.probe = fsl_micfil_probe,
.remove_new = fsl_micfil_remove,
.driver = {
.name = "fsl-micfil-dai",
.pm = &fsl_micfil_pm_ops,