Merge "build.config.msm.gki: Enable mixed build for gki"

qctecmdr 2021-05-11 15:46:27 -07:00 committed by Gerrit - the friendly Code Review server
commit dc92568766
124 changed files with 71253 additions and 69476 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 32
SUBLEVEL = 34
EXTRAVERSION =
NAME = Dare mighty things

File diff suppressed because it is too large


@ -50,6 +50,7 @@
bio_endio
bio_init
bio_put
__bitmap_clear
bitmap_from_arr32
bitmap_parse
bitmap_parselist
@ -79,9 +80,10 @@
blocking_notifier_call_chain
blocking_notifier_chain_register
blocking_notifier_chain_unregister
bpf_trace_run1
bpf_trace_run10
bpf_trace_run11
bpf_trace_run1
bpf_trace_run12
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
@ -207,11 +209,13 @@
cpufreq_driver_fast_switch
cpufreq_driver_resolve_freq
__cpufreq_driver_target
cpufreq_driver_target
cpufreq_enable_fast_switch
cpufreq_freq_transition_begin
cpufreq_freq_transition_end
cpufreq_frequency_table_verify
cpufreq_generic_attr
cpufreq_get
cpufreq_policy_transition_delay_us
cpufreq_quick_get
cpufreq_register_driver
@ -246,6 +250,11 @@
cpu_subsys
crc8
crc8_populate_msb
crypto_aead_decrypt
crypto_aead_encrypt
crypto_aead_setauthsize
crypto_aead_setkey
crypto_alloc_aead
crypto_alloc_base
crypto_alloc_shash
crypto_comp_compress
@ -254,6 +263,8 @@
crypto_register_alg
crypto_register_scomp
crypto_shash_digest
crypto_shash_finup
crypto_shash_setkey
crypto_unregister_alg
crypto_unregister_scomp
csum_ipv6_magic
@ -374,6 +385,7 @@
devm_platform_ioremap_resource
devm_platform_ioremap_resource_byname
devm_power_supply_register
devm_pwm_get
__devm_regmap_init
__devm_regmap_init_i2c
__devm_regmap_init_spi
@ -498,8 +510,10 @@
drm_atomic_add_affected_connectors
drm_atomic_add_affected_planes
drm_atomic_commit
drm_atomic_get_connector_state
drm_atomic_get_crtc_state
drm_atomic_get_new_connector_for_encoder
drm_atomic_get_old_connector_for_encoder
drm_atomic_get_plane_state
drm_atomic_get_private_obj_state
drm_atomic_helper_bridge_destroy_state
@ -531,11 +545,13 @@
__drm_atomic_helper_private_obj_duplicate_state
drm_atomic_helper_set_config
drm_atomic_helper_setup_commit
drm_atomic_helper_shutdown
drm_atomic_helper_swap_state
drm_atomic_helper_update_plane
drm_atomic_helper_wait_for_dependencies
drm_atomic_helper_wait_for_fences
drm_atomic_helper_wait_for_flip_done
drm_atomic_nonblocking_commit
drm_atomic_normalize_zpos
drm_atomic_private_obj_fini
drm_atomic_private_obj_init
@ -559,6 +575,7 @@
drm_connector_unregister
drm_crtc_arm_vblank_event
drm_crtc_cleanup
__drm_crtc_commit_free
drm_crtc_enable_color_mgmt
drm_crtc_handle_vblank
drm_crtc_init_with_planes
@ -657,6 +674,7 @@
drm_rect_intersect
drm_release
drm_rotation_simplify
drm_self_refresh_helper_alter_state
drm_send_event
drm_universal_plane_init
drm_vblank_init
@ -690,6 +708,7 @@
extcon_set_property
extcon_set_property_capability
extcon_set_state_sync
failure_tracking
fasync_helper
__fdget
fd_install
@ -758,6 +777,7 @@
get_unused_fd_flags
get_user_pages
get_user_pages_fast
get_user_pages_remote
get_vaddr_frames
gic_nonsecure_priorities
glob_match
@ -794,10 +814,17 @@
handle_edge_irq
handle_level_irq
handle_nested_irq
handle_simple_irq
handle_sysrq
have_governor_per_policy
hex_dump_to_buffer
hex_to_bin
hid_add_device
hid_allocate_device
hid_debug
hid_destroy_device
hid_input_report
hid_parse_report
hrtimer_active
hrtimer_cancel
hrtimer_forward
@ -931,8 +958,8 @@
irq_work_queue
irq_work_sync
is_vmalloc_addr
jiffies64_to_msecs
jiffies
jiffies64_to_msecs
jiffies_to_msecs
jiffies_to_usecs
kasan_flag_enabled
@ -943,11 +970,13 @@
kernel_restart
kern_mount
kern_unmount
__kfifo_alloc
__kfifo_free
__kfifo_in
__kfifo_init
__kfifo_out
kfree
kfree_sensitive
kfree_skb
kill_anon_super
kill_fasync
@ -1045,8 +1074,8 @@
memmove
memparse
memremap
memset64
memset
memset64
memstart_addr
memunmap
mfd_add_devices
@ -1108,8 +1137,8 @@
nla_append
nla_memcpy
__nla_parse
nla_put_64bit
nla_put
nla_put_64bit
nla_put_nohdr
nla_reserve
nla_strlcpy
@ -1184,6 +1213,7 @@
of_property_read_string_helper
of_property_read_u32_index
of_property_read_u64
of_property_read_u64_index
of_property_read_variable_u16_array
of_property_read_variable_u32_array
of_property_read_variable_u64_array
@ -1200,6 +1230,7 @@
of_usb_host_tpl_support
page_endio
page_mapping
__page_pinner_migration_failed
panic
panic_notifier_list
param_array_ops
@ -1332,10 +1363,12 @@
prepare_to_wait_event
print_hex_dump
printk
printk_deferred
proc_create
proc_create_data
proc_dointvec
proc_dostring
proc_douintvec_minmax
proc_mkdir
proc_mkdir_data
proc_remove
@ -1407,6 +1440,7 @@
register_restart_handler
register_shrinker
register_syscore_ops
register_sysctl
register_sysctl_table
register_virtio_device
register_virtio_driver
@ -1769,6 +1803,10 @@
__traceiter_android_rvh_cpu_overutilized
__traceiter_android_rvh_dequeue_task
__traceiter_android_rvh_find_energy_efficient_cpu
__traceiter_android_rvh_irqs_disable
__traceiter_android_rvh_irqs_enable
__traceiter_android_rvh_preempt_disable
__traceiter_android_rvh_preempt_enable
__traceiter_android_rvh_select_task_rq_rt
__traceiter_android_rvh_set_iowait
__traceiter_android_rvh_typec_tcpci_chk_contaminant
@ -1815,6 +1853,7 @@
__traceiter_rwmmio_write
__traceiter_sched_cpu_capacity_tp
__traceiter_sched_overutilized_tp
__traceiter_sched_switch
__traceiter_sched_util_est_cfs_tp
__traceiter_sched_util_est_se_tp
__traceiter_suspend_resume
@ -1822,6 +1861,10 @@
__tracepoint_android_rvh_cpu_overutilized
__tracepoint_android_rvh_dequeue_task
__tracepoint_android_rvh_find_energy_efficient_cpu
__tracepoint_android_rvh_irqs_disable
__tracepoint_android_rvh_irqs_enable
__tracepoint_android_rvh_preempt_disable
__tracepoint_android_rvh_preempt_enable
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_rvh_set_iowait
__tracepoint_android_rvh_typec_tcpci_chk_contaminant
@ -1870,6 +1913,7 @@
__tracepoint_rwmmio_write
__tracepoint_sched_cpu_capacity_tp
__tracepoint_sched_overutilized_tp
__tracepoint_sched_switch
__tracepoint_sched_util_est_cfs_tp
__tracepoint_sched_util_est_se_tp
__tracepoint_suspend_resume
@ -1945,6 +1989,7 @@
usb_copy_descriptors
__usb_create_hcd
usb_disabled
usb_enable_autosuspend
usb_ep_autoconfig
usb_ep_disable
usb_ep_enable
@ -1953,6 +1998,7 @@
usb_gadget_set_state
usb_hcd_is_primary_hcd
usb_hcd_platform_shutdown
usb_hub_find_child
usb_interface_id
usb_otg_state_string
usb_put_function_instance
@ -2090,6 +2136,9 @@
__xfrm_state_destroy
xfrm_state_lookup_byspi
xfrm_stateonly_find
xhci_address_device
xhci_bus_resume
xhci_bus_suspend
xhci_gen_setup
xhci_get_ep_ctx
xhci_get_slot_ctx


@ -85,10 +85,10 @@
blocking_notifier_call_chain
blocking_notifier_chain_register
blocking_notifier_chain_unregister
bpf_trace_run1
bpf_trace_run10
bpf_trace_run11
bpf_trace_run12
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
@ -291,28 +291,43 @@
crypto_aead_encrypt
crypto_aead_setauthsize
crypto_aead_setkey
crypto_ahash_digest
crypto_ahash_setkey
crypto_alloc_aead
crypto_alloc_ahash
crypto_alloc_base
crypto_alloc_shash
crypto_alloc_skcipher
crypto_alloc_sync_skcipher
crypto_cipher_encrypt_one
crypto_cipher_setkey
crypto_comp_compress
crypto_comp_decompress
crypto_dequeue_request
crypto_destroy_tfm
crypto_enqueue_request
crypto_has_alg
crypto_init_queue
crypto_register_aead
crypto_register_ahash
crypto_register_alg
crypto_register_algs
crypto_register_rngs
crypto_register_scomp
crypto_register_skcipher
crypto_shash_final
crypto_shash_setkey
crypto_shash_update
crypto_skcipher_decrypt
crypto_skcipher_encrypt
crypto_skcipher_setkey
crypto_unregister_aead
crypto_unregister_ahash
crypto_unregister_alg
crypto_unregister_algs
crypto_unregister_rngs
crypto_unregister_scomp
crypto_unregister_skcipher
css_next_child
csum_ipv6_magic
csum_partial
@ -866,6 +881,7 @@
extcon_register_notifier
extcon_set_state_sync
extcon_unregister_notifier
failure_tracking
fasync_helper
__fdget
fd_install
@ -1364,8 +1380,8 @@
mempool_free
mempool_free_slab
memremap
memset64
memset
memset64
__memset_io
memstart_addr
memunmap
@ -1433,10 +1449,10 @@
nla_find
nla_memcpy
__nla_parse
nla_put_64bit
nla_put
nla_reserve_64bit
nla_put_64bit
nla_reserve
nla_reserve_64bit
__nla_validate
__nlmsg_put
no_llseek
@ -1567,6 +1583,7 @@
overflowuid
page_endio
page_mapping
__page_pinner_migration_failed
panic
panic_notifier_list
panic_timeout
@ -1993,6 +2010,8 @@
rtnl_unlock
rtnl_unregister
runqueues
scatterwalk_ffwd
scatterwalk_map_and_copy
sched_clock
sched_feat_keys
sched_feat_names
@ -2064,6 +2083,8 @@
set_user_nice
sg_alloc_table
sg_alloc_table_from_pages
sg_copy_from_buffer
sg_copy_to_buffer
sg_free_table
sg_init_one
sg_init_table


@ -165,6 +165,13 @@
orr \res, \res, \tmp1, lsl #24
.endm
.macro be32tocpu, val, tmp
#ifndef __ARMEB__
/* convert to little endian */
rev_l \val, \tmp
#endif
.endm
.section ".start", "ax"
/*
* sort out different calling conventions
@ -345,13 +352,7 @@ restart: adr r0, LC1
/* Get the initial DTB size */
ldr r5, [r6, #4]
#ifndef __ARMEB__
/* convert to little endian */
eor r1, r5, r5, ror #16
bic r1, r1, #0x00ff0000
mov r5, r5, ror #8
eor r5, r5, r1, lsr #8
#endif
be32tocpu r5, r1
dbgadtb r6, r5
/* 50% DTB growth should be good enough */
add r5, r5, r5, lsr #1
@ -403,13 +404,7 @@ restart: adr r0, LC1
/* Get the current DTB size */
ldr r5, [r6, #4]
#ifndef __ARMEB__
/* convert r5 (dtb size) to little endian */
eor r1, r5, r5, ror #16
bic r1, r1, #0x00ff0000
mov r5, r5, ror #8
eor r5, r5, r1, lsr #8
#endif
be32tocpu r5, r1
/* preserve 64-bit alignment */
add r5, r5, #7


@ -24,6 +24,9 @@ aliases {
i2c0 = &i2c1;
i2c1 = &i2c2;
i2c2 = &i2c3;
mmc0 = &mmc1;
mmc1 = &mmc2;
mmc2 = &mmc3;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;


@ -85,8 +85,8 @@ static int __init blake2b_neon_mod_init(void)
static void __exit blake2b_neon_mod_exit(void)
{
return crypto_unregister_shashes(blake2b_neon_algs,
ARRAY_SIZE(blake2b_neon_algs));
crypto_unregister_shashes(blake2b_neon_algs,
ARRAY_SIZE(blake2b_neon_algs));
}
module_init(blake2b_neon_mod_init);


@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
// Registers used to hold message words temporarily. There aren't
// enough ARM registers to hold the whole message block, so we have to
@ -38,6 +39,23 @@
#endif
.endm
.macro _le32_bswap a, tmp
#ifdef __ARMEB__
rev_l \a, \tmp
#endif
.endm
.macro _le32_bswap_8x a, b, c, d, e, f, g, h, tmp
_le32_bswap \a, \tmp
_le32_bswap \b, \tmp
_le32_bswap \c, \tmp
_le32_bswap \d, \tmp
_le32_bswap \e, \tmp
_le32_bswap \f, \tmp
_le32_bswap \g, \tmp
_le32_bswap \h, \tmp
.endm
// Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals.
// (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two
// columns/diagonals. s0-s1 are the word offsets to the message words the first
@ -180,8 +198,10 @@ ENTRY(blake2s_compress_arch)
tst r1, #3
bne .Lcopy_block_misaligned
ldmia r1!, {r2-r9}
_le32_bswap_8x r2, r3, r4, r5, r6, r7, r8, r9, r14
stmia r12!, {r2-r9}
ldmia r1!, {r2-r9}
_le32_bswap_8x r2, r3, r4, r5, r6, r7, r8, r9, r14
stmia r12, {r2-r9}
.Lcopy_block_done:
str r1, [sp, #68] // Update message pointer
@ -268,6 +288,7 @@ ENTRY(blake2s_compress_arch)
1:
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
ldr r3, [r1], #4
_le32_bswap r3, r4
#else
ldrb r3, [r1, #0]
ldrb r4, [r1, #1]


@ -578,4 +578,21 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
__adldst_l str, \src, \sym, \tmp, \cond
.endm
/*
* rev_l - byte-swap a 32-bit value
*
* @val: source/destination register
* @tmp: scratch register
*/
.macro rev_l, val:req, tmp:req
.if __LINUX_ARM_ARCH__ < 6
eor \tmp, \val, \val, ror #16
bic \tmp, \tmp, #0x00ff0000
mov \val, \val, ror #8
eor \val, \val, \tmp, lsr #8
.else
rev \val, \val
.endif
.endm
#endif /* __ASM_ASSEMBLER_H__ */
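
For reference, the pre-ARMv6 fallback that rev_l uses above can be written out in C. This is a hedged sketch, not part of the patch (the function name is invented):

/* C equivalent of the eor/bic/ror byte-swap rev_l emits when the
 * rev instruction (ARMv6+) is unavailable. */
static inline unsigned int rev_l_sketch(unsigned int val)
{
	unsigned int tmp;

	tmp = val ^ ((val >> 16) | (val << 16)); /* eor tmp, val, val, ror #16 */
	tmp &= ~0x00ff0000u;                     /* bic tmp, tmp, #0x00ff0000 */
	val = (val >> 8) | (val << 24);          /* mov val, val, ror #8 */
	return val ^ (tmp >> 8);                 /* eor val, val, tmp, lsr #8 */
}

For example, rev_l_sketch(0x11223344) yields 0x44332211, the same result rev produces in a single instruction on ARMv6 and later.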


@ -10,5 +10,5 @@ / {
};
&mmc0 {
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
broken-cd; /* card detect is broken on *some* boards */
};


@ -0,0 +1,83 @@
# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_BITFIELD_KUNIT is not set
# CONFIG_BITS_TEST is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
CONFIG_CMA_DEBUG=y
CONFIG_CMDLINE="kasan.stacktrace=off stack_depot_disable=off page_owner=on no_hash_pointers panic_on_taint=0x20"
CONFIG_DEBUG_KMEMLEAK=y
# CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN is not set
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000
# CONFIG_DEBUG_KMEMLEAK_TEST is not set
# CONFIG_DEBUG_KOBJECT_RELEASE is not set
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
CONFIG_DEBUG_OBJECTS_FREE=y
# CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER is not set
# CONFIG_DEBUG_OBJECTS_RCU_HEAD is not set
# CONFIG_DEBUG_OBJECTS_SELFTEST is not set
CONFIG_DEBUG_OBJECTS_TIMERS=y
CONFIG_DEBUG_OBJECTS_WORK=y
CONFIG_DEBUG_PREEMPT=y
CONFIG_DEBUG_RWSEMS=y
CONFIG_DEBUG_SPINLOCK=y
# CONFIG_DPM_WATCHDOG is not set
CONFIG_DYNAMIC_DEBUG=y
# CONFIG_FIND_BIT_BENCHMARK is not set
CONFIG_GKI_HACKS_FOR_CONSOLIDATE=y
CONFIG_I2C_CHARDEV=y
# CONFIG_INTERVAL_TREE_TEST is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_LINEAR_RANGES_TEST is not set
# CONFIG_LIST_KUNIT_TEST is not set
# CONFIG_LKDTM is not set
CONFIG_LOCALVERSION="-qki-consolidate"
CONFIG_LOCKUP_DETECTOR=y
# CONFIG_PERCPU_TEST is not set
CONFIG_PID_IN_CONTEXTIDR=y
# CONFIG_PM_ADVANCED_DEBUG is not set
CONFIG_PM_DEBUG=y
CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_PM_TEST_SUSPEND is not set
CONFIG_PREEMPTIRQ_TRACEPOINTS=y
# CONFIG_RBTREE_TEST is not set
# CONFIG_REED_SOLOMON_TEST is not set
CONFIG_RUNTIME_TESTING_MENU=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_SPI_SPIDEV=y
# CONFIG_SYSCTL_KUNIT_TEST is not set
CONFIG_TASKS_RUDE_RCU=y
# CONFIG_TEST_BITMAP is not set
# CONFIG_TEST_BITOPS is not set
# CONFIG_TEST_BLACKHOLE_DEV is not set
# CONFIG_TEST_BPF is not set
# CONFIG_TEST_FIRMWARE is not set
# CONFIG_TEST_FREE_PAGES is not set
# CONFIG_TEST_HASH is not set
# CONFIG_TEST_HEXDUMP is not set
# CONFIG_TEST_IDA is not set
# CONFIG_TEST_KMOD is not set
# CONFIG_TEST_KSTRTOX is not set
# CONFIG_TEST_LIST_SORT is not set
# CONFIG_TEST_LKM is not set
# CONFIG_TEST_MEMCAT_P is not set
# CONFIG_TEST_MEMINIT is not set
# CONFIG_TEST_MIN_HEAP is not set
# CONFIG_TEST_OVERFLOW is not set
# CONFIG_TEST_PRINTF is not set
# CONFIG_TEST_RHASHTABLE is not set
# CONFIG_TEST_SORT is not set
# CONFIG_TEST_STACKINIT is not set
# CONFIG_TEST_STATIC_KEYS is not set
# CONFIG_TEST_STRING_HELPERS is not set
# CONFIG_TEST_STRSCPY is not set
# CONFIG_TEST_SYSCTL is not set
# CONFIG_TEST_UDELAY is not set
# CONFIG_TEST_USER_COPY is not set
# CONFIG_TEST_UUID is not set
# CONFIG_TEST_VMALLOC is not set
# CONFIG_TEST_XARRAY is not set
CONFIG_TRACE_PREEMPT_TOGGLE=y


@ -13,7 +13,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
@ -466,6 +465,7 @@ CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_F_UAC2=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_TYPEC=y
@ -500,6 +500,7 @@ CONFIG_COMMON_CLK_SCPI=y
CONFIG_HWSPINLOCK=y
CONFIG_SUN4I_TIMER=y
# CONFIG_SUN50I_ERRATUM_UNKNOWN1 is not set
CONFIG_MTK_TIMER=y
CONFIG_MAILBOX=y
CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT=y
CONFIG_REMOTEPROC=y
@ -545,6 +546,7 @@ CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_EXFAT_FS=y
CONFIG_TMPFS=y
# CONFIG_EFIVAR_FS is not set
CONFIG_PSTORE=y
@ -638,12 +640,13 @@ CONFIG_UBSAN_TRAP=y
CONFIG_UBSAN_LOCAL_BOUNDS=y
# CONFIG_UBSAN_MISC is not set
CONFIG_PAGE_OWNER=y
CONFIG_PAGE_PINNER=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_KASAN=y
CONFIG_KASAN_HW_TAGS=y
CONFIG_KFENCE=y
CONFIG_KFENCE_SAMPLE_INTERVAL=0
CONFIG_KFENCE_SAMPLE_INTERVAL=500
CONFIG_KFENCE_NUM_OBJECTS=63
CONFIG_PANIC_ON_OOPS=y
CONFIG_PANIC_TIMEOUT=-1


@ -1,51 +1,15 @@
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
CONFIG_BUG_ON_HW_MEM_ONLINE_FAIL=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMDLINE="kasan.stacktrace=off stack_depot_disable=off page_owner=on no_hash_pointers panic_on_taint=0x20"
CONFIG_CNSS2_DEBUG=y
CONFIG_CORESIGHT_SOURCE_ETM4X=m
# CONFIG_CORESIGHT_TRBE is not set
CONFIG_DEBUG_KMEMLEAK=y
# CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN is not set
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000
# CONFIG_DEBUG_KMEMLEAK_TEST is not set
# CONFIG_DEBUG_KOBJECT_RELEASE is not set
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
CONFIG_DEBUG_OBJECTS_FREE=y
# CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER is not set
# CONFIG_DEBUG_OBJECTS_RCU_HEAD is not set
# CONFIG_DEBUG_OBJECTS_SELFTEST is not set
CONFIG_DEBUG_OBJECTS_TIMERS=y
CONFIG_DEBUG_OBJECTS_WORK=y
CONFIG_DEBUG_PREEMPT=y
CONFIG_DEBUG_RWSEMS=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DPM_WATCHDOG is not set
CONFIG_DYNAMIC_DEBUG=y
CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y
# CONFIG_ETM4X_IMPDEF_FEATURE is not set
CONFIG_I2C_CHARDEV=y
CONFIG_LKDTM=m
CONFIG_LOCALVERSION="-gki-consolidate"
CONFIG_LOCKUP_DETECTOR=y
CONFIG_LOCK_TORTURE_TEST=m
CONFIG_MHI_BUS_DEBUG=y
CONFIG_MSM_GPI_DMA_DEBUG=y
CONFIG_NL80211_TESTMODE=y
CONFIG_OF_RESERVED_MEM_CHECK=y
CONFIG_PAGE_OWNER=y
CONFIG_PID_IN_CONTEXTIDR=y
# CONFIG_PM_ADVANCED_DEBUG is not set
CONFIG_PM_DEBUG=y
CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_PM_TEST_SUSPEND is not set
CONFIG_QCOM_DYN_MINIDUMP_STACK=y
CONFIG_QCOM_MINIDUMP_PANIC_CPU_CONTEXT=y
CONFIG_QCOM_RTB=m
@ -53,11 +17,8 @@ CONFIG_QCOM_RTB_SEPARATE_CPUS=y
CONFIG_QTI_PMIC_GLINK_CLIENT_DEBUG=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_REGMAP_QTI_DEBUGFS_ALLOW_WRITE=y
CONFIG_RUNTIME_TESTING_MENU=y
CONFIG_SCHED_WALT_DEBUG=m
CONFIG_SERIAL_MSM_GENI_CONSOLE_DEFAULT_ENABLED=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_SPI_SPIDEV=y
CONFIG_SPS=m
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_TEST_USER_COPY=m


@ -235,6 +235,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
#define arch_enable_tagging_sync() mte_enable_kernel_sync()
#define arch_enable_tagging_async() mte_enable_kernel_async()
#define arch_set_tagging_report_once(state) mte_set_report_once(state)
#define arch_force_async_tag_fault() mte_check_tfsr_exit()
#define arch_init_tags(max_tag) mte_init_tags(max_tag)
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)


@ -41,6 +41,7 @@ void mte_sync_tags(pte_t *ptep, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void flush_mte_state(void);
void mte_thread_switch(struct task_struct *next);
void mte_suspend_enter(void);
void mte_suspend_exit(void);
long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
@ -66,6 +67,9 @@ static inline void flush_mte_state(void)
static inline void mte_thread_switch(struct task_struct *next)
{
}
static inline void mte_suspend_enter(void)
{
}
static inline void mte_suspend_exit(void)
{
}
@ -90,5 +94,49 @@ static inline void mte_assign_mem_tag_range(void *addr, size_t size)
#endif /* CONFIG_ARM64_MTE */
#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DECLARE_STATIC_KEY_FALSE(mte_async_mode);
static inline bool system_uses_mte_async_mode(void)
{
return static_branch_unlikely(&mte_async_mode);
}
void mte_check_tfsr_el1(void);
static inline void mte_check_tfsr_entry(void)
{
mte_check_tfsr_el1();
}
static inline void mte_check_tfsr_exit(void)
{
/*
* The asynchronous faults are sync'ed automatically with
* TFSR_EL1 on kernel entry but for exit an explicit dsb()
* is required.
*/
dsb(nsh);
isb();
mte_check_tfsr_el1();
}
#else
static inline bool system_uses_mte_async_mode(void)
{
return false;
}
static inline void mte_check_tfsr_el1(void)
{
}
static inline void mte_check_tfsr_entry(void)
{
}
static inline void mte_check_tfsr_exit(void)
{
}
#endif /* CONFIG_KASAN_HW_TAGS */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_MTE_H */


@ -20,6 +20,7 @@
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>
@ -231,6 +232,23 @@ static inline void __uaccess_enable_tco(void)
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}
/*
* These functions disable tag checking only if in MTE async mode
* since the sync mode generates exceptions synchronously and the
* nofault accessors or load_unaligned_zeropad() can handle them.
*/
static inline void __uaccess_disable_tco_async(void)
{
if (system_uses_mte_async_mode())
__uaccess_disable_tco();
}
static inline void __uaccess_enable_tco_async(void)
{
if (system_uses_mte_async_mode())
__uaccess_enable_tco();
}
static inline void uaccess_disable_privileged(void)
{
__uaccess_disable_tco();
@ -357,8 +375,10 @@ do { \
do { \
int __gkn_err = 0; \
\
__uaccess_enable_tco_async(); \
__raw_get_mem("ldr", *((type *)(dst)), \
(__force type *)(src), __gkn_err); \
__uaccess_disable_tco_async(); \
if (unlikely(__gkn_err)) \
goto err_label; \
} while (0)
@ -430,8 +450,10 @@ do { \
do { \
int __pkn_err = 0; \
\
__uaccess_enable_tco_async(); \
__raw_put_mem("str", *((type *)(src)), \
(__force type *)(dst), __pkn_err); \
__uaccess_disable_tco_async(); \
if (unlikely(__pkn_err)) \
goto err_label; \
} while(0)
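
The two hunks above wrap the raw accessors that back get_kernel_nofault()/put_kernel_nofault(), so TCO is now toggled around each access in MTE async mode. A minimal usage sketch of the public helper (the wrapper function is hypothetical):

#include <linux/uaccess.h>

/* Probe a kernel address without taking a fault; returns 0 or -EFAULT. */
static int peek_kernel_word(const unsigned long *addr, unsigned long *out)
{
	unsigned long v;

	if (get_kernel_nofault(v, addr))
		return -EFAULT;
	*out = v;
	return 0;
}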


@ -55,6 +55,8 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long ret, tmp;
__uaccess_enable_tco_async();
/* Load word from unaligned pointer addr */
asm(
"1: ldr %0, %3\n"
@ -76,6 +78,8 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
: "=&r" (ret), "=&r" (tmp)
: "r" (addr), "Q" (*(unsigned long *)addr));
__uaccess_disable_tco_async();
return ret;
}


@ -995,8 +995,6 @@ static void relax_cpu_ftr_reg(u32 sys_id, int field)
WARN_ON(!ftrp->width);
}
static void update_compat_elf_hwcaps(void);
static void update_mismatched_32bit_el0_cpu_features(struct cpuinfo_arm64 *info,
struct cpuinfo_arm64 *boot)
{
@ -1010,7 +1008,6 @@ static void update_mismatched_32bit_el0_cpu_features(struct cpuinfo_arm64 *info,
boot->aarch32 = info->aarch32;
init_32bit_cpu_features(&boot->aarch32);
update_compat_elf_hwcaps();
boot_cpu_32bit_regs_overridden = true;
}
@ -1287,51 +1284,6 @@ has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
return feature_matches(val, entry);
}
static int enable_mismatched_32bit_el0(unsigned int cpu)
{
static int lucky_winner = -1;
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
if (cpu_32bit) {
cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
}
if (cpumask_test_cpu(0, cpu_32bit_el0_mask) == cpu_32bit)
return 0;
if (lucky_winner >= 0)
return 0;
/*
* We've detected a mismatch. We need to keep one of our CPUs with
* 32-bit EL0 online so that is_cpu_allowed() doesn't end up rejecting
* every CPU in the system for a 32-bit task.
*/
lucky_winner = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask,
cpu_active_mask);
get_cpu_device(lucky_winner)->offline_disabled = true;
pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n",
cpu, lucky_winner);
return 0;
}
static int __init init_32bit_el0_mask(void)
{
if (!allow_mismatched_32bit_el0)
return 0;
if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
return -ENOMEM;
return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"arm64/mismatched_32bit_el0:online",
enable_mismatched_32bit_el0, NULL);
}
subsys_initcall_sync(init_32bit_el0_mask);
const struct cpumask *system_32bit_el0_cpumask(void)
{
if (!system_supports_32bit_el0())
@ -2567,12 +2519,6 @@ static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
cap_set_elf_hwcap(hwcaps);
}
static void update_compat_elf_hwcaps(void)
{
if (system_capabilities_finalized())
setup_elf_hwcaps(compat_elf_hwcaps);
}
static void update_cpu_capabilities(u16 scope_mask)
{
int i;
@ -2953,6 +2899,52 @@ void __init setup_cpu_features(void)
ARCH_DMA_MINALIGN);
}
static int enable_mismatched_32bit_el0(unsigned int cpu)
{
static int lucky_winner = -1;
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
if (cpu_32bit) {
cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
}
if (cpumask_test_cpu(0, cpu_32bit_el0_mask) == cpu_32bit)
return 0;
if (lucky_winner >= 0)
return 0;
/*
* We've detected a mismatch. We need to keep one of our CPUs with
* 32-bit EL0 online so that is_cpu_allowed() doesn't end up rejecting
* every CPU in the system for a 32-bit task.
*/
lucky_winner = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask,
cpu_active_mask);
get_cpu_device(lucky_winner)->offline_disabled = true;
setup_elf_hwcaps(compat_elf_hwcaps);
pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n",
cpu, lucky_winner);
return 0;
}
static int __init init_32bit_el0_mask(void)
{
if (!allow_mismatched_32bit_el0)
return 0;
if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
return -ENOMEM;
return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"arm64/mismatched_32bit_el0:online",
enable_mismatched_32bit_el0, NULL);
}
subsys_initcall_sync(init_32bit_el0_mask);
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{


@ -37,6 +37,8 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
lockdep_hardirqs_off(CALLER_ADDR0);
rcu_irq_enter_check_tick();
trace_hardirqs_off_finish();
mte_check_tfsr_entry();
}
/*
@ -47,6 +49,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
lockdep_assert_irqs_disabled();
mte_check_tfsr_exit();
if (interrupts_enabled(regs)) {
if (regs->exit_rcu) {
trace_hardirqs_on_prepare();
@ -243,6 +247,8 @@ asmlinkage void noinstr enter_from_user_mode(void)
asmlinkage void noinstr exit_to_user_mode(void)
{
mte_check_tfsr_exit();
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
user_enter_irqoff();


@ -26,6 +26,12 @@ u64 gcr_kernel_excl __ro_after_init;
static bool report_fault_once = true;
#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DEFINE_STATIC_KEY_FALSE(mte_async_mode);
EXPORT_SYMBOL_GPL(mte_async_mode);
#endif
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
pte_t old_pte = READ_ONCE(*ptep);
@ -116,15 +122,35 @@ static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
}
#ifdef CONFIG_KASAN_HW_TAGS
void mte_enable_kernel_sync(void)
{
/*
* Make sure we enter this function when no PE has set
* async mode previously.
*/
WARN_ONCE(system_uses_mte_async_mode(),
"MTE async mode enabled system wide!");
__mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
}
void mte_enable_kernel_async(void)
{
__mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);
/*
* MTE async mode is set system wide by the first PE that
* executes this function.
*
* Note: If in future KASAN acquires a runtime switching
* mode in between sync and async, this strategy needs
* to be reviewed.
*/
if (!system_uses_mte_async_mode())
static_branch_enable(&mte_async_mode);
}
#endif
void mte_set_report_once(bool state)
{
@ -136,6 +162,29 @@ bool mte_report_once(void)
return READ_ONCE(report_fault_once);
}
#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void)
{
u64 tfsr_el1;
if (!system_supports_mte())
return;
tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
/*
* Note: isb() is not required after this direct write
* because there is no indirect read subsequent to it
* (per ARM DDI 0487F.c table D13-1).
*/
write_sysreg_s(0, SYS_TFSR_EL1);
kasan_report_async();
}
}
#endif
static void update_sctlr_el1_tcf0(u64 tcf0)
{
/* ISB required for the kernel uaccess routines */
@ -201,6 +250,35 @@ void mte_thread_switch(struct task_struct *next)
/* avoid expensive SCTLR_EL1 accesses if no change */
if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
else
isb();
/*
* Check if an async tag exception occurred at EL1.
*
* Note: On the context switch path we rely on the dsb() present
* in __switch_to() to guarantee that the indirect writes to TFSR_EL1
* are synchronized before this point.
* isb() above is required for the same reason.
*
*/
mte_check_tfsr_el1();
}
void mte_suspend_enter(void)
{
if (!system_supports_mte())
return;
/*
* The barriers are required to guarantee that the indirect writes
* to TFSR_EL1 are synchronized before we report the state.
*/
dsb(nsh);
isb();
/* Report SYS_TFSR_EL1 before suspend entry */
mte_check_tfsr_el1();
}
void mte_suspend_exit(void)


@ -286,10 +286,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
if (!instruction_pointer(regs))
BUG();
if (kcb->kprobe_status == KPROBE_REENTER)
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
else
} else {
kprobes_restore_local_irqflag(kcb, regs);
reset_current_kprobe();
}
break;
case KPROBE_HIT_ACTIVE:


@ -92,6 +92,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
unsigned long flags;
struct sleep_stack_data state;
/* Report any MTE async fault before going to suspend */
mte_suspend_enter();
/*
* From this point debug exceptions are disabled to prevent
* updates to mdscr register (saved and restored along with


@ -576,7 +576,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
* let's try a speculative page fault without grabbing the
* mmap_sem.
*/
fault = handle_speculative_fault(mm, addr, mm_flags, &vma);
fault = handle_speculative_fault(mm, addr, mm_flags, &vma, regs);
if (fault != VM_FAULT_RETRY)
goto done;


@ -292,7 +292,7 @@ config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "11"
config RAM_BASE
config DRAM_BASE
hex "DRAM start addr (the same with memory-section in dts)"
default 0x0


@ -28,7 +28,7 @@
#define SSEG_SIZE 0x20000000
#define LOWMEM_LIMIT (SSEG_SIZE * 2)
#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))
#ifndef __ASSEMBLY__


@ -94,7 +94,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
* acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
* called yet. Note that node 0 will also count all non-existent cpus.
*/
static int __meminit early_nr_cpus_node(int node)
static int early_nr_cpus_node(int node)
{
int cpu, n = 0;
@ -109,7 +109,7 @@ static int __meminit early_nr_cpus_node(int node)
* compute_pernodesize - compute size of pernode data
* @node: the node id.
*/
static unsigned long __meminit compute_pernodesize(int node)
static unsigned long compute_pernodesize(int node)
{
unsigned long pernodesize = 0, cpus;
@ -366,7 +366,7 @@ static void __init reserve_pernode_space(void)
}
}
static void __meminit scatter_node_data(void)
static void scatter_node_data(void)
{
pg_data_t **dst;
int node;


@ -994,6 +994,7 @@ ENDPROC(ext_int_handler)
* Load idle PSW.
*/
ENTRY(psw_idle)
stg %r14,(__SF_GPRS+8*8)(%r15)
stg %r3,__SF_EMPTY(%r15)
larl %r1,.Lpsw_idle_exit
stg %r1,__SF_EMPTY+8(%r15)


@ -15,7 +15,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
@ -483,6 +482,7 @@ CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_EXFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_EFIVAR_FS is not set
@ -578,10 +578,11 @@ CONFIG_UBSAN_TRAP=y
CONFIG_UBSAN_LOCAL_BOUNDS=y
# CONFIG_UBSAN_MISC is not set
CONFIG_PAGE_OWNER=y
CONFIG_PAGE_PINNER=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_KFENCE=y
CONFIG_KFENCE_SAMPLE_INTERVAL=0
CONFIG_KFENCE_SAMPLE_INTERVAL=500
CONFIG_KFENCE_NUM_OBJECTS=63
CONFIG_PANIC_ON_OOPS=y
CONFIG_PANIC_TIMEOUT=-1


@ -4387,7 +4387,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),


@ -1159,7 +1159,6 @@ enum {
SNBEP_PCI_QPI_PORT0_FILTER,
SNBEP_PCI_QPI_PORT1_FILTER,
BDX_PCI_QPI_PORT2_FILTER,
HSWEP_PCI_PCU_3,
};
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@ -2816,22 +2815,33 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
NULL,
};
#define HSWEP_PCU_DID 0x2fc0
#define HSWEP_PCU_CAPID4_OFFET 0x94
#define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
static bool hswep_has_limit_sbox(unsigned int device)
{
struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
u32 capid4;
if (!dev)
return false;
pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
if (!hswep_get_chop(capid4))
return true;
return false;
}
void hswep_uncore_cpu_init(void)
{
int pkg = boot_cpu_data.logical_proc_id;
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
/* Detect 6-8 core systems with only two SBOXes */
if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
u32 capid4;
pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
0x94, &capid4);
if (((capid4 >> 6) & 0x3) == 0)
hswep_uncore_sbox.num_boxes = 2;
}
if (hswep_has_limit_sbox(HSWEP_PCU_DID))
hswep_uncore_sbox.num_boxes = 2;
uncore_msr_uncores = hswep_msr_uncores;
}
@ -3094,11 +3104,6 @@ static const struct pci_device_id hswep_uncore_pci_ids[] = {
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
SNBEP_PCI_QPI_PORT1_FILTER),
},
{ /* PCU.3 (for Capability registers) */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
HSWEP_PCI_PCU_3),
},
{ /* end: all zeroes */ }
};
@ -3190,27 +3195,18 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
EVENT_CONSTRAINT_END
};
#define BDX_PCU_DID 0x6fc0
void bdx_uncore_cpu_init(void)
{
int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
/* BDX-DE doesn't have SBOX */
if (boot_cpu_data.x86_model == 86) {
uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
/* Detect systems with no SBOXes */
} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
struct pci_dev *pdev;
u32 capid4;
if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
pci_read_config_dword(pdev, 0x94, &capid4);
if (((capid4 >> 6) & 0x3) == 0)
bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
}
hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
@ -3431,11 +3427,6 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
BDX_PCI_QPI_PORT2_FILTER),
},
{ /* PCU.3 (for Capability registers) */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
HSWEP_PCI_PCU_3),
},
{ /* end: all zeroes */ }
};
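
To make the CAPID4 probing above concrete: hswep_get_chop() reads bits [7:6] of the capability register, and a chop value of 0 marks the reduced 6-8 core parts that only carry two SBOXes. A worked example with invented register values:

u32 capid4 = 0x00000080;                  /* bits [7:6] = 0b10 */
unsigned int chop = (capid4 >> 6) & 0x3;  /* == 2: full die, all SBOXes present */
/* capid4 = 0x0000003f would give chop == 0, so hswep_has_limit_sbox()
 * returns true and num_boxes is limited to 2. */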


@ -337,7 +337,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
struct crash_memmap_data cmd;
struct crash_mem *cmem;
cmem = vzalloc(sizeof(struct crash_mem));
cmem = vzalloc(struct_size(cmem, ranges, 1));
if (!cmem)
return -ENOMEM;
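
The vzalloc() change above matters because crash_mem ends in a flexible array member, so sizeof(struct crash_mem) covers only the fixed header. A hedged sketch of the shape involved (fields as I recall them from include/linux/kexec.h):

struct crash_mem_range { u64 start, end; };
struct crash_mem {
	unsigned int max_nr_ranges;
	unsigned int nr_ranges;
	struct crash_mem_range ranges[]; /* contributes 0 bytes to sizeof() */
};

/* struct_size(cmem, ranges, 1) == sizeof(*cmem) + 1 * sizeof(cmem->ranges[0]),
 * so the single ranges[] entry the function later fills is actually
 * allocated; the old sizeof-based call under-allocated by one range. */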


@ -1316,7 +1316,7 @@ void do_user_addr_fault(struct pt_regs *regs,
* protection keys since it can't be resolved.
*/
if (!(hw_error_code & X86_PF_PK)) {
fault = handle_speculative_fault(mm, address, flags, &vma);
fault = handle_speculative_fault(mm, address, flags, &vma, regs);
if (fault != VM_FAULT_RETRY)
goto done;
}


@ -98,6 +98,8 @@ static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (bdev->bd_part_count)
return -EBUSY;
/*
* Reopen the device to revalidate the driver state and force a


@ -1,5 +1,5 @@
BRANCH=android12-5.10
KMI_GENERATION=2
KMI_GENERATION=3
LLVM=1
DEPMOD=depmod


@ -22,7 +22,7 @@ FILES="${FILES}
arch/arm64/boot/Image.lz4
"
TRIM_NONLISTED_KMI=1
TRIM_NONLISTED_KMI=${TRIM_NONLISTED_KMI:-1}
KMI_SYMBOL_LIST_STRICT_MODE=${KMI_SYMBOL_LIST_STRICT_MODE:-1}
MODULES_ORDER=android/gki_aarch64_modules
KMI_ENFORCED=1


@ -0,0 +1,4 @@
DEFCONFIG=consolidate_defconfig
FRAGMENT_CONFIG=${KERNEL_DIR}/arch/arm64/configs/consolidate.fragment
PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/${KERNEL_DIR}/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${FRAGMENT_CONFIG}"
POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}"


@ -13,22 +13,26 @@ BUILD_INITRAMFS=1
ABI_DEFINITION=android/abi_gki_aarch64.xml
KMI_SYMBOL_LIST=android/abi_gki_aarch64_qcom
TRIM_NONLISTED_KMI=1
KMI_SYMBOL_LIST_STRICT_MODE=1
KMI_SYMBOL_LIST_MODULE_GROUPING=0
KMI_SYMBOL_LIST_ADD_ONLY=1
KMI_ENFORCED=1
MAKE_GOALS="modules dtbs"
GKI_BUILD_CONFIG=common/build.config.gki.aarch64
GKI_SKIP_IF_VERSION_MATCHES=1
GKI_SKIP_CP_KERNEL_HDR=1
function build_defconfig_fragments() {
if [[ "${VARIANT}" =~ ^(gki|consolidate)$ ]]; then
apply_defconfig_fragment ${KERNEL_DIR}/arch/${ARCH}/configs/vendor/${MSM_ARCH}_GKI.config vendor/${MSM_ARCH}-gki_defconfig
if [ "${VARIANT}" = gki ]; then
return
fi
GKI_BUILD_CONFIG=common/build.config.gki-debug.aarch64
GKI_GKI_DEFCONFIG_FRAGMENT=common/build.config.gki_consolidate.aarch64
# ABI comparison isn't applicable on consolidate variant
unset ABI_DEFINITION
unset TRIM_NONLISTED_KMI
unset KMI_SYMBOL_LIST_STRICT_MODE
apply_defconfig_fragment common/arch/${ARCH}/configs/consolidate.fragment consolidate_defconfig
apply_defconfig_fragment ${KERNEL_DIR}/arch/${ARCH}/configs/vendor/${MSM_ARCH}_consolidate.config vendor/${MSM_ARCH}-consolidate_defconfig
elif [ "${VARIANT}" = gki-ack ]; then
# In gki-only mode, use ACK's GKI config directly


@ -729,6 +729,7 @@ static void binder_transaction_priority(struct task_struct *task,
bool inherit_rt)
{
struct binder_priority desired_prio = t->priority;
bool skip = false;
if (t->set_priority_called)
return;
@ -737,6 +738,10 @@ static void binder_transaction_priority(struct task_struct *task,
t->saved_priority.sched_policy = task->policy;
t->saved_priority.prio = task->normal_prio;
trace_android_vh_binder_transaction_priority_skip(task, &skip);
if (skip)
return;
if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
desired_prio.prio = NICE_TO_PRIO(0);
desired_prio.sched_policy = SCHED_NORMAL;
@ -1197,6 +1202,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
"%d new ref %d desc %d for node %d\n",
proc->pid, new_ref->data.debug_id, new_ref->data.desc,
node->debug_id);
trace_android_vh_binder_new_ref(proc->tsk, new_ref->data.desc, new_ref->node->debug_id);
binder_node_unlock(node);
return new_ref;
}
@ -1364,6 +1370,7 @@ static struct binder_node *binder_get_node_from_ref(
*/
static void binder_free_ref(struct binder_ref *ref)
{
trace_android_vh_binder_del_ref(ref->proc ? ref->proc->tsk : 0, ref->data.desc);
if (ref->node)
binder_free_node(ref->node);
kfree(ref->death);
@ -2497,6 +2504,9 @@ static int binder_proc_transaction(struct binder_transaction *t,
if (!thread && !pending_async)
thread = binder_select_thread_ilocked(proc);
trace_android_vh_binder_proc_transaction(current, proc->tsk,
thread ? thread->task : 0, node->debug_id, t->code, pending_async);
if (thread) {
binder_transaction_priority(thread->task, t, node_prio,
node->inherit_rt);
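
The android_vh_* hooks added in this hunk are consumed by vendor modules. A hedged sketch of how the new priority-skip hook might be attached (module and handler names are invented; the register_trace_android_vh_*() form is what the hook declaration macros generate):

#include <linux/module.h>
#include <linux/sched.h>
#include <trace/hooks/binder.h>

/* First parameter is the opaque registration cookie; the rest mirror the
 * trace_android_vh_binder_transaction_priority_skip(task, &skip) call site. */
static void example_priority_skip(void *data, struct task_struct *task,
				  bool *skip)
{
	if (task->flags & PF_KTHREAD)
		*skip = true; /* e.g. never adjust kernel-thread priorities */
}

static int __init example_hooks_init(void)
{
	return register_trace_android_vh_binder_transaction_priority_skip(
			example_priority_skip, NULL);
}
module_init(example_hooks_init);
MODULE_LICENSE("GPL");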


@ -53,6 +53,7 @@
#include <trace/hooks/logbuf.h>
#include <trace/hooks/vmscan.h>
#include <trace/hooks/psi.h>
#include <trace/hooks/selinux.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
@ -74,6 +75,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setscheduler);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_arch_set_freq_scale);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_is_fpsimd_save);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_priority_skip);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_set_priority);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_restore_priority);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wakeup_ilocked);
@ -191,6 +193,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_wake_flags);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_uclamp_eff_get);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpufreq_transition);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_set_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cgroup_force_kthread_migration);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_syscall_prctl_finished);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_create_worker);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_tick);
@ -238,12 +241,16 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_alloc_new_buf_locked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_reply);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_trans);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_preset);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_new_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_del_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_post_init_entity_util_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpm_get_timer);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpm_adj_current_limit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_logbuf);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_scan_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_swappiness);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_slab_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_group);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork);
@ -259,3 +266,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_task_fair);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_task_fair);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_prepare_update_load_avg_se);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_finish_update_load_avg_se);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_selinux_is_initialized);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_inactive_ratio);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_update_topology_flags_workfn);


@ -129,6 +129,7 @@ static void update_topology_flags_workfn(struct work_struct *work)
{
update_topology = 1;
rebuild_sched_domains();
trace_android_vh_update_topology_flags_workfn(NULL);
pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
update_topology = 0;
}


@ -477,7 +477,7 @@ config SYS_SUPPORTS_SH_CMT
bool
config MTK_TIMER
bool "Mediatek timer driver" if COMPILE_TEST
bool "Mediatek timer driver"
depends on HAS_IOMEM
select TIMER_OF
select CLKSRC_MMIO


@ -221,15 +221,30 @@ void dma_buf_stats_teardown(struct dma_buf *dmabuf)
kobject_put(&sysfs_entry->kobj);
}
/*
* Statistics files do not need to send uevents.
*/
static int dmabuf_sysfs_uevent_filter(struct kset *kset, struct kobject *kobj)
{
return 0;
}
static const struct kset_uevent_ops dmabuf_sysfs_no_uevent_ops = {
.filter = dmabuf_sysfs_uevent_filter,
};
static struct kset *dma_buf_stats_kset;
static struct kset *dma_buf_per_buffer_stats_kset;
int dma_buf_init_sysfs_statistics(void)
{
dma_buf_stats_kset = kset_create_and_add("dmabuf", NULL, kernel_kobj);
dma_buf_stats_kset = kset_create_and_add("dmabuf",
&dmabuf_sysfs_no_uevent_ops,
kernel_kobj);
if (!dma_buf_stats_kset)
return -ENOMEM;
dma_buf_per_buffer_stats_kset = kset_create_and_add("buffers", NULL,
dma_buf_per_buffer_stats_kset = kset_create_and_add("buffers",
&dmabuf_sysfs_no_uevent_ops,
&dma_buf_stats_kset->kobj);
if (!dma_buf_per_buffer_stats_kset) {
kset_unregister(dma_buf_stats_kset);
@ -275,7 +290,8 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf)
goto err_sysfs_dmabuf;
/* create the directory for attachment stats */
attach_stats_kset = kset_create_and_add("attachments", NULL,
attach_stats_kset = kset_create_and_add("attachments",
&dmabuf_sysfs_no_uevent_ops,
&sysfs_entry->kobj);
if (!attach_stats_kset) {
ret = -ENOMEM;


@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
goto end;
}
if (!tdc->busy) {
err = pm_runtime_get_sync(tdc->tdma->dev);
err = pm_runtime_resume_and_get(tdc->tdma->dev);
if (err < 0) {
dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
goto end;
@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
int err;
err = pm_runtime_get_sync(tdc->tdma->dev);
err = pm_runtime_resume_and_get(tdc->tdma->dev);
if (err < 0) {
dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
return;
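
The swap to pm_runtime_resume_and_get() above is an error-path fix: pm_runtime_get_sync() raises the usage counter even when resume fails, leaving the caller to drop it. A sketch of roughly what the new helper does internally, expressed with the older API (for illustration only):

#include <linux/pm_runtime.h>

static int resume_and_get_sketch(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		/* the put_noidle callers kept forgetting */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	return 0;
}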


@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
struct xilinx_dpdma_tx_desc *desc;
struct virt_dma_desc *vdesc;
u32 reg, channels;
bool first_frame;
lockdep_assert_held(&chan->lock);
@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
chan->running = true;
}
if (chan->video_group)
channels = xilinx_dpdma_chan_video_group_ready(chan);
else
channels = BIT(chan->id);
if (!channels)
return;
vdesc = vchan_next_desc(&chan->vchan);
if (!vdesc)
return;
@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
upper_32_bits(sw_desc->dma_addr)));
if (chan->first_frame)
first_frame = chan->first_frame;
chan->first_frame = false;
if (chan->video_group) {
channels = xilinx_dpdma_chan_video_group_ready(chan);
/*
* Trigger the transfer only when all channels in the group are
* ready.
*/
if (!channels)
return;
} else {
channels = BIT(chan->id);
}
if (first_frame)
reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
else
reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
chan->first_frame = false;
dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}
@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
*/
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
struct xilinx_dpdma_tx_desc *active = chan->desc.active;
struct xilinx_dpdma_tx_desc *active;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
xilinx_dpdma_debugfs_desc_done_irq(chan);
active = chan->desc.active;
if (active)
vchan_cyclic_callback(&active->vdesc);
else


@ -29,6 +29,7 @@
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
struct gpio_regs {
u32 sysconfig;
u32 irqenable1;
u32 irqenable2;
u32 wake_en;
@ -1072,6 +1073,7 @@ static void omap_gpio_init_context(struct gpio_bank *p)
const struct omap_gpio_reg_offs *regs = p->regs;
void __iomem *base = p->base;
p->context.sysconfig = readl_relaxed(base + regs->sysconfig);
p->context.ctrl = readl_relaxed(base + regs->ctrl);
p->context.oe = readl_relaxed(base + regs->direction);
p->context.wake_en = readl_relaxed(base + regs->wkup_en);
@ -1091,6 +1093,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
const struct omap_gpio_reg_offs *regs = bank->regs;
void __iomem *base = bank->base;
writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
writel_relaxed(bank->context.ctrl, base + regs->ctrl);
writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
@ -1118,6 +1121,10 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
bank->saved_datain = readl_relaxed(base + bank->regs->datain);
/* Save sysconfig, its runtime value can be different from init value */
if (bank->loses_context)
bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;
@ -1282,6 +1289,7 @@ static int gpio_omap_cpu_notifier(struct notifier_block *nb,
static const struct omap_gpio_reg_offs omap2_gpio_regs = {
.revision = OMAP24XX_GPIO_REVISION,
.sysconfig = OMAP24XX_GPIO_SYSCONFIG,
.direction = OMAP24XX_GPIO_OE,
.datain = OMAP24XX_GPIO_DATAIN,
.dataout = OMAP24XX_GPIO_DATAOUT,
@ -1305,6 +1313,7 @@ static const struct omap_gpio_reg_offs omap2_gpio_regs = {
static const struct omap_gpio_reg_offs omap4_gpio_regs = {
.revision = OMAP4_GPIO_REVISION,
.sysconfig = OMAP4_GPIO_SYSCONFIG,
.direction = OMAP4_GPIO_OE,
.datain = OMAP4_GPIO_DATAIN,
.dataout = OMAP4_GPIO_DATAOUT,


@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (input_register_device(data->input2)) {
input_free_device(input2);
ret = -ENOENT;
goto exit;
}
}


@ -161,6 +161,7 @@ struct cp2112_device {
atomic_t read_avail;
atomic_t xfer_avail;
struct gpio_chip gc;
struct irq_chip irq;
u8 *in_out_buffer;
struct mutex lock;
@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
static struct irq_chip cp2112_gpio_irqchip = {
.name = "cp2112-gpio",
.irq_startup = cp2112_gpio_irq_startup,
.irq_shutdown = cp2112_gpio_irq_shutdown,
.irq_ack = cp2112_gpio_irq_ack,
.irq_mask = cp2112_gpio_irq_mask,
.irq_unmask = cp2112_gpio_irq_unmask,
.irq_set_type = cp2112_gpio_irq_type,
};
static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
int pin)
{
@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.can_sleep = 1;
dev->gc.parent = &hdev->dev;
dev->irq.name = "cp2112-gpio";
dev->irq.irq_startup = cp2112_gpio_irq_startup;
dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
dev->irq.irq_ack = cp2112_gpio_irq_ack;
dev->irq.irq_mask = cp2112_gpio_irq_mask;
dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
dev->irq.irq_set_type = cp2112_gpio_irq_type;
dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
girq = &dev->gc.irq;
girq->chip = &cp2112_gpio_irqchip;
girq->chip = &dev->irq;
/* The event comes from the outside so no parent handler */
girq->parent_handler = NULL;
girq->num_parents = 0;


@ -526,6 +526,8 @@ static void hammer_remove(struct hid_device *hdev)
}
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,


@ -486,6 +486,7 @@
#define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
#define USB_DEVICE_ID_GOOGLE_DON 0x5050
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f


@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
!wacom_wac->shared->is_touch_on) {
if (!wacom_wac->shared->touch_down)
return;
prox = 0;
prox = false;
}
wacom_wac->hid_data.num_received++;


@ -106,6 +106,7 @@
#define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */
#define MEI_DEV_ID_ADP_LP 0x7A60 /* Alder Lake Point LP */
#define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */
/*
* MEI HW Section


@ -111,6 +111,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
/* required last entry */
{0, }


@ -2997,6 +2997,37 @@ static bool sdhci_request_done(struct sdhci_host *host)
return true;
}
/*
* The controller needs a reset of internal state machines
* upon error conditions.
*/
if (sdhci_needs_reset(host, mrq)) {
/*
* Do not finish until command and data lines are available for
* reset. Note there can only be one other mrq, so it cannot
* also be in mrqs_done, otherwise host->cmd and host->data_cmd
* would both be null.
*/
if (host->cmd || host->data_cmd) {
spin_unlock_irqrestore(&host->lock, flags);
return true;
}
/* Some controllers need this kick or reset won't work here */
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
host->ops->set_clock(host, host->clock);
/*
* Spec says we should do both at the same time, but Ricoh
* controllers do not like that.
*/
sdhci_do_reset(host, SDHCI_RESET_CMD);
sdhci_do_reset(host, SDHCI_RESET_DATA);
host->pending_reset = false;
}
/*
* Always unmap the data buffers if they were mapped by
* sdhci_prepare_data() whenever we finish with a request.
@ -3060,35 +3091,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
}
}
/*
* The controller needs a reset of internal state machines
* upon error conditions.
*/
if (sdhci_needs_reset(host, mrq)) {
/*
* Do not finish until command and data lines are available for
* reset. Note there can only be one other mrq, so it cannot
* also be in mrqs_done, otherwise host->cmd and host->data_cmd
* would both be null.
*/
if (host->cmd || host->data_cmd) {
spin_unlock_irqrestore(&host->lock, flags);
return true;
}
/* Some controllers need this kick or reset won't work here */
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
host->ops->set_clock(host, host->clock);
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
sdhci_do_reset(host, SDHCI_RESET_CMD);
sdhci_do_reset(host, SDHCI_RESET_DATA);
host->pending_reset = false;
}
host->mrqs_done[i] = NULL;
spin_unlock_irqrestore(&host->lock, flags);


@ -412,7 +412,7 @@
| CN6XXX_INTR_M0UNWI_ERR \
| CN6XXX_INTR_M1UPB0_ERR \
| CN6XXX_INTR_M1UPWI_ERR \
| CN6XXX_INTR_M1UPB0_ERR \
| CN6XXX_INTR_M1UNB0_ERR \
| CN6XXX_INTR_M1UNWI_ERR \
| CN6XXX_INTR_INSTR_DB_OF_ERR \
| CN6XXX_INTR_SLIST_DB_OF_ERR \


@ -890,6 +890,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
geneve->cfg.info.key.tp_dst, sport);
@ -984,6 +987,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
geneve->cfg.info.key.tp_dst, sport);


@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface)
cancel_work_sync(&serial_table[i]->async_put_intf);
cancel_work_sync(&serial_table[i]->async_get_intf);
hso_serial_tty_unregister(serial);
kref_put(&serial_table[i]->ref, hso_serial_ref_free);
kref_put(&serial->parent->ref, hso_serial_ref_free);
}
}


@ -87,6 +87,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
struct iwl_tfh_tfd *tfd;
unsigned long flags;
copy_size = sizeof(struct iwl_cmd_header_wide);
cmd_size = sizeof(struct iwl_cmd_header_wide);
@ -155,14 +156,14 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
spin_lock_bh(&txq->lock);
spin_lock_irqsave(&txq->lock, flags);
idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
spin_unlock_irqrestore(&txq->lock, flags);
IWL_ERR(trans, "No space in command queue\n");
iwl_op_mode_cmd_queue_full(trans->op_mode);
@ -297,7 +298,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
spin_unlock(&trans_pcie->reg_lock);
out:
spin_unlock_bh(&txq->lock);
spin_unlock_irqrestore(&txq->lock, flags);
free_dup_buf:
if (idx < 0)
kfree(dup_buf);


@ -824,11 +824,15 @@ static void connect(struct backend_info *be)
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
hotplug_status_changed,
"%s/%s", dev->nodename, "hotplug-status");
if (!err)
if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
NULL, hotplug_status_changed,
"%s/%s", dev->nodename,
"hotplug-status");
if (err)
goto err;
be->have_hotplug_status_watch = 1;
}
netif_tx_wake_all_queues(be->vif->dev);

View File

@ -1604,8 +1604,8 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
unsigned i, pin;
#ifdef CONFIG_GPIOLIB
struct pinctrl_gpio_range *range;
unsigned int gpio_num;
struct gpio_chip *chip;
int gpio_num;
#endif
seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
@ -1625,7 +1625,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
seq_printf(s, "pin %d (%s) ", pin, desc->name);
#ifdef CONFIG_GPIOLIB
gpio_num = 0;
gpio_num = -1;
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
if ((pin >= range->pin_base) &&
(pin < (range->pin_base + range->npins))) {
@ -1633,10 +1633,12 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
break;
}
}
chip = gpio_to_chip(gpio_num);
if (chip && chip->gpiodev && chip->gpiodev->base)
seq_printf(s, "%u:%s ", gpio_num -
chip->gpiodev->base, chip->label);
if (gpio_num >= 0)
chip = gpio_to_chip(gpio_num);
else
chip = NULL;
if (chip)
seq_printf(s, "%u:%s ", gpio_num - chip->gpiodev->base, chip->label);
else
seq_puts(s, "0:? ");
#endif

View File

@ -299,9 +299,9 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
static const struct intel_community lbg_communities[] = {
LBG_COMMUNITY(0, 0, 71),
LBG_COMMUNITY(1, 72, 132),
LBG_COMMUNITY(3, 133, 144),
LBG_COMMUNITY(4, 145, 180),
LBG_COMMUNITY(5, 181, 246),
LBG_COMMUNITY(3, 133, 143),
LBG_COMMUNITY(4, 144, 178),
LBG_COMMUNITY(5, 179, 246),
};
static const struct intel_pinctrl_soc_data lbg_soc_data = {

View File

@ -482,7 +482,7 @@ static int copy_dma_range_map(struct device *to, struct device *from)
/**
* rproc_handle_vdev() - handle a vdev fw resource
* @rproc: the remote processor
* @rsc: the vring resource descriptor
* @ptr: the vring resource descriptor
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
@ -507,9 +507,10 @@ static int copy_dma_range_map(struct device *to, struct device *from)
*
* Returns 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
int offset, int avail)
{
struct fw_rsc_vdev *rsc = ptr;
struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
int i, ret;
@ -627,7 +628,7 @@ void rproc_vdev_release(struct kref *ref)
/**
* rproc_handle_trace() - handle a shared trace buffer resource
* @rproc: the remote processor
* @rsc: the trace resource descriptor
* @ptr: the trace resource descriptor
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
@ -641,9 +642,10 @@ void rproc_vdev_release(struct kref *ref)
*
* Returns 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
static int rproc_handle_trace(struct rproc *rproc, void *ptr,
int offset, int avail)
{
struct fw_rsc_trace *rsc = ptr;
struct rproc_debug_trace *trace;
struct device *dev = &rproc->dev;
char name[15];
@ -693,7 +695,7 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
/**
* rproc_handle_devmem() - handle devmem resource entry
* @rproc: remote processor handle
* @rsc: the devmem resource entry
* @ptr: the devmem resource entry
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
@ -716,9 +718,10 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
* and not allow firmwares to request access to physical addresses that
* are outside those ranges.
*/
static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
int offset, int avail)
{
struct fw_rsc_devmem *rsc = ptr;
struct rproc_mem_entry *mapping;
struct device *dev = &rproc->dev;
int ret;
@ -896,7 +899,7 @@ static int rproc_release_carveout(struct rproc *rproc,
/**
* rproc_handle_carveout() - handle phys contig memory allocation requests
* @rproc: rproc handle
* @rsc: the resource entry
* @ptr: the resource entry
* @offset: offset of the resource entry
* @avail: size of available data (for image validation)
*
@ -913,9 +916,9 @@ static int rproc_release_carveout(struct rproc *rproc,
* pressure is important; it may have a substantial impact on performance.
*/
static int rproc_handle_carveout(struct rproc *rproc,
struct fw_rsc_carveout *rsc,
int offset, int avail)
void *ptr, int offset, int avail)
{
struct fw_rsc_carveout *rsc = ptr;
struct rproc_mem_entry *carveout;
struct device *dev = &rproc->dev;
@ -1097,10 +1100,10 @@ EXPORT_SYMBOL(rproc_of_parse_firmware);
* enum fw_resource_type.
*/
static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
[RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
[RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
[RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
[RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
[RSC_CARVEOUT] = rproc_handle_carveout,
[RSC_DEVMEM] = rproc_handle_devmem,
[RSC_TRACE] = rproc_handle_trace,
[RSC_VDEV] = rproc_handle_vdev,
};
/* handle firmware resource entries before booting the remote processor */
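
The table previously cast each handler, e.g. (rproc_handle_resource_t)rproc_handle_carveout, and calling a function through an incompatible pointer type is undefined behaviour; giving every handler the same void *ptr parameter removes the casts. A reduced sketch of the resulting dispatch pattern (names and types are illustrative, not the rproc ones):

typedef int (*handler_t)(void *ptr, int offset, int avail);

struct rsc_demo { int da; };

/* Every handler shares the table's exact signature ... */
static int handle_demo(void *ptr, int offset, int avail)
{
	struct rsc_demo *rsc = ptr;	/* ... and recovers its type here */

	return rsc->da + offset + avail;
}

static handler_t handlers[] = { handle_demo };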

View File

@ -741,6 +741,9 @@ int geni_icc_get(struct geni_se *se, const char *icc_ddr)
int i, err;
const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
if (has_acpi_companion(se->dev))
return 0;
for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
if (!icc_names[i])
continue;

View File

@ -1637,12 +1637,13 @@ static int acm_resume(struct usb_interface *intf)
struct urb *urb;
int rv = 0;
acm_unpoison_urbs(acm);
spin_lock_irq(&acm->write_lock);
if (--acm->susp_count)
goto out;
acm_unpoison_urbs(acm);
if (tty_port_initialized(&acm->port)) {
rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);

View File

@ -2411,7 +2411,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
port->nr_sink_caps = cnt;
port->sink_cap_done = true;
if (port->ams == GET_SINK_CAPABILITIES)
tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
tcpm_set_state(port, ready_state(port), 0);
/* Unexpected Sink Capabilities */
else
tcpm_pd_handle_msg(port,

View File

@ -273,8 +273,10 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
mr->log_size = log_entity_size;
mr->nsg = nsg;
mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
if (!mr->nent)
if (!mr->nent) {
err = -ENOMEM;
goto err_map;
}
err = create_direct_mr(mvdev, mr);
if (err)

View File

@ -749,9 +749,11 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
const struct vdpa_config_ops *ops = vdpa->config;
int r = 0;
mutex_lock(&dev->mutex);
r = vhost_dev_check_owner(dev);
if (r)
return r;
goto unlock;
switch (msg->type) {
case VHOST_IOTLB_UPDATE:
@ -772,6 +774,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
r = -EINVAL;
break;
}
unlock:
mutex_unlock(&dev->mutex);
return r;
}
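
The fix above replaces an early return that leaked dev->mutex with a goto to a common unlock label. A hedged sketch of the idiom, with placeholder names:

#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_process_msg(struct mutex *lock, bool owner_ok)
{
	int r = 0;

	mutex_lock(lock);
	if (!owner_ok) {	/* stand-in for vhost_dev_check_owner() */
		r = -EPERM;
		goto unlock;	/* never return with the mutex held */
	}
	/* ... handle the message ... */
unlock:
	mutex_unlock(lock);
	return r;
}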

View File

@ -669,6 +669,7 @@ drivers/clk/hisilicon/reset.h
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/dummy_timer.c
drivers/clocksource/mmio.c
drivers/clocksource/timer-mediatek.c
drivers/clocksource/timer-of.c
drivers/clocksource/timer-of.h
drivers/clocksource/timer-probe.c
@ -1567,12 +1568,15 @@ drivers/usb/gadget/function/f_midi.c
drivers/usb/gadget/function/f_ncm.c
drivers/usb/gadget/function/f_rndis.c
drivers/usb/gadget/function/f_serial.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/ndis.h
drivers/usb/gadget/function/rndis.c
drivers/usb/gadget/function/rndis.h
drivers/usb/gadget/functions.c
drivers/usb/gadget/function/storage_common.c
drivers/usb/gadget/function/storage_common.h
drivers/usb/gadget/function/u_audio.c
drivers/usb/gadget/function/u_audio.h
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/function/u_ether_configfs.h
drivers/usb/gadget/function/u_ether.h
@ -1583,6 +1587,7 @@ drivers/usb/gadget/function/u_ncm.h
drivers/usb/gadget/function/u_rndis.h
drivers/usb/gadget/function/u_serial.c
drivers/usb/gadget/function/u_serial.h
drivers/usb/gadget/function/u_uac2.h
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/trace.c
@ -1730,6 +1735,18 @@ fs/drop_caches.c
fs/eventfd.c
fs/eventpoll.c
fs/exec.c
fs/exfat/balloc.c
fs/exfat/cache.c
fs/exfat/dir.c
fs/exfat/exfat_fs.h
fs/exfat/exfat_raw.h
fs/exfat/fatent.c
fs/exfat/file.c
fs/exfat/inode.c
fs/exfat/misc.c
fs/exfat/namei.c
fs/exfat/nls.c
fs/exfat/super.c
fs/exportfs/expfs.c
fs/ext4/acl.c
fs/ext4/acl.h
@ -2971,6 +2988,7 @@ include/linux/page_idle.h
include/linux/page-isolation.h
include/linux/pagemap.h
include/linux/page_owner.h
include/linux/page_pinner.h
include/linux/page_ref.h
include/linux/page_reporting.h
include/linux/pagevec.h
@ -3864,6 +3882,7 @@ include/trace/hooks/printk.h
include/trace/hooks/psi.h
include/trace/hooks/rwsem.h
include/trace/hooks/sched.h
include/trace/hooks/selinux.h
include/trace/hooks/signal.h
include/trace/hooks/softlockup.h
include/trace/hooks/sys.h
@ -4945,6 +4964,7 @@ mm/page_ext.c
mm/page_io.c
mm/page_isolation.c
mm/page_owner.c
mm/page_pinner.c
mm/page_reporting.c
mm/page_reporting.h
mm/page_vma_mapped.c

View File

@ -900,7 +900,7 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
kunmap(page);
put_page(page);
put_user_page(page);
} else {
stop = !dump_skip(cprm, PAGE_SIZE);
}

View File

@ -232,7 +232,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
static void put_arg_page(struct page *page)
{
put_page(page);
put_user_page(page);
}
static void free_arg_pages(struct linux_binprm *bprm)

View File

@ -684,7 +684,11 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
flush_dcache_page(cs->pg);
set_page_dirty_lock(cs->pg);
}
put_page(cs->pg);
/*
* The page could be a GUP page (see iov_iter_get_pages in
* fuse_copy_fill), so use put_user_page() to release it.
*/
put_user_page(cs->pg);
}
cs->pg = NULL;
}
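
These conversions release GUP-taken pages through put_user_page() instead of put_page(); per the page_ext.h hunk later in this diff, the page pinner tracker flags pages whose refcount was raised by GUP or follow_page(FOLL_GET), and put_user_page() is the release half of that pairing. A sketch of the pairing; demo_pin_then_release is a hypothetical caller:

#include <linux/mm.h>
#include <linux/errno.h>

static int demo_pin_then_release(unsigned long uaddr)
{
	struct page *page;
	int n;

	/* refcount taken by GUP; drop it with put_user_page() so the
	 * pinner tracker sees the release */
	n = get_user_pages_fast(uaddr, 1, 0, &page);
	if (n != 1)
		return n < 0 ? n : -EFAULT;
	/* ... access the page ... */
	put_user_page(page);
	return 0;
}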

View File

@ -1624,6 +1624,9 @@ static int incfs_getattr(const struct path *path,
generic_fillattr(inode, stat);
if (inode->i_ino < INCFS_START_INO_RANGE)
return 0;
stat->attributes &= ~STATX_ATTR_VERITY;
if (IS_VERITY(inode))
stat->attributes |= STATX_ATTR_VERITY;

View File

@ -843,8 +843,17 @@ static int do_dentry_open(struct file *f,
* XXX: Huge page cache doesn't support writing yet. Drop all page
* cache for this file before processing writes.
*/
if ((f->f_mode & FMODE_WRITE) && filemap_nr_thps(inode->i_mapping))
truncate_pagecache(inode, 0);
if (f->f_mode & FMODE_WRITE) {
/*
* Paired with smp_mb() in collapse_file() to ensure nr_thps
* is up to date and the update to i_writecount by
* get_write_access() is visible. Ensures subsequent insertion
* of THPs into the page cache will fail.
*/
smp_mb();
if (filemap_nr_thps(inode->i_mapping))
truncate_pagecache(inode, 0);
}
return 0;
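
The comment describes a Dekker-style pairing: each side does a store, a full barrier, then a load of the other side's flag, so at least one of them observes the other. Schematically (simplified, not the literal code of either function):

/*
 * opener (this hunk)                 khugepaged (collapse_file)
 *
 * elevate i_writecount               update filemap_nr_thps()
 * smp_mb()                           smp_mb()
 * read filemap_nr_thps()             read i_writecount
 * -> truncate if THPs present        -> back out if writers present
 */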

View File

@ -159,7 +159,7 @@ static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
write_len = strnlen(kaddr + page_offset, len);
seq_write(m, kaddr + page_offset, write_len);
kunmap(page);
put_page(page);
put_user_page(page);
/* if strnlen hit a null terminator then we're done */
if (write_len != len)

View File

@ -1259,6 +1259,11 @@ static inline bool bpf_allow_ptr_leaks(void)
return perfmon_capable();
}
static inline bool bpf_allow_uninit_stack(void)
{
return perfmon_capable();
}
static inline bool bpf_allow_ptr_to_map_access(void)
{
return perfmon_capable();

View File

@ -187,7 +187,7 @@ struct bpf_func_state {
* 0 = main function, 1 = first callee.
*/
u32 frameno;
/* subprog number == index within subprog_stack_depth
/* subprog number == index within subprog_info
* zero == main subprog
*/
u32 subprogno;
@ -390,6 +390,7 @@ struct bpf_verifier_env {
u32 used_map_cnt; /* number of used maps */
u32 id_gen; /* used to generate unique reg IDs */
bool allow_ptr_leaks;
bool allow_uninit_stack;
bool allow_ptr_to_map_access;
bool bpf_capable;
bool bypass_spec_v1;

View File

@ -32,6 +32,7 @@
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <linux/page_pinner.h>
#include <linux/android_kabi.h>
struct mempolicy;
@ -1231,6 +1232,8 @@ static inline void put_page(struct page *page)
{
page = compound_head(page);
page_pinner_migration_failed(page);
/*
* For devmap managed pages we need to catch refcount transition from
* 2 to 1, when refcount reach one it means the page is free and we
@ -1279,6 +1282,7 @@ static inline void put_page(struct page *page)
*/
#define GUP_PIN_COUNTING_BIAS (1U << 10)
void put_user_page(struct page *page);
void unpin_user_page(struct page *page);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
bool make_dirty);
@ -1788,11 +1792,13 @@ extern int fixup_user_fault(struct mm_struct *mm,
extern vm_fault_t __handle_speculative_fault(struct mm_struct *mm,
unsigned long address,
unsigned int flags,
struct vm_area_struct **vma);
struct vm_area_struct **vma,
struct pt_regs *regs);
static inline vm_fault_t handle_speculative_fault(struct mm_struct *mm,
unsigned long address,
unsigned int flags,
struct vm_area_struct **vma)
struct vm_area_struct **vma,
struct pt_regs *regs)
{
/*
* Try speculative page fault for multithreaded user space task only.
@ -1801,7 +1807,7 @@ static inline vm_fault_t handle_speculative_fault(struct mm_struct *mm,
*vma = NULL;
return VM_FAULT_RETRY;
}
return __handle_speculative_fault(mm, address, flags, vma);
return __handle_speculative_fault(mm, address, flags, vma, regs);
}
extern bool can_reuse_spf_vma(struct vm_area_struct *vma,
unsigned long address);
@ -1809,7 +1815,8 @@ extern bool can_reuse_spf_vma(struct vm_area_struct *vma,
static inline vm_fault_t handle_speculative_fault(struct mm_struct *mm,
unsigned long address,
unsigned int flags,
struct vm_area_struct **vma)
struct vm_area_struct **vma,
struct pt_regs *regs)
{
return VM_FAULT_RETRY;
}

View File

@ -19,6 +19,12 @@ struct page_ext_operations {
enum page_ext_flags {
PAGE_EXT_OWNER,
PAGE_EXT_OWNER_ALLOCATED,
#if defined(CONFIG_PAGE_PINNER)
/* page refcount was increased by GUP or follow_page(FOLL_GET) */
PAGE_EXT_GET,
/* page migration failed */
PAGE_EXT_PINNER_MIGRATION_FAILED,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
PAGE_EXT_YOUNG,
PAGE_EXT_IDLE,

View File

@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGE_PINNER_H
#define __LINUX_PAGE_PINNER_H
#include <linux/jump_label.h>
#ifdef CONFIG_PAGE_PINNER
extern struct static_key_false page_pinner_inited;
extern struct static_key_true failure_tracking;
extern struct page_ext_operations page_pinner_ops;
extern void __reset_page_pinner(struct page *page, unsigned int order, bool free);
extern void __set_page_pinner(struct page *page, unsigned int order);
extern void __dump_page_pinner(struct page *page);
void __page_pinner_migration_failed(struct page *page);
void __page_pinner_mark_migration_failed_pages(struct list_head *page_list);
static inline void reset_page_pinner(struct page *page, unsigned int order)
{
if (static_branch_unlikely(&page_pinner_inited))
__reset_page_pinner(page, order, false);
}
static inline void free_page_pinner(struct page *page, unsigned int order)
{
if (static_branch_unlikely(&page_pinner_inited))
__reset_page_pinner(page, order, true);
}
static inline void set_page_pinner(struct page *page, unsigned int order)
{
if (static_branch_unlikely(&page_pinner_inited))
__set_page_pinner(page, order);
}
static inline void dump_page_pinner(struct page *page)
{
if (static_branch_unlikely(&page_pinner_inited))
__dump_page_pinner(page);
}
static inline void page_pinner_migration_failed(struct page *page)
{
if (!static_branch_unlikely(&failure_tracking))
return;
__page_pinner_migration_failed(page);
}
static inline void page_pinner_mark_migration_failed_pages(struct list_head *page_list)
{
if (!static_branch_unlikely(&failure_tracking))
return;
__page_pinner_mark_migration_failed_pages(page_list);
}
#else
static inline void reset_page_pinner(struct page *page, unsigned int order)
{
}
static inline void free_page_pinner(struct page *page, unsigned int order)
{
}
static inline void set_page_pinner(struct page *page, unsigned int order)
{
}
static inline void dump_page_pinner(struct page *page)
{
}
static inline void page_pinner_migration_failed(struct page *page)
{
}
static inline void page_pinner_mark_migration_failed_pages(struct list_head *page_list)
{
}
#endif /* CONFIG_PAGE_PINNER */
#endif /* __LINUX_PAGE_PINNER_H */
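
The header follows the page_owner/page_ext pattern: every entry point is a static inline guarded by a static branch, so the hooks cost nothing unless the feature is built in and enabled at boot, and put_page() itself calls page_pinner_migration_failed() per the mm.h hunk earlier. A hedged sketch of a caller on the migration failure path (the function is illustrative; the real wiring lands in mm/):

#include <linux/list.h>
#include <linux/page_pinner.h>

static void demo_report_migration_failures(struct list_head *failed_pages)
{
	/* no-op unless page_pinner is enabled; otherwise records the
	 * pages so their pinning call chains can be inspected later */
	page_pinner_mark_migration_failed_pages(failed_pages);
}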

View File

@ -85,6 +85,7 @@
* omap2+ specific GPIO registers
*/
#define OMAP24XX_GPIO_REVISION 0x0000
#define OMAP24XX_GPIO_SYSCONFIG 0x0010
#define OMAP24XX_GPIO_IRQSTATUS1 0x0018
#define OMAP24XX_GPIO_IRQSTATUS2 0x0028
#define OMAP24XX_GPIO_IRQENABLE2 0x002c
@ -108,6 +109,7 @@
#define OMAP24XX_GPIO_SETDATAOUT 0x0094
#define OMAP4_GPIO_REVISION 0x0000
#define OMAP4_GPIO_SYSCONFIG 0x0010
#define OMAP4_GPIO_EOI 0x0020
#define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
#define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
@ -148,6 +150,7 @@
#ifndef __ASSEMBLER__
struct omap_gpio_reg_offs {
u16 revision;
u16 sysconfig;
u16 direction;
u16 datain;
u16 dataout;

View File

@ -20,6 +20,9 @@ struct binder_transaction_data;
DECLARE_HOOK(android_vh_binder_transaction_init,
TP_PROTO(struct binder_transaction *t),
TP_ARGS(t));
DECLARE_HOOK(android_vh_binder_transaction_priority_skip,
TP_PROTO(struct task_struct *task, bool *skip),
TP_ARGS(task, skip));
DECLARE_HOOK(android_vh_binder_set_priority,
TP_PROTO(struct binder_transaction *t, struct task_struct *task),
TP_ARGS(t, task));
@ -51,6 +54,17 @@ DECLARE_HOOK(android_vh_binder_trans,
DECLARE_HOOK(android_vh_binder_preset,
TP_PROTO(struct hlist_head *hhead, struct mutex *lock),
TP_ARGS(hhead, lock));
DECLARE_HOOK(android_vh_binder_proc_transaction,
TP_PROTO(struct task_struct *caller_task, struct task_struct *binder_proc_task,
struct task_struct *binder_th_task, int node_debug_id,
unsigned int code, bool pending_async),
TP_ARGS(caller_task, binder_proc_task, binder_th_task, node_debug_id, code, pending_async));
DECLARE_HOOK(android_vh_binder_new_ref,
TP_PROTO(struct task_struct *proc, uint32_t ref_desc, int node_debug_id),
TP_ARGS(proc, ref_desc, node_debug_id));
DECLARE_HOOK(android_vh_binder_del_ref,
TP_PROTO(struct task_struct *proc, uint32_t ref_desc),
TP_ARGS(proc, ref_desc));
/* macro versions of hooks are no longer required */
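
DECLARE_HOOK builds an Android vendor hook: a tracepoint hidden from tracefs that a vendor module attaches to through the generated register_trace_android_vh_*() helper. A sketch of a module probing the new android_vh_binder_new_ref hook (module scaffolding and names are illustrative):

#include <linux/module.h>
#include <trace/hooks/binder.h>

/* probes receive a void *data cookie, then the TP_PROTO arguments */
static void demo_new_ref_probe(void *data, struct task_struct *proc,
			       uint32_t ref_desc, int node_debug_id)
{
	/* vendor bookkeeping for the new binder ref would go here */
}

static int __init demo_hook_init(void)
{
	return register_trace_android_vh_binder_new_ref(demo_new_ref_probe,
							NULL);
}
module_init(demo_hook_init);
MODULE_LICENSE("GPL");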

View File

@ -15,6 +15,9 @@ DECLARE_HOOK(android_vh_cgroup_set_task,
DECLARE_RESTRICTED_HOOK(android_rvh_cpuset_fork,
TP_PROTO(struct task_struct *p, int *inherit_cpus),
TP_ARGS(p, inherit_cpus), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_cgroup_force_kthread_migration,
TP_PROTO(struct task_struct *tsk, struct cgroup *dst_cgrp, bool *force_migration),
TP_ARGS(tsk, dst_cgrp, force_migration), 1);
#endif
#include <trace/define_trace.h>

View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM selinux
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_SELINUX_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_SELINUX_H
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
/*
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
struct selinux_state;
DECLARE_HOOK(android_vh_selinux_is_initialized,
TP_PROTO(const struct selinux_state *state),
TP_ARGS(state));
#endif /* _TRACE_HOOK_SELINUX_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@ -16,6 +16,10 @@ DECLARE_HOOK(android_vh_arch_set_freq_scale,
unsigned long max, unsigned long *scale),
TP_ARGS(cpus, freq, max, scale));
DECLARE_HOOK(android_vh_update_topology_flags_workfn,
TP_PROTO(void *unused),
TP_ARGS(unused));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_TOPOLOGY_H */

View File

@ -16,6 +16,12 @@ DECLARE_HOOK(android_vh_tune_scan_type,
DECLARE_HOOK(android_vh_tune_swappiness,
TP_PROTO(int *swappiness),
TP_ARGS(swappiness));
DECLARE_HOOK(android_vh_shrink_slab_bypass,
TP_PROTO(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, int priority, bool *bypass),
TP_ARGS(gfp_mask, nid, memcg, priority, bypass));
DECLARE_HOOK(android_vh_tune_inactive_ratio,
TP_PROTO(unsigned long *inactive_ratio, int file),
TP_ARGS(inactive_ratio, file));
#endif /* _TRACE_HOOK_VMSCAN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
#include <trace/define_trace.h>


View File

@ -2447,3 +2447,4 @@ config ARCH_HAS_SYSCALL_WRAPPER
def_bool n
source "init/Kconfig.gki"
source "init/Kconfig.gki-debug"

init/Kconfig.gki-debug Normal file
View File

@ -0,0 +1,9 @@
config GKI_HIDDEN_RCUTORTURE
bool
select TASKS_RUDE_RCU
config GKI_HACKS_FOR_CONSOLIDATE
bool "GKI Dummy config options for consolidate"
select TRACE_PREEMPT_TOGGLE
select TRACE_IRQFLAGS
select GKI_HIDDEN_RCUTORTURE

kernel/.gitignore vendored
View File

@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
config_data
kheaders.md5
timeconst.h
hz.bc

View File

@ -139,10 +139,15 @@ obj-$(CONFIG_SCF_TORTURE_TEST) += scftorture.o
$(obj)/configs.o: $(obj)/config_data.gz
targets += config_data.gz
$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
targets += config_data config_data.gz
$(obj)/config_data.gz: $(obj)/config_data FORCE
$(call if_changed,gzip)
filechk_cat = cat $<
$(obj)/config_data: $(KCONFIG_CONFIG) FORCE
$(call filechk,cat)
$(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
quiet_cmd_genikh = CHK $(obj)/kheaders_data.tar.xz

File diff suppressed because it is too large

View File

@ -232,7 +232,8 @@ int cgroup_migrate(struct task_struct *leader, bool threadgroup,
int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
bool threadgroup);
struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
bool *locked)
bool *locked,
struct cgroup *dst_cgrp);
__acquires(&cgroup_threadgroup_rwsem);
void cgroup_procs_write_finish(struct task_struct *task, bool locked)
__releases(&cgroup_threadgroup_rwsem);

View File

@ -498,7 +498,7 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
if (!cgrp)
return -ENODEV;
task = cgroup_procs_write_start(buf, threadgroup, &locked);
task = cgroup_procs_write_start(buf, threadgroup, &locked, cgrp);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;

View File

@ -61,6 +61,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/cgroup.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/cgroup.h>
#define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \
MAX_CFTYPE_NAME + 2)
@ -2749,11 +2752,13 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
}
struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
bool *locked)
bool *locked,
struct cgroup *dst_cgrp)
__acquires(&cgroup_threadgroup_rwsem)
{
struct task_struct *tsk;
pid_t pid;
bool force_migration = false;
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return ERR_PTR(-EINVAL);
@ -2788,13 +2793,16 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
if (threadgroup)
tsk = tsk->group_leader;
if (tsk->flags & PF_KTHREAD)
trace_android_rvh_cgroup_force_kthread_migration(tsk, dst_cgrp, &force_migration);
/*
* kthreads may acquire PF_NO_SETAFFINITY during initialization.
* If userland migrates such a kthread to a non-root cgroup, it can
* become trapped in a cpuset, or RT kthread may be born in a
* cgroup with no rt_runtime allocated. Just say no.
*/
if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
if (!force_migration && (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY))) {
tsk = ERR_PTR(-EINVAL);
goto out_unlock_threadgroup;
}
@ -4746,7 +4754,7 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
if (!dst_cgrp)
return -ENODEV;
task = cgroup_procs_write_start(buf, true, &locked);
task = cgroup_procs_write_start(buf, true, &locked, dst_cgrp);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
@ -4790,7 +4798,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
if (!dst_cgrp)
return -ENODEV;
task = cgroup_procs_write_start(buf, false, &locked);
task = cgroup_procs_write_start(buf, false, &locked, dst_cgrp);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
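
A restricted hook (DECLARE_RESTRICTED_HOOK) attaches the same way, via register_trace_android_rvh_cgroup_force_kthread_migration(), but can only be registered once and never detached. A sketch of a probe; the name-prefix policy is invented purely for illustration:

#include <linux/sched.h>
#include <linux/string.h>
#include <trace/hooks/cgroup.h>

static void demo_force_migration(void *data, struct task_struct *tsk,
				 struct cgroup *dst_cgrp,
				 bool *force_migration)
{
	/* hypothetical policy: vendor-owned kthreads may be migrated
	 * despite PF_NO_SETAFFINITY */
	if (!strncmp(tsk->comm, "vendor_", 7))
		*force_migration = true;
}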

View File

@ -588,7 +588,7 @@ static int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
lock_page(page);
shmem_swizzled = PageSwapCache(page) || page->mapping;
unlock_page(page);
put_page(page);
put_user_page(page);
if (shmem_swizzled)
goto again;
@ -638,7 +638,7 @@ static int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
if (READ_ONCE(page->mapping) != mapping) {
rcu_read_unlock();
put_page(page);
put_user_page(page);
goto again;
}
@ -646,7 +646,7 @@ static int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
inode = READ_ONCE(mapping->host);
if (!inode) {
rcu_read_unlock();
put_page(page);
put_user_page(page);
goto again;
}
@ -658,7 +658,7 @@ static int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
}
out:
put_page(page);
put_user_page(page);
return err;
}

View File

@ -280,6 +280,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
return ret;
}
EXPORT_SYMBOL_GPL(irq_do_set_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,

View File

@ -61,6 +61,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
*/
void queued_write_lock_slowpath(struct qrwlock *lock)
{
int cnts;
/* Put the writer into the wait queue */
arch_spin_lock(&lock->wait_lock);
@ -74,9 +76,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
/* When no more readers or writers, set the locked flag */
do {
atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
_QW_LOCKED) != _QW_WAITING);
cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
arch_spin_unlock(&lock->wait_lock);
}
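
atomic_try_cmpxchg_acquire() writes the observed value back into cnts on failure, so a failed attempt falls straight back into the relaxed wait without an extra load, and acquire ordering is paid only on the successful transition. A user-space C11 model of the retry shape (constants mirror the kernel's _QW_* values; the wait queue and unlock are omitted):

#include <stdatomic.h>

#define QW_WAITING 0x100
#define QW_LOCKED  0x0ff

static void demo_write_lock_slowpath(atomic_int *cnts)
{
	int c;

	do {
		/* spin until only the waiting bit remains set */
		while ((c = atomic_load_explicit(cnts, memory_order_relaxed))
		       != QW_WAITING)
			;
	} while (!atomic_compare_exchange_weak_explicit(
			cnts, &c, QW_LOCKED,
			memory_order_acquire, memory_order_relaxed));
}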

View File

@ -476,7 +476,8 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
do { } while (0)
#endif
#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) \
|| IS_ENABLED(CONFIG_GKI_HIDDEN_RCUTORTURE)
/* Get rcutorture access to sched_setaffinity(). */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{

View File

@ -967,7 +967,7 @@ EXPORT_SYMBOL_GPL(sched_uclamp_used);
static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
return clamp_value / UCLAMP_BUCKET_DELTA;
return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
}
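
UCLAMP_BUCKET_DELTA is DIV_ROUND_UP(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS), so whenever the bucket count divides 1024 exactly, the maximum clamp value indexes one past the last bucket. A worked example assuming CONFIG_UCLAMP_BUCKETS_COUNT=16:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
#define UCLAMP_BUCKETS		16
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define UCLAMP_BUCKET_DELTA	DIV_ROUND_UP(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

int main(void)
{
	unsigned int clamp_value = SCHED_CAPACITY_SCALE;
	unsigned int id = clamp_value / UCLAMP_BUCKET_DELTA;

	/* delta = 64, so 1024 / 64 = 16, but valid ids are 0..15 */
	printf("old id: %u\n", id);
	/* the fix caps the result at UCLAMP_BUCKETS - 1 */
	printf("new id: %u\n", id < UCLAMP_BUCKETS - 1 ? id : UCLAMP_BUCKETS - 1);
	return 0;
}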
static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
@ -6932,6 +6932,11 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf, bool force)
*/
update_rq_clock(rq);
#ifdef CONFIG_SCHED_DEBUG
/* note the clock update in orf */
orf.clock_update_flags |= RQCF_UPDATED;
#endif
for (;;) {
/*
* There's this thread running, bail when that's the only

View File

@ -7,6 +7,7 @@ menuconfig KFENCE
bool "KFENCE: low-overhead sampling-based memory safety error detector"
depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
select STACKTRACE
select IRQ_WORK
help
KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
access, use-after-free, and invalid-free errors. KFENCE is designed

View File

@ -69,10 +69,10 @@ static void kasan_test_exit(struct kunit *test)
* resource named "kasan_data". Do not use this name for KUnit resources
* outside of KASAN tests.
*
* For hardware tag-based KASAN, when a tag fault happens, tag checking is
* normally auto-disabled. When this happens, this test handler reenables
* tag checking. As tag checking can be only disabled or enabled per CPU, this
* handler disables migration (preemption).
* For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
* checking is auto-disabled. When this happens, this test handler reenables
* tag checking. As tag checking can be only disabled or enabled per CPU,
* this handler disables migration (preemption).
*
* Since the compiler doesn't see that the expression can change the fail_data
* fields, it can reorder or optimize away the accesses to those fields.
@ -80,7 +80,8 @@ static void kasan_test_exit(struct kunit *test)
* expression to prevent that.
*/
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
!kasan_async_mode_enabled()) \
migrate_disable(); \
WRITE_ONCE(fail_data.report_expected, true); \
WRITE_ONCE(fail_data.report_found, false); \
@ -92,10 +93,14 @@ static void kasan_test_exit(struct kunit *test)
barrier(); \
expression; \
barrier(); \
if (kasan_async_mode_enabled()) \
kasan_force_async_fault(); \
barrier(); \
KUNIT_EXPECT_EQ(test, \
READ_ONCE(fail_data.report_expected), \
READ_ONCE(fail_data.report_found)); \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
!kasan_async_mode_enabled()) { \
if (READ_ONCE(fail_data.report_found)) \
kasan_enable_tagging_sync(); \
migrate_enable(); \

View File

@ -62,6 +62,22 @@ config PAGE_OWNER
If unsure, say N.
config PAGE_PINNER
bool "Track page pinner"
depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
select DEBUG_FS
select STACKTRACE
select STACKDEPOT
select PAGE_EXTENSION
help
This keeps track of which call chain pinned a page, which may
help to find page migration failures. Even if you include this
feature in your build, it is disabled by default. You should pass
"page_pinner=on" on the kernel command line to enable it. It eats
a fair amount of memory if enabled.
If unsure, say N.
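
In practice that means a device wanting the tracker boots with "page_pinner=on" on the kernel command line; without it, the static branches in page_pinner.h above keep every hook a no-op.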
config PAGE_POISONING
bool "Poison pages after freeing"
help

Some files were not shown because too many files have changed in this diff