Merge android11-5.4.61+ (874de1d) into msm-5.4

* refs/heads/tmp-874de1d:
  ANDROID: GKI: enable CONFIG_CPU_FREQ_STAT and more thermal configs
  ANDROID: ABI: Update allowed list for QCOM
  ANDROID: sched: add restrict vendor hook to modify load balance behavior
  ANDROID: GKI: Update abi_gki_aarch64_oneplus
  ANDROID: scs: use vmapped shadow stacks by default
  ANDROID: ABI: update allowed list for QCOM
  UPSTREAM: sched/fair/util_est: Implement faster ramp-up EWMA on utilization increases
  ANDROID: GKI: Update abi_gki_aarch64_exynos
  ANDROID: kbuild: disable GCOV with CFI
  ANDROID: GKI: add built-in PCIE_DW_PLAT_EP
  ANDROID: PCI: dwc: export symbols for ep driver
  ANDROID: recordmcount: avoid STT_FILE as base for mcount offset relocation
  ANDROID: iommu: Enable CONFIG_IOMMU_IO_PGTABLE_ARMV7S
  BACKPORT: FROMLIST: iommu/io-pgtable-arm-v7s: Quad lvl1 pgtable for MediaTek
  BACKPORT: FROMLIST: iommu/io-pgtable-arm-v7s: Add cfg as a param in some macros
  BACKPORT: FROMLIST: iommu/io-pgtable-arm-v7s: Extend PA34 for MediaTek
  BACKPORT: FROMLIST: iommu/io-pgtable-arm-v7s: Use ias to check the valid iova in unmap
  ANDROID: ABI: Update allowed list for QCOM
  ANDROID: modules: fix suspicious rcu usage

 Conflicts:
	kernel/sched/fair.c
	kernel/sched/features.h

Change-Id: I5d8dc058a0aca2b90ca702f18f71f663af9774fc
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
Commit: d9a7f5879a
Committed by: Blagovest Kolenichev, 2020-10-29 13:01:46 -07:00
17 changed files with 3513 additions and 1505 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -19,6 +19,7 @@
amba_driver_register
amba_driver_unregister
anon_inode_getfd
__arch_clear_user
__arch_copy_from_user
__arch_copy_in_user
__arch_copy_to_user
@@ -165,6 +166,7 @@
config_ep_by_speed
config_group_init_type_name
console_lock
console_stop
console_trylock
console_unlock
__const_udelay
@@ -1547,6 +1549,7 @@
__tracepoint_android_vh_is_fpsimd_save
__tracepoint_device_pm_callback_end
__tracepoint_device_pm_callback_start
__tracepoint_gpu_mem_total
__tracepoint_pelt_cfs_tp
tracepoint_probe_register
__tracepoint_suspend_resume
@@ -1596,6 +1599,7 @@
ufshcd_dme_get_attr
ufshcd_dme_set_attr
ufshcd_pltfrm_init
ufshcd_query_descriptor_retry
ufshcd_remove
ufshcd_shutdown
ufshcd_system_resume

File diff suppressed because it is too large


@@ -254,6 +254,7 @@
cpufreq_freq_attr_scaling_boost_freqs
cpufreq_generic_attr
cpufreq_generic_frequency_table_verify
cpufreq_get_policy
cpufreq_quick_get_max
cpufreq_register_driver
cpufreq_register_notifier
@@ -957,6 +958,7 @@
get_random_bytes
get_random_u32
__get_task_comm
get_task_pid
get_tree_single
get_unmapped_area
get_unused_fd_flags
@@ -1777,6 +1779,7 @@
put_device
put_disk
__put_page
put_pid
__put_task_struct
put_unused_fd
put_vaddr_frames


@@ -606,6 +606,7 @@ endchoice
config CFI_CLANG
bool "Use Clang's Control Flow Integrity (CFI)"
depends on LTO_CLANG && KALLSYMS
depends on !GCOV_KERNEL
help
This option enables Clang's Control Flow Integrity (CFI), which adds
runtime checking for indirect function calls.
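
As a rough, hypothetical illustration of what that runtime check buys (none of these functions exist in the kernel; this is plain C written only for demonstration), Clang CFI verifies at every indirect call site that the target's type matches the function pointer's declared type:

typedef int (*handler_t)(int);	/* the prototype the caller expects */

static int good_handler(int x)
{
	return x + 1;
}

static long bad_handler(void *p, long n)	/* incompatible prototype */
{
	(void)p;
	return n;
}

static int dispatch(handler_t h, int arg)
{
	/*
	 * With CONFIG_CFI_CLANG, the compiler instruments this call site:
	 * if 'h' does not point to a function of type 'int (int)', the call
	 * is diverted to the CFI check/failure path (cf. the cfi_check
	 * lookup in the kernel/module.c hunk below) instead of jumping to
	 * an incompatible target.
	 */
	return h(arg);
}

int cfi_demo(void)
{
	(void)bad_handler;	/* referenced only to keep the sketch self-contained */
	return dispatch(good_handler, 41);			/* fine */
	/* return dispatch((handler_t)bad_handler, 41); */	/* CFI violation */
}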


@@ -68,6 +68,7 @@ CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_ARM_PSCI_CPUIDLE=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -228,8 +229,10 @@ CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEAER is not set
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_DW_PLAT_EP=y
CONFIG_PCIE_QCOM=y
CONFIG_PCIE_KIRIN=y
CONFIG_PCI_ENDPOINT=y
CONFIG_FW_LOADER_USER_HELPER=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
# CONFIG_FW_CACHE is not set
@@ -340,10 +343,13 @@ CONFIG_POWER_AVS=y
CONFIG_POWER_RESET_HISI=y
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
CONFIG_THERMAL_STATISTICS=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_GOV_USER_SPACE=y
CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
CONFIG_THERMAL_EMULATION=y
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_CORE=y
CONFIG_MFD_ACT8945A=y
@@ -441,6 +447,7 @@ CONFIG_HWSPINLOCK=y
CONFIG_MTK_TIMER=y
CONFIG_MAILBOX=y
CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT=y
CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y
CONFIG_REMOTEPROC=y
CONFIG_RPMSG_CHAR=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y


@@ -55,6 +55,7 @@ CONFIG_EFI=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -204,6 +205,8 @@ CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEAER is not set
CONFIG_PCI_MSI=y
CONFIG_PCIE_DW_PLAT_EP=y
CONFIG_PCI_ENDPOINT=y
CONFIG_FW_LOADER_USER_HELPER=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
# CONFIG_FW_CACHE is not set
@@ -292,8 +295,13 @@ CONFIG_HPET=y
CONFIG_SPI=y
CONFIG_GPIOLIB=y
# CONFIG_HWMON is not set
CONFIG_THERMAL_STATISTICS=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_GOV_USER_SPACE=y
CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
CONFIG_THERMAL_EMULATION=y
# CONFIG_X86_PKG_TEMP_THERMAL is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_CORE=y


@@ -62,3 +62,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_store);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_regs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_wq_lockup_pool);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sysrq_crash);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_group);


@@ -50,20 +50,26 @@
*/
#define ARM_V7S_ADDR_BITS 32
#define _ARM_V7S_LVL_BITS(lvl) (16 - (lvl) * 4)
/* MediaTek: totally 34bits, 14bits at lvl1 and 8bits at lvl2. */
#define _ARM_V7S_LVL_BITS_MTK(lvl) (20 - (lvl) * 6)
#define ARM_V7S_LVL_SHIFT(lvl) (ARM_V7S_ADDR_BITS - (4 + 8 * (lvl)))
#define ARM_V7S_TABLE_SHIFT 10
#define ARM_V7S_PTES_PER_LVL(lvl) (1 << _ARM_V7S_LVL_BITS(lvl))
#define ARM_V7S_TABLE_SIZE(lvl) \
(ARM_V7S_PTES_PER_LVL(lvl) * sizeof(arm_v7s_iopte))
#define ARM_V7S_PTES_PER_LVL(lvl, cfg) ({ \
!arm_v7s_is_mtk_enabled(cfg) ? \
(1 << _ARM_V7S_LVL_BITS(lvl)) : (1 << _ARM_V7S_LVL_BITS_MTK(lvl));\
})
#define ARM_V7S_TABLE_SIZE(lvl, cfg) \
(ARM_V7S_PTES_PER_LVL(lvl, cfg) * sizeof(arm_v7s_iopte))
#define ARM_V7S_BLOCK_SIZE(lvl) (1UL << ARM_V7S_LVL_SHIFT(lvl))
#define ARM_V7S_LVL_MASK(lvl) ((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl)))
#define ARM_V7S_TABLE_MASK ((u32)(~0U << ARM_V7S_TABLE_SHIFT))
#define _ARM_V7S_IDX_MASK(lvl) (ARM_V7S_PTES_PER_LVL(lvl) - 1)
#define ARM_V7S_LVL_IDX(addr, lvl) ({ \
#define _ARM_V7S_IDX_MASK(lvl, cfg) (ARM_V7S_PTES_PER_LVL(lvl, cfg) - 1)
#define ARM_V7S_LVL_IDX(addr, lvl, cfg) ({ \
int _l = lvl; \
((u32)(addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l); \
((addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l, cfg); \
})
/*
@@ -112,9 +118,10 @@
#define ARM_V7S_TEX_MASK 0x7
#define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
/* MediaTek extend the two bits for PA 32bit/33bit */
/* MediaTek extend the bits below for PA 32bit/33bit/34bit */
#define ARM_V7S_ATTR_MTK_PA_BIT32 BIT(9)
#define ARM_V7S_ATTR_MTK_PA_BIT33 BIT(4)
#define ARM_V7S_ATTR_MTK_PA_BIT34 BIT(5)
/* *well, except for TEX on level 2 large pages, of course :( */
#define ARM_V7S_CONT_PAGE_TEX_SHIFT 6
@@ -196,6 +203,8 @@ static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
if (paddr & BIT_ULL(33))
pte |= ARM_V7S_ATTR_MTK_PA_BIT33;
if (paddr & BIT_ULL(34))
pte |= ARM_V7S_ATTR_MTK_PA_BIT34;
return pte;
}
@@ -220,6 +229,8 @@ static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
paddr |= BIT_ULL(32);
if (pte & ARM_V7S_ATTR_MTK_PA_BIT33)
paddr |= BIT_ULL(33);
if (pte & ARM_V7S_ATTR_MTK_PA_BIT34)
paddr |= BIT_ULL(34);
return paddr;
}
@@ -236,7 +247,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
struct device *dev = cfg->iommu_dev;
phys_addr_t phys;
dma_addr_t dma;
size_t size = ARM_V7S_TABLE_SIZE(lvl);
size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
void *table = NULL;
if (lvl == 1)
@@ -282,7 +293,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
{
struct io_pgtable_cfg *cfg = &data->iop.cfg;
struct device *dev = cfg->iommu_dev;
size_t size = ARM_V7S_TABLE_SIZE(lvl);
size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
if (!cfg->coherent_walk)
dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
@@ -426,7 +437,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
arm_v7s_iopte *tblp;
size_t sz = ARM_V7S_BLOCK_SIZE(lvl);
tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl, cfg);
if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
sz, lvl, tblp) != sz))
return -EINVAL;
@@ -479,7 +490,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);
/* Find our entry at the current level */
ptep += ARM_V7S_LVL_IDX(iova, lvl);
ptep += ARM_V7S_LVL_IDX(iova, lvl, cfg);
/* If we can install a leaf entry at this level, then do so */
if (num_entries)
@@ -552,7 +563,7 @@ static void arm_v7s_free_pgtable(struct io_pgtable *iop)
struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
int i;
for (i = 0; i < ARM_V7S_PTES_PER_LVL(1); i++) {
for (i = 0; i < ARM_V7S_PTES_PER_LVL(1, &data->iop.cfg); i++) {
arm_v7s_iopte pte = data->pgd[i];
if (ARM_V7S_PTE_IS_TABLE(pte, 1))
@@ -604,9 +615,9 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
if (!tablep)
return 0; /* Bytes unmapped */
num_ptes = ARM_V7S_PTES_PER_LVL(2);
num_ptes = ARM_V7S_PTES_PER_LVL(2, cfg);
num_entries = size >> ARM_V7S_LVL_SHIFT(2);
unmap_idx = ARM_V7S_LVL_IDX(iova, 2);
unmap_idx = ARM_V7S_LVL_IDX(iova, 2, cfg);
pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg);
if (num_entries > 1)
@@ -648,7 +659,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
if (WARN_ON(lvl > 2))
return 0;
idx = ARM_V7S_LVL_IDX(iova, lvl);
idx = ARM_V7S_LVL_IDX(iova, lvl, &iop->cfg);
ptep += idx;
do {
pte[i] = READ_ONCE(ptep[i]);
@@ -719,7 +730,7 @@ static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
if (WARN_ON(upper_32_bits(iova)))
if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
return 0;
return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd);
@@ -734,7 +745,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
u32 mask;
do {
ptep += ARM_V7S_LVL_IDX(iova, ++lvl);
ptep += ARM_V7S_LVL_IDX(iova, ++lvl, &data->iop.cfg);
pte = READ_ONCE(*ptep);
ptep = iopte_deref(pte, lvl, data);
} while (ARM_V7S_PTE_IS_TABLE(pte, lvl));
@@ -753,10 +764,10 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
{
struct arm_v7s_io_pgtable *data;
if (cfg->ias > ARM_V7S_ADDR_BITS)
if (cfg->ias > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
return NULL;
if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 35 : ARM_V7S_ADDR_BITS))
return NULL;
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
@@ -777,8 +788,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
spin_lock_init(&data->split_lock);
data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
ARM_V7S_TABLE_SIZE(2),
ARM_V7S_TABLE_SIZE(2),
ARM_V7S_TABLE_SIZE(2, cfg),
ARM_V7S_TABLE_SIZE(2, cfg),
ARM_V7S_TABLE_SLAB_FLAGS, NULL);
if (!data->l2_tables)
goto out_free_data;
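
To make the quad-size level-1 table concrete, here is a standalone arithmetic sketch (plain user-space C; the two formulas are simply restated from the macros above, nothing here is part of the patch itself):

#include <stdio.h>

#define LVL_BITS_STD(lvl)	(16 - (lvl) * 4)	/* _ARM_V7S_LVL_BITS */
#define LVL_BITS_MTK(lvl)	(20 - (lvl) * 6)	/* _ARM_V7S_LVL_BITS_MTK */

int main(void)
{
	/* Classic v7s: 12 + 8 + 12-bit page offset = 32-bit IOVA,
	 * 4096 lvl1 PTEs, i.e. a 16 KB lvl1 table with 4-byte PTEs.
	 */
	printf("std: lvl1 %2d bits -> %5d PTEs, lvl2 %d bits -> %3d PTEs\n",
	       LVL_BITS_STD(1), 1 << LVL_BITS_STD(1),
	       LVL_BITS_STD(2), 1 << LVL_BITS_STD(2));

	/* MediaTek: 14 + 8 + 12 = 34-bit IOVA, 16384 lvl1 PTEs, i.e. a
	 * quad-size 64 KB lvl1 table; this is why ARM_V7S_PTES_PER_LVL()
	 * and ARM_V7S_TABLE_SIZE() now take the cfg argument.
	 */
	printf("mtk: lvl1 %2d bits -> %5d PTEs, lvl2 %d bits -> %3d PTEs\n",
	       LVL_BITS_MTK(1), 1 << LVL_BITS_MTK(1),
	       LVL_BITS_MTK(2), 1 << LVL_BITS_MTK(2));
	return 0;
}

On the output side, the extra physical address bits 32/33/34 travel in PTE bits 9/4/5, exactly as the paddr_to_iopte()/iopte_to_paddr() hunks above encode and decode them.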


@@ -18,6 +18,7 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
pci_epc_linkup(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
int flags)
@@ -607,3 +608,4 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
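
With dw_pcie_ep_linkup() and dw_pcie_ep_init() now exported, an endpoint controller driver built on the DesignWare core can live in a module. The fragment below is a heavily trimmed, hypothetical sketch only (real users such as the plat EP driver also set up the dbi mappings, dw_pcie_ep_ops and resources, all omitted here):

#include <linux/module.h>
#include <linux/platform_device.h>
#include "pcie-designware.h"	/* struct dw_pcie with its embedded struct dw_pcie_ep */

/* Hypothetical probe tail: assumes 'pci' was allocated and its register
 * windows and pci->ep.ops were already filled in by the caller.
 */
static int example_ep_start(struct platform_device *pdev, struct dw_pcie *pci)
{
	int ret;

	ret = dw_pcie_ep_init(&pci->ep);	/* now EXPORT_SYMBOL_GPL()ed above */
	if (ret)
		dev_err(&pdev->dev, "failed to initialize endpoint: %d\n", ret);
	return ret;
}

/* Called from the controller's link-up interrupt handler, for example. */
static void example_link_up_handler(struct dw_pcie *pci)
{
	dw_pcie_ep_linkup(&pci->ep);		/* also exported above */
}

MODULE_LICENSE("GPL");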


@@ -100,8 +100,8 @@ struct io_pgtable_cfg {
* TLB maintenance when mapping as well as when unmapping.
*
* IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
* to support up to 34 bits PA where the bit32 and bit33 are
* encoded in the bit9 and bit4 of the PTE respectively.
* to support up to 35 bits PA where the bit32, bit33 and bit34 are
* encoded in the bit9, bit4 and bit5 of the PTE respectively.
*
* IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
* on unmap, for DMA domains using the flush queue mechanism for


@@ -65,6 +65,11 @@ DECLARE_RESTRICTED_HOOK(android_rvh_set_user_nice,
DECLARE_RESTRICTED_HOOK(android_rvh_setscheduler,
TP_PROTO(struct task_struct *p),
TP_ARGS(p), 1);
struct sched_group;
DECLARE_RESTRICTED_HOOK(android_rvh_find_busiest_group,
TP_PROTO(struct sched_group *busiest, struct rq *dst_rq, int *out_balance),
TP_ARGS(busiest, dst_rq, out_balance), 1);
#else
#define trace_android_rvh_select_task_rq_fair(p, prev_cpu, sd_flag, wake_flags, new_cpu)
#define trace_android_rvh_select_task_rq_rt(p, prev_cpu, sd_flag, wake_flags, new_cpu)
@@ -79,6 +84,7 @@ DECLARE_RESTRICTED_HOOK(android_rvh_setscheduler,
#define trace_android_rvh_rtmutex_prepare_setprio(p, pi_task)
#define trace_android_rvh_set_user_nice(p, nice)
#define trace_android_rvh_setscheduler(p)
#define trace_android_rvh_find_busiest_group(busiest, dst_rq, out_balance)
#endif
#endif /* _TRACE_HOOK_SCHED_H */
/* This part must be outside protection */
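
Registration of the new restricted hook follows the usual vendor-hook pattern: the probe takes a leading cookie pointer followed by the TP_PROTO arguments, and it may clear *out_balance, one of the conditions in the energy-aware branch added to find_busiest_group() (see the fair.c hunk further down). A minimal, hypothetical vendor-module sketch (the handler name and policy are invented for illustration):

#include <linux/module.h>
#include <trace/hooks/sched.h>

static void example_find_busiest_group(void *unused, struct sched_group *busiest,
				       struct rq *dst_rq, int *out_balance)
{
	/* Hypothetical policy: skip the energy-aware branch entirely. */
	*out_balance = 0;
}

static int __init example_lb_init(void)
{
	/* Restricted vendor hooks can be attached but never detached. */
	return register_trace_android_rvh_find_busiest_group(
			example_find_busiest_group, NULL);
}
module_init(example_lb_init);
MODULE_LICENSE("GPL");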


@@ -4352,10 +4352,10 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
static void cfi_init(struct module *mod)
{
#ifdef CONFIG_CFI_CLANG
rcu_read_lock();
rcu_read_lock_sched();
mod->cfi_check = (cfi_check_fn)find_kallsyms_symbol_value(mod,
CFI_CHECK_FN_NAME);
rcu_read_unlock();
rcu_read_unlock_sched();
cfi_module_add(mod, module_addr_min, module_addr_max);
#endif
}


@@ -3799,11 +3799,22 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
if (ue.enqueued & UTIL_AVG_UNCHANGED)
return;
/*
* Reset EWMA on utilization increases, the moving average is used only
* to smooth utilization decreases.
*/
ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
if (sched_feat(UTIL_EST_FASTUP)) {
if (ue.ewma < ue.enqueued) {
ue.ewma = ue.enqueued;
goto done;
}
}
/*
* Skip update of task's estimated utilization when its EWMA is
* already ~1% close to its last activation value.
*/
ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
last_ewma_diff = ue.enqueued - ue.ewma;
if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
return;
@@ -3836,6 +3847,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
ue.ewma += last_ewma_diff;
ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
done:
WRITE_ONCE(p->se.avg.util_est, ue);
}
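
The effect of UTIL_EST_FASTUP above is that increases in task utilization show up in the estimate immediately, and only decreases are smoothed by the moving average. A small self-contained sketch of the update rule (plain C; the shift of 2 is assumed to mirror UTIL_EST_WEIGHT_SHIFT, and the numbers are made up):

#include <stdio.h>

#define WEIGHT_SHIFT	2	/* assumed UTIL_EST_WEIGHT_SHIFT, i.e. w = 1/4 */

/* One dequeue-time update of the estimated utilization. */
static int update_ewma(int ewma, int enqueued, int fastup)
{
	if (fastup && ewma < enqueued)
		return enqueued;	/* faster ramp-up: jump straight to the new value */

	/* ewma += (enqueued - ewma) / 4, the usual smoothing */
	return ((ewma << WEIGHT_SHIFT) + (enqueued - ewma)) >> WEIGHT_SHIFT;
}

int main(void)
{
	int slow = 100, fast = 100, step;

	/* Task utilization jumps from 100 to 800 and stays there. */
	for (step = 1; step <= 4; step++) {
		slow = update_ewma(slow, 800, 0);
		fast = update_ewma(fast, 800, 1);
		printf("step %d: ewma=%d without FASTUP, %d with FASTUP\n",
		       step, slow, fast);
	}
	return 0;	/* without: 275, 406, 504, 578...  with: 800 from step 1 */
}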
@@ -9764,7 +9776,12 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
#ifdef CONFIG_SCHED_WALT
if (rcu_dereference(rd->pd) && !sd_overutilized(env->sd)) {
#else
if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)) {
int out_balance = 1;
trace_android_rvh_find_busiest_group(sds.busiest, env->dst_rq,
&out_balance);
if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)
&& out_balance) {
#endif
int cpu_local, cpu_busiest;
unsigned long capacity_local, capacity_busiest;


@@ -94,3 +94,4 @@ SCHED_FEAT(UTIL_EST, true)
* Request max frequency from schedutil whenever a RT task is running.
*/
SCHED_FEAT(SUGOV_RT_MAX_FREQ, false)
SCHED_FEAT(UTIL_EST_FASTUP, true)


@@ -529,6 +529,9 @@ static int find_secsym_ndx(unsigned const txtndx,
if (txtndx == get_symindex(symp, symtab, symtab_shndx)
/* avoid STB_WEAK */
&& (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
/* avoid file symbols */
if (ELF_ST_TYPE(symp->st_info) == STT_FILE)
continue;
/* function symbols on ARM have quirks, avoid them */
if (w2(ehdr->e_machine) == EM_ARM
&& ELF_ST_TYPE(symp->st_info) == STT_FUNC)