Merge keystone/android12-5.10-keystone-qcom-release.66+ (26ea52b) into msm-5.10

* refs/heads/tmp-26ea52b:
  UPSTREAM: sched/core: Mitigate race cpus_share_cache()/update_top_cache_domain()
  ANDROID: Update symbol list for mtk
  UPSTREAM: erofs: fix unsafe pagevec reuse of hooked pclusters
  UPSTREAM: erofs: remove the occupied parameter from z_erofs_pagevec_enqueue()
  UPSTREAM: usb: dwc3: gadget: Fix null pointer exception
  ANDROID: fips140: support "evaluation testing" builds via build.sh
  FROMGIT: sched/scs: Reset task stack state in bringup_cpu()
  ANDROID: dma-buf: heaps: fix dma-buf heap pool pages stat

Change-Id: I6a1e6ded5547917de20a040c7df866405ddd37d0
Signed-off-by: Sivasri Kumar, Vanka <quic_svanka@quicinc.com>
This commit is contained in:
Sivasri Kumar, Vanka 2021-12-02 11:51:45 +05:30
commit eea17f79f4
11 changed files with 5352 additions and 5470 deletions

View File

@ -1 +1 @@
851990cc99ff27e674f07d42d149affa6f855e2f
3c54070823074c74d676f8525218a43ac91d9c02

File diff suppressed because it is too large Load Diff

4
android/abi_gki_aarch64_mtk Normal file → Executable file
View File

@ -975,6 +975,7 @@
kfree_skb
kfree_skb_list
kill_anon_super
kill_pid
kimage_vaddr
kimage_voffset
__kmalloc
@ -1022,6 +1023,7 @@
kthread_destroy_worker
kthread_flush_work
kthread_flush_worker
kthread_freezable_should_stop
__kthread_init_worker
kthread_queue_delayed_work
kthread_queue_work
@ -2056,6 +2058,7 @@
trace_raw_output_prep
trace_seq_printf
trace_seq_putc
trace_set_clr_event
tracing_off
try_wait_for_completion
tty_flip_buffer_push
@ -2370,6 +2373,7 @@
vscnprintf
vsnprintf
vsprintf
vsscanf
vunmap
vzalloc
wait_for_completion

View File

@ -0,0 +1 @@
CONFIG_CRYPTO_FIPS140_MOD_EVAL_TESTING=y

View File

@ -0,0 +1,3 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.aarch64.fips140
PRE_DEFCONFIG_CMDS+=" cat ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/fips140_gki_eval_testing.fragment >> ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG};"

View File

@ -44,9 +44,9 @@ static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *pag
mutex_lock(&pool->mutex);
list_add_tail(&page->lru, &pool->items[index]);
pool->count[index]++;
mutex_unlock(&pool->mutex);
mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
1 << pool->order);
mutex_unlock(&pool->mutex);
}
static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)

View File

@ -3264,6 +3264,9 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
struct dwc3 *dwc = dep->dwc;
bool no_started_trb = true;
if (!dep->endpoint.desc)
return no_started_trb;
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
@ -3311,6 +3314,9 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
{
int status = 0;
if (!dep->endpoint.desc)
return;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
dwc3_gadget_endpoint_frame_from_event(dep, event);

View File

@ -376,11 +376,10 @@ static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
/* callers must be with collection lock held */
static int z_erofs_attach_page(struct z_erofs_collector *clt,
struct page *page,
enum z_erofs_page_type type)
struct page *page, enum z_erofs_page_type type,
bool pvec_safereuse)
{
int ret;
bool occupied;
/* give priority for inplaceio */
if (clt->mode >= COLLECT_PRIMARY &&
@ -388,10 +387,9 @@ static int z_erofs_attach_page(struct z_erofs_collector *clt,
z_erofs_try_inplace_io(clt, page))
return 0;
ret = z_erofs_pagevec_enqueue(&clt->vector,
page, type, &occupied);
ret = z_erofs_pagevec_enqueue(&clt->vector, page, type,
pvec_safereuse);
clt->cl->vcnt += (unsigned int)ret;
return ret ? 0 : -EAGAIN;
}
@ -737,7 +735,8 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
retry:
err = z_erofs_attach_page(clt, page, page_type);
err = z_erofs_attach_page(clt, page, page_type,
clt->mode >= COLLECT_PRIMARY_FOLLOWED);
/* should allocate an additional short-lived page for pagevec */
if (err == -EAGAIN) {
struct page *const newpage =
@ -745,7 +744,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
err = z_erofs_attach_page(clt, newpage,
Z_EROFS_PAGE_TYPE_EXCLUSIVE);
Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
if (!err)
goto retry;
}

View File

@ -108,12 +108,17 @@ static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
struct page *page,
enum z_erofs_page_type type,
bool *occupied)
bool pvec_safereuse)
{
*occupied = false;
if (!ctor->next && type)
if (ctor->index + 1 == ctor->nr)
if (!ctor->next) {
/* some pages cannot be reused as pvec safely without I/O */
if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;
if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
ctor->index + 1 == ctor->nr)
return false;
}
if (ctor->index >= ctor->nr)
z_erofs_pagevec_ctor_pagedown(ctor, false);
@ -125,7 +130,6 @@ static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
/* should remind that collector->next never equal to 1, 2 */
if (type == (uintptr_t)ctor->next) {
ctor->next = page;
*occupied = true;
}
ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
return true;

View File

@ -31,6 +31,7 @@
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/scs.h>
#include <linux/percpu-rwsem.h>
#include <linux/cpuset.h>
#include <uapi/linux/sched/types.h>
@ -558,6 +559,12 @@ static int bringup_cpu(unsigned int cpu)
struct task_struct *idle = idle_thread_get(cpu);
int ret;
/*
* Reset stale stack state from the last time this CPU was online.
*/
scs_task_reset(idle);
kasan_unpoison_task_stack(idle);
/*
* Some architectures have to walk the irq descriptors to
* setup the vector space for the cpu which comes online.

View File

@ -2843,6 +2843,9 @@ EXPORT_SYMBOL_GPL(wake_up_if_idle);
bool cpus_share_cache(int this_cpu, int that_cpu)
{
if (this_cpu == that_cpu)
return true;
return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
@ -6775,9 +6778,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
idle->se.exec_start = sched_clock();
idle->flags |= PF_IDLE;
scs_task_reset(idle);
kasan_unpoison_task_stack(idle);
#ifdef CONFIG_SMP
/*
* Its possible that init_idle() gets called multiple times on a task,
@ -6933,7 +6933,6 @@ void idle_task_exit(void)
finish_arch_post_lock_switch();
}
scs_task_reset(current);
/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
}