From 5723833390aca77a0f2f61a24278958dec47982f Mon Sep 17 00:00:00 2001
From: Richard Chang
Date: Wed, 29 Nov 2023 06:41:13 +0000
Subject: [PATCH] ANDROID: mm: lru_cache_disable skips lru cache draining

lru_cache_disable is not a trivial cost, since it has to run work on
every core in the system. Repeatedly calling it for each
alloc_contig_range invocation in CMA's allocation loop is therefore
expensive.

This patch makes lru_cache_disable smarter: it skips
__lru_add_drain_all when it knows the lru cache has already been
disabled by someone else. With that, a user of alloc_contig_range can
disable the lru cache in advance in their own context, so that the
subsequent alloc_contig_range calls for that operation avoid the
costly drain.

Bug: 313795505
Change-Id: Icbb0e6dbf74644d45f562fd1d845888ca1f1f347
Signed-off-by: Richard Chang
(cherry picked from commit 816567b17866ca3f9a11e670bd52c82335cbb448)
---
 mm/swap.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/mm/swap.c b/mm/swap.c
index 955930f41d20..32bc97b73831 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -933,6 +933,7 @@ void lru_add_drain_all(void)
 #endif /* CONFIG_SMP */
 
 atomic_t lru_disable_count = ATOMIC_INIT(0);
+EXPORT_SYMBOL_GPL(lru_disable_count);
 
 /*
  * lru_cache_disable() needs to be called before we start compiling
@@ -944,7 +945,12 @@ atomic_t lru_disable_count = ATOMIC_INIT(0);
  */
 void lru_cache_disable(void)
 {
-	atomic_inc(&lru_disable_count);
+	/*
+	 * If someone else has already disabled the lru cache, just
+	 * bump lru_disable_count and return.
+	 */
+	if (atomic_inc_not_zero(&lru_disable_count))
+		return;
 	/*
 	 * Readers of lru_disable_count are protected by either disabling
 	 * preemption or rcu_read_lock:
@@ -964,7 +970,9 @@ void lru_cache_disable(void)
 #else
 	lru_add_and_bh_lrus_drain();
 #endif
+	atomic_inc(&lru_disable_count);
 }
+EXPORT_SYMBOL_GPL(lru_cache_disable);
 
 /**
  * release_pages - batched put_page()
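
Illustrative caller (not part of this patch): a minimal sketch of the "disable
in advance" usage the commit message describes, assuming a module that makes
several contiguous allocations back to back. The function name
example_alloc_many_ranges and its parameters are hypothetical. The per-CPU
drain is paid once by the outer lru_cache_disable(); the lru_cache_disable()
call inside each alloc_contig_range() then sees a non-zero lru_disable_count
and returns early.

#include <linux/gfp.h>
#include <linux/swap.h>

/* Hypothetical helper: allocate nr contiguous pfn ranges in one pass. */
static int example_alloc_many_ranges(unsigned long *starts,
				     unsigned long *ends, int nr)
{
	int i, ret = 0;

	lru_cache_disable();	/* drain and disable lru caches once */
	for (i = 0; i < nr; i++) {
		/*
		 * alloc_contig_range() disables the lru cache internally;
		 * with this patch that inner call only bumps the count.
		 */
		ret = alloc_contig_range(starts[i], ends[i],
					 MIGRATE_CMA, GFP_KERNEL);
		if (ret)
			break;
	}
	lru_cache_enable();	/* re-enable once the whole batch is done */
	return ret;
}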