FROMGIT: maple_tree: add GFP_KERNEL to allocations in mas_expected_entries()
Users complained about OOM errors during fork without triggering compaction. This can be fixed by modifying the flags used in mas_expected_entries() so that the compaction will be triggered in low memory situations. Since mas_expected_entries() is only used during fork, the extra argument does not need to be passed through. Additionally, the two test_maple_tree test cases and one benchmark test were altered to use the correct locking type so that allocations would not trigger sleeping and thus fail. Testing was completed with lockdep atomic sleep detection. The additional locking change requires rwsem support additions to the tools/ directory through the use of pthreads pthread_rwlock_t. With this change test_maple_tree works in userspace, as a module, and in-kernel. Users may notice that the system gave up early on attempting to start new processes instead of attempting to reclaim memory. Link: https://lore.kernel.org/linux-mm/20231012155233.2272446-1-Liam.Howlett@oracle.com/ Fixes: 54a611b605 ("Maple Tree: add new data structure") Cc: <stable@vger.kernel.org> Cc: jason.sim@samsung.com Cc: Peng Zhang <zhangpeng.00@bytedance.com> Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com> (cherry picked from commit 099d7439ce03d0e7bc8f0c3d7878b562f3a48d3d https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 305860603 Bug: 308052647 Change-Id: I1ea250b37198fb3234136da4737b75c6ee3d9817 Signed-off-by: john.hsu <john.hsu@mediatek.com> (cherry picked from commit 2dd1c535d1)
This commit is contained in:
parent
835b6458fa
commit
fffe3966fa
@ -5894,7 +5894,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
|
||||
/* Internal nodes */
|
||||
nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
|
||||
/* Add working room for split (2 nodes) + new parents */
|
||||
mas_node_count(mas, nr_nodes + 3);
|
||||
mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
|
||||
|
||||
/* Detect if allocations run out */
|
||||
mas->mas_flags |= MA_STATE_PREALLOC;
|
||||
|
@ -9,6 +9,7 @@
|
||||
|
||||
#include <linux/maple_tree.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/rwsem.h>
|
||||
|
||||
#define MTREE_ALLOC_MAX 0x2000000000000Ul
|
||||
#ifndef CONFIG_DEBUG_MAPLE_TREE
|
||||
@ -1678,17 +1679,21 @@ static noinline void check_forking(struct maple_tree *mt)
|
||||
void *val;
|
||||
MA_STATE(mas, mt, 0, 0);
|
||||
MA_STATE(newmas, mt, 0, 0);
|
||||
struct rw_semaphore newmt_lock;
|
||||
|
||||
init_rwsem(&newmt_lock);
|
||||
|
||||
for (i = 0; i <= nr_entries; i++)
|
||||
mtree_store_range(mt, i*10, i*10 + 5,
|
||||
xa_mk_value(i), GFP_KERNEL);
|
||||
|
||||
mt_set_non_kernel(99999);
|
||||
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
|
||||
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
|
||||
mt_set_external_lock(&newmt, &newmt_lock);
|
||||
newmas.tree = &newmt;
|
||||
mas_reset(&newmas);
|
||||
mas_reset(&mas);
|
||||
mas_lock(&newmas);
|
||||
down_write(&newmt_lock);
|
||||
mas.index = 0;
|
||||
mas.last = 0;
|
||||
if (mas_expected_entries(&newmas, nr_entries)) {
|
||||
@ -1703,10 +1708,10 @@ static noinline void check_forking(struct maple_tree *mt)
|
||||
}
|
||||
rcu_read_unlock();
|
||||
mas_destroy(&newmas);
|
||||
mas_unlock(&newmas);
|
||||
mt_validate(&newmt);
|
||||
mt_set_non_kernel(0);
|
||||
mtree_destroy(&newmt);
|
||||
__mt_destroy(&newmt);
|
||||
up_write(&newmt_lock);
|
||||
}
|
||||
|
||||
static noinline void check_mas_store_gfp(struct maple_tree *mt)
|
||||
@ -1750,6 +1755,10 @@ static noinline void bench_forking(struct maple_tree *mt)
|
||||
void *val;
|
||||
MA_STATE(mas, mt, 0, 0);
|
||||
MA_STATE(newmas, mt, 0, 0);
|
||||
struct rw_semaphore newmt_lock;
|
||||
|
||||
init_rwsem(&newmt_lock);
|
||||
mt_set_external_lock(&newmt, &newmt_lock);
|
||||
|
||||
for (i = 0; i <= nr_entries; i++)
|
||||
mtree_store_range(mt, i*10, i*10 + 5,
|
||||
@ -1764,7 +1773,7 @@ static noinline void bench_forking(struct maple_tree *mt)
|
||||
mas.index = 0;
|
||||
mas.last = 0;
|
||||
rcu_read_lock();
|
||||
mas_lock(&newmas);
|
||||
down_write(&newmt_lock);
|
||||
if (mas_expected_entries(&newmas, nr_entries)) {
|
||||
printk("OOM!");
|
||||
BUG_ON(1);
|
||||
@ -1775,11 +1784,11 @@ static noinline void bench_forking(struct maple_tree *mt)
|
||||
mas_store(&newmas, val);
|
||||
}
|
||||
mas_destroy(&newmas);
|
||||
mas_unlock(&newmas);
|
||||
rcu_read_unlock();
|
||||
mt_validate(&newmt);
|
||||
mt_set_non_kernel(0);
|
||||
mtree_destroy(&newmt);
|
||||
__mt_destroy(&newmt);
|
||||
up_write(&newmt_lock);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@ -2380,6 +2389,10 @@ static noinline void check_dup_gaps(struct maple_tree *mt,
|
||||
void *tmp;
|
||||
MA_STATE(mas, mt, 0, 0);
|
||||
MA_STATE(newmas, &newmt, 0, 0);
|
||||
struct rw_semaphore newmt_lock;
|
||||
|
||||
init_rwsem(&newmt_lock);
|
||||
mt_set_external_lock(&newmt, &newmt_lock);
|
||||
|
||||
if (!zero_start)
|
||||
i = 1;
|
||||
@ -2389,9 +2402,9 @@ static noinline void check_dup_gaps(struct maple_tree *mt,
|
||||
mtree_store_range(mt, i*10, (i+1)*10 - gap,
|
||||
xa_mk_value(i), GFP_KERNEL);
|
||||
|
||||
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
|
||||
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
|
||||
mt_set_non_kernel(99999);
|
||||
mas_lock(&newmas);
|
||||
down_write(&newmt_lock);
|
||||
ret = mas_expected_entries(&newmas, nr_entries);
|
||||
mt_set_non_kernel(0);
|
||||
MT_BUG_ON(mt, ret != 0);
|
||||
@ -2404,9 +2417,9 @@ static noinline void check_dup_gaps(struct maple_tree *mt,
|
||||
}
|
||||
rcu_read_unlock();
|
||||
mas_destroy(&newmas);
|
||||
mas_unlock(&newmas);
|
||||
|
||||
mtree_destroy(&newmt);
|
||||
__mt_destroy(&newmt);
|
||||
up_write(&newmt_lock);
|
||||
}
|
||||
|
||||
/* Duplicate many sizes of trees. Mainly to test expected entry values */
|
||||
|
40
tools/include/linux/rwsem.h
Normal file
40
tools/include/linux/rwsem.h
Normal file
@ -0,0 +1,40 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0+ */
|
||||
#ifndef _TOOLS__RWSEM_H
|
||||
#define _TOOLS__RWSEM_H
|
||||
|
||||
#include <pthread.h>
|
||||
|
||||
struct rw_semaphore {
|
||||
pthread_rwlock_t lock;
|
||||
};
|
||||
|
||||
static inline int init_rwsem(struct rw_semaphore *sem)
|
||||
{
|
||||
return pthread_rwlock_init(&sem->lock, NULL);
|
||||
}
|
||||
|
||||
static inline int exit_rwsem(struct rw_semaphore *sem)
|
||||
{
|
||||
return pthread_rwlock_destroy(&sem->lock);
|
||||
}
|
||||
|
||||
static inline int down_read(struct rw_semaphore *sem)
|
||||
{
|
||||
return pthread_rwlock_rdlock(&sem->lock);
|
||||
}
|
||||
|
||||
static inline int up_read(struct rw_semaphore *sem)
|
||||
{
|
||||
return pthread_rwlock_unlock(&sem->lock);
|
||||
}
|
||||
|
||||
static inline int down_write(struct rw_semaphore *sem)
|
||||
{
|
||||
return pthread_rwlock_wrlock(&sem->lock);
|
||||
}
|
||||
|
||||
static inline int up_write(struct rw_semaphore *sem)
|
||||
{
|
||||
return pthread_rwlock_unlock(&sem->lock);
|
||||
}
|
||||
#endif /* _TOOLS_RWSEM_H */
|
Loading…
Reference in New Issue
Block a user