ANDROID: zram: allow zram to allocate CMA pages

Though zram pages are movable, they aren't allowed to enter
MIGRATE_CMA pageblocks. zram is not seen to pin pages for
long, so allowing it should not cause an issue. Moreover,
letting zram pick CMA pages helps in cases where a zram
order-0 allocation fails even though plenty of free CMA
pages exist, which results in kswapd or direct reclaim not
making enough progress.
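
For reference, a minimal sketch of the two gfp masks
__zram_bvec_write() ends up passing to zs_malloc() after this change;
the ZRAM_GFP_* names below are illustrative only, and __GFP_CMA is the
out-of-tree flag this series relies on, not a mainline gfp flag:

  /* Illustrative sketch; the ZRAM_GFP_* macros are not part of this patch. */
  #include <linux/gfp.h>

  /* Fast path: may wake kswapd, never enters direct reclaim, silent on failure. */
  #define ZRAM_GFP_FAST	(__GFP_KSWAPD_RECLAIM | __GFP_NOWARN | \
			 __GFP_HIGHMEM | __GFP_MOVABLE | __GFP_CMA)

  /* Retry after dropping the compression stream: GFP_NOIO may block and reclaim. */
  #define ZRAM_GFP_RETRY	(GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE | __GFP_CMA)

The zsmalloc hunks mask __GFP_CMA off alongside __GFP_HIGHMEM and
__GFP_MOVABLE so the new flag does not leak into the slab allocations
backing zsmalloc's handle and zspage caches.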

Bug: 158645321
Link: https://lore.kernel.org/linux-mm/4c77bb100706b714213ff840d827a48e40ac9177.1604282969.git.cgoldswo@codeaurora.org/
Change-Id: I31f4a21781cdb31982a768daa59e9546d7667b08
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Chris Goldsworthy <cgoldswo@codeaurora.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
[isaacm@codeaurora.org: Resolve trivial merge conflicts]
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
Signed-off-by: Sukadev Bhattiprolu <quic_sukadev@quicinc.com>

drivers/block/zram/zram_drv.c

@@ -1395,13 +1395,14 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 			__GFP_KSWAPD_RECLAIM |
 			__GFP_NOWARN |
 			__GFP_HIGHMEM |
-			__GFP_MOVABLE);
+			__GFP_MOVABLE |
+			__GFP_CMA);
 	if (IS_ERR((void *)handle)) {
 		zcomp_stream_put(zram->comp);
 		atomic64_inc(&zram->stats.writestall);
 		handle = zs_malloc(zram->mem_pool, comp_len,
 				GFP_NOIO | __GFP_HIGHMEM |
-				__GFP_MOVABLE);
+				__GFP_MOVABLE | __GFP_CMA);
 		if (IS_ERR((void *)handle))
 			return PTR_ERR((void *)handle);

mm/zsmalloc.c

@@ -336,7 +336,7 @@ static void destroy_cache(struct zs_pool *pool)
 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
 {
 	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
-			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
 }
 
 static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
@@ -347,7 +347,7 @@ static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
 {
 	return kmem_cache_zalloc(pool->zspage_cachep,
-			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
 }
 
 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)