ANDROID: mm: cma: introduce __cma_alloc API
This patch enhances the CMA API with support for a failfast mode, selected with the __GFP_NORETRY flag. The mode is aimed at high-order bulk allocation: instead of stalling on pages that are temporarily hard to migrate (for example, pages under writeback or locked pages), the allocator keeps searching other pageblocks for readily migratable pages.

Original patch:
Link: https://lore.kernel.org/linux-mm/YAnM5PbNJZlk%2F%2FiX@google.com/T/#m36b144ff81fe0a8f0ecaf6813de4819ecc41f8fe

Bug: 308881290
Change-Id: I1c623f17fb49c26005aaffc17330cf820ce6585c
Signed-off-by: Richard Chang <richardycc@google.com>
(cherry picked from commit 3390547fec36527ed15dd213ee55d397f83ffa46)
Commit 0de2f42977 (parent db9d7ba706)
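Usage sketch (not part of this patch): the snippet below illustrates the intended calling pattern for the new API. The helper name grab_movable_buffer() and its fallback policy are assumptions for illustration only; it first tries a failfast allocation with __GFP_NORETRY and then falls back to the normal blocking behaviour, which is what cma_alloc() itself now does by calling __cma_alloc() with GFP_KERNEL.

#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical caller: try a failfast CMA grab first, then fall back. */
static struct page *grab_movable_buffer(struct cma *cma, unsigned long count,
                                        unsigned int align)
{
        struct page *page;

        /*
         * Failfast attempt: __GFP_NORETRY avoids stalling on pages that are
         * temporarily hard to migrate (e.g. under writeback or locked) and
         * keeps scanning other pageblocks instead.
         */
        page = __cma_alloc(cma, count, align,
                           GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
        if (page)
                return page;

        /* Slow path: behaves like the original blocking cma_alloc(). */
        return __cma_alloc(cma, count, align, GFP_KERNEL);
}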
include/linux/cma.h:
@@ -48,6 +48,8 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                         unsigned int order_per_bit,
                                         const char *name,
                                         struct cma **res_cma);
+extern struct page *__cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
+                               gfp_t gfp_mask);
 extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
                               bool no_warn);
 extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
mm/cma.c (43 changed lines):
@@ -416,17 +416,18 @@ static inline void cma_debug_show_areas(struct cma *cma) { }
 #endif
 
 /**
- * cma_alloc() - allocate pages from contiguous area
+ * __cma_alloc() - allocate pages from contiguous area
  * @cma:   Contiguous memory region for which the allocation is performed.
  * @count: Requested number of pages.
  * @align: Requested alignment of pages (in PAGE_SIZE order).
- * @no_warn: Avoid printing message about failed allocation
+ * @gfp_mask: GFP mask to use during the cma allocation.
  *
- * This function allocates part of contiguous memory on specific
- * contiguous memory area.
+ * This function is same with cma_alloc but supports gfp_mask.
+ * Currently, the gfp_mask supports only __GFP_NOWARN and __GFP_NORETRY.
+ * If user passes other flags, it fails the allocation.
  */
-struct page *cma_alloc(struct cma *cma, unsigned long count,
-                      unsigned int align, bool no_warn)
+struct page *__cma_alloc(struct cma *cma, unsigned long count,
+                        unsigned int align, gfp_t gfp_mask)
 {
        unsigned long mask, offset;
        unsigned long pfn = -1;
@@ -438,6 +439,10 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
        int num_attempts = 0;
        int max_retries = 5;
 
+       if (WARN_ON_ONCE((gfp_mask & GFP_KERNEL) == 0 ||
+                        (gfp_mask & ~(GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY)) != 0))
+               goto out;
+
        if (!cma || !cma->count || !cma->bitmap)
                goto out;
 
@@ -466,7 +471,8 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
                if ((num_attempts < max_retries) && (ret == -EBUSY)) {
                        spin_unlock_irq(&cma->lock);
 
-                       if (fatal_signal_pending(current))
+                       if (fatal_signal_pending(current) ||
+                           (gfp_mask & __GFP_NORETRY))
                                break;
 
                        /*
@@ -496,8 +502,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 
                pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
                mutex_lock(&cma_mutex);
-               ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
-                                        GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+               ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask);
                mutex_unlock(&cma_mutex);
                if (ret == 0) {
                        page = pfn_to_page(pfn);
@@ -529,7 +534,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
                        page_kasan_tag_reset(page + i);
        }
 
-       if (ret && !no_warn) {
+       if (ret && !(gfp_mask & __GFP_NOWARN)) {
                pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
                                   __func__, cma->name, count, ret);
                cma_debug_show_areas(cma);
@@ -548,6 +553,24 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 
        return page;
 }
+EXPORT_SYMBOL_GPL(__cma_alloc);
+
+/**
+ * cma_alloc() - allocate pages from contiguous area
+ * @cma:   Contiguous memory region for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ * @no_warn: Avoid printing message about failed allocation
+ *
+ * This function allocates part of contiguous memory on specific
+ * contiguous memory area.
+ */
+struct page *cma_alloc(struct cma *cma, unsigned long count,
+                      unsigned int align, bool no_warn)
+{
+       return __cma_alloc(cma, count, align, GFP_KERNEL |
+                          (no_warn ? __GFP_NOWARN : 0));
+}
 EXPORT_SYMBOL_GPL(cma_alloc);
 
 bool cma_pages_valid(struct cma *cma, const struct page *pages,
mm/page_alloc.c:
@@ -9336,12 +9336,16 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
        unsigned int nr_reclaimed;
        unsigned long pfn = start;
        unsigned int tries = 0;
+       unsigned int max_tries = 5;
        int ret = 0;
        struct migration_target_control mtc = {
                .nid = zone_to_nid(cc->zone),
                .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
        };
 
+       if (cc->gfp_mask & __GFP_NORETRY)
+               max_tries = 1;
+
        lru_cache_disable();
 
        while (pfn < end || !list_empty(&cc->migratepages)) {
@@ -9357,7 +9361,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
                                break;
                        pfn = cc->migrate_pfn;
                        tries = 0;
-               } else if (++tries == 5) {
+               } else if (++tries == max_tries) {
                        ret = -EBUSY;
                        break;
                }
@@ -9428,7 +9432,11 @@ int alloc_contig_range(unsigned long start, unsigned long end,
                .nr_migratepages = 0,
                .order = -1,
                .zone = page_zone(pfn_to_page(start)),
-               .mode = MIGRATE_SYNC,
+               /*
+                * Use MIGRATE_ASYNC for __GFP_NORETRY requests as it never
+                * blocks.
+                */
+               .mode = gfp_mask & __GFP_NORETRY ? MIGRATE_ASYNC : MIGRATE_SYNC,
                .ignore_skip_hint = true,
                .no_set_skip_hint = true,
                .gfp_mask = current_gfp_context(gfp_mask),
@@ -9474,7 +9482,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
         * -EBUSY is not accidentally used or returned to caller.
         */
        ret = __alloc_contig_migrate_range(&cc, start, end);
-       if (ret && ret != -EBUSY)
+       if (ret && (ret != -EBUSY || (gfp_mask & __GFP_NORETRY)))
                goto done;
        ret = 0;
 