UPSTREAM: erofs: remove the member readahead from struct z_erofs_decompress_frontend

The struct member is only used to add REQ_RAHEAD during I/O submission,
so it is cleaner to pass it as a parameter than to keep it in the struct.

Also, rename function z_erofs_get_sync_decompress_policy() to
z_erofs_is_sync_decompress() for better clarity and conciseness.

Signed-off-by: Yue Hu <huyue2@coolpad.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20230524063944.1655-1-zbestahu@gmail.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>

Bug: 318378021
(cherry picked from commit ef4b4b46c6aaf8edeea9a79320627fe10993f153)
Change-Id: I59cc13e7499968a1e93e13df1cb43a5123d510d9
Signed-off-by: Sandeep Dhavale <dhavale@google.com>
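
The rename hunks below show only the helper's signature, not its body. For context, here is a sketch of what the policy helper plausibly looks like in the upstream tree around this commit; the body is untouched by this patch, and the sbi->opt fields and EROFS_SYNC_DECOMPRESS_* constant names are recalled from upstream rather than taken from this diff, so treat them as assumptions:

static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
				       unsigned int readahead_pages)
{
	/* auto: enable for read_folio, disable for readahead */
	if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO &&
	    !readahead_pages)
		return true;

	/*
	 * force_on: keep decompression synchronous while the readahead
	 * window stays small (assumed upstream behavior; verify the
	 * field names against the actual tree)
	 */
	if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON &&
	    readahead_pages <= sbi->opt.max_sync_decompress_pages)
		return true;

	return false;
}

The call sites in the diff are consistent with this sketch: z_erofs_read_folio() passes 0 (so "auto" decompresses synchronously), while z_erofs_readahead() passes nr_pages (so "auto" defers to the workqueue).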
fs/erofs/zdata.c

@@ -534,7 +534,6 @@ struct z_erofs_decompress_frontend {
 	z_erofs_next_pcluster_t owned_head;
 	enum z_erofs_pclustermode mode;
 
-	bool readahead;
 	/* used for applying cache strategy on the fly */
 	bool backmost;
 	erofs_off_t headoffset;
@@ -1076,7 +1075,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 	return err;
 }
 
-static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
+static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
 				       unsigned int readahead_pages)
 {
 	/* auto: enable for read_folio, disable for readahead */
@@ -1637,7 +1636,7 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 				 struct page **pagepool,
 				 struct z_erofs_decompressqueue *fgq,
-				 bool *force_fg)
+				 bool *force_fg, bool readahead)
 {
 	struct super_block *sb = f->inode->i_sb;
 	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
@@ -1723,7 +1722,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 				bio->bi_iter.bi_sector = (sector_t)cur <<
 					(sb->s_blocksize_bits - 9);
 				bio->bi_private = q[JQ_SUBMIT];
-				if (f->readahead)
+				if (readahead)
 					bio->bi_opf |= REQ_RAHEAD;
 				++nr_bios;
 			}
@@ -1759,13 +1758,13 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 }
 
 static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
-			     struct page **pagepool, bool force_fg)
+			     struct page **pagepool, bool force_fg, bool ra)
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
 	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
-	z_erofs_submit_queue(f, pagepool, io, &force_fg);
+	z_erofs_submit_queue(f, pagepool, io, &force_fg, ra);
 
 	/* handle bypass queue (no i/o pclusters) immediately */
 	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
@@ -1863,8 +1862,8 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
 	(void)z_erofs_collector_end(&f);
 
 	/* if some compressed cluster ready, need submit them anyway */
-	z_erofs_runqueue(&f, &pagepool,
-			 z_erofs_get_sync_decompress_policy(sbi, 0));
+	z_erofs_runqueue(&f, &pagepool, z_erofs_is_sync_decompress(sbi, 0),
+			 false);
 
 	if (err)
 		erofs_err(inode->i_sb, "failed to read, err [%d]", err);
@@ -1882,7 +1881,6 @@ static void z_erofs_readahead(struct readahead_control *rac)
 	struct page *pagepool = NULL, *head = NULL, *page;
 	unsigned int nr_pages;
 
-	f.readahead = true;
 	f.headoffset = readahead_pos(rac);
 
 	z_erofs_pcluster_readmore(&f, rac, f.headoffset +
@@ -1913,7 +1911,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
 	(void)z_erofs_collector_end(&f);
 
 	z_erofs_runqueue(&f, &pagepool,
-			 z_erofs_get_sync_decompress_policy(sbi, nr_pages));
+			 z_erofs_is_sync_decompress(sbi, nr_pages), true);
 	erofs_put_metabuf(&f.map.buf);
 	erofs_release_pages(&pagepool);
 }
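
With the member gone, the readahead hint flows only through arguments: z_erofs_read_folio() calls z_erofs_runqueue() with false, z_erofs_readahead() passes true, and z_erofs_submit_queue() turns that flag into REQ_RAHEAD on each bio it submits, letting the block layer treat the speculative I/O as lower priority. The frontend struct is left holding only state that spans the whole read, which is the cleanup the commit message describes.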