BACKPORT: erofs: adapt managed inode operations into folios

This patch gets rid of erofs_try_to_free_cached_page() and folds it
into .release_folio(). It also moves managed inode operations into
zdata.c, which simplifies the code a bit. No logic changes.

Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Reviewed-by: Yue Hu <huyue2@coolpad.com>
Link: https://lore.kernel.org/r/20230526201459.128169-5-hsiangkao@linux.alibaba.com
Bug: 318378021
Change-Id: I5cb1e44769f68edce788cb4f8084bb3d45b594b3
(cherry picked from commit 7b4e372c36fcd33c74ba3cbd65fa534b9c558184)
[dhavale: changes to internal.h applied manually]
Signed-off-by: Sandeep Dhavale <dhavale@google.com>
commit 187d034575
parent 3d93182661
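For quick orientation, a condensed before/after sketch of the .release_folio() path is included here. It is drawn from the hunks below; the workgroup freezing, pcluster scan and debug assertions are trimmed, so treat it as an illustration rather than the code as applied:

/*
 * Before: fs/erofs/super.c only wrapped a page-based helper that was
 * exported from zdata.c.
 */
static bool erofs_managed_cache_release_folio(struct folio *folio, gfp_t gfp)
{
	if (folio_test_private(folio))
		return erofs_try_to_free_cached_page(&folio->page);
	return true;
}

/*
 * After: the callback lives in fs/erofs/zdata.c and works on the folio
 * directly (folio_get_private()/folio_detach_private()); the elided part
 * drops the folio from its pcluster and returns true on success.
 */
static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
{
	if (!folio_test_private(folio))
		return true;
	/* ... try to free the cached compressed folio here ... */
	return false;
}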
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -544,7 +544,7 @@ int __init z_erofs_init_zip_subsystem(void);
 void z_erofs_exit_zip_subsystem(void);
 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 				       struct erofs_workgroup *egrp);
-int erofs_try_to_free_cached_page(struct page *page);
+int erofs_init_managed_cache(struct super_block *sb);
 int z_erofs_load_lz4_config(struct super_block *sb,
 			    struct erofs_super_block *dsb,
 			    struct z_erofs_lz4_cfgs *lz4, int len);
@@ -565,6 +565,7 @@ static inline int z_erofs_load_lz4_config(struct super_block *sb,
 	}
 	return 0;
 }
+static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
 #endif	/* !CONFIG_EROFS_FS_ZIP */
 
 #ifdef CONFIG_EROFS_FS_ZIP_LZMA
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -597,68 +597,6 @@ static int erofs_fc_parse_param(struct fs_context *fc,
 	return 0;
 }
 
-#ifdef CONFIG_EROFS_FS_ZIP
-static const struct address_space_operations managed_cache_aops;
-
-static bool erofs_managed_cache_release_folio(struct folio *folio, gfp_t gfp)
-{
-	bool ret = true;
-	struct address_space *const mapping = folio->mapping;
-
-	DBG_BUGON(!folio_test_locked(folio));
-	DBG_BUGON(mapping->a_ops != &managed_cache_aops);
-
-	if (folio_test_private(folio))
-		ret = erofs_try_to_free_cached_page(&folio->page);
-
-	return ret;
-}
-
-/*
- * It will be called only on inode eviction. In case that there are still some
- * decompression requests in progress, wait with rescheduling for a bit here.
- * We could introduce an extra locking instead but it seems unnecessary.
- */
-static void erofs_managed_cache_invalidate_folio(struct folio *folio,
-					       size_t offset, size_t length)
-{
-	const size_t stop = length + offset;
-
-	DBG_BUGON(!folio_test_locked(folio));
-
-	/* Check for potential overflow in debug mode */
-	DBG_BUGON(stop > folio_size(folio) || stop < length);
-
-	if (offset == 0 && stop == folio_size(folio))
-		while (!erofs_managed_cache_release_folio(folio, GFP_NOFS))
-			cond_resched();
-}
-
-static const struct address_space_operations managed_cache_aops = {
-	.release_folio = erofs_managed_cache_release_folio,
-	.invalidate_folio = erofs_managed_cache_invalidate_folio,
-};
-
-static int erofs_init_managed_cache(struct super_block *sb)
-{
-	struct erofs_sb_info *const sbi = EROFS_SB(sb);
-	struct inode *const inode = new_inode(sb);
-
-	if (!inode)
-		return -ENOMEM;
-
-	set_nlink(inode, 1);
-	inode->i_size = OFFSET_MAX;
-
-	inode->i_mapping->a_ops = &managed_cache_aops;
-	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
-	sbi->managed_cache = inode;
-	return 0;
-}
-#else
-static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
-#endif
-
 static struct inode *erofs_nfs_get_inode(struct super_block *sb,
 					 u64 ino, u32 generation)
 {
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -668,29 +668,72 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 	return 0;
 }
 
-int erofs_try_to_free_cached_page(struct page *page)
+static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 {
-	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
-	int ret, i;
+	struct z_erofs_pcluster *pcl = folio_get_private(folio);
+	bool ret;
+	int i;
+
+	if (!folio_test_private(folio))
+		return true;
 
 	if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
-		return 0;
+		return false;
 
-	ret = 0;
+	ret = false;
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
 	for (i = 0; i < pcl->pclusterpages; ++i) {
-		if (pcl->compressed_bvecs[i].page == page) {
+		if (pcl->compressed_bvecs[i].page == &folio->page) {
 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
-			ret = 1;
+			ret = true;
 			break;
 		}
 	}
 	erofs_workgroup_unfreeze(&pcl->obj, 1);
 
 	if (ret)
-		detach_page_private(page);
+		folio_detach_private(folio);
 	return ret;
 }
 
+/*
+ * It will be called only on inode eviction. In case that there are still some
+ * decompression requests in progress, wait with rescheduling for a bit here.
+ * An extra lock could be introduced instead but it seems unnecessary.
+ */
+static void z_erofs_cache_invalidate_folio(struct folio *folio,
+					   size_t offset, size_t length)
+{
+	const size_t stop = length + offset;
+
+	/* Check for potential overflow in debug mode */
+	DBG_BUGON(stop > folio_size(folio) || stop < length);
+
+	if (offset == 0 && stop == folio_size(folio))
+		while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
+			cond_resched();
+}
+
+static const struct address_space_operations z_erofs_cache_aops = {
+	.release_folio = z_erofs_cache_release_folio,
+	.invalidate_folio = z_erofs_cache_invalidate_folio,
+};
+
+int erofs_init_managed_cache(struct super_block *sb)
+{
+	struct inode *const inode = new_inode(sb);
+
+	if (!inode)
+		return -ENOMEM;
+
+	set_nlink(inode, 1);
+	inode->i_size = OFFSET_MAX;
+	inode->i_mapping->a_ops = &z_erofs_cache_aops;
+	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+	EROFS_SB(sb)->managed_cache = inode;
+	return 0;
+}
+
 static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
 				   struct z_erofs_bvec *bvec)
 {