FROMGIT: f2fs: move f2fs to use reader-unfair rwsems
f2fs rw_semaphores work better if writers can starve readers, especially for the checkpoint thread, because writers are strictly more important than reader threads. This prevents significant priority inversion between low-priority readers that blocked while trying to acquire the read lock and a second acquisition of the write lock that might be blocking high-priority work.

Bug: 214413989
Signed-off-by: Tim Murray <timmurray@google.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
(cherry picked from commit e4544b63a7ee49e7fbebf35ece0a6acd3b9617ae
 git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git dev)
Change-Id: Ia0eb86447488c5ba9845a6b2eb98652200e08281
commit 7e6f112beb (parent 23686f5ee8)
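For reference, the core of the change is the f2fs_rwsem wrapper added to fs/f2fs/f2fs.h later in this diff; a condensed sketch of the idea (not the full patch) is:

struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
	wait_queue_head_t read_waiters;
};

static inline void f2fs_down_read(struct f2fs_rwsem *sem)
{
	/* readers only take the lock opportunistically, so they never queue ahead of a waiting writer */
	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
}

static inline void f2fs_up_write(struct f2fs_rwsem *sem)
{
	up_write(&sem->internal_rwsem);
	/* let readers parked in f2fs_down_read() retry their trylock */
	wake_up_all(&sem->read_waiters);
}

Callers are then converted mechanically: down_read()/up_read()/down_write()/up_write() on the affected locks become f2fs_down_read()/f2fs_up_read()/f2fs_down_write()/f2fs_up_write().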
@@ -350,13 +350,13 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
 goto skip_write;

 /* if locked failed, cp will flush dirty pages instead */
-if (!down_write_trylock(&sbi->cp_global_sem))
+if (!f2fs_down_write_trylock(&sbi->cp_global_sem))
 goto skip_write;

 trace_f2fs_writepages(mapping->host, wbc, META);
 diff = nr_pages_to_write(sbi, META, wbc);
 written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
-up_write(&sbi->cp_global_sem);
+f2fs_up_write(&sbi->cp_global_sem);
 wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
 return 0;

@@ -1148,7 +1148,8 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
 if (!is_journalled_quota(sbi))
 return false;

-down_write(&sbi->quota_sem);
+if (!f2fs_down_write_trylock(&sbi->quota_sem))
+return true;
 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
 ret = false;
 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
@@ -1159,7 +1160,7 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
 } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
 ret = true;
 }
-up_write(&sbi->quota_sem);
+f2fs_up_write(&sbi->quota_sem);
 return ret;
 }

@@ -1216,10 +1217,10 @@ static int block_operations(struct f2fs_sb_info *sbi)
 * POR: we should ensure that there are no dirty node pages
 * until finishing nat/sit flush. inode->i_blocks can be updated.
 */
-down_write(&sbi->node_change);
+f2fs_down_write(&sbi->node_change);

 if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
-up_write(&sbi->node_change);
+f2fs_up_write(&sbi->node_change);
 f2fs_unlock_all(sbi);
 err = f2fs_sync_inode_meta(sbi);
 if (err)
@@ -1229,15 +1230,15 @@ static int block_operations(struct f2fs_sb_info *sbi)
 }

 retry_flush_nodes:
-down_write(&sbi->node_write);
+f2fs_down_write(&sbi->node_write);

 if (get_pages(sbi, F2FS_DIRTY_NODES)) {
-up_write(&sbi->node_write);
+f2fs_up_write(&sbi->node_write);
 atomic_inc(&sbi->wb_sync_req[NODE]);
 err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
 atomic_dec(&sbi->wb_sync_req[NODE]);
 if (err) {
-up_write(&sbi->node_change);
+f2fs_up_write(&sbi->node_change);
 f2fs_unlock_all(sbi);
 return err;
 }
@@ -1250,13 +1251,13 @@ static int block_operations(struct f2fs_sb_info *sbi)
 * dirty node blocks and some checkpoint values by block allocation.
 */
 __prepare_cp_block(sbi);
-up_write(&sbi->node_change);
+f2fs_up_write(&sbi->node_change);
 return err;
 }

 static void unblock_operations(struct f2fs_sb_info *sbi)
 {
-up_write(&sbi->node_write);
+f2fs_up_write(&sbi->node_write);
 f2fs_unlock_all(sbi);
 }

@@ -1591,7 +1592,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 f2fs_warn(sbi, "Start checkpoint disabled!");
 }
 if (cpc->reason != CP_RESIZE)
-down_write(&sbi->cp_global_sem);
+f2fs_down_write(&sbi->cp_global_sem);

 if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
 ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
@@ -1666,7 +1667,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
 out:
 if (cpc->reason != CP_RESIZE)
-up_write(&sbi->cp_global_sem);
+f2fs_up_write(&sbi->cp_global_sem);
 return err;
 }

@@ -1714,9 +1715,9 @@ static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
 struct cp_control cpc = { .reason = CP_SYNC, };
 int err;

-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 err = f2fs_write_checkpoint(sbi, &cpc);
-up_write(&sbi->gc_lock);
+f2fs_up_write(&sbi->gc_lock);

 return err;
 }
@@ -1804,9 +1805,9 @@ int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
 if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
 int ret;

-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 ret = f2fs_write_checkpoint(sbi, &cpc);
-up_write(&sbi->gc_lock);
+f2fs_up_write(&sbi->gc_lock);

 return ret;
 }
@@ -1203,7 +1203,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 * checkpoint. This can only happen to quota writes which can cause
 * the below discard race condition.
 */
-down_read(&sbi->node_write);
+f2fs_down_read(&sbi->node_write);
 } else if (!f2fs_trylock_op(sbi)) {
 goto out_free;
 }
@@ -1320,7 +1320,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,

 f2fs_put_dnode(&dn);
 if (IS_NOQUOTA(inode))
-up_read(&sbi->node_write);
+f2fs_up_read(&sbi->node_write);
 else
 f2fs_unlock_op(sbi);

@@ -1346,7 +1346,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 f2fs_put_dnode(&dn);
 out_unlock_op:
 if (IS_NOQUOTA(inode))
-up_read(&sbi->node_write);
+f2fs_up_read(&sbi->node_write);
 else
 f2fs_unlock_op(sbi);
 out_free:
@@ -593,7 +593,7 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
 enum page_type btype = PAGE_TYPE_OF_BIO(type);
 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

-down_write(&io->io_rwsem);
+f2fs_down_write(&io->io_rwsem);

 /* change META to META_FLUSH in the checkpoint procedure */
 if (type >= META_FLUSH) {
@@ -604,7 +604,7 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
 io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
 }
 __submit_merged_bio(io);
-up_write(&io->io_rwsem);
+f2fs_up_write(&io->io_rwsem);
 }

 static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
@@ -619,9 +619,9 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
 enum page_type btype = PAGE_TYPE_OF_BIO(type);
 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

-down_read(&io->io_rwsem);
+f2fs_down_read(&io->io_rwsem);
 ret = __has_merged_page(io->bio, inode, page, ino);
-up_read(&io->io_rwsem);
+f2fs_up_read(&io->io_rwsem);
 }
 if (ret)
 __f2fs_submit_merged_write(sbi, type, temp);
@@ -745,9 +745,9 @@ static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
 f2fs_bug_on(sbi, 1);

-down_write(&io->bio_list_lock);
+f2fs_down_write(&io->bio_list_lock);
 list_add_tail(&be->list, &io->bio_list);
-up_write(&io->bio_list_lock);
+f2fs_up_write(&io->bio_list_lock);
 }

 static void del_bio_entry(struct bio_entry *be)
@@ -769,7 +769,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
 struct list_head *head = &io->bio_list;
 struct bio_entry *be;

-down_write(&io->bio_list_lock);
+f2fs_down_write(&io->bio_list_lock);
 list_for_each_entry(be, head, list) {
 if (be->bio != *bio)
 continue;
@@ -793,7 +793,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
 __submit_bio(sbi, *bio, DATA);
 break;
 }
-up_write(&io->bio_list_lock);
+f2fs_up_write(&io->bio_list_lock);
 }

 if (ret) {
@@ -819,7 +819,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
 if (list_empty(head))
 continue;

-down_read(&io->bio_list_lock);
+f2fs_down_read(&io->bio_list_lock);
 list_for_each_entry(be, head, list) {
 if (target)
 found = (target == be->bio);
@@ -829,14 +829,14 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
 if (found)
 break;
 }
-up_read(&io->bio_list_lock);
+f2fs_up_read(&io->bio_list_lock);

 if (!found)
 continue;

 found = false;

-down_write(&io->bio_list_lock);
+f2fs_down_write(&io->bio_list_lock);
 list_for_each_entry(be, head, list) {
 if (target)
 found = (target == be->bio);
@@ -849,7 +849,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
 break;
 }
 }
-up_write(&io->bio_list_lock);
+f2fs_up_write(&io->bio_list_lock);
 }

 if (found)
@@ -909,7 +909,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)

 f2fs_bug_on(sbi, is_read_io(fio->op));

-down_write(&io->io_rwsem);
+f2fs_down_write(&io->io_rwsem);
 next:
 if (fio->in_list) {
 spin_lock(&io->io_lock);
@@ -976,7 +976,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
 !f2fs_is_checkpoint_ready(sbi))
 __submit_merged_bio(io);
-up_write(&io->io_rwsem);
+f2fs_up_write(&io->io_rwsem);
 }

 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
@@ -1437,9 +1437,9 @@ void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
 {
 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
 if (lock)
-down_read(&sbi->node_change);
+f2fs_down_read(&sbi->node_change);
 else
-up_read(&sbi->node_change);
+f2fs_up_read(&sbi->node_change);
 } else {
 if (lock)
 f2fs_lock_op(sbi);
@@ -2768,13 +2768,13 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
 * the below discard race condition.
 */
 if (IS_NOQUOTA(inode))
-down_read(&sbi->node_write);
+f2fs_down_read(&sbi->node_write);

 fio.need_lock = LOCK_DONE;
 err = f2fs_do_write_data_page(&fio);

 if (IS_NOQUOTA(inode))
-up_read(&sbi->node_write);
+f2fs_up_read(&sbi->node_write);

 goto done;
 }
@@ -3232,14 +3232,14 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)

 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
 if (to > i_size && !f2fs_verity_in_progress(inode)) {
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 truncate_pagecache(inode, i_size);
 f2fs_truncate_blocks(inode, i_size, true);

-up_write(&F2FS_I(inode)->i_mmap_sem);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 }
 }

@@ -3646,21 +3646,21 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 iocb->ki_hint = WRITE_LIFE_NOT_SET;

 if (iocb->ki_flags & IOCB_NOWAIT) {
-if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
+if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[rw])) {
 iocb->ki_hint = hint;
 err = -EAGAIN;
 goto out;
 }
-if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
-up_read(&fi->i_gc_rwsem[rw]);
+if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
+f2fs_up_read(&fi->i_gc_rwsem[rw]);
 iocb->ki_hint = hint;
 err = -EAGAIN;
 goto out;
 }
 } else {
-down_read(&fi->i_gc_rwsem[rw]);
+f2fs_down_read(&fi->i_gc_rwsem[rw]);
 if (do_opu)
-down_read(&fi->i_gc_rwsem[READ]);
+f2fs_down_read(&fi->i_gc_rwsem[READ]);
 }

 err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
@@ -3670,9 +3670,9 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 DIO_SKIP_HOLES);

 if (do_opu)
-up_read(&fi->i_gc_rwsem[READ]);
+f2fs_up_read(&fi->i_gc_rwsem[READ]);

-up_read(&fi->i_gc_rwsem[rw]);
+f2fs_up_read(&fi->i_gc_rwsem[rw]);

 if (rw == WRITE) {
 if (whint_mode == WHINT_MODE_OFF)
@@ -3944,13 +3944,13 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
 unsigned int end_sec = secidx + blkcnt / blk_per_sec;
 int ret = 0;

-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 set_inode_flag(inode, FI_ALIGNED_WRITE);

 for (; secidx < end_sec; secidx++) {
-down_write(&sbi->pin_sem);
+f2fs_down_write(&sbi->pin_sem);

 f2fs_lock_op(sbi);
 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
@@ -3964,7 +3964,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,

 page = f2fs_get_lock_data_page(inode, blkidx, true);
 if (IS_ERR(page)) {
-up_write(&sbi->pin_sem);
+f2fs_up_write(&sbi->pin_sem);
 ret = PTR_ERR(page);
 goto done;
 }
@@ -3977,7 +3977,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,

 ret = filemap_fdatawrite(inode->i_mapping);

-up_write(&sbi->pin_sem);
+f2fs_up_write(&sbi->pin_sem);

 if (ret)
 break;
@@ -3987,8 +3987,8 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
 clear_inode_flag(inode, FI_DO_DEFRAG);
 clear_inode_flag(inode, FI_ALIGNED_WRITE);

-up_write(&F2FS_I(inode)->i_mmap_sem);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

 return ret;
 }
@@ -768,7 +768,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
 f2fs_wait_on_page_writeback(dentry_page, DATA, true, true);

 if (inode) {
-down_write(&F2FS_I(inode)->i_sem);
+f2fs_down_write(&F2FS_I(inode)->i_sem);
 page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
 if (IS_ERR(page)) {
 err = PTR_ERR(page);
@@ -795,7 +795,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
 f2fs_update_parent_metadata(dir, inode, current_depth);
 fail:
 if (inode)
-up_write(&F2FS_I(inode)->i_sem);
+f2fs_up_write(&F2FS_I(inode)->i_sem);

 f2fs_put_page(dentry_page, 1);

@@ -860,7 +860,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
 struct page *page;
 int err = 0;

-down_write(&F2FS_I(inode)->i_sem);
+f2fs_down_write(&F2FS_I(inode)->i_sem);
 page = f2fs_init_inode_metadata(inode, dir, NULL, NULL);
 if (IS_ERR(page)) {
 err = PTR_ERR(page);
@@ -871,7 +871,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
 clear_inode_flag(inode, FI_NEW_INODE);
 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 fail:
-up_write(&F2FS_I(inode)->i_sem);
+f2fs_up_write(&F2FS_I(inode)->i_sem);
 return err;
 }

@@ -879,7 +879,7 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
 {
 struct f2fs_sb_info *sbi = F2FS_I_SB(dir);

-down_write(&F2FS_I(inode)->i_sem);
+f2fs_down_write(&F2FS_I(inode)->i_sem);

 if (S_ISDIR(inode->i_mode))
 f2fs_i_links_write(dir, false);
@@ -890,7 +890,7 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
 f2fs_i_links_write(inode, false);
 f2fs_i_size_write(inode, 0);
 }
-up_write(&F2FS_I(inode)->i_sem);
+f2fs_up_write(&F2FS_I(inode)->i_sem);

 if (inode->i_nlink == 0)
 f2fs_add_orphan_inode(inode);
fs/f2fs/f2fs.h (112 changed lines)
@@ -117,6 +117,18 @@ typedef u32 nid_t;

 #define COMPRESS_EXT_NUM 16

+/*
+ * An implementation of an rwsem that is explicitly unfair to readers. This
+ * prevents priority inversion when a low-priority reader acquires the read lock
+ * while sleeping on the write lock but the write lock is needed by
+ * higher-priority clients.
+ */
+
+struct f2fs_rwsem {
+struct rw_semaphore internal_rwsem;
+wait_queue_head_t read_waiters;
+};
+
 struct f2fs_mount_info {
 unsigned int opt;
 int write_io_size_bits; /* Write IO size bits */
@@ -726,7 +738,7 @@ struct f2fs_inode_info {

 /* Use below internally in f2fs*/
 unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */
-struct rw_semaphore i_sem; /* protect fi info */
+struct f2fs_rwsem i_sem; /* protect fi info */
 atomic_t dirty_pages; /* # of dirty pages */
 f2fs_hash_t chash; /* hash value of given file name */
 unsigned int clevel; /* maximum level of given file name */
@@ -751,9 +763,9 @@ struct f2fs_inode_info {
 struct extent_tree *extent_tree; /* cached extent_tree entry */

 /* avoid racing between foreground op and gc */
-struct rw_semaphore i_gc_rwsem[2];
-struct rw_semaphore i_mmap_sem;
-struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */
+struct f2fs_rwsem i_gc_rwsem[2];
+struct f2fs_rwsem i_mmap_sem;
+struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */

 int i_extra_isize; /* size of extra space located in i_addr */
 kprojid_t i_projid; /* id for project quota */
@@ -870,7 +882,7 @@ struct f2fs_nm_info {
 /* NAT cache management */
 struct radix_tree_root nat_root;/* root of the nat entry cache */
 struct radix_tree_root nat_set_root;/* root of the nat set cache */
-struct rw_semaphore nat_tree_lock; /* protect nat entry tree */
+struct f2fs_rwsem nat_tree_lock; /* protect nat entry tree */
 struct list_head nat_entries; /* cached nat entry list (clean) */
 spinlock_t nat_list_lock; /* protect clean nat entry list */
 unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
@@ -983,7 +995,7 @@ struct f2fs_sm_info {
 struct dirty_seglist_info *dirty_info; /* dirty segment information */
 struct curseg_info *curseg_array; /* active segment information */

-struct rw_semaphore curseg_lock; /* for preventing curseg change */
+struct f2fs_rwsem curseg_lock; /* for preventing curseg change */

 block_t seg0_blkaddr; /* block address of 0'th segment */
 block_t main_blkaddr; /* start block address of main area */
@@ -1166,11 +1178,11 @@ struct f2fs_bio_info {
 struct bio *bio; /* bios to merge */
 sector_t last_block_in_bio; /* last block number */
 struct f2fs_io_info fio; /* store buffered io info. */
-struct rw_semaphore io_rwsem; /* blocking op for bio */
+struct f2fs_rwsem io_rwsem; /* blocking op for bio */
 spinlock_t io_lock; /* serialize DATA/NODE IOs */
 struct list_head io_list; /* track fios */
 struct list_head bio_list; /* bio entry list head */
-struct rw_semaphore bio_list_lock; /* lock to protect bio entry list */
+struct f2fs_rwsem bio_list_lock; /* lock to protect bio entry list */
 };

 #define FDEV(i) (sbi->devs[i])
@@ -1527,7 +1539,7 @@ struct f2fs_sb_info {
 struct super_block *sb; /* pointer to VFS super block */
 struct proc_dir_entry *s_proc; /* proc entry */
 struct f2fs_super_block *raw_super; /* raw super block pointer */
-struct rw_semaphore sb_lock; /* lock for raw super block */
+struct f2fs_rwsem sb_lock; /* lock for raw super block */
 int valid_super_block; /* valid super block no */
 unsigned long s_flag; /* flags for sbi */
 struct mutex writepages; /* mutex for writepages() */
@@ -1547,7 +1559,7 @@ struct f2fs_sb_info {
 /* for bio operations */
 struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
 /* keep migration IO order for LFS mode */
-struct rw_semaphore io_order_lock;
+struct f2fs_rwsem io_order_lock;
 mempool_t *write_io_dummy; /* Dummy pages */

 /* for checkpoint */
@@ -1555,10 +1567,10 @@ struct f2fs_sb_info {
 int cur_cp_pack; /* remain current cp pack */
 spinlock_t cp_lock; /* for flag in ckpt */
 struct inode *meta_inode; /* cache meta blocks */
-struct rw_semaphore cp_global_sem; /* checkpoint procedure lock */
-struct rw_semaphore cp_rwsem; /* blocking FS operations */
-struct rw_semaphore node_write; /* locking node writes */
-struct rw_semaphore node_change; /* locking node change */
+struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */
+struct f2fs_rwsem cp_rwsem; /* blocking FS operations */
+struct f2fs_rwsem node_write; /* locking node writes */
+struct f2fs_rwsem node_change; /* locking node change */
 wait_queue_head_t cp_wait;
 unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
 long interval_time[MAX_TIME]; /* to store thresholds */
@@ -1618,7 +1630,7 @@ struct f2fs_sb_info {
 block_t unusable_block_count; /* # of blocks saved by last cp */

 unsigned int nquota_files; /* # of quota sysfile */
-struct rw_semaphore quota_sem; /* blocking cp for flags */
+struct f2fs_rwsem quota_sem; /* blocking cp for flags */

 /* # of pages, see count_type */
 atomic_t nr_pages[NR_COUNT_TYPE];
@@ -1634,7 +1646,7 @@ struct f2fs_sb_info {
 struct f2fs_mount_info mount_opt; /* mount options */

 /* for cleaning operations */
-struct rw_semaphore gc_lock; /*
+struct f2fs_rwsem gc_lock; /*
 * semaphore for GC, avoid
 * race between GC and GC or CP
 */
@@ -1651,7 +1663,7 @@ struct f2fs_sb_info {

 /* threshold for gc trials on pinned files */
 u64 gc_pin_file_threshold;
-struct rw_semaphore pin_sem;
+struct f2fs_rwsem pin_sem;

 /* maximum # of trials to find a victim segment for SSR and GC */
 unsigned int max_victim_search;
@@ -2068,29 +2080,85 @@ static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
 return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
 }

+static inline void init_f2fs_rwsem(struct f2fs_rwsem *sem)
+{
+init_rwsem(&sem->internal_rwsem);
+init_waitqueue_head(&sem->read_waiters);
+}
+
+static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
+{
+return rwsem_is_locked(&sem->internal_rwsem);
+}
+
+static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
+{
+return rwsem_is_contended(&sem->internal_rwsem);
+}
+
+static inline void f2fs_down_read(struct f2fs_rwsem *sem)
+{
+wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
+}
+
+static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
+{
+return down_read_trylock(&sem->internal_rwsem);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
+{
+down_read_nested(&sem->internal_rwsem, subclass);
+}
+#else
+#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
+#endif
+
+static inline void f2fs_up_read(struct f2fs_rwsem *sem)
+{
+up_read(&sem->internal_rwsem);
+}
+
+static inline void f2fs_down_write(struct f2fs_rwsem *sem)
+{
+down_write(&sem->internal_rwsem);
+}
+
+static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
+{
+return down_write_trylock(&sem->internal_rwsem);
+}
+
+static inline void f2fs_up_write(struct f2fs_rwsem *sem)
+{
+up_write(&sem->internal_rwsem);
+wake_up_all(&sem->read_waiters);
+}
+
 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
 {
-down_read(&sbi->cp_rwsem);
+f2fs_down_read(&sbi->cp_rwsem);
 }

 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
 {
-return down_read_trylock(&sbi->cp_rwsem);
+return f2fs_down_read_trylock(&sbi->cp_rwsem);
 }

 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
 {
-up_read(&sbi->cp_rwsem);
+f2fs_up_read(&sbi->cp_rwsem);
 }

 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
 {
-down_write(&sbi->cp_rwsem);
+f2fs_down_write(&sbi->cp_rwsem);
 }

 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 {
-up_write(&sbi->cp_rwsem);
+f2fs_up_write(&sbi->cp_rwsem);
 }

 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
fs/f2fs/file.c (150 changed lines)
@@ -37,9 +37,9 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
 struct inode *inode = file_inode(vmf->vma->vm_file);
 vm_fault_t ret;

-down_read(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
 ret = filemap_fault(vmf);
-up_read(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);

 if (!ret)
 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
@@ -100,7 +100,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

 file_update_time(vmf->vma->vm_file);
-down_read(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
 lock_page(page);
 if (unlikely(page->mapping != inode->i_mapping ||
 page_offset(page) > i_size_read(inode) ||
@@ -158,7 +158,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)

 trace_f2fs_vm_page_mkwrite(page, DATA);
 out_sem:
-up_read(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);

 sb_end_pagefault(inode->i_sb);
 err:
@@ -239,13 +239,13 @@ static void try_to_fix_pino(struct inode *inode)
 struct f2fs_inode_info *fi = F2FS_I(inode);
 nid_t pino;

-down_write(&fi->i_sem);
+f2fs_down_write(&fi->i_sem);
 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
 get_parent_ino(inode, &pino)) {
 f2fs_i_pino_write(inode, pino);
 file_got_pino(inode);
 }
-up_write(&fi->i_sem);
+f2fs_up_write(&fi->i_sem);
 }

 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
@@ -308,9 +308,9 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
 * Both of fdatasync() and fsync() are able to be recovered from
 * sudden-power-off.
 */
-down_read(&F2FS_I(inode)->i_sem);
+f2fs_down_read(&F2FS_I(inode)->i_sem);
 cp_reason = need_do_checkpoint(inode);
-up_read(&F2FS_I(inode)->i_sem);
+f2fs_up_read(&F2FS_I(inode)->i_sem);

 if (cp_reason) {
 /* all the dirty node pages should be flushed for POR */
@@ -938,8 +938,8 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 return err;
 }

-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 truncate_setsize(inode, attr->ia_size);

@@ -949,8 +949,8 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 * do not trim all blocks after i_size if target size is
 * larger than i_size.
 */
-up_write(&F2FS_I(inode)->i_mmap_sem);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 if (err)
 return err;

@@ -1090,8 +1090,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 blk_start = (loff_t)pg_start << PAGE_SHIFT;
 blk_end = (loff_t)pg_end << PAGE_SHIFT;

-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 truncate_pagecache_range(inode, blk_start, blk_end - 1);

@@ -1099,8 +1099,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
 f2fs_unlock_op(sbi);

-up_write(&F2FS_I(inode)->i_mmap_sem);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 }
 }

@@ -1333,8 +1333,8 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 f2fs_balance_fs(sbi, true);

 /* avoid gc operation during block exchange */
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 f2fs_lock_op(sbi);
 f2fs_drop_extent_tree(inode);
@@ -1342,8 +1342,8 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
 f2fs_unlock_op(sbi);

-up_write(&F2FS_I(inode)->i_mmap_sem);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 return ret;
 }

@@ -1373,13 +1373,13 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 return ret;

 /* write out all moved pages, if possible */
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 truncate_pagecache(inode, offset);

 new_size = i_size_read(inode) - len;
 ret = f2fs_truncate_blocks(inode, new_size, true);
-up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
 if (!ret)
 f2fs_i_size_write(inode, new_size);
 return ret;
@@ -1478,8 +1478,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 unsigned int end_offset;
 pgoff_t end;

-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 truncate_pagecache_range(inode,
 (loff_t)index << PAGE_SHIFT,
@@ -1491,8 +1491,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
 if (ret) {
 f2fs_unlock_op(sbi);
-up_write(&F2FS_I(inode)->i_mmap_sem);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 goto out;
 }

@@ -1503,8 +1503,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 f2fs_put_dnode(&dn);

 f2fs_unlock_op(sbi);
-up_write(&F2FS_I(inode)->i_mmap_sem);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

 f2fs_balance_fs(sbi, dn.node_changed);

@@ -1560,9 +1560,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)

 f2fs_balance_fs(sbi, true);

-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
-up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
 if (ret)
 return ret;

@@ -1577,8 +1577,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

 /* avoid gc operation during block exchange */
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
 truncate_pagecache(inode, offset);

 while (!ret && idx > pg_start) {
@@ -1594,14 +1594,14 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 idx + delta, nr, false);
 f2fs_unlock_op(sbi);
 }
-up_write(&F2FS_I(inode)->i_mmap_sem);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

 /* write out all moved pages, if possible */
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 truncate_pagecache(inode, offset);
-up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);

 if (!ret)
 f2fs_i_size_write(inode, new_size);
@@ -1651,13 +1651,13 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 next_alloc:
 if (has_not_enough_free_secs(sbi, 0,
 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
 if (err && err != -ENODATA && err != -EAGAIN)
 goto out_err;
 }

-down_write(&sbi->pin_sem);
+f2fs_down_write(&sbi->pin_sem);

 f2fs_lock_op(sbi);
 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
@@ -1666,7 +1666,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);

-up_write(&sbi->pin_sem);
+f2fs_up_write(&sbi->pin_sem);

 expanded += map.m_len;
 sec_len -= map.m_len;
@@ -2050,7 +2050,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 if (ret)
 goto out;

-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

 /*
 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
@@ -2061,7 +2061,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 inode->i_ino, get_dirty_pages(inode));
 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
 if (ret) {
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 goto out;
 }

@@ -2074,7 +2074,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 /* add inode in inmem_list first and set atomic_file */
 set_inode_flag(inode, FI_ATOMIC_FILE);
 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 F2FS_I(inode)->inmem_task = current;
@@ -2381,7 +2381,7 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
 if (err)
 return err;

-down_write(&sbi->sb_lock);
+f2fs_down_write(&sbi->sb_lock);

 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
 goto got_it;
@@ -2400,7 +2400,7 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
 16))
 err = -EFAULT;
 out_err:
-up_write(&sbi->sb_lock);
+f2fs_up_write(&sbi->sb_lock);
 mnt_drop_write_file(filp);
 return err;
 }
@@ -2477,12 +2477,12 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
 return ret;

 if (!sync) {
-if (!down_write_trylock(&sbi->gc_lock)) {
+if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 ret = -EBUSY;
 goto out;
 }
 } else {
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 }

 ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
@@ -2513,12 +2513,12 @@ static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)

 do_more:
 if (!range->sync) {
-if (!down_write_trylock(&sbi->gc_lock)) {
+if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 ret = -EBUSY;
 goto out;
 }
 } else {
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 }

 ret = f2fs_gc(sbi, range->sync, true, false,
@@ -2850,10 +2850,10 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,

 f2fs_balance_fs(sbi, true);

-down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
 if (src != dst) {
 ret = -EBUSY;
-if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
+if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
 goto out_src;
 }

@@ -2871,9 +2871,9 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
 f2fs_unlock_op(sbi);

 if (src != dst)
-up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
 out_src:
-up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
 out_unlock:
 if (src != dst)
 inode_unlock(dst);
@@ -2968,7 +2968,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
 end_segno = min(start_segno + range.segments, dev_end_segno);

 while (start_segno < end_segno) {
-if (!down_write_trylock(&sbi->gc_lock)) {
+if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 ret = -EBUSY;
 goto out;
 }
@@ -3314,9 +3314,9 @@ int f2fs_precache_extents(struct inode *inode)
 while (map.m_lblk < end) {
 map.m_len = end - map.m_lblk;

-down_write(&fi->i_gc_rwsem[WRITE]);
+f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
-up_write(&fi->i_gc_rwsem[WRITE]);
+f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
 if (err)
 return err;

@@ -3393,11 +3393,11 @@ static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
 if (!vbuf)
 return -ENOMEM;

-down_read(&sbi->sb_lock);
+f2fs_down_read(&sbi->sb_lock);
 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
 ARRAY_SIZE(sbi->raw_super->volume_name),
 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
-up_read(&sbi->sb_lock);
+f2fs_up_read(&sbi->sb_lock);

 if (copy_to_user((char __user *)arg, vbuf,
 min(FSLABEL_MAX, count)))
@@ -3425,7 +3425,7 @@ static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
 if (err)
 goto out;

-down_write(&sbi->sb_lock);
+f2fs_down_write(&sbi->sb_lock);

 memset(sbi->raw_super->volume_name, 0,
 sizeof(sbi->raw_super->volume_name));
@@ -3435,7 +3435,7 @@ static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)

 err = f2fs_commit_super(sbi, false);

-up_write(&sbi->sb_lock);
+f2fs_up_write(&sbi->sb_lock);

 mnt_drop_write_file(filp);
 out:
@@ -3561,8 +3561,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
 goto out;

-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

@@ -3597,8 +3597,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 released_blocks += ret;
 }

-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 out:
 inode_unlock(inode);

@@ -3714,8 +3714,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 goto unlock_inode;
 }

-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

@@ -3750,8 +3750,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 reserved_blocks += ret;
 }

-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

 if (ret >= 0) {
 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
@@ -3869,8 +3869,8 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
 if (ret)
 goto err;

-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 ret = filemap_write_and_wait_range(mapping, range.start,
 to_end ? LLONG_MAX : end_addr - 1);
@@ -3957,8 +3957,8 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
 prev_block, len, range.flags);
 out:
-up_write(&F2FS_I(inode)->i_mmap_sem);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 err:
 inode_unlock(inode);
 file_end_write(filp);
@@ -4442,11 +4442,11 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)

 /* if we couldn't write data, we should deallocate blocks. */
 if (preallocated && i_size_read(inode) < target_size) {
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-down_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
 f2fs_truncate(inode);
-up_write(&F2FS_I(inode)->i_mmap_sem);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 }

 if (ret > 0)
fs/f2fs/gc.c (46 changed lines)
@@ -91,21 +91,21 @@ static int gc_thread_func(void *data)
 */
 if (sbi->gc_mode == GC_URGENT_HIGH) {
 wait_ms = gc_th->urgent_sleep_time;
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 goto do_gc;
 }

 if (foreground) {
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 goto do_gc;
-} else if (!down_write_trylock(&sbi->gc_lock)) {
+} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 stat_other_skip_bggc_count(sbi);
 goto next;
 }

 if (!is_idle(sbi, GC_TIME)) {
 increase_sleep_time(gc_th, &wait_ms);
-up_write(&sbi->gc_lock);
+f2fs_up_write(&sbi->gc_lock);
 stat_io_skip_bggc_count(sbi);
 goto next;
 }
@@ -1209,7 +1209,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
 fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

 if (lfs_mode)
-down_write(&fio.sbi->io_order_lock);
+f2fs_down_write(&fio.sbi->io_order_lock);

 mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
 fio.old_blkaddr, false);
@@ -1295,7 +1295,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
 true, true, true);
 up_out:
 if (lfs_mode)
-up_write(&fio.sbi->io_order_lock);
+f2fs_up_write(&fio.sbi->io_order_lock);
 put_out:
 f2fs_put_dnode(&dn);
 out:
@@ -1454,7 +1454,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 if (IS_ERR(inode) || is_bad_inode(inode))
 continue;

-if (!down_write_trylock(
+if (!f2fs_down_write_trylock(
 &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
 iput(inode);
 sbi->skipped_gc_rwsem++;
@@ -1467,7 +1467,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 if (f2fs_post_read_required(inode)) {
 int err = ra_data_block(inode, start_bidx);

-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 if (err) {
 iput(inode);
 continue;
@@ -1478,7 +1478,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,

 data_page = f2fs_get_read_data_page(inode,
 start_bidx, REQ_RAHEAD, true);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 if (IS_ERR(data_page)) {
 iput(inode);
 continue;
@@ -1497,14 +1497,14 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 int err;

 if (S_ISREG(inode->i_mode)) {
-if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
+if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) {
 sbi->skipped_gc_rwsem++;
 continue;
 }
-if (!down_write_trylock(
+if (!f2fs_down_write_trylock(
 &fi->i_gc_rwsem[WRITE])) {
 sbi->skipped_gc_rwsem++;
-up_write(&fi->i_gc_rwsem[READ]);
+f2fs_up_write(&fi->i_gc_rwsem[READ]);
 continue;
 }
 locked = true;
@@ -1527,8 +1527,8 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 submitted++;

 if (locked) {
-up_write(&fi->i_gc_rwsem[WRITE]);
-up_write(&fi->i_gc_rwsem[READ]);
+f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+f2fs_up_write(&fi->i_gc_rwsem[READ]);
 }

 stat_inc_data_blk_count(sbi, 1, gc_type);
@@ -1786,7 +1786,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
 reserved_segments(sbi),
 prefree_segments(sbi));

-up_write(&sbi->gc_lock);
+f2fs_up_write(&sbi->gc_lock);

 put_gc_inode(&gc_list);

@@ -1915,7 +1915,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
 long long block_count;
 int segs = secs * sbi->segs_per_sec;

-down_write(&sbi->sb_lock);
+f2fs_down_write(&sbi->sb_lock);

 section_count = le32_to_cpu(raw_sb->section_count);
 segment_count = le32_to_cpu(raw_sb->segment_count);
@@ -1936,7 +1936,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
 cpu_to_le32(dev_segs + segs);
 }

-up_write(&sbi->sb_lock);
+f2fs_up_write(&sbi->sb_lock);
 }

 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
@@ -2010,7 +2010,7 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

 /* stop other GC */
-if (!down_write_trylock(&sbi->gc_lock))
+if (!f2fs_down_write_trylock(&sbi->gc_lock))
 return -EAGAIN;

 /* stop CP to protect MAIN_SEC in free_segment_range */
@@ -2030,15 +2030,15 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)

 out_unlock:
 f2fs_unlock_op(sbi);
-up_write(&sbi->gc_lock);
+f2fs_up_write(&sbi->gc_lock);
 if (err)
 return err;

 set_sbi_flag(sbi, SBI_IS_RESIZEFS);

 freeze_super(sbi->sb);
-down_write(&sbi->gc_lock);
-down_write(&sbi->cp_global_sem);
+f2fs_down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->cp_global_sem);

 spin_lock(&sbi->stat_lock);
 if (shrunk_blocks + valid_user_blocks(sbi) +
@@ -2083,8 +2083,8 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
 spin_unlock(&sbi->stat_lock);
 }
 out_err:
-up_write(&sbi->cp_global_sem);
-up_write(&sbi->gc_lock);
+f2fs_up_write(&sbi->cp_global_sem);
+f2fs_up_write(&sbi->gc_lock);
 thaw_super(sbi->sb);
 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
 return err;
@@ -647,7 +647,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
 }

 if (inode) {
-down_write(&F2FS_I(inode)->i_sem);
+f2fs_down_write(&F2FS_I(inode)->i_sem);
 page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
 if (IS_ERR(page)) {
 err = PTR_ERR(page);
@@ -676,7 +676,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
 f2fs_update_parent_metadata(dir, inode, 0);
 fail:
 if (inode)
-up_write(&F2FS_I(inode)->i_sem);
+f2fs_up_write(&F2FS_I(inode)->i_sem);
 out:
 f2fs_put_page(ipage, 1);
 return err;
@@ -196,7 +196,7 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *
 __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
 int i, cold_count, hot_count;

-down_read(&sbi->sb_lock);
+f2fs_down_read(&sbi->sb_lock);

 cold_count = le32_to_cpu(sbi->raw_super->extension_count);
 hot_count = sbi->raw_super->hot_ext_count;
@@ -206,7 +206,7 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *
 break;
 }

-up_read(&sbi->sb_lock);
+f2fs_up_read(&sbi->sb_lock);

 if (i == cold_count + hot_count)
 return;
@@ -297,19 +297,19 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
 !f2fs_may_compress(inode))
 return;

-down_read(&sbi->sb_lock);
+f2fs_down_read(&sbi->sb_lock);

 cold_count = le32_to_cpu(sbi->raw_super->extension_count);
 hot_count = sbi->raw_super->hot_ext_count;

 for (i = cold_count; i < cold_count + hot_count; i++) {
 if (is_extension_exist(name, extlist[i], false)) {
-up_read(&sbi->sb_lock);
+f2fs_up_read(&sbi->sb_lock);
 return;
 }
 }

-up_read(&sbi->sb_lock);
+f2fs_up_read(&sbi->sb_lock);

 ext = F2FS_OPTION(sbi).extensions;

@@ -1012,11 +1012,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 new_page = NULL;

 new_inode->i_ctime = current_time(new_inode);
-down_write(&F2FS_I(new_inode)->i_sem);
+f2fs_down_write(&F2FS_I(new_inode)->i_sem);
 if (old_dir_entry)
 f2fs_i_links_write(new_inode, false);
 f2fs_i_links_write(new_inode, false);
-up_write(&F2FS_I(new_inode)->i_sem);
+f2fs_up_write(&F2FS_I(new_inode)->i_sem);

 if (!new_inode->i_nlink)
 f2fs_add_orphan_inode(new_inode);
@@ -1037,13 +1037,13 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 f2fs_i_links_write(new_dir, true);
 }

-down_write(&F2FS_I(old_inode)->i_sem);
+f2fs_down_write(&F2FS_I(old_inode)->i_sem);
 if (!old_dir_entry || whiteout)
 file_lost_pino(old_inode);
 else
 /* adjust dir's i_pino to pass fsck check */
 f2fs_i_pino_write(old_inode, new_dir->i_ino);
-up_write(&F2FS_I(old_inode)->i_sem);
+f2fs_up_write(&F2FS_I(old_inode)->i_sem);

 old_inode->i_ctime = current_time(old_inode);
 f2fs_mark_inode_dirty_sync(old_inode, false);
@@ -1203,38 +1203,38 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 /* update directory entry info of old dir inode */
 f2fs_set_link(old_dir, old_entry, old_page, new_inode);

-down_write(&F2FS_I(old_inode)->i_sem);
+f2fs_down_write(&F2FS_I(old_inode)->i_sem);
 if (!old_dir_entry)
 file_lost_pino(old_inode);
 else
 /* adjust dir's i_pino to pass fsck check */
 f2fs_i_pino_write(old_inode, new_dir->i_ino);
-up_write(&F2FS_I(old_inode)->i_sem);
+f2fs_up_write(&F2FS_I(old_inode)->i_sem);

 old_dir->i_ctime = current_time(old_dir);
 if (old_nlink) {
-down_write(&F2FS_I(old_dir)->i_sem);
+f2fs_down_write(&F2FS_I(old_dir)->i_sem);
 f2fs_i_links_write(old_dir, old_nlink > 0);
-up_write(&F2FS_I(old_dir)->i_sem);
+f2fs_up_write(&F2FS_I(old_dir)->i_sem);
 }
 f2fs_mark_inode_dirty_sync(old_dir, false);

 /* update directory entry info of new dir inode */
 f2fs_set_link(new_dir, new_entry, new_page, old_inode);

-down_write(&F2FS_I(new_inode)->i_sem);
+f2fs_down_write(&F2FS_I(new_inode)->i_sem);
 if (!new_dir_entry)
 file_lost_pino(new_inode);
 else
 /* adjust dir's i_pino to pass fsck check */
 f2fs_i_pino_write(new_inode, old_dir->i_ino);
-up_write(&F2FS_I(new_inode)->i_sem);
+f2fs_up_write(&F2FS_I(new_inode)->i_sem);

 new_dir->i_ctime = current_time(new_dir);
 if (new_nlink) {
-down_write(&F2FS_I(new_dir)->i_sem);
+f2fs_down_write(&F2FS_I(new_dir)->i_sem);
 f2fs_i_links_write(new_dir, new_nlink > 0);
-up_write(&F2FS_I(new_dir)->i_sem);
+f2fs_up_write(&F2FS_I(new_dir)->i_sem);
 }
 f2fs_mark_inode_dirty_sync(new_dir, false);

@ -380,14 +380,14 @@ int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
struct nat_entry *e;
bool need = false;

down_read(&nm_i->nat_tree_lock);
f2fs_down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (e) {
if (!get_nat_flag(e, IS_CHECKPOINTED) &&
!get_nat_flag(e, HAS_FSYNCED_INODE))
need = true;
}
up_read(&nm_i->nat_tree_lock);
f2fs_up_read(&nm_i->nat_tree_lock);
return need;
}

@ -397,11 +397,11 @@ bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
struct nat_entry *e;
bool is_cp = true;

down_read(&nm_i->nat_tree_lock);
f2fs_down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (e && !get_nat_flag(e, IS_CHECKPOINTED))
is_cp = false;
up_read(&nm_i->nat_tree_lock);
f2fs_up_read(&nm_i->nat_tree_lock);
return is_cp;
}

@ -411,13 +411,13 @@ bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
struct nat_entry *e;
bool need_update = true;

down_read(&nm_i->nat_tree_lock);
f2fs_down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ino);
if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
(get_nat_flag(e, IS_CHECKPOINTED) ||
get_nat_flag(e, HAS_FSYNCED_INODE)))
need_update = false;
up_read(&nm_i->nat_tree_lock);
f2fs_up_read(&nm_i->nat_tree_lock);
return need_update;
}

@ -429,14 +429,14 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
struct nat_entry *new, *e;

/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
if (rwsem_is_locked(&sbi->cp_global_sem))
if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
return;

new = __alloc_nat_entry(nid, false);
if (!new)
return;

down_write(&nm_i->nat_tree_lock);
f2fs_down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (!e)
e = __init_nat_entry(nm_i, new, ne, false);
@ -445,7 +445,7 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
nat_get_blkaddr(e) !=
le32_to_cpu(ne->block_addr) ||
nat_get_version(e) != ne->version);
up_write(&nm_i->nat_tree_lock);
f2fs_up_write(&nm_i->nat_tree_lock);
if (e != new)
__free_nat_entry(new);
}
@ -457,7 +457,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
struct nat_entry *e;
struct nat_entry *new = __alloc_nat_entry(ni->nid, true);

down_write(&nm_i->nat_tree_lock);
f2fs_down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ni->nid);
if (!e) {
e = __init_nat_entry(nm_i, new, NULL, true);
@ -506,7 +506,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
set_nat_flag(e, HAS_FSYNCED_INODE, true);
set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
}
up_write(&nm_i->nat_tree_lock);
f2fs_up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
@ -514,7 +514,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
struct f2fs_nm_info *nm_i = NM_I(sbi);
int nr = nr_shrink;

if (!down_write_trylock(&nm_i->nat_tree_lock))
if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
return 0;

spin_lock(&nm_i->nat_list_lock);
@ -536,7 +536,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
}
spin_unlock(&nm_i->nat_list_lock);

up_write(&nm_i->nat_tree_lock);
f2fs_up_write(&nm_i->nat_tree_lock);
return nr - nr_shrink;
}

@ -558,13 +558,13 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
ni->nid = nid;
retry:
/* Check nat cache */
down_read(&nm_i->nat_tree_lock);
f2fs_down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (e) {
ni->ino = nat_get_ino(e);
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
up_read(&nm_i->nat_tree_lock);
f2fs_up_read(&nm_i->nat_tree_lock);
return 0;
}

@ -574,11 +574,11 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
* nat_tree_lock. Therefore, we should retry, if we failed to grab here
* while not bothering checkpoint.
*/
if (!rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
down_read(&curseg->journal_rwsem);
} else if (rwsem_is_contended(&nm_i->nat_tree_lock) ||
} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
!down_read_trylock(&curseg->journal_rwsem)) {
up_read(&nm_i->nat_tree_lock);
f2fs_up_read(&nm_i->nat_tree_lock);
goto retry;
}

@ -587,15 +587,15 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
ne = nat_in_journal(journal, i);
node_info_from_raw_nat(ni, &ne);
}
up_read(&curseg->journal_rwsem);
if (i >= 0) {
up_read(&nm_i->nat_tree_lock);
f2fs_up_read(&nm_i->nat_tree_lock);
goto cache;
}

/* Fill node_info from nat page */
index = current_nat_addr(sbi, nid);
up_read(&nm_i->nat_tree_lock);
f2fs_up_read(&nm_i->nat_tree_lock);

page = f2fs_get_meta_page(sbi, index);
if (IS_ERR(page))
@ -1588,17 +1588,17 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
goto redirty_out;

if (wbc->for_reclaim) {
if (!down_read_trylock(&sbi->node_write))
if (!f2fs_down_read_trylock(&sbi->node_write))
goto redirty_out;
} else {
down_read(&sbi->node_write);
f2fs_down_read(&sbi->node_write);
}

/* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) {
ClearPageUptodate(page);
dec_page_count(sbi, F2FS_DIRTY_NODES);
up_read(&sbi->node_write);
f2fs_up_read(&sbi->node_write);
unlock_page(page);
return 0;
}
@ -1606,7 +1606,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
if (__is_valid_data_blkaddr(ni.blk_addr) &&
!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
DATA_GENERIC_ENHANCE)) {
up_read(&sbi->node_write);
f2fs_up_read(&sbi->node_write);
goto redirty_out;
}

@ -1627,7 +1627,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
f2fs_do_write_node_page(nid, &fio);
set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
dec_page_count(sbi, F2FS_DIRTY_NODES);
up_read(&sbi->node_write);
f2fs_up_read(&sbi->node_write);

if (wbc->for_reclaim) {
f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
@ -2376,7 +2376,7 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
unsigned int i, idx;
nid_t nid;

down_read(&nm_i->nat_tree_lock);
f2fs_down_read(&nm_i->nat_tree_lock);

for (i = 0; i < nm_i->nat_blocks; i++) {
if (!test_bit_le(i, nm_i->nat_block_bitmap))
@ -2399,7 +2399,7 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
out:
scan_curseg_cache(sbi);

up_read(&nm_i->nat_tree_lock);
f2fs_up_read(&nm_i->nat_tree_lock);
}

static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
@ -2434,7 +2434,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
META_NAT, true);

down_read(&nm_i->nat_tree_lock);
f2fs_down_read(&nm_i->nat_tree_lock);

while (1) {
if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
@ -2449,7 +2449,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
}

if (ret) {
up_read(&nm_i->nat_tree_lock);
f2fs_up_read(&nm_i->nat_tree_lock);
f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
return ret;
}
@ -2469,7 +2469,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
/* find free nids from current sum_pages */
scan_curseg_cache(sbi);

up_read(&nm_i->nat_tree_lock);
f2fs_up_read(&nm_i->nat_tree_lock);

f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
@ -2997,15 +2997,15 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* nat_cnt[DIRTY_NAT].
*/
if (enabled_nat_bits(sbi, cpc)) {
down_write(&nm_i->nat_tree_lock);
f2fs_down_write(&nm_i->nat_tree_lock);
remove_nats_in_journal(sbi);
up_write(&nm_i->nat_tree_lock);
f2fs_up_write(&nm_i->nat_tree_lock);
}

if (!nm_i->nat_cnt[DIRTY_NAT])
return 0;

down_write(&nm_i->nat_tree_lock);
f2fs_down_write(&nm_i->nat_tree_lock);

/*
* if there are no enough space in journal to store dirty nat
@ -3034,7 +3034,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
break;
}

up_write(&nm_i->nat_tree_lock);
f2fs_up_write(&nm_i->nat_tree_lock);
/* Allow dirty nats by node block allocation in write_begin */

return err;
@ -3152,7 +3152,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)

mutex_init(&nm_i->build_lock);
spin_lock_init(&nm_i->nid_list_lock);
init_rwsem(&nm_i->nat_tree_lock);
init_f2fs_rwsem(&nm_i->nat_tree_lock);

nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
@ -3258,7 +3258,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
spin_unlock(&nm_i->nid_list_lock);

/* destroy nat cache */
down_write(&nm_i->nat_tree_lock);
f2fs_down_write(&nm_i->nat_tree_lock);
while ((found = __gang_lookup_nat_cache(nm_i,
nid, NATVEC_SIZE, natvec))) {
unsigned idx;
@ -3288,7 +3288,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
kmem_cache_free(nat_entry_set_slab, setvec[idx]);
}
}
up_write(&nm_i->nat_tree_lock);
f2fs_up_write(&nm_i->nat_tree_lock);

kvfree(nm_i->nat_block_bitmap);
if (nm_i->free_nid_bitmap) {
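The f2fs.h hunk defining struct f2fs_rwsem is not shown in this part of the diff. As a reading aid for the nat_tree_lock call sites above, the following is a minimal sketch of the read-side helpers, assuming the wrapper embeds a regular rw_semaphore plus a wait queue that readers park on; the names follow the calls in the diff, but the bodies are an assumption rather than the literal upstream definition.

#include <linux/rwsem.h>
#include <linux/wait.h>

/* Sketch: reader-unfair wrapper shape implied by the call sites above. */
struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
	wait_queue_head_t read_waiters;	/* readers wait here, outside the rwsem */
};

static inline void f2fs_down_read(struct f2fs_rwsem *sem)
{
	/*
	 * Readers never join the rw_semaphore's own wait list: they retry a
	 * trylock and sleep on the side wait queue, so a sleeping writer is
	 * never queued behind them.
	 */
	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
}

static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
{
	return down_read_trylock(&sem->internal_rwsem);
}

static inline void f2fs_up_read(struct f2fs_rwsem *sem)
{
	up_read(&sem->internal_rwsem);
}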
@ -797,7 +797,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
INIT_LIST_HEAD(&dir_list);

/* prevent checkpoint */
down_write(&sbi->cp_global_sem);
f2fs_down_write(&sbi->cp_global_sem);

/* step #1: find fsynced inode numbers */
err = find_fsync_dnodes(sbi, &inode_list, check_only);
@ -848,7 +848,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
if (!err)
clear_sbi_flag(sbi, SBI_POR_DOING);

up_write(&sbi->cp_global_sem);
f2fs_up_write(&sbi->cp_global_sem);

/* let's drop all the directory inodes for clean checkpoint */
destroy_fsync_dnodes(&dir_list, err);
@ -471,7 +471,7 @@ int f2fs_commit_inmem_pages(struct inode *inode)

f2fs_balance_fs(sbi, true);

down_write(&fi->i_gc_rwsem[WRITE]);
f2fs_down_write(&fi->i_gc_rwsem[WRITE]);

f2fs_lock_op(sbi);
set_inode_flag(inode, FI_ATOMIC_COMMIT);
@ -483,7 +483,7 @@ int f2fs_commit_inmem_pages(struct inode *inode)
clear_inode_flag(inode, FI_ATOMIC_COMMIT);

f2fs_unlock_op(sbi);
up_write(&fi->i_gc_rwsem[WRITE]);
f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

return err;
}
@ -521,7 +521,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
io_schedule();
finish_wait(&sbi->gc_thread->fggc_wq, &wait);
} else {
down_write(&sbi->gc_lock);
f2fs_down_write(&sbi->gc_lock);
f2fs_gc(sbi, false, false, false, NULL_SEGNO);
}
}
@ -551,7 +551,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)

/* there is background inflight IO or foreground operation recently */
if (is_inflight_io(sbi, REQ_TIME) ||
(!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem)))
(!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
return;

/* exceed periodical checkpoint timeout threshold */
@ -2746,7 +2746,7 @@ static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
if (!sbi->am.atgc_enabled)
return;

down_read(&SM_I(sbi)->curseg_lock);
f2fs_down_read(&SM_I(sbi)->curseg_lock);

mutex_lock(&curseg->curseg_mutex);
down_write(&SIT_I(sbi)->sentry_lock);
@ -2756,7 +2756,7 @@ static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
up_write(&SIT_I(sbi)->sentry_lock);
mutex_unlock(&curseg->curseg_mutex);

up_read(&SM_I(sbi)->curseg_lock);
f2fs_up_read(&SM_I(sbi)->curseg_lock);

}
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
@ -2907,7 +2907,7 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
struct curseg_info *curseg = CURSEG_I(sbi, type);
unsigned int segno;

down_read(&SM_I(sbi)->curseg_lock);
f2fs_down_read(&SM_I(sbi)->curseg_lock);
mutex_lock(&curseg->curseg_mutex);
down_write(&SIT_I(sbi)->sentry_lock);

@ -2931,7 +2931,7 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
type, segno, curseg->segno);

mutex_unlock(&curseg->curseg_mutex);
up_read(&SM_I(sbi)->curseg_lock);
f2fs_up_read(&SM_I(sbi)->curseg_lock);
}

static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
@ -2963,23 +2963,23 @@ static void __allocate_new_section(struct f2fs_sb_info *sbi,

void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
{
down_read(&SM_I(sbi)->curseg_lock);
f2fs_down_read(&SM_I(sbi)->curseg_lock);
down_write(&SIT_I(sbi)->sentry_lock);
__allocate_new_section(sbi, type, force);
up_write(&SIT_I(sbi)->sentry_lock);
up_read(&SM_I(sbi)->curseg_lock);
f2fs_up_read(&SM_I(sbi)->curseg_lock);
}

void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
{
int i;

down_read(&SM_I(sbi)->curseg_lock);
f2fs_down_read(&SM_I(sbi)->curseg_lock);
down_write(&SIT_I(sbi)->sentry_lock);
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
__allocate_new_segment(sbi, i, false, false);
up_write(&SIT_I(sbi)->sentry_lock);
up_read(&SM_I(sbi)->curseg_lock);
f2fs_up_read(&SM_I(sbi)->curseg_lock);
}

static const struct segment_allocation default_salloc_ops = {
@ -3117,9 +3117,9 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
if (sbi->discard_blks == 0)
goto out;

down_write(&sbi->gc_lock);
f2fs_down_write(&sbi->gc_lock);
err = f2fs_write_checkpoint(sbi, &cpc);
up_write(&sbi->gc_lock);
f2fs_up_write(&sbi->gc_lock);
if (err)
goto out;

@ -3356,7 +3356,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
struct seg_entry *se = NULL;

down_read(&SM_I(sbi)->curseg_lock);
f2fs_down_read(&SM_I(sbi)->curseg_lock);

mutex_lock(&curseg->curseg_mutex);
down_write(&sit_i->sentry_lock);
@ -3439,7 +3439,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,

mutex_unlock(&curseg->curseg_mutex);

up_read(&SM_I(sbi)->curseg_lock);
f2fs_up_read(&SM_I(sbi)->curseg_lock);
}

static void update_device_state(struct f2fs_io_info *fio)
@ -3469,7 +3469,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);

if (keep_order)
down_read(&fio->sbi->io_order_lock);
f2fs_down_read(&fio->sbi->io_order_lock);
reallocate:
f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
&fio->new_blkaddr, sum, type, fio);
@ -3489,7 +3489,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
update_device_state(fio);

if (keep_order)
up_read(&fio->sbi->io_order_lock);
f2fs_up_read(&fio->sbi->io_order_lock);
}

void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
@ -3620,7 +3620,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
se = get_seg_entry(sbi, segno);
type = se->type;

down_write(&SM_I(sbi)->curseg_lock);
f2fs_down_write(&SM_I(sbi)->curseg_lock);

if (!recover_curseg) {
/* for recovery flow */
@ -3689,7 +3689,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,

up_write(&sit_i->sentry_lock);
mutex_unlock(&curseg->curseg_mutex);
up_write(&SM_I(sbi)->curseg_lock);
f2fs_up_write(&SM_I(sbi)->curseg_lock);
}

void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
@ -5165,7 +5165,7 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)

INIT_LIST_HEAD(&sm_info->sit_entry_set);

init_rwsem(&sm_info->curseg_lock);
init_f2fs_rwsem(&sm_info->curseg_lock);

if (!f2fs_readonly(sbi->sb)) {
err = f2fs_create_flush_cmd_control(sbi);
@ -1208,17 +1208,17 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
/* Initialize f2fs-specific inode info */
atomic_set(&fi->dirty_pages, 0);
atomic_set(&fi->i_compr_blocks, 0);
init_rwsem(&fi->i_sem);
init_f2fs_rwsem(&fi->i_sem);
spin_lock_init(&fi->i_size_lock);
INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->gdirty_list);
INIT_LIST_HEAD(&fi->inmem_ilist);
INIT_LIST_HEAD(&fi->inmem_pages);
mutex_init(&fi->inmem_lock);
init_rwsem(&fi->i_gc_rwsem[READ]);
init_rwsem(&fi->i_gc_rwsem[WRITE]);
init_rwsem(&fi->i_mmap_sem);
init_rwsem(&fi->i_xattr_sem);
init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
init_f2fs_rwsem(&fi->i_mmap_sem);
init_f2fs_rwsem(&fi->i_xattr_sem);

/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
@ -1923,7 +1923,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
f2fs_update_time(sbi, DISABLE_TIME);

while (!f2fs_time_over(sbi, DISABLE_TIME)) {
down_write(&sbi->gc_lock);
f2fs_down_write(&sbi->gc_lock);
err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
if (err == -ENODATA) {
err = 0;
@ -1945,7 +1945,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
goto restore_flag;
}

down_write(&sbi->gc_lock);
f2fs_down_write(&sbi->gc_lock);
cpc.reason = CP_PAUSE;
set_sbi_flag(sbi, SBI_CP_DISABLED);
err = f2fs_write_checkpoint(sbi, &cpc);
@ -1957,7 +1957,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
spin_unlock(&sbi->stat_lock);

out_unlock:
up_write(&sbi->gc_lock);
f2fs_up_write(&sbi->gc_lock);
restore_flag:
sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
return err;
@ -1977,12 +1977,12 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
if (unlikely(retry < 0))
f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");

down_write(&sbi->gc_lock);
f2fs_down_write(&sbi->gc_lock);
f2fs_dirty_to_prefree(sbi);

clear_sbi_flag(sbi, SBI_CP_DISABLED);
set_sbi_flag(sbi, SBI_IS_DIRTY);
up_write(&sbi->gc_lock);
f2fs_up_write(&sbi->gc_lock);

f2fs_sync_fs(sbi->sb, 1);
}
@ -2504,18 +2504,18 @@ int f2fs_quota_sync(struct super_block *sb, int type)
/*
* do_quotactl
* f2fs_quota_sync
* down_read(quota_sem)
* f2fs_down_read(quota_sem)
* dquot_writeback_dquots()
* f2fs_dquot_commit
* block_operation
* down_read(quota_sem)
* f2fs_down_read(quota_sem)
*/
f2fs_lock_op(sbi);
down_read(&sbi->quota_sem);
f2fs_down_read(&sbi->quota_sem);

ret = f2fs_quota_sync_file(sbi, cnt);

up_read(&sbi->quota_sem);
f2fs_up_read(&sbi->quota_sem);
f2fs_unlock_op(sbi);

inode_unlock(dqopt->files[cnt]);
@ -2640,11 +2640,11 @@ static int f2fs_dquot_commit(struct dquot *dquot)
struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
int ret;

down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
ret = dquot_commit(dquot);
if (ret < 0)
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
up_read(&sbi->quota_sem);
f2fs_up_read(&sbi->quota_sem);
return ret;
}

@ -2653,11 +2653,11 @@ static int f2fs_dquot_acquire(struct dquot *dquot)
struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
int ret;

down_read(&sbi->quota_sem);
f2fs_down_read(&sbi->quota_sem);
ret = dquot_acquire(dquot);
if (ret < 0)
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
up_read(&sbi->quota_sem);
f2fs_up_read(&sbi->quota_sem);
return ret;
}

@ -3390,14 +3390,14 @@ static void init_sb_info(struct f2fs_sb_info *sbi)

INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex);
init_rwsem(&sbi->io_order_lock);
init_f2fs_rwsem(&sbi->io_order_lock);
spin_lock_init(&sbi->cp_lock);

sbi->dirty_device = 0;
spin_lock_init(&sbi->dev_lock);

init_rwsem(&sbi->sb_lock);
init_rwsem(&sbi->pin_sem);
init_f2fs_rwsem(&sbi->sb_lock);
init_f2fs_rwsem(&sbi->pin_sem);
}

static int init_percpu_info(struct f2fs_sb_info *sbi)
@ -3841,11 +3841,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)

/* init f2fs-specific super block info */
sbi->valid_super_block = valid_super_block;
init_rwsem(&sbi->gc_lock);
init_f2fs_rwsem(&sbi->gc_lock);
mutex_init(&sbi->writepages);
init_rwsem(&sbi->cp_global_sem);
init_rwsem(&sbi->node_write);
init_rwsem(&sbi->node_change);
init_f2fs_rwsem(&sbi->cp_global_sem);
init_f2fs_rwsem(&sbi->node_write);
init_f2fs_rwsem(&sbi->node_change);

/* disallow all the data/node/meta page writes */
set_sbi_flag(sbi, SBI_POR_DOING);
@ -3871,18 +3871,18 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
}

for (j = HOT; j < n; j++) {
init_rwsem(&sbi->write_io[i][j].io_rwsem);
init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
sbi->write_io[i][j].sbi = sbi;
sbi->write_io[i][j].bio = NULL;
spin_lock_init(&sbi->write_io[i][j].io_lock);
INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
init_rwsem(&sbi->write_io[i][j].bio_list_lock);
init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
}
}

init_rwsem(&sbi->cp_rwsem);
init_rwsem(&sbi->quota_sem);
init_f2fs_rwsem(&sbi->cp_rwsem);
init_f2fs_rwsem(&sbi->quota_sem);
init_waitqueue_head(&sbi->cp_wait);
init_sb_info(sbi);
@ -363,7 +363,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
if (strlen(name) >= F2FS_EXTENSION_LEN)
return -EINVAL;

down_write(&sbi->sb_lock);
f2fs_down_write(&sbi->sb_lock);

ret = f2fs_update_extension_list(sbi, name, hot, set);
if (ret)
@ -373,7 +373,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
if (ret)
f2fs_update_extension_list(sbi, name, hot, !set);
out:
up_write(&sbi->sb_lock);
f2fs_up_write(&sbi->sb_lock);
return ret ? ret : count;
}
@ -208,7 +208,7 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
* from re-instantiating cached pages we are truncating (since unlike
* normal file accesses, garbage collection isn't limited by i_size).
*/
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
truncate_inode_pages(inode->i_mapping, inode->i_size);
err2 = f2fs_truncate(inode);
if (err2) {
@ -216,7 +216,7 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
err2);
set_sbi_flag(sbi, SBI_NEED_FSCK);
}
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
return err ?: err2;
}
|
||||
if (len > F2FS_NAME_LEN)
|
||||
return -ERANGE;
|
||||
|
||||
down_read(&F2FS_I(inode)->i_xattr_sem);
|
||||
f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
|
||||
error = lookup_all_xattrs(inode, ipage, index, len, name,
|
||||
&entry, &base_addr, &base_size, &is_inline);
|
||||
up_read(&F2FS_I(inode)->i_xattr_sem);
|
||||
f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
@ -566,9 +566,9 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
|
||||
int error;
|
||||
size_t rest = buffer_size;
|
||||
|
||||
down_read(&F2FS_I(inode)->i_xattr_sem);
|
||||
f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
|
||||
error = read_all_xattrs(inode, NULL, &base_addr);
|
||||
up_read(&F2FS_I(inode)->i_xattr_sem);
|
||||
f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
@ -781,9 +781,9 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
|
||||
f2fs_balance_fs(sbi, true);
|
||||
|
||||
f2fs_lock_op(sbi);
|
||||
down_write(&F2FS_I(inode)->i_xattr_sem);
|
||||
f2fs_down_write(&F2FS_I(inode)->i_xattr_sem);
|
||||
err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags);
|
||||
up_write(&F2FS_I(inode)->i_xattr_sem);
|
||||
f2fs_up_write(&F2FS_I(inode)->i_xattr_sem);
|
||||
f2fs_unlock_op(sbi);
|
||||
|
||||
f2fs_update_time(sbi, REQ_TIME);
|
||||
|
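The write-side and query helpers used throughout the diff (f2fs_down_write, f2fs_up_write, the trylock variants, f2fs_rwsem_is_locked/f2fs_rwsem_is_contended, init_f2fs_rwsem) reduce to thin forwards onto the embedded rw_semaphore. The sketch below is again an assumption consistent with the call sites, with lockdep annotations omitted, rather than the exact f2fs.h hunk.

#include <linux/rwsem.h>
#include <linux/wait.h>

/* Sketch: write-side counterparts of the read-side helpers sketched earlier. */
static inline void init_f2fs_rwsem(struct f2fs_rwsem *sem)
{
	init_rwsem(&sem->internal_rwsem);
	init_waitqueue_head(&sem->read_waiters);
}

static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
{
	return rwsem_is_locked(&sem->internal_rwsem);
}

static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
{
	return rwsem_is_contended(&sem->internal_rwsem);
}

static inline void f2fs_down_write(struct f2fs_rwsem *sem)
{
	down_write(&sem->internal_rwsem);
}

static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
{
	return down_write_trylock(&sem->internal_rwsem);
}

static inline void f2fs_up_write(struct f2fs_rwsem *sem)
{
	up_write(&sem->internal_rwsem);
	/* writer gone: wake readers parked in f2fs_down_read() so they retry */
	wake_up_all(&sem->read_waiters);
}

With readers parked on the side wait queue rather than on the rwsem's own list, a blocked down_write() only waits for the current lock holders; nothing queued ahead of it can delay it, which is the behaviour the cp_global_sem, gc_lock and nat_tree_lock call sites above depend on.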