Btrfs: remove duplicates of filemap_ helpers
Use filemap_fdatawrite_range and filemap_fdatawait_range instead of local copies of the functions. For filemap_fdatawait_range that also means replacing the awkward old wait_on_page_writeback_range calling convention with the regular filemap byte offsets.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 25472b880c
commit 8aa38c31b7
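A quick sketch of the calling-convention change described above (illustration only, not part of the patch; mapping, start and end here are placeholder names for the byte range being flushed):

	/* old local helper: took page indexes, so callers had to shift byte offsets */
	btrfs_wait_on_page_writeback_range(mapping,
					   start >> PAGE_CACHE_SHIFT,
					   end >> PAGE_CACHE_SHIFT);

	/* generic helper from mm/filemap.c: takes inclusive byte offsets directly */
	filemap_fdatawait_range(mapping, start, end);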
@@ -822,16 +822,14 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 
 int btrfs_write_tree_block(struct extent_buffer *buf)
 {
-	return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
-				      buf->start + buf->len - 1, WB_SYNC_ALL);
+	return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
+					buf->start + buf->len - 1);
 }
 
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
 {
-	return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
-				  buf->start >> PAGE_CACHE_SHIFT,
-				  (buf->start + buf->len - 1) >>
-				   PAGE_CACHE_SHIFT);
+	return filemap_fdatawait_range(buf->first_page->mapping,
+				       buf->start, buf->start + buf->len - 1);
 }
 
 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
@@ -1022,9 +1022,8 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
 	}
 
 	if (will_write) {
-		btrfs_fdatawrite_range(inode->i_mapping, pos,
-				       pos + write_bytes - 1,
-				       WB_SYNC_ALL);
+		filemap_fdatawrite_range(inode->i_mapping, pos,
+					 pos + write_bytes - 1);
 	} else {
 		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
 						   num_pages);
@@ -458,7 +458,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
 	 * start IO on any dirty ones so the wait doesn't stall waiting
 	 * for pdflush to find them
 	 */
-	btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL);
+	filemap_fdatawrite_range(inode->i_mapping, start, end);
 	if (wait) {
 		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
 						 &entry->flags));
@@ -488,17 +488,15 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 	/* start IO across the range first to instantiate any delalloc
 	 * extents
 	 */
-	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);
+	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 
 	/* The compression code will leave pages locked but return from
 	 * writepage without setting the page writeback. Starting again
 	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
 	 */
-	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);
+	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 
-	btrfs_wait_on_page_writeback_range(inode->i_mapping,
-					   start >> PAGE_CACHE_SHIFT,
-					   orig_end >> PAGE_CACHE_SHIFT);
+	filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 
 	end = orig_end;
 	found = 0;
@@ -716,89 +714,6 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
 }
 
 
-/**
- * taken from mm/filemap.c because it isn't exported
- *
- * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
- * @mapping:	address space structure to write
- * @start:	offset in bytes where the range starts
- * @end:	offset in bytes where the range ends (inclusive)
- * @sync_mode:	enable synchronous operation
- *
- * Start writeback against all of a mapping's dirty pages that lie
- * within the byte offsets <start, end> inclusive.
- *
- * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
- * opposed to a regular memory cleansing writeback. The difference between
- * these two operations is that if a dirty page/buffer is encountered, it must
- * be waited upon, and not just skipped over.
- */
-int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
-			   loff_t end, int sync_mode)
-{
-	struct writeback_control wbc = {
-		.sync_mode = sync_mode,
-		.nr_to_write = mapping->nrpages * 2,
-		.range_start = start,
-		.range_end = end,
-	};
-	return btrfs_writepages(mapping, &wbc);
-}
-
-/**
- * taken from mm/filemap.c because it isn't exported
- *
- * wait_on_page_writeback_range - wait for writeback to complete
- * @mapping:	target address_space
- * @start:	beginning page index
- * @end:	ending page index
- *
- * Wait for writeback to complete against pages indexed by start->end
- * inclusive
- */
-int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
-				       pgoff_t start, pgoff_t end)
-{
-	struct pagevec pvec;
-	int nr_pages;
-	int ret = 0;
-	pgoff_t index;
-
-	if (end < start)
-		return 0;
-
-	pagevec_init(&pvec, 0);
-	index = start;
-	while ((index <= end) &&
-			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-			PAGECACHE_TAG_WRITEBACK,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
-		unsigned i;
-
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
-
-			/* until radix tree lookup accepts end_index */
-			if (page->index > end)
-				continue;
-
-			wait_on_page_writeback(page);
-			if (PageError(page))
-				ret = -EIO;
-		}
-		pagevec_release(&pvec);
-		cond_resched();
-	}
-
-	/* Check for outstanding write errors */
-	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
-		ret = -ENOSPC;
-	if (test_and_clear_bit(AS_EIO, &mapping->flags))
-		ret = -EIO;
-
-	return ret;
-}
-
 /*
  * add a given inode to the list of inodes that must be fully on
  * disk before a transaction commit finishes.
@@ -153,10 +153,6 @@ btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
 int btrfs_ordered_update_i_size(struct inode *inode,
 				struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
-				       pgoff_t start, pgoff_t end);
-int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
-			   loff_t end, int sync_mode);
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);
 int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
 int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
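For reference, the exported helpers now used in place of the removed prototypes are declared in include/linux/fs.h; their signatures (inclusive byte offsets, paraphrased from the kernel headers of this era, not part of the diff) look roughly like this:

	/* start writeback on the dirty pages in the given byte range */
	int filemap_fdatawrite_range(struct address_space *mapping,
				     loff_t start, loff_t end);
	/* wait for writeback to complete on the given byte range */
	int filemap_fdatawait_range(struct address_space *mapping,
				    loff_t start_byte, loff_t end_byte);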