iomap: Convert read_count to read_bytes_pending
Instead of counting bio segments, count the number of bytes submitted. This insulates us from the block layer's definition of what a 'same page' is, which is not necessarily clear once THPs are involved.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
commit 7d636676d2
parent 0a195b91e8
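Before the diff, here is a minimal user-space sketch of the accounting pattern the patch adopts, using C11 atomics rather than the kernel's atomic_t API. struct demo_page and the demo_* helpers are hypothetical stand-ins for struct iomap_page, the submission path in iomap_readpage_actor(), and the completion path in iomap_read_page_end_io():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-page state in struct iomap_page. */
struct demo_page {
	atomic_int read_bytes_pending;	/* bytes submitted but not yet completed */
	bool locked;
};

/* Submission side: account for a byte range before its bio goes out. */
static void demo_submit(struct demo_page *p, int plen)
{
	atomic_fetch_add(&p->read_bytes_pending, plen);
}

/*
 * Completion side: subtract the completed byte count.  Whichever
 * completion brings the counter to zero unlocks the page; this mirrors
 * atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending) in the
 * patch (atomic_fetch_sub() returns the previous value, so a return
 * equal to bv_len means the counter just hit zero).
 */
static void demo_complete(struct demo_page *p, int bv_len)
{
	if (atomic_fetch_sub(&p->read_bytes_pending, bv_len) == bv_len)
		p->locked = false;	/* stands in for unlock_page() */
}

int main(void)
{
	struct demo_page p = { .locked = true };

	atomic_init(&p.read_bytes_pending, 0);

	/* Two sub-page ranges, completed in any order. */
	demo_submit(&p, 2048);
	demo_submit(&p, 2048);
	demo_complete(&p, 2048);
	demo_complete(&p, 2048);

	printf("page locked: %s\n", p.locked ? "yes" : "no");
	return 0;
}

Nothing in this sketch cares how the block layer split or merged the two ranges into bio segments; only the byte totals have to balance, which is exactly the insulation the commit message describes.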
@@ -26,7 +26,7 @@
  * to track sub-page uptodate status and I/O completions.
  */
 struct iomap_page {
-	atomic_t		read_count;
+	atomic_t		read_bytes_pending;
 	atomic_t		write_count;
 	spinlock_t		uptodate_lock;
 	unsigned long		uptodate[];
@@ -72,7 +72,7 @@ iomap_page_release(struct page *page)
 
 	if (!iop)
 		return;
-	WARN_ON_ONCE(atomic_read(&iop->read_count));
+	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
 	WARN_ON_ONCE(atomic_read(&iop->write_count));
 	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
 			PageUptodate(page));
@@ -167,13 +167,6 @@ iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
 		SetPageUptodate(page);
 }
 
-static void
-iomap_read_finish(struct iomap_page *iop, struct page *page)
-{
-	if (!iop || atomic_dec_and_test(&iop->read_count))
-		unlock_page(page);
-}
-
 static void
 iomap_read_page_end_io(struct bio_vec *bvec, int error)
 {
@@ -187,7 +180,8 @@ iomap_read_page_end_io(struct bio_vec *bvec, int error)
 		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
 	}
 
-	iomap_read_finish(iop, page);
+	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
+		unlock_page(page);
 }
 
 static void
@@ -267,30 +261,19 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	}
 
 	ctx->cur_page_in_bio = true;
+	if (iop)
+		atomic_add(plen, &iop->read_bytes_pending);
 
-	/*
-	 * Try to merge into a previous segment if we can.
-	 */
+	/* Try to merge into a previous segment if we can */
 	sector = iomap_sector(iomap, pos);
-	if (ctx->bio && bio_end_sector(ctx->bio) == sector)
+	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
+		if (__bio_try_merge_page(ctx->bio, page, plen, poff,
+				&same_page))
+			goto done;
 		is_contig = true;
+	}
 
-	if (is_contig &&
-	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
-		if (!same_page && iop)
-			atomic_inc(&iop->read_count);
-		goto done;
-	}
-
-	/*
-	 * If we start a new segment we need to increase the read count, and we
-	 * need to do so before submitting any previous full bio to make sure
-	 * that we don't prematurely unlock the page.
-	 */
-	if (iop)
-		atomic_inc(&iop->read_count);
-
-	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
+	if (!is_contig || bio_full(ctx->bio, plen)) {
 		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
 		gfp_t orig_gfp = gfp;
 		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
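Two things the new arrangement buys, both visible in the last hunk. First, the removed comment's warning still holds: the pending-byte count for a range must be raised before any previously built bio is submitted, or an in-flight completion could drop the counter to zero and unlock the page too early. The rewrite preserves that invariant by doing atomic_add(plen, &iop->read_bytes_pending) at the top of iomap_readpage_actor(), before any merge or submission, which is also what lets the old is_contig/read_count bookkeeping collapse into the simpler merge-or-allocate flow. Second, because completion now subtracts bvec->bv_len rather than decrementing a segment count, the accounting no longer depends on whether __bio_try_merge_page() judged two ranges to be the 'same page', a judgment that becomes ambiguous once THPs are involved.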