Merge f8142cf94d ("hugetlb: make hugetlb depends on SYSFS or SYSCTL") into android-mainline

Steps on the way to 6.1-rc1

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I47466de91cf736e2f4b76988f029482ebf963051
commit 22754e80f7
Author: Greg Kroah-Hartman <gregkh@google.com>
Date: 2022-10-20 11:56:25 +02:00
21 changed files with 169 additions and 201 deletions

fs/Kconfig

@@ -236,6 +236,7 @@ config ARCH_SUPPORTS_HUGETLBFS
config HUGETLBFS
bool "HugeTLB file system support"
depends on X86 || IA64 || SPARC64 || ARCH_SUPPORTS_HUGETLBFS || BROKEN
depends on (SYSFS || SYSCTL)
help
hugetlbfs is a filesystem backing for HugeTLB pages, based on
ramfs. For architectures that support it, say Y here and read

fs/buffer.c

@@ -152,7 +152,7 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
/*
* Default synchronous end-of-IO handler.. Just mark it up-to-date and
* unlock the buffer. This is what ll_rw_block uses too.
* unlock the buffer.
*/
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
@@ -491,8 +491,8 @@ int inode_has_buffers(struct inode *inode)
* all already-submitted IO to complete, but does not queue any new
* writes to the disk.
*
* To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
* you dirty the buffers, and then use osync_inode_buffers to wait for
* To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
* as you dirty the buffers, and then use osync_inode_buffers to wait for
* completion. Any other dirty buffers which are not yet queued for
* write will not be flushed to disk by the osync.
*/
@@ -562,7 +562,7 @@ void write_boundary_block(struct block_device *bdev,
struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
if (bh) {
if (buffer_dirty(bh))
ll_rw_block(REQ_OP_WRITE, 1, &bh);
write_dirty_buffer(bh, 0);
put_bh(bh);
}
}
@@ -1342,23 +1342,12 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = __getblk(bdev, block, size);
if (likely(bh)) {
ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
bh_readahead(bh, REQ_RAHEAD);
brelse(bh);
}
}
EXPORT_SYMBOL(__breadahead);
void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
gfp_t gfp)
{
struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
if (likely(bh)) {
ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
brelse(bh);
}
}
EXPORT_SYMBOL(__breadahead_gfp);
/**
* __bread_gfp() - reads a specified block and returns the bh
* @bdev: the block_device to read from
@@ -1817,7 +1806,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
/*
* The page was marked dirty, but the buffers were
* clean. Someone wrote them back by hand with
* ll_rw_block/submit_bh. A rare case.
* write_dirty_buffer/submit_bh. A rare case.
*/
end_page_writeback(page);
@@ -2033,7 +2022,7 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
ll_rw_block(REQ_OP_READ, 1, &bh);
bh_read_nowait(bh, 0);
*wait_bh++=bh;
}
}
@@ -2593,11 +2582,9 @@ int block_truncate_page(struct address_space *mapping,
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
err = -EIO;
ll_rw_block(REQ_OP_READ, 1, &bh);
wait_on_buffer(bh);
err = bh_read(bh, 0);
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
if (err < 0)
goto unlock;
}
@@ -2725,61 +2712,6 @@ void submit_bh(blk_opf_t opf, struct buffer_head *bh)
}
EXPORT_SYMBOL(submit_bh);
/**
* ll_rw_block: low-level access to block devices (DEPRECATED)
* @opf: block layer request operation and flags.
* @nr: number of &struct buffer_heads in the array
* @bhs: array of pointers to &struct buffer_head
*
* ll_rw_block() takes an array of pointers to &struct buffer_heads, and
* requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE.
* @opf contains flags modifying the detailed I/O behavior, most notably
* %REQ_RAHEAD.
*
* This function drops any buffer that it cannot get a lock on (with the
* BH_Lock state bit), any buffer that appears to be clean when doing a write
* request, and any buffer that appears to be up-to-date when doing read
* request. Further it marks as clean buffers that are processed for
* writing (the buffer cache won't assume that they are actually clean
* until the buffer gets unlocked).
*
* ll_rw_block sets b_end_io to simple completion handler that marks
* the buffer up-to-date (if appropriate), unlocks the buffer and wakes
* any waiters.
*
* All of the buffers must be for the same device, and must also be a
* multiple of the current approved size for the device.
*/
void ll_rw_block(const blk_opf_t opf, int nr, struct buffer_head *bhs[])
{
const enum req_op op = opf & REQ_OP_MASK;
int i;
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
if (!trylock_buffer(bh))
continue;
if (op == REQ_OP_WRITE) {
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
submit_bh(opf, bh);
continue;
}
} else {
if (!buffer_uptodate(bh)) {
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
submit_bh(opf, bh);
continue;
}
}
unlock_buffer(bh);
}
}
EXPORT_SYMBOL(ll_rw_block);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
{
lock_buffer(bh);
@@ -3026,29 +2958,69 @@ int bh_uptodate_or_lock(struct buffer_head *bh)
EXPORT_SYMBOL(bh_uptodate_or_lock);
/**
* bh_submit_read - Submit a locked buffer for reading
* __bh_read - Submit read for a locked buffer
* @bh: struct buffer_head
* @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
* @wait: wait until reading finish
*
* Returns zero on success and -EIO on error.
* Returns zero on success or don't wait, and -EIO on error.
*/
int bh_submit_read(struct buffer_head *bh)
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
{
BUG_ON(!buffer_locked(bh));
int ret = 0;
if (buffer_uptodate(bh)) {
unlock_buffer(bh);
return 0;
}
BUG_ON(!buffer_locked(bh));
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(REQ_OP_READ, bh);
submit_bh(REQ_OP_READ | op_flags, bh);
if (wait) {
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return 0;
return -EIO;
if (!buffer_uptodate(bh))
ret = -EIO;
}
return ret;
}
EXPORT_SYMBOL(bh_submit_read);
EXPORT_SYMBOL(__bh_read);
/**
* __bh_read_batch - Submit read for a batch of unlocked buffers
* @nr: entry number of the buffer batch
* @bhs: a batch of struct buffer_head
* @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
* @force_lock: force to get a lock on the buffer if set, otherwise drops any
* buffer that cannot lock.
*
* Returns zero on success or don't wait, and -EIO on error.
*/
void __bh_read_batch(int nr, struct buffer_head *bhs[],
blk_opf_t op_flags, bool force_lock)
{
int i;
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
if (buffer_uptodate(bh))
continue;
if (force_lock)
lock_buffer(bh);
else
if (!trylock_buffer(bh))
continue;
if (buffer_uptodate(bh)) {
unlock_buffer(bh);
continue;
}
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
submit_bh(REQ_OP_READ | op_flags, bh);
}
}
EXPORT_SYMBOL(__bh_read_batch);
void __init buffer_init(void)
{

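The fs/buffer.c hunks above set the conversion pattern repeated in every filesystem below: an asynchronous ll_rw_block() submission followed by wait_on_buffer() and a re-test of the uptodate flag collapses into one bh_read() call whose return value carries the result. A minimal before/after sketch of the read path (the surrounding error handling is illustrative, not taken verbatim from this commit):

	/* Before: submit by hand, wait, then re-test the flag. */
	if (!buffer_uptodate(bh)) {
		ll_rw_block(REQ_OP_READ, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			return -EIO;
	}

	/* After: bh_read() locks, submits, waits, and returns 1 if the
	 * buffer was already uptodate, 0 on a successful read, or -EIO
	 * on error. */
	if (bh_read(bh, 0) < 0)
		return -EIO;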
fs/ext2/balloc.c

@@ -126,6 +126,7 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
struct ext2_group_desc * desc;
struct buffer_head * bh = NULL;
ext2_fsblk_t bitmap_blk;
int ret;
desc = ext2_get_group_desc(sb, block_group, NULL);
if (!desc)
@@ -139,10 +140,10 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
block_group, le32_to_cpu(desc->bg_block_bitmap));
return NULL;
}
if (likely(bh_uptodate_or_lock(bh)))
ret = bh_read(bh, 0);
if (ret > 0)
return bh;
if (bh_submit_read(bh) < 0) {
if (ret < 0) {
brelse(bh);
ext2_error(sb, __func__,
"Cannot read block bitmap - "

fs/gfs2/meta_io.c

@@ -525,8 +525,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
if (buffer_uptodate(first_bh))
goto out;
if (!buffer_locked(first_bh))
ll_rw_block(REQ_OP_READ | REQ_META | REQ_PRIO, 1, &first_bh);
bh_read_nowait(first_bh, REQ_META | REQ_PRIO);
dblock++;
extlen--;
@@ -534,9 +533,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
while (extlen) {
bh = gfs2_getbuf(gl, dblock, CREATE);
if (!buffer_uptodate(bh) && !buffer_locked(bh))
ll_rw_block(REQ_OP_READ | REQ_RAHEAD | REQ_META |
REQ_PRIO, 1, &bh);
bh_readahead(bh, REQ_RAHEAD | REQ_META | REQ_PRIO);
brelse(bh);
dblock++;
extlen--;

fs/gfs2/quota.c

@@ -745,12 +745,8 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
}
if (PageUptodate(page))
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
ll_rw_block(REQ_OP_READ | REQ_META | REQ_PRIO, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
goto unlock_out;
}
if (gfs2_is_jdata(ip))
gfs2_trans_add_data(ip->i_gl, bh);
else

fs/isofs/compress.c

@@ -82,7 +82,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
return 0;
}
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
ll_rw_block(REQ_OP_READ, haveblocks, bhs);
bh_read_batch(haveblocks, bhs);
curbh = 0;
curpage = 0;

fs/jbd2/journal.c

@@ -1898,20 +1898,17 @@ static int journal_get_superblock(journal_t *journal)
{
struct buffer_head *bh;
journal_superblock_t *sb;
int err = -EIO;
int err;
bh = journal->j_sb_buffer;
J_ASSERT(bh != NULL);
if (!buffer_uptodate(bh)) {
ll_rw_block(REQ_OP_READ, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
err = bh_read(bh, 0);
if (err < 0) {
printk(KERN_ERR
"JBD2: IO error reading journal superblock\n");
goto out;
}
}
if (buffer_verified(bh))
return 0;

fs/jbd2/recovery.c

@@ -100,7 +100,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
bufs[nbufs++] = bh;
if (nbufs == MAXBUF) {
ll_rw_block(REQ_OP_READ, nbufs, bufs);
bh_readahead_batch(nbufs, bufs, 0);
journal_brelse_array(bufs, nbufs);
nbufs = 0;
}
@@ -109,7 +109,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
}
if (nbufs)
ll_rw_block(REQ_OP_READ, nbufs, bufs);
bh_readahead_batch(nbufs, bufs, 0);
err = 0;
failed:
@@ -152,9 +152,14 @@ static int jread(struct buffer_head **bhp, journal_t *journal,
return -ENOMEM;
if (!buffer_uptodate(bh)) {
/* If this is a brand new buffer, start readahead.
Otherwise, we assume we are already reading it. */
if (!buffer_req(bh))
/*
* If this is a brand new buffer, start readahead.
* Otherwise, we assume we are already reading it.
*/
bool need_readahead = !buffer_req(bh);
bh_read_nowait(bh, 0);
if (need_readahead)
do_readahead(journal, offset);
wait_on_buffer(bh);
}
@@ -688,7 +693,6 @@ static int do_one_pass(journal_t *journal,
mark_buffer_dirty(nbh);
BUFFER_TRACE(nbh, "marking uptodate");
++info->nr_replays;
/* ll_rw_block(WRITE, 1, &nbh); */
unlock_buffer(nbh);
brelse(obh);
brelse(nbh);

fs/ntfs3/inode.c

@@ -630,12 +630,9 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
bh->b_size = block_size;
off = vbo & (PAGE_SIZE - 1);
set_bh_page(bh, page, off);
ll_rw_block(REQ_OP_READ, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
err = -EIO;
err = bh_read(bh, 0);
if (err < 0)
goto out;
}
zero_user_segment(page, off + voff, off + block_size);
}
}

fs/ocfs2/aops.c

@@ -636,7 +636,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
!buffer_new(bh) &&
ocfs2_should_read_blk(inode, page, block_start) &&
(block_start < from || block_end > to)) {
ll_rw_block(REQ_OP_READ, 1, &bh);
bh_read_nowait(bh, 0);
*wait_bh++=bh;
}

fs/ocfs2/super.c

@@ -1764,9 +1764,7 @@ static int ocfs2_get_sector(struct super_block *sb,
if (!buffer_dirty(*bh))
clear_buffer_uptodate(*bh);
unlock_buffer(*bh);
ll_rw_block(REQ_OP_READ, 1, bh);
wait_on_buffer(*bh);
if (!buffer_uptodate(*bh)) {
if (bh_read(*bh, 0) < 0) {
mlog_errno(-EIO);
brelse(*bh);
*bh = NULL;

fs/reiserfs/journal.c

@@ -868,7 +868,7 @@ static int write_ordered_buffers(spinlock_t * lock,
*/
if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
spin_unlock(lock);
ll_rw_block(REQ_OP_WRITE, 1, &bh);
write_dirty_buffer(bh, 0);
spin_lock(lock);
}
put_bh(bh);
@@ -1054,7 +1054,7 @@ static int flush_commit_list(struct super_block *s,
if (tbh) {
if (buffer_dirty(tbh)) {
depth = reiserfs_write_unlock_nested(s);
ll_rw_block(REQ_OP_WRITE, 1, &tbh);
write_dirty_buffer(tbh, 0);
reiserfs_write_lock_nested(s, depth);
}
put_bh(tbh) ;
@@ -2240,7 +2240,7 @@ static int journal_read_transaction(struct super_block *sb,
}
}
/* read in the log blocks, memcpy to the corresponding real block */
ll_rw_block(REQ_OP_READ, get_desc_trans_len(desc), log_blocks);
bh_read_batch(get_desc_trans_len(desc), log_blocks);
for (i = 0; i < get_desc_trans_len(desc); i++) {
wait_on_buffer(log_blocks[i]);
@@ -2342,10 +2342,11 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
} else
bhlist[j++] = bh;
}
ll_rw_block(REQ_OP_READ, j, bhlist);
bh = bhlist[0];
bh_read_nowait(bh, 0);
bh_readahead_batch(j - 1, &bhlist[1], 0);
for (i = 1; i < j; i++)
brelse(bhlist[i]);
bh = bhlist[0];
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;

fs/reiserfs/stree.c

@@ -579,7 +579,7 @@ static int search_by_key_reada(struct super_block *s,
if (!buffer_uptodate(bh[j])) {
if (depth == -1)
depth = reiserfs_write_unlock_nested(s);
ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, bh + j);
bh_readahead(bh[j], REQ_RAHEAD);
}
brelse(bh[j]);
}
@@ -685,7 +685,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
if (!buffer_uptodate(bh) && depth == -1)
depth = reiserfs_write_unlock_nested(sb);
ll_rw_block(REQ_OP_READ, 1, &bh);
bh_read_nowait(bh, 0);
wait_on_buffer(bh);
if (depth != -1)

fs/reiserfs/super.c

@@ -1702,9 +1702,7 @@ static int read_super_block(struct super_block *s, int offset)
/* after journal replay, reread all bitmap and super blocks */
static int reread_meta_blocks(struct super_block *s)
{
ll_rw_block(REQ_OP_READ, 1, &SB_BUFFER_WITH_SB(s));
wait_on_buffer(SB_BUFFER_WITH_SB(s));
if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
if (bh_read(SB_BUFFER_WITH_SB(s), 0) < 0) {
reiserfs_warning(s, "reiserfs-2504", "error reading the super");
return 1;
}

fs/udf/dir.c

@@ -130,7 +130,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
brelse(tmp);
}
if (num) {
ll_rw_block(REQ_OP_READ | REQ_RAHEAD, num, bha);
bh_readahead_batch(num, bha, REQ_RAHEAD);
for (i = 0; i < num; i++)
brelse(bha[i]);
}

fs/udf/directory.c

@@ -89,7 +89,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
brelse(tmp);
}
if (num) {
ll_rw_block(REQ_OP_READ | REQ_RAHEAD, num, bha);
bh_readahead_batch(num, bha, REQ_RAHEAD);
for (i = 0; i < num; i++)
brelse(bha[i]);
}

fs/udf/inode.c

@@ -1211,13 +1211,7 @@ struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
if (!bh)
return NULL;
if (buffer_uptodate(bh))
return bh;
ll_rw_block(REQ_OP_READ, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
if (bh_read(bh, 0) >= 0)
return bh;
brelse(bh);

fs/ufs/balloc.c

@@ -295,15 +295,11 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
if (!buffer_mapped(bh))
map_bh(bh, inode->i_sb, oldb + pos);
if (!buffer_uptodate(bh)) {
ll_rw_block(REQ_OP_READ, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
if (bh_read(bh, 0) < 0) {
ufs_error(inode->i_sb, __func__,
"read of block failed\n");
break;
}
}
UFSD(" change from %llu to %llu, pos %u\n",
(unsigned long long)(pos + oldb),

include/linux/buffer_head.h

@@ -225,8 +225,6 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
@@ -236,7 +234,6 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(blk_opf_t, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
@@ -244,7 +241,9 @@ void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
blk_opf_t op_flags, bool force_lock);
extern int buffer_heads_over_limit;
@@ -351,12 +350,6 @@ sb_breadahead(struct super_block *sb, sector_t block)
__breadahead(sb->s_bdev, block, sb->s_blocksize);
}
static inline void
sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
__breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}
static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
@@ -418,6 +411,41 @@ static inline struct buffer_head *__getblk(struct block_device *bdev,
return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}
static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
if (!buffer_uptodate(bh))
__bh_read(bh, op_flags, false);
else
unlock_buffer(bh);
}
}
static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
{
if (!bh_uptodate_or_lock(bh))
__bh_read(bh, op_flags, false);
}
/* Returns 1 if buffer uptodated, 0 on success, and -EIO on error. */
static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
if (bh_uptodate_or_lock(bh))
return 1;
return __bh_read(bh, op_flags, true);
}
static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
__bh_read_batch(nr, bhs, 0, true);
}
static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
blk_opf_t op_flags)
{
__bh_read_batch(nr, bhs, op_flags, false);
}
/**
* __bread() - reads a specified block and returns the bh
* @bdev: the block_device to read from

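Taken together, the new inline helpers split the old ll_rw_block() semantics by caller intent: bh_read() for synchronous reads, bh_read_nowait() when the caller will wait on the buffer later, bh_readahead() and bh_readahead_batch() for best-effort readahead that silently skips locked or uptodate buffers, and bh_read_batch() when every buffer in an array must be submitted. A hedged usage sketch (NR_BUFS and the surrounding lookup are illustrative, not part of this API):

	struct buffer_head *bhs[NR_BUFS];	/* assume filled via __getblk() */
	int i, err;

	/* Opportunistic readahead across the batch; errors are ignored. */
	bh_readahead_batch(NR_BUFS, bhs, REQ_RAHEAD);

	/* Synchronously read the one buffer needed right now:
	 * 1 = already uptodate, 0 = read completed, -EIO = failure. */
	err = bh_read(bhs[0], 0);
	if (err < 0)
		pr_err("buffer read failed\n");

	for (i = 0; i < NR_BUFS; i++)
		brelse(bhs[i]);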
include/linux/hugetlb.h

@@ -1123,14 +1123,10 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

mm/hugetlb.c

@@ -456,14 +456,12 @@ static int allocate_file_region_entries(struct resv_map *resv,
int regions_needed)
__must_hold(&resv->lock)
{
struct list_head allocated_regions;
LIST_HEAD(allocated_regions);
int to_allocate = 0, i = 0;
struct file_region *trg = NULL, *rg = NULL;
VM_BUG_ON(regions_needed < 0);
INIT_LIST_HEAD(&allocated_regions);
/*
* Check for sufficient descriptors in the cache to accommodate
* the number of in progress add operations plus regions_needed.
@@ -1506,6 +1504,10 @@ static void add_hugetlb_page(struct hstate *h, struct page *page,
set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
set_page_private(page, 0);
/*
* We have to set HPageVmemmapOptimized again as above
* set_page_private(page, 0) cleared it.
*/
SetHPageVmemmapOptimized(page);
/*
@@ -2336,7 +2338,7 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
static int gather_surplus_pages(struct hstate *h, long delta)
__must_hold(&hugetlb_lock)
{
struct list_head surplus_list;
LIST_HEAD(surplus_list);
struct page *page, *tmp;
int ret;
long i;
@@ -2351,7 +2353,6 @@ static int gather_surplus_pages(struct hstate *h, long delta)
}
allocated = 0;
INIT_LIST_HEAD(&surplus_list);
ret = -ENOMEM;
retry:
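The LIST_HEAD() conversions above, in allocate_file_region_entries() and gather_surplus_pages(), are pure simplification: the macro declares the list head and initializes it to point at itself in one step, which is why the separate INIT_LIST_HEAD() calls are dropped. From include/linux/list.h:

	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	#define LIST_HEAD(name) \
		struct list_head name = LIST_HEAD_INIT(name)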
@@ -3768,8 +3769,7 @@ HSTATE_ATTR_WO(demote);
static ssize_t demote_size_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int nid;
struct hstate *h = kobj_to_hstate(kobj, &nid);
struct hstate *h = kobj_to_hstate(kobj, NULL);
unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
return sysfs_emit(buf, "%lukB\n", demote_size);
@@ -3782,7 +3782,6 @@ static ssize_t demote_size_store(struct kobject *kobj,
struct hstate *h, *demote_hstate;
unsigned long demote_size;
unsigned int demote_order;
int nid;
demote_size = (unsigned long)memparse(buf, NULL);
@@ -3794,7 +3793,7 @@ static ssize_t demote_size_store(struct kobject *kobj,
return -EINVAL;
/* demote order must be smaller than hstate order */
h = kobj_to_hstate(kobj, &nid);
h = kobj_to_hstate(kobj, NULL);
if (demote_order >= h->order)
return -EINVAL;
@@ -4032,6 +4031,14 @@ static void hugetlb_register_all_nodes(void) { }
#endif
#ifdef CONFIG_CMA
static void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_check(void)
{
}
#endif
static int __init hugetlb_init(void)
{
int i;
@@ -4131,7 +4138,7 @@ void __init hugetlb_add_hstate(unsigned int order)
h->next_nid_to_alloc = first_memory_node;
h->next_nid_to_free = first_memory_node;
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
huge_page_size(h)/SZ_1K);
parsed_hstate = h;
}
@@ -4146,11 +4153,11 @@ static void __init hugepages_clear_pages_in_node(void)
if (!hugetlb_max_hstate) {
default_hstate_max_huge_pages = 0;
memset(default_hugepages_in_node, 0,
MAX_NUMNODES * sizeof(unsigned int));
sizeof(default_hugepages_in_node));
} else {
parsed_hstate->max_huge_pages = 0;
memset(parsed_hstate->max_huge_pages_node, 0,
MAX_NUMNODES * sizeof(unsigned int));
sizeof(parsed_hstate->max_huge_pages_node));
}
}
@@ -5340,7 +5347,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
u32 hash;
put_page(old_page);
BUG_ON(huge_pte_none(pte));
/*
* Drop hugetlb_fault_mutex and i_mmap_rwsem before
* unmapping. unmapping needs to hold i_mmap_rwsem
@@ -5432,19 +5438,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{
struct address_space *mapping;
pgoff_t idx;
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, address);
return find_lock_page(mapping, idx);
}
/*
* Return whether there is a pagecache page to back given address within VMA.
* Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
@@ -5839,7 +5832,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* Just decrements count, does not deallocate */
vma_end_reservation(h, vma, haddr);
pagecache_page = hugetlbfs_pagecache_page(h, vma, haddr);
pagecache_page = find_lock_page(mapping, idx);
}
ptl = huge_pte_lock(h, mm, ptep);
@@ -6046,8 +6039,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
page_in_pagecache = true;
}
ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
spin_lock(ptl);
ptl = huge_pte_lock(h, dst_mm, dst_pte);
/*
* Recheck the i_size after holding PT lock to make sure not
@@ -7363,7 +7355,7 @@ void __init hugetlb_cma_reserve(int order)
hugetlb_cma_size = 0;
}
void __init hugetlb_cma_check(void)
static void __init hugetlb_cma_check(void)
{
if (!hugetlb_cma_size || cma_reserve_called)
return;