Merge 2c769ed713 ("tools/testing/selftests/vm/userfaultfd.c: use swap() to make code cleaner") into android-mainline

Steps on the way to 5.17-rc1

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I6a422c8e4512825ec303fceb09a8d21b19bdcda2
Greg Kroah-Hartman 2022-02-11 08:23:06 +01:00
commit 8ad312cdf2
11 changed files with 84 additions and 47 deletions

Documentation/admin-guide/cgroup-v2.rst

@@ -1268,6 +1268,9 @@ PAGE_SIZE multiple when read back.
 	  The number of processes belonging to this cgroup
 	  killed by any kind of OOM killer.
+	  oom_group_kill
+		The number of times a group OOM has occurred.
 	memory.events.local
 		Similar to memory.events but the fields in the file are local
 		to the cgroup i.e. not hierarchical. The file modified event
@@ -1311,6 +1314,9 @@ PAGE_SIZE multiple when read back.
 	  sock (npn)
 		Amount of memory used in network transmission buffers
+	  vmalloc (npn)
+		Amount of memory used for vmap backed memory.
 	  shmem
 		Amount of cached filesystem data that is swap-backed,
 		such as tmpfs, shm segments, shared anonymous mmap()s
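With the two documentation additions above, a cgroup's group-OOM kills become visible in memory.events and its vmap-backed usage in memory.stat. A minimal user-space sketch for watching the new counter; the cgroup path "mygroup" is a made-up example, not part of this patch:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		FILE *f = fopen("/sys/fs/cgroup/mygroup/memory.events", "r");
		char key[64];
		unsigned long val;

		if (!f)
			return 1;
		/* each line of memory.events is "<key> <value>" */
		while (fscanf(f, "%63s %lu", key, &val) == 2) {
			if (strcmp(key, "oom_group_kill") == 0)
				printf("group OOM kills: %lu\n", val);
		}
		fclose(f);
		return 0;
	}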

include/linux/memcontrol.h

@@ -33,6 +33,7 @@ enum memcg_stat_item {
 	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
 	MEMCG_SOCK,
 	MEMCG_PERCPU_B,
+	MEMCG_VMALLOC,
 	MEMCG_NR_STAT,
 };
@@ -42,6 +43,7 @@ enum memcg_memory_event {
 	MEMCG_MAX,
 	MEMCG_OOM,
 	MEMCG_OOM_KILL,
+	MEMCG_OOM_GROUP_KILL,
 	MEMCG_SWAP_HIGH,
 	MEMCG_SWAP_MAX,
 	MEMCG_SWAP_FAIL,
@@ -943,6 +945,21 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
 	local_irq_restore(flags);
 }

+static inline void mod_memcg_page_state(struct page *page,
+					int idx, int val)
+{
+	struct mem_cgroup *memcg;
+
+	if (mem_cgroup_disabled())
+		return;
+
+	rcu_read_lock();
+	memcg = page_memcg(page);
+	if (memcg)
+		mod_memcg_state(memcg, idx, val);
+	rcu_read_unlock();
+}
+
 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 {
 	return READ_ONCE(memcg->vmstats.state[idx]);
@@ -1398,6 +1415,11 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
 {
 }

+static inline void mod_memcg_page_state(struct page *page,
+					int idx, int val)
+{
+}
+
 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 {
 	return 0;
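The new helper resolves a page to its memcg and adjusts a stat under rcu_read_lock(), since the page's memcg binding is only stable inside the RCU section; the empty !CONFIG_MEMCG stub lets callers stay unconditional. A hypothetical call site (my_page is illustrative, not from this patch):

	/* charge one base page of MEMCG_VMALLOC to my_page's memcg ... */
	mod_memcg_page_state(my_page, MEMCG_VMALLOC, 1);
	/* ... and undo it on the free path */
	mod_memcg_page_state(my_page, MEMCG_VMALLOC, -1);

The real consumer is the vmalloc accounting later in this commit.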

mm/frontswap.c

@@ -127,7 +127,7 @@ void frontswap_register_ops(struct frontswap_ops *ops)
 	spin_lock(&swap_lock);
 	plist_for_each_entry(si, &swap_active_head, list) {
 		if (!WARN_ON(!si->frontswap_map))
-			set_bit(si->type, a);
+			__set_bit(si->type, a);
 	}
 	spin_unlock(&swap_lock);
@@ -149,7 +149,7 @@ void frontswap_register_ops(struct frontswap_ops *ops)
 	spin_lock(&swap_lock);
 	plist_for_each_entry(si, &swap_active_head, list) {
 		if (si->frontswap_map)
-			set_bit(si->type, b);
+			__set_bit(si->type, b);
 	}
 	spin_unlock(&swap_lock);
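Both loops run with swap_lock held, so nothing can race on these bitmaps and the atomic set_bit() buys nothing; __set_bit() is the non-atomic variant that compiles to a plain read-modify-write. As a user-space illustration (my_set_bit is made up, not the kernel implementation):

	static inline void my_set_bit(unsigned long nr, unsigned long *bits)
	{
		/* plain load/or/store; no lock prefix, no memory barrier */
		bits[nr / (8 * sizeof(long))] |= 1UL << (nr % (8 * sizeof(long)));
	}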

mm/internal.h

@@ -166,11 +166,6 @@ extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason
  */
 extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

-/*
- * in mm/memcontrol.c:
- */
-extern bool cgroup_memory_nokmem;
-
 /*
  * in mm/page_alloc.c
  */
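The extern can go because cgroup_memory_nokmem becomes static in mm/memcontrol.c (next file), and outside code such as mm/slab_common.c below now asks via mem_cgroup_kmem_disabled(). That accessor's definition is not part of this diff; presumably it reduces to something like:

	/* assumed shape of the accessor; not shown in this commit */
	bool mem_cgroup_kmem_disabled(void)
	{
		return cgroup_memory_nokmem;
	}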

mm/memcontrol.c

@@ -84,7 +84,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
 static bool cgroup_memory_nosocket __ro_after_init;

 /* Kernel memory accounting disabled? */
-bool cgroup_memory_nokmem __ro_after_init;
+static bool cgroup_memory_nokmem __ro_after_init;

 /* Whether the swap controller is active */
 #ifdef CONFIG_MEMCG_SWAP
@@ -629,11 +629,17 @@ static DEFINE_SPINLOCK(stats_flush_lock);
 static DEFINE_PER_CPU(unsigned int, stats_updates);
 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);

-static inline void memcg_rstat_updated(struct mem_cgroup *memcg)
+static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 {
+	unsigned int x;
+
 	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
-	if (!(__this_cpu_inc_return(stats_updates) % MEMCG_CHARGE_BATCH))
-		atomic_inc(&stats_flush_threshold);
+
+	x = __this_cpu_add_return(stats_updates, abs(val));
+	if (x > MEMCG_CHARGE_BATCH) {
+		atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
+		__this_cpu_write(stats_updates, 0);
+	}
 }

 static void __mem_cgroup_flush_stats(void)
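Note on the batching change above: the old code counted one tick per update regardless of magnitude, so a one-page and a thousand-page stat update weighed the same when deciding whether a flush was worthwhile; the new code weights each update by abs(val). Worked example, taking MEMCG_CHARGE_BATCH as 32 for illustration: a single update of val = 4096 adds 4096 to the per-CPU stats_updates, bumps stats_flush_threshold by 4096 / 32 = 128 in one go, and resets the per-CPU counter, so large updates now make a flush fire proportionally sooner.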
@@ -656,7 +662,7 @@ void mem_cgroup_flush_stats(void)
 static void flush_memcg_stats_dwork(struct work_struct *w)
 {
-	mem_cgroup_flush_stats();
+	__mem_cgroup_flush_stats();
 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
 }
@@ -672,7 +678,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 		return;

 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
-	memcg_rstat_updated(memcg);
+	memcg_rstat_updated(memcg, val);
 }

 /* idx can be of type enum memcg_stat_item or node_stat_item. */
@@ -705,7 +711,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 	/* Update lruvec */
 	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);

-	memcg_rstat_updated(memcg);
+	memcg_rstat_updated(memcg, val);
 }

 /**
@@ -789,7 +795,7 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 		return;

 	__this_cpu_add(memcg->vmstats_percpu->events[idx], count);
-	memcg_rstat_updated(memcg);
+	memcg_rstat_updated(memcg, count);
 }

 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
@@ -1369,6 +1375,7 @@ static const struct memory_stat memory_stats[] = {
 	{ "pagetables",		NR_PAGETABLE },
 	{ "percpu",		MEMCG_PERCPU_B },
 	{ "sock",		MEMCG_SOCK },
+	{ "vmalloc",		MEMCG_VMALLOC },
 	{ "shmem",		NR_SHMEM },
 	{ "file_mapped",	NR_FILE_MAPPED },
 	{ "file_dirty",		NR_FILE_DIRTY },
@@ -5121,15 +5128,11 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
 	struct mem_cgroup *memcg;
-	unsigned int size;
 	int node;
 	int __maybe_unused i;
 	long error = -ENOMEM;

-	size = sizeof(struct mem_cgroup);
-	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
-
-	memcg = kzalloc(size, GFP_KERNEL);
+	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
 	if (!memcg)
 		return ERR_PTR(error);
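struct_size() from <linux/overflow.h> replaces the open-coded arithmetic, assuming nodeinfo is a flexible array member of struct mem_cgroup; it computes the same size but saturates rather than wrapping on overflow:

	/* roughly what struct_size(memcg, nodeinfo, nr_node_ids) yields,
	 * minus the overflow checking: */
	size = sizeof(struct mem_cgroup) +
	       nr_node_ids * sizeof(struct mem_cgroup_per_node *);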
@@ -6323,6 +6326,8 @@ static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
 	seq_printf(m, "oom_kill %lu\n",
 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
+	seq_printf(m, "oom_group_kill %lu\n",
+		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
 }

 static int memory_events_show(struct seq_file *m, void *v)

mm/oom_kill.c

@@ -994,6 +994,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 	 * If necessary, kill all tasks in the selected memory cgroup.
 	 */
 	if (oom_group) {
+		memcg_memory_event(oom_group, MEMCG_OOM_GROUP_KILL);
 		mem_cgroup_print_oom_group(oom_group);
 		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
 				      (void *)message);
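This is the producer side of the new counter: a group kill is recorded exactly once per OOM incident against the victim cgroup, right before every task in it is killed, and __memory_events_show() in the previous file is what surfaces the count as oom_group_kill. Per the documentation hunk above, memcg_memory_event() should also account the event hierarchically for memory.events, with memory.events.local staying per-cgroup.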

mm/page_counter.c

@@ -120,7 +120,6 @@ bool page_counter_try_charge(struct page_counter *counter,
 		new = atomic_long_add_return(nr_pages, &c->usage);
 		if (new > c->max) {
 			atomic_long_sub(nr_pages, &c->usage);
-			propagate_protected_usage(c, new);
 			/*
 			 * This is racy, but we can live with some
 			 * inaccuracy in the failcnt which is only used
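The deleted call sat on the failure path: by the time it ran, the speculative charge had already been backed out with atomic_long_sub(), so there was no lasting usage change to propagate, and propagating the transient over-limit value 'new' only skewed the protected-usage bookkeeping. The success-path propagation is untouched.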

mm/shmem.c

@@ -554,7 +554,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 	struct shmem_inode_info *info;
 	struct page *page;
 	unsigned long batch = sc ? sc->nr_to_scan : 128;
-	int removed = 0, split = 0;
+	int split = 0;

 	if (list_empty(&sbinfo->shrinklist))
 		return SHRINK_STOP;
@@ -569,7 +569,6 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 		/* inode is about to be evicted */
 		if (!inode) {
 			list_del_init(&info->shrinklist);
-			removed++;
 			goto next;
 		}
@@ -577,12 +576,12 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
 			list_move(&info->shrinklist, &to_remove);
-			removed++;
 			goto next;
 		}

 		list_move(&info->shrinklist, &list);
 next:
+		sbinfo->shrinklist_len--;
 		if (!--batch)
 			break;
 	}
@@ -602,7 +601,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 		inode = &info->vfs_inode;

 		if (nr_to_split && split >= nr_to_split)
-			goto leave;
+			goto move_back;

 		page = find_get_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
@@ -616,38 +615,44 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 		}

 		/*
-		 * Leave the inode on the list if we failed to lock
-		 * the page at this time.
+		 * Move the inode on the list back to shrinklist if we failed
+		 * to lock the page at this time.
 		 *
 		 * Waiting for the lock may lead to deadlock in the
 		 * reclaim path.
 		 */
 		if (!trylock_page(page)) {
 			put_page(page);
-			goto leave;
+			goto move_back;
 		}

 		ret = split_huge_page(page);
 		unlock_page(page);
 		put_page(page);

-		/* If split failed leave the inode on the list */
+		/* If split failed move the inode on the list back to shrinklist */
 		if (ret)
-			goto leave;
+			goto move_back;

 		split++;
drop:
 		list_del_init(&info->shrinklist);
-		removed++;
-leave:
+		goto put;
+move_back:
+		/*
+		 * Make sure the inode is either on the global list or deleted
+		 * from any local list before iput() since it could be deleted
+		 * in another thread once we put the inode (then the local list
+		 * is corrupted).
+		 */
+		spin_lock(&sbinfo->shrinklist_lock);
+		list_move(&info->shrinklist, &sbinfo->shrinklist);
+		sbinfo->shrinklist_len++;
+		spin_unlock(&sbinfo->shrinklist_lock);
+put:
 		iput(inode);
 	}

 	spin_lock(&sbinfo->shrinklist_lock);
 	list_splice_tail(&list, &sbinfo->shrinklist);
-	sbinfo->shrinklist_len -= removed;
 	spin_unlock(&sbinfo->shrinklist_lock);

 	return split;
 }
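The race this rewrite closes: the old code called iput() while an inode could still sit on the stack-local 'list', and only spliced the survivors back afterwards, fixing up shrinklist_len via 'removed'. But iput() can trigger eviction in another thread, and eviction does a list_del from whatever list the inode is on, corrupting the on-stack list head. Now shrinklist_len is decremented as entries leave the global list, and before every iput() the inode is either deleted from all lists (drop:) or moved back to the global shrinklist under shrinklist_lock (move_back:), so no inode is ever put while parked on a local list.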

mm/slab_common.c

@@ -844,7 +844,7 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
 	if (type == KMALLOC_RECLAIM) {
 		flags |= SLAB_RECLAIM_ACCOUNT;
 	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
-		if (cgroup_memory_nokmem) {
+		if (mem_cgroup_kmem_disabled()) {
 			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
 			return;
 		}
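With cgroup_memory_nokmem now private to mm/memcontrol.c, slab setup queries the accessor instead of peeking at the variable directly; behavior is unchanged, i.e. when kernel-memory accounting is off the KMALLOC_CGROUP caches keep aliasing their KMALLOC_NORMAL counterparts, as the unchanged assignment shows.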

mm/vmalloc.c

@@ -31,6 +31,7 @@
 #include <linux/kmemleak.h>
 #include <linux/atomic.h>
 #include <linux/compiler.h>
+#include <linux/memcontrol.h>
 #include <linux/llist.h>
 #include <linux/bitops.h>
 #include <linux/rbtree_augmented.h>
@@ -2623,12 +2624,13 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	if (deallocate_pages) {
 		unsigned int page_order = vm_area_page_order(area);
-		int i;
+		int i, step = 1U << page_order;

-		for (i = 0; i < area->nr_pages; i += 1U << page_order) {
+		for (i = 0; i < area->nr_pages; i += step) {
 			struct page *page = area->pages[i];

 			BUG_ON(!page);
+			mod_memcg_page_state(page, MEMCG_VMALLOC, -step);
 			__free_pages(page, page_order);
 			cond_resched();
 		}
@@ -2955,6 +2957,13 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 			page_order, nr_small_pages, area->pages);

 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
+	if (gfp_mask & __GFP_ACCOUNT) {
+		int i, step = 1U << page_order;
+
+		for (i = 0; i < area->nr_pages; i += step)
+			mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC,
+					     step);
+	}

 	/*
 	 * If not enough pages were obtained to accomplish an
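The accounting works in base-page units, charged once per compound allocation against its first page. Worked example: with page_order = 2, step is 4; for nr_pages = 16 the charge loop makes four mod_memcg_page_state() calls of +4 each (16 pages total), and __vunmap() above mirrors that with -step per compound allocation just before __free_pages(). Only __GFP_ACCOUNT allocations are charged, so the memory.stat "vmalloc" counter tracks what the memcg was actually billed.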

tools/testing/selftests/vm/userfaultfd.c

@@ -1417,7 +1417,6 @@ static void userfaultfd_pagemap_test(unsigned int test_pgsize)
 static int userfaultfd_stress(void)
 {
 	void *area;
-	char *tmp_area;
 	unsigned long nr;
 	struct uffdio_register uffdio_register;
 	struct uffd_stats uffd_stats[nr_cpus];
@@ -1528,13 +1527,9 @@ static int userfaultfd_stress(void)
 				       count_verify[nr], nr);

 		/* prepare next bounce */
-		tmp_area = area_src;
-		area_src = area_dst;
-		area_dst = tmp_area;
+		swap(area_src, area_dst);

-		tmp_area = area_src_alias;
-		area_src_alias = area_dst_alias;
-		area_dst_alias = tmp_area;
+		swap(area_src_alias, area_dst_alias);

 		uffd_stats_report(uffd_stats, nr_cpus);
 	}
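This is the cleanup named in the merge subject: swap() replaces the manual three-assignment exchange and the now-dead tmp_area. However the selftest obtains the macro (that plumbing is outside this hunk), it is the kernel's usual typeof-based exchange, roughly:

	/* roughly the kernel's swap() macro: */
	#define swap(a, b) \
		do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)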