Merge "taskstats: add support for system stats"

qctecmdr 2020-06-09 14:25:01 -07:00 committed by Gerrit - the friendly Code Review server
commit ff0bdab35c
10 changed files with 299 additions and 1 deletion

View File

@@ -153,8 +153,19 @@ void free_buffer_page(struct ion_system_heap *heap,
ion_page_pool_free_immediate(pool, page);
else
ion_page_pool_free(pool, page);
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
mod_node_page_state(page_pgdat(page), NR_UNRECLAIMABLE_PAGES,
-(1 << pool->order));
#endif
} else {
__free_pages(page, order);
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
mod_node_page_state(page_pgdat(page), NR_UNRECLAIMABLE_PAGES,
-(1 << order));
#endif
}
}
@@ -314,6 +325,12 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
sz = (1 << info->order) * PAGE_SIZE;
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
mod_node_page_state(page_pgdat(info->page),
NR_UNRECLAIMABLE_PAGES,
(1 << (info->order)));
#endif
if (info->from_pool) {
list_add_tail(&info->list, &pages_from_pool);
} else {
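
With CONFIG_MM_STAT_UNRECLAIMABLE_PAGES enabled, both hunks above follow the same pattern: the ION system heap charges the per-node NR_UNRECLAIMABLE_PAGES counter when it hands a compound page out and uncharges it (negative delta) when the page goes back to the pool or the page allocator. A minimal sketch of that pattern, for illustration only (the helper name is hypothetical; mod_node_page_state() and page_pgdat() are the existing kernel APIs used above):

#include <linux/mm.h>
#include <linux/vmstat.h>

/* Illustrative only: charge (alloc == true) or uncharge (alloc == false)
 * a 2^order block of pages against the per-node unreclaimable counter.
 */
static void example_account_unreclaimable(struct page *page,
					  unsigned int order, bool alloc)
{
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
	long nr = 1L << order;

	mod_node_page_state(page_pgdat(page), NR_UNRECLAIMABLE_PAGES,
			    alloc ? nr : -nr);
#endif
}

A negative delta uncharges, which is why the free path above passes -(1 << order).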

View File

@@ -547,6 +547,7 @@ gen_headers_out_arm = [
"linux/synclink.h",
"linux/sysctl.h",
"linux/sysinfo.h",
"linux/sysstats.h",
"linux/target_core_user.h",
"linux/taskstats.h",
"linux/tcp.h",

View File

@@ -542,6 +542,7 @@ gen_headers_out_arm64 = [
"linux/synclink.h",
"linux/sysctl.h",
"linux/sysinfo.h",
"linux/sysstats.h",
"linux/target_core_user.h",
"linux/taskstats.h",
"linux/tcp.h",

View File

@@ -45,6 +45,9 @@ enum {
MM_ANONPAGES, /* Resident anonymous pages */
MM_SWAPENTS, /* Anonymous swap entries */
MM_SHMEMPAGES, /* Resident shared memory pages */
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
MM_UNRECLAIMABLE, /* Unreclaimable pages, e.g. shared with HW */
#endif
NR_MM_COUNTERS
};
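
The new MM_UNRECLAIMABLE slot sits alongside the other per-mm rss counters, so callers would presumably update it through the existing rss-counter helpers (this commit only adds the slot; no user of it is shown in these hunks). A minimal sketch, assuming a hypothetical driver that pins pages on behalf of a task's mm:

#include <linux/mm.h>

/* Illustrative only: charge nr_pages to the task's unreclaimable rss
 * counter; pass a negative value to uncharge when the pages are released.
 */
static void example_charge_mm_unreclaimable(struct mm_struct *mm, long nr_pages)
{
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
	add_mm_counter(mm, MM_UNRECLAIMABLE, nr_pages);
#endif
}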

View File

@@ -259,6 +259,9 @@ enum node_stat_item {
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
NR_UNRECLAIMABLE_PAGES,
#endif
NR_VM_NODE_STAT_ITEMS
};

View File

@@ -0,0 +1,94 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#ifndef _LINUX_SYSSTATS_H
#define _LINUX_SYSSTATS_H
#include <linux/types.h>
#include <linux/taskstats.h>
#include <linux/cgroupstats.h>
#define SYSSTATS_VERSION 1
/*
* Data shared between user space and kernel space
* Each member is aligned to an 8-byte boundary.
* All values in KB.
*/
struct sys_memstats {
__u64 version;
__u64 memtotal;
__u64 vmalloc_total;
__u64 reclaimable;
__u64 zram_compressed;
__u64 swap_used;
__u64 swap_total;
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
__u64 unreclaimable;
#endif
__u64 buffer;
__u64 slab_reclaimable;
__u64 slab_unreclaimable;
__u64 free_cma;
__u64 file_mapped;
__u64 swapcache;
__u64 pagetable;
__u64 kernelstack;
__u64 shmem;
__u64 dma_nr_free_pages;
__u64 dma_nr_active_anon;
__u64 dma_nr_inactive_anon;
__u64 dma_nr_active_file;
__u64 dma_nr_inactive_file;
__u64 normal_nr_free_pages;
__u64 normal_nr_active_anon;
__u64 normal_nr_inactive_anon;
__u64 normal_nr_active_file;
__u64 normal_nr_inactive_file;
__u64 movable_nr_free_pages;
__u64 movable_nr_active_anon;
__u64 movable_nr_inactive_anon;
__u64 movable_nr_active_file;
__u64 movable_nr_inactive_file;
__u64 highmem_nr_free_pages;
__u64 highmem_nr_active_anon;
__u64 highmem_nr_inactive_anon;
__u64 highmem_nr_active_file;
__u64 highmem_nr_inactive_file;
/* version 1 ends here */
};
/*
* Commands sent from userspace
* Not versioned. New commands should only be inserted at the enum's end.
*/
enum {
SYSSTATS_CMD_UNSPEC = __CGROUPSTATS_CMD_MAX, /* Reserved */
SYSSTATS_CMD_GET, /* user->kernel request/get-response */
SYSSTATS_CMD_NEW, /* kernel->user event */
};
#define SYSSTATS_CMD_UNSPEC SYSSTATS_CMD_UNSPEC
#define SYSSTATS_CMD_GET SYSSTATS_CMD_GET
#define SYSSTATS_CMD_NEW SYSSTATS_CMD_NEW
enum {
SYSSTATS_TYPE_UNSPEC = 0, /* Reserved */
SYSSTATS_TYPE_SYSMEM_STATS, /* contains name + memory stats */
};
#define SYSSTATS_TYPE_UNSPEC SYSSTATS_TYPE_UNSPEC
#define SYSSTATS_TYPE_SYSMEM_STATS SYSSTATS_TYPE_SYSMEM_STATS
enum {
SYSSTATS_CMD_ATTR_UNSPEC = 0,
SYSSTATS_CMD_ATTR_SYSMEM_STATS,
};
#define SYSSTATS_CMD_ATTR_UNSPEC SYSSTATS_CMD_ATTR_UNSPEC
#define SYSSTATS_CMD_ATTR_SYSMEM_STATS SYSSTATS_CMD_ATTR_SYSMEM_STATS
#endif /* _LINUX_SYSSTATS_H */
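
For context, userspace fetches struct sys_memstats over the existing taskstats generic netlink family, which the taskstats changes later in this commit wire up. The sketch below is illustrative only and not part of the change: it assumes libnl-genl-3.0 and the installed uapi headers from this patch, resolves the family by TASKSTATS_GENL_NAME, sends SYSSTATS_CMD_GET, and reads the SYSSTATS_TYPE_SYSMEM_STATS payload from the SYSSTATS_CMD_NEW reply.

/* Build with: gcc sysstats_query.c $(pkg-config --cflags --libs libnl-genl-3.0) */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/taskstats.h>
#include <linux/sysstats.h>

/* Parse the kernel's reply and print a couple of fields. */
static int dump_sysmem(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[SYSSTATS_TYPE_SYSMEM_STATS + 1];
	struct sys_memstats *s;

	(void)arg;
	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs,
			  SYSSTATS_TYPE_SYSMEM_STATS, NULL) < 0 ||
	    !attrs[SYSSTATS_TYPE_SYSMEM_STATS])
		return NL_SKIP;

	s = nla_data(attrs[SYSSTATS_TYPE_SYSMEM_STATS]);
	printf("memtotal: %llu kB, swap used: %llu kB\n",
	       (unsigned long long)s->memtotal,
	       (unsigned long long)s->swap_used);
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	if (!sk || genl_connect(sk))
		return 1;
	family = genl_ctrl_resolve(sk, TASKSTATS_GENL_NAME);	/* "TASKSTATS" */
	if (family < 0)
		return 1;

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    SYSSTATS_CMD_GET, TASKSTATS_GENL_VERSION);
	/* The handler does not consume the attribute's value; 0 is a placeholder. */
	nla_put_u32(msg, SYSSTATS_CMD_ATTR_SYSMEM_STATS, 0);

	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, dump_sysmem, NULL);
	nl_send_auto(sk, msg);
	nl_recvmsgs_default(sk);

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}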

View File

@@ -134,6 +134,9 @@ static const char * const resident_page_types[] = {
NAMED_ARRAY_INDEX(MM_ANONPAGES),
NAMED_ARRAY_INDEX(MM_SWAPENTS),
NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
NAMED_ARRAY_INDEX(MM_UNRECLAIMABLE),
#endif
};
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
@@ -663,7 +666,15 @@ static void check_mm(struct mm_struct *mm)
"Please make sure 'struct resident_page_types[]' is updated as well");
for (i = 0; i < NR_MM_COUNTERS; i++) {
- long x = atomic_long_read(&mm->rss_stat.count[i]);
+ long x;
+ #ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
+ /* MM_UNRECLAIMABLE could be freed later in exit_files */
+ if (i == MM_UNRECLAIMABLE)
+ continue;
+ #endif
+ x = atomic_long_read(&mm->rss_stat.count[i]);
if (unlikely(x))
pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",

View File

@@ -13,7 +13,10 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/cgroupstats.h>
#include <linux/sysstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
@@ -48,6 +51,11 @@ static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX
[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};
static const struct nla_policy
sysstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
[SYSSTATS_CMD_ATTR_SYSMEM_STATS] = { .type = NLA_U32 },
};
struct listener {
struct list_head list;
pid_t pid;
@@ -386,6 +394,144 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
return NULL;
}
#define K(x) ((x) << (PAGE_SHIFT - 10))
#ifndef CONFIG_NUMA
static void sysstats_fill_zoneinfo(struct sys_memstats *stats)
{
pg_data_t *pgdat;
struct zone *zone;
struct zone *node_zones;
unsigned long zspages = 0;
pgdat = NODE_DATA(0);
node_zones = pgdat->node_zones;
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
if (!populated_zone(zone))
continue;
zspages += zone_page_state(zone, NR_ZSPAGES);
if (!strcmp(zone->name, "DMA")) {
stats->dma_nr_free_pages =
K(zone_page_state(zone, NR_FREE_PAGES));
stats->dma_nr_active_anon =
K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON));
stats->dma_nr_inactive_anon =
K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON));
stats->dma_nr_active_file =
K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE));
stats->dma_nr_inactive_file =
K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE));
} else if (!strcmp(zone->name, "Normal")) {
stats->normal_nr_free_pages =
K(zone_page_state(zone, NR_FREE_PAGES));
stats->normal_nr_active_anon =
K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON));
stats->normal_nr_inactive_anon =
K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON));
stats->normal_nr_active_file =
K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE));
stats->normal_nr_inactive_file =
K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE));
} else if (!strcmp(zone->name, "HighMem")) {
stats->highmem_nr_free_pages =
K(zone_page_state(zone, NR_FREE_PAGES));
stats->highmem_nr_active_anon =
K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON));
stats->highmem_nr_inactive_anon =
K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON));
stats->highmem_nr_active_file =
K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE));
stats->highmem_nr_inactive_file =
K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE));
} else if (!strcmp(zone->name, "Movable")) {
stats->movable_nr_free_pages =
K(zone_page_state(zone, NR_FREE_PAGES));
stats->movable_nr_active_anon =
K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON));
stats->movable_nr_inactive_anon =
K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON));
stats->movable_nr_active_file =
K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE));
stats->movable_nr_inactive_file =
K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE));
}
}
stats->zram_compressed = K(zspages);
}
#else
static void sysstats_fill_zoneinfo(struct sys_memstats *stats)
{
}
#endif
static void sysstats_build(struct sys_memstats *stats)
{
struct sysinfo i;
si_meminfo(&i);
si_swapinfo(&i);
stats->version = SYSSTATS_VERSION;
stats->memtotal = K(i.totalram);
stats->reclaimable =
global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE) >> 10;
stats->swap_used = K(i.totalswap - i.freeswap);
stats->swap_total = K(i.totalswap);
stats->vmalloc_total = K(vmalloc_nr_pages());
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
stats->unreclaimable =
K(global_node_page_state(NR_UNRECLAIMABLE_PAGES));
#endif
stats->buffer = K(i.bufferram);
stats->swapcache = K(total_swapcache_pages());
stats->slab_reclaimable =
K(global_node_page_state(NR_SLAB_RECLAIMABLE));
stats->slab_unreclaimable =
K(global_node_page_state(NR_SLAB_UNRECLAIMABLE));
stats->free_cma = K(global_zone_page_state(NR_FREE_CMA_PAGES));
stats->file_mapped = K(global_node_page_state(NR_FILE_MAPPED));
stats->kernelstack = global_zone_page_state(NR_KERNEL_STACK_KB);
stats->pagetable = K(global_zone_page_state(NR_PAGETABLE));
stats->shmem = K(i.sharedram);
sysstats_fill_zoneinfo(stats);
}
#undef K
static int sysstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
int rc = 0;
struct sk_buff *rep_skb;
struct sys_memstats *stats;
struct nlattr *na;
size_t size;
size = nla_total_size(sizeof(struct sys_memstats));
rc = prepare_reply(info, SYSSTATS_CMD_NEW, &rep_skb,
size);
if (rc < 0)
goto err;
na = nla_reserve(rep_skb, SYSSTATS_TYPE_SYSMEM_STATS,
sizeof(struct sys_memstats));
if (na == NULL) {
nlmsg_free(rep_skb);
rc = -EMSGSIZE;
goto err;
}
stats = nla_data(na);
memset(stats, 0, sizeof(*stats));
sysstats_build(stats);
rc = send_reply(rep_skb, info);
err:
return rc;
}
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
int rc = 0;
@@ -659,6 +805,12 @@ static const struct genl_ops taskstats_ops[] = {
/* policy enforced later */
.flags = GENL_CMD_CAP_HASPOL,
},
{
.cmd = SYSSTATS_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = sysstats_user_cmd,
/* policy enforced later */
},
};
static int taskstats_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
@@ -673,6 +825,9 @@ static int taskstats_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
case CGROUPSTATS_CMD_GET:
policy = cgroupstats_cmd_get_policy;
break;
case SYSSTATS_CMD_GET:
policy = sysstats_cmd_get_policy;
break;
default:
return -EINVAL;
}

View File

@@ -758,6 +758,16 @@ config PERCPU_STATS
information includes global and per chunk statistics, which can
be used to help understand percpu memory usage.
config MM_STAT_UNRECLAIMABLE_PAGES
default y
depends on QGKI
bool "Enable counter for unreclaimable pages"
help
The unreclaimable pages counters account for pages that cannot be
reclaimed under memory pressure. They consist of a per-mm rss counter
and a global per-node counter. This option enables both counters on
QGKI systems.
config HAVE_USERSPACE_LOW_MEMORY_KILLER
bool "Configure reclaim paths if there is a userspace LMK"
default n

View File

@@ -1170,6 +1170,9 @@ const char * const vmstat_text[] = {
"nr_dirtied",
"nr_written",
"nr_kernel_misc_reclaimable",
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
"nr_unreclaimable_pages",
#endif
/* enum writeback_stat_item counters */
"nr_dirty_threshold",