diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 10ab37a2cfba..4e4522f30898 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1544,6 +1544,7 @@ static void __init map_lowmem(void)
 		vm->flags |= VM_ARM_MTYPE(type);
 		vm->caller = map_lowmem;
 		add_static_vm_early(svm++);
+		mark_vmalloc_reserved_area(vm->addr, vm->size);
 	}
 }
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3a97481a5383..2d2df6838ded 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -675,6 +675,10 @@ unsigned long vmalloc_to_pfn(const void *addr);
  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
  * is no special casing required.
  */
+
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern bool is_vmalloc_addr(const void *x);
+#else
 static inline bool is_vmalloc_addr(const void *x)
 {
 #ifdef CONFIG_MMU
@@ -685,6 +689,7 @@ static inline bool is_vmalloc_addr(const void *x)
 	return false;
 #endif
 }
+#endif //CONFIG_ENABLE_VMALLOC_SAVING
 
 #ifndef is_ioremap_addr
 #define is_ioremap_addr(x)	is_vmalloc_addr(x)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index b81467562d2b..7e1c887d10a4 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -205,6 +205,12 @@ extern struct list_head vmap_area_list;
 extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 extern __init int vm_area_check_early(struct vm_struct *vm);
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern void mark_vmalloc_reserved_area(void *addr, unsigned long size);
+#else
+static inline void mark_vmalloc_reserved_area(void *addr, unsigned long size)
+{ };
+#endif
 
 #ifdef CONFIG_SMP
 # ifdef CONFIG_MMU
@@ -230,7 +236,12 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 #endif
 
 #ifdef CONFIG_MMU
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern unsigned long total_vmalloc_size;
+#define VMALLOC_TOTAL total_vmalloc_size
+#else
 #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
+#endif
 #else
 #define VMALLOC_TOTAL 0UL
 #endif
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 51dbf88fcdb3..5bc93e42c3a8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -249,6 +249,50 @@ static int vmap_page_range(unsigned long start, unsigned long end,
 	return ret;
 }
 
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+#define POSSIBLE_VMALLOC_START	PAGE_OFFSET
+
+#define VMALLOC_BITMAP_SIZE	((VMALLOC_END - PAGE_OFFSET) >> \
+					PAGE_SHIFT)
+#define VMALLOC_TO_BIT(addr)	((addr - PAGE_OFFSET) >> PAGE_SHIFT)
+#define BIT_TO_VMALLOC(i)	(PAGE_OFFSET + i * PAGE_SIZE)
+
+unsigned long total_vmalloc_size;
+unsigned long vmalloc_reserved;
+
+DECLARE_BITMAP(possible_areas, VMALLOC_BITMAP_SIZE);
+
+void mark_vmalloc_reserved_area(void *x, unsigned long size)
+{
+	unsigned long addr = (unsigned long)x;
+
+	bitmap_set(possible_areas, VMALLOC_TO_BIT(addr), size >> PAGE_SHIFT);
+	vmalloc_reserved += size;
+}
+
+bool is_vmalloc_addr(const void *x)
+{
+	unsigned long addr = (unsigned long)x;
+
+	if (addr < POSSIBLE_VMALLOC_START || addr >= VMALLOC_END)
+		return false;
+
+	if (test_bit(VMALLOC_TO_BIT(addr), possible_areas))
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL(is_vmalloc_addr);
+
+static void calc_total_vmalloc_size(void)
+{
+	total_vmalloc_size = VMALLOC_END - POSSIBLE_VMALLOC_START -
+		vmalloc_reserved;
+}
+#else
+static void calc_total_vmalloc_size(void) { }
+#endif
+
 int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
@@ -1963,6 +2007,7 @@ void __init vmalloc_init(void)
 	 * Now we can initialize a free vmap space.
 	 */
 	vmap_init_free_space();
+	calc_total_vmalloc_size();
 	vmap_initialized = true;
 }
 
@@ -3564,6 +3609,9 @@ static int s_show(struct seq_file *m, void *p)
 	if (is_vmalloc_addr(v->pages))
 		seq_puts(m, " vpages");
 
+	if (v->flags & VM_LOWMEM)
+		seq_puts(m, " lowmem");
+
 	show_numa_info(m, v);
 	seq_putc(m, '\n');
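
Not part of the patch: the hunks above all rest on one piece of bookkeeping. Lowmem regions statically mapped in map_lowmem() are recorded one bit per page in a bitmap, the patched is_vmalloc_addr() reports an address as vmalloc space only if it lies inside the window and its bit is clear, and calc_total_vmalloc_size() subtracts the reserved bytes from the value exported as VMALLOC_TOTAL. The stand-alone user-space sketch below mimics that logic under stated assumptions; PAGE_OFFSET, VMALLOC_END, the page size and every helper name here are made-up stand-ins for the example, not the kernel's definitions.

/*
 * Illustrative sketch only -- not part of the patch.  All constants and
 * helper names are assumed stand-ins, not kernel definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_OFFSET	0xc0000000UL		/* assumed lowmem start */
#define VMALLOC_END	0xff000000UL		/* assumed end of vmalloc space */
#define NBITS		((VMALLOC_END - PAGE_OFFSET) >> PAGE_SHIFT)
#define ADDR_TO_BIT(a)	(((a) - PAGE_OFFSET) >> PAGE_SHIFT)

static uint64_t possible_areas[(NBITS + 63) / 64];	/* one bit per page */
static unsigned long vmalloc_reserved;

static void set_page_bit(unsigned long bit)
{
	possible_areas[bit / 64] |= 1ULL << (bit % 64);
}

static bool test_page_bit(unsigned long bit)
{
	return possible_areas[bit / 64] & (1ULL << (bit % 64));
}

/* Mirrors mark_vmalloc_reserved_area(): set one bit per reserved page. */
static void mark_reserved(unsigned long addr, unsigned long size)
{
	unsigned long i;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		set_page_bit(ADDR_TO_BIT(addr) + i);
	vmalloc_reserved += size;
}

/* Mirrors the patched is_vmalloc_addr(): inside the window and not reserved. */
static bool is_vmalloc_addr_sim(unsigned long addr)
{
	if (addr < PAGE_OFFSET || addr >= VMALLOC_END)
		return false;
	return !test_page_bit(ADDR_TO_BIT(addr));
}

int main(void)
{
	/* Pretend map_lowmem() placed a 16 MB static mapping at PAGE_OFFSET. */
	mark_reserved(PAGE_OFFSET, 16UL << 20);

	printf("reserved lowmem page : %d\n",
	       is_vmalloc_addr_sim(PAGE_OFFSET + PAGE_SIZE));
	printf("unreserved page      : %d\n",
	       is_vmalloc_addr_sim(PAGE_OFFSET + (32UL << 20)));
	/* Mirrors calc_total_vmalloc_size(): window size minus reservations. */
	printf("total vmalloc size   : %lu bytes\n",
	       VMALLOC_END - PAGE_OFFSET - vmalloc_reserved);
	return 0;
}

Under these assumptions the first printf reports 0 (a page inside the marked lowmem mapping is no longer treated as vmalloc space), the second reports 1 for an address past the reserved range, and the last line is the reduced total that the patch exposes through total_vmalloc_size / VMALLOC_TOTAL.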