ANDROID: 16K: Introduce ELF padding representation for VMAs

The dynamic linker may extend ELF LOAD segment mappings to be contiguous
in memory when loading a 16kB-compatible ELF on a 4kB page-size system.
This is done to reduce the use of unreclaimable VMA slab memory for the
otherwise necessary "gap" VMAs. The extended portion of the mapping
(VMA) can be viewed as "padding", meaning that the mapping in that range
corresponds to an area of the file that does not contain contents of the
respective segments (possibly zeros, depending on how the ELF is built).

For some compatibility mitigations, the region of a VMA corresponding to
these padding sections needs to be known.

In order to represent such regions without adding extra overhead or
breaking the ABI, some upper bits of vm_flags are used.

Add the VMA padding pages representation and the necessary APIs to
manipulate it.
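
As a rough illustration (not part of this patch), the packing amounts to
the following arithmetic; the constants below are restated locally for the
sketch only and mirror the ones introduced in the new header:

	/* Sketch: 12kB of tail padding == 3 x 4kB pages, kept in bits [63:60]. */
	#include <stdio.h>

	#define PAD_WIDTH	4
	#define PAD_SHIFT	(64 - PAD_WIDTH)

	int main(void)
	{
		unsigned long nr_pad_pages = (12 * 1024) / (4 * 1024);	/* 3 */
		unsigned long vm_flags = 0;

		vm_flags |= nr_pad_pages << PAD_SHIFT;		/* encode */
		printf("flags=%#lx pad_pages=%lu\n",
		       vm_flags, vm_flags >> PAD_SHIFT);	/* decode */
		return 0;
	}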

Bug: 330117029
Bug: 327600007
Bug: 330767927
Bug: 328266487
Bug: 329803029
Change-Id: Ieb9fa98e30ec9b0bec62256624f14e3ed6062a75
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>

@@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_SIZE_MIGRATION_H
#define _LINUX_PAGE_SIZE_MIGRATION_H

/*
 * Page Size Migration
 *
 * Copyright (c) 2024, Google LLC.
 * Author: Kalesh Singh <kaleshsingh@google.com>
 *
 * This file contains the APIs for mitigations to ensure
 * app compatibility during the transition from 4kB to 16kB
 * page size in Android.
 */

#include <linux/mm.h>
#include <linux/sizes.h>

/*
 * vm_flags representation of VMA padding pages.
 *
 * This allows the kernel to identify the portion of an ELF LOAD segment VMA
 * that is padding.
 *
 * 4 high bits of vm_flags [63,60] are used to represent ELF segment padding
 * up to 60kB, which is sufficient for ELFs of both 16kB and 64kB segment
 * alignment (p_align).
 *
 * The representation is illustrated below.
 *
 *     63        62        61        60
 *  _________ _________ _________ _________
 * |  Bit 3  |  Bit 2  |  Bit 1  |  Bit 0  |
 * |  of 4kB |  of 4kB |  of 4kB |  of 4kB |
 * |  chunks |  chunks |  chunks |  chunks |
 * |_________|_________|_________|_________|
 */

#define VM_PAD_WIDTH		4
#define VM_PAD_SHIFT		(BITS_PER_LONG - VM_PAD_WIDTH)
#define VM_TOTAL_PAD_PAGES	((1ULL << VM_PAD_WIDTH) - 1)

#if PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT)
extern void vma_set_pad_pages(struct vm_area_struct *vma,
			      unsigned long nr_pages);
extern unsigned long vma_pad_pages(struct vm_area_struct *vma);
#else /* PAGE_SIZE != SZ_4K || !defined(CONFIG_64BIT) */
static inline void vma_set_pad_pages(struct vm_area_struct *vma,
				     unsigned long nr_pages)
{
}

static inline unsigned long vma_pad_pages(struct vm_area_struct *vma)
{
	return 0;
}
#endif /* PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT) */

static inline unsigned long vma_data_pages(struct vm_area_struct *vma)
{
	return vma_pages(vma) - vma_pad_pages(vma);
}

#endif /* _LINUX_PAGE_SIZE_MIGRATION_H */
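
As a side note, a minimal sketch of how a call site might record the padding
once the extended mapping exists. The helper name and parameters below are
hypothetical, not part of this patch; only vma_set_pad_pages(),
VM_TOTAL_PAD_PAGES and PAGE_SHIFT come from this header / the kernel:

	#include <linux/pgsize_migration.h>

	/* Hypothetical helper: pages in the VMA past the end of the segment's
	 * file contents are padding.
	 */
	static void note_segment_padding(struct vm_area_struct *vma,
					 unsigned long seg_file_end)
	{
		unsigned long nr_pad_pages =
			(vma->vm_end - seg_file_end) >> PAGE_SHIFT;

		if (nr_pad_pages && nr_pad_pages <= VM_TOTAL_PAD_PAGES)
			vma_set_pad_pages(vma, nr_pad_pages);
	}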


@@ -10,11 +10,12 @@
 * Author: Kalesh Singh <kaleshsingh@google.com>
 */

#include <linux/pgsize_migration.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/sysfs.h>

#ifdef CONFIG_64BIT
@@ -102,4 +103,23 @@ static int __init init_pgsize_migration(void)
	return 0;
};
late_initcall(init_pgsize_migration);

#if PAGE_SIZE == SZ_4K
void vma_set_pad_pages(struct vm_area_struct *vma,
		       unsigned long nr_pages)
{
	if (!is_pgsize_migration_enabled())
		return;

	vma->vm_flags |= (nr_pages << VM_PAD_SHIFT);
}

unsigned long vma_pad_pages(struct vm_area_struct *vma)
{
	if (!is_pgsize_migration_enabled())
		return 0;

	return vma->vm_flags >> VM_PAD_SHIFT;
}
#endif /* PAGE_SIZE == SZ_4K */
#endif /* CONFIG_64BIT */
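
On the consumer side, the helpers make it easy to separate the file-backed
part of a VMA from its padding. A sketch follows (hypothetical helper, not a
call site added by this patch; vma_data_pages() is defined in the new header):

	#include <linux/pgsize_migration.h>

	/* Hypothetical helper: address where the segment's real data ends;
	 * equal to vma->vm_end when the VMA carries no padding.
	 */
	static unsigned long vma_data_end(struct vm_area_struct *vma)
	{
		return vma->vm_start + (vma_data_pages(vma) << PAGE_SHIFT);
	}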