ANDROID: 16K: Introduce pgsize_migration_inline.h

Introduce inline header to avoid circular dependency. This
will be used in a subsequent patch.

Also take the opportunity to do some small no-op refactoring in
vma_pad_pages() and split_pad_vma() to make the code more robust.

Bug: 357901498
Change-Id: Ia5f447758d0d07ed3e1429ca1e35dcc0741cc22a
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
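
For context on the pattern used here (sketched with hypothetical names,
not code from this patch): a circular dependency arises when a header of
helper APIs comes to be needed by one of the headers it itself includes.
Moving the small static inline helpers into a separate *_inline.h that
depends only on lightweight type headers breaks the cycle:

/* foo_inline.h -- hypothetical illustration only */
#ifndef _FOO_INLINE_H
#define _FOO_INLINE_H

#include <linux/mm_types.h>	/* types only; does not pull in linux/mm.h */

/* Trivial stand-in helper: needs only struct vm_area_struct. */
static inline bool foo_vma_is_marked(struct vm_area_struct *vma)
{
	return (vma->vm_flags & (1UL << 60)) != 0;
}

#endif /* _FOO_INLINE_H */

Headers that must not include foo.h (because foo.h includes them) can
safely include foo_inline.h instead.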
---
 3 files changed, 71 insertions(+), 51 deletions(-)

diff --git a/include/linux/pgsize_migration.h b/include/linux/pgsize_migration.h
--- a/include/linux/pgsize_migration.h
+++ b/include/linux/pgsize_migration.h

@@ -13,35 +13,9 @@
  * page size in Android.
  */
+#include <linux/mm.h>
+#include <linux/pgsize_migration_inline.h>
 #include <linux/seq_file.h>
 #include <linux/sizes.h>
 
-/*
- * vm_flags representation of VMA padding pages.
- *
- * This allows the kernel to identify the portion of an ELF LOAD segment VMA
- * that is padding.
- *
- * 4 high bits of vm_flags [63,60] are used to represent ELF segment padding
- * up to 60kB, which is sufficient for ELFs of both 16kB and 64kB segment
- * alignment (p_align).
- *
- * The representation is illustrated below.
- *
- *                    63        62        61        60
- *                 _________ _________ _________ _________
- *                |  Bit 3  |  Bit 2  |  Bit 1  |  Bit 0  |
- *                |  of 4kB |  of 4kB |  of 4kB |  of 4kB |
- *                |  chunks |  chunks |  chunks |  chunks |
- *                |_________|_________|_________|_________|
- */
-
-#define VM_PAD_WIDTH		4
-#define VM_PAD_SHIFT		(BITS_PER_LONG - VM_PAD_WIDTH)
-#define VM_TOTAL_PAD_PAGES	((1ULL << VM_PAD_WIDTH) - 1)
-#define VM_PAD_MASK		(VM_TOTAL_PAD_PAGES << VM_PAD_SHIFT)
-#define VMA_PAD_START(vma)	(vma->vm_end - (vma_pad_pages(vma) << PAGE_SHIFT))
-
-#include <linux/mm.h>
-
 #if PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT)
 extern void vma_set_pad_pages(struct vm_area_struct *vma,
@@ -63,9 +37,6 @@ extern void show_map_pad_vma(struct vm_area_struct *vma,
 extern void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
 			  unsigned long addr, int new_below);
 
-extern unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
-					 unsigned long newflags);
-
 extern bool is_mergable_pad_vma(struct vm_area_struct *vma,
 				unsigned long vm_flags);
 
@@ -107,12 +78,6 @@ static inline void split_pad_vma(struct vm_area_struct *vma, struct vm_area_stru
 {
 }
 
-static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
-						unsigned long newflags)
-{
-	return newflags;
-}
-
 static inline bool is_mergable_pad_vma(struct vm_area_struct *vma,
 				       unsigned long vm_flags)
 {

diff --git a/include/linux/pgsize_migration_inline.h b/include/linux/pgsize_migration_inline.h
new file mode 100644
--- /dev/null
+++ b/include/linux/pgsize_migration_inline.h

@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PAGE_SIZE_MIGRATION_INLINE_H
+#define _LINUX_PAGE_SIZE_MIGRATION_INLINE_H
+
+/*
+ * Page Size Migration
+ *
+ * Copyright (c) 2024, Google LLC.
+ * Author: Kalesh Singh <kaleshsingh@google.com>
+ *
+ * This file contains inline APIs for mitigations to ensure
+ * app compatibility during the transition from 4kB to 16kB
+ * page size in Android.
+ */
+
+#include <linux/mm_types.h>
+#include <linux/sizes.h>
+
+#include <asm/page.h>
+
+/*
+ * vm_flags representation of VMA padding pages.
+ *
+ * This allows the kernel to identify the portion of an ELF LOAD segment VMA
+ * that is padding.
+ *
+ * 4 high bits of vm_flags [63,60] are used to represent ELF segment padding
+ * up to 60kB, which is sufficient for ELFs of both 16kB and 64kB segment
+ * alignment (p_align).
+ *
+ * The representation is illustrated below.
+ *
+ *                    63        62        61        60
+ *                 _________ _________ _________ _________
+ *                |  Bit 3  |  Bit 2  |  Bit 1  |  Bit 0  |
+ *                |  of 4kB |  of 4kB |  of 4kB |  of 4kB |
+ *                |  chunks |  chunks |  chunks |  chunks |
+ *                |_________|_________|_________|_________|
+ */
+
+#define VM_PAD_WIDTH		4
+#define VM_PAD_SHIFT		(BITS_PER_LONG - VM_PAD_WIDTH)
+#define VM_TOTAL_PAD_PAGES	((1ULL << VM_PAD_WIDTH) - 1)
+#define VM_PAD_MASK		(VM_TOTAL_PAD_PAGES << VM_PAD_SHIFT)
+#define VMA_PAD_START(vma)	(vma->vm_end - (vma_pad_pages(vma) << PAGE_SHIFT))
+
+#if PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT)
+/*
+ * Sets the correct padding bits / flags for a VMA split.
+ */
+static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
+						unsigned long newflags)
+{
+	if (newflags & VM_PAD_MASK)
+		return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK);
+	else
+		return newflags;
+}
+#else /* PAGE_SIZE != SZ_4K || !defined(CONFIG_64BIT) */
+static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
+						unsigned long newflags)
+{
+	return newflags;
+}
+#endif /* PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT) */
+
+#endif /* _LINUX_PAGE_SIZE_MIGRATION_INLINE_H */
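
To make the bit layout above concrete, a worked example (illustration,
not part of the patch): with BITS_PER_LONG == 64, VM_PAD_SHIFT is
64 - 4 == 60 and VM_PAD_MASK covers bits [63:60].

/* Encoding 12kB of padding (3 x 4kB pages) in vm_flags: */
unsigned long flags = 0;

flags |= 3UL << VM_PAD_SHIFT;	/* store 3 in bits [63:60] */

/* Decoding, as vma_pad_pages() does after this patch: */
/*   (flags & VM_PAD_MASK) >> VM_PAD_SHIFT == 3                              */
/* VMA_PAD_START() is then vm_end - (3 << PAGE_SHIFT), i.e. 12kB below vm_end. */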

diff --git a/mm/pgsize_migration.c b/mm/pgsize_migration.c
--- a/mm/pgsize_migration.c
+++ b/mm/pgsize_migration.c

@@ -126,7 +126,7 @@ unsigned long vma_pad_pages(struct vm_area_struct *vma)
 	if (!is_pgsize_migration_enabled())
 		return 0;
 
-	return vma->vm_flags >> VM_PAD_SHIFT;
+	return (vma->vm_flags & VM_PAD_MASK) >> VM_PAD_SHIFT;
 }
 
 static __always_inline bool str_has_suffix(const char *str, const char *suffix)
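
This is one of the no-op refactors called out in the commit message:
the pad field occupies the most significant bits of vm_flags, so an
unsigned shift alone already isolates it, and masking first does not
change the result. It does remove the hidden assumption that the field
sits at the top of the word. A quick check of the equivalence
(illustration, not part of the patch):

unsigned long flags = 3UL << VM_PAD_SHIFT;	/* a VMA with 3 pad pages */

/* Both forms agree while VM_PAD_MASK is the top nibble of the word. */
BUG_ON((flags >> VM_PAD_SHIFT) !=
       ((flags & VM_PAD_MASK) >> VM_PAD_SHIFT));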
@@ -397,7 +397,7 @@ void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
 	nr_vma2_pages = vma_pages(second);
 
 	if (nr_vma2_pages >= nr_pad_pages) { /* Case 1 & 3 */
-		first->vm_flags &= ~VM_PAD_MASK;
+		vma_set_pad_pages(first, 0);
 		vma_set_pad_pages(second, nr_pad_pages);
 	} else { /* Case 2 */
 		vma_set_pad_pages(first, nr_pad_pages - nr_vma2_pages);
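
The other no-op refactor: clearing the pad bits by hand with
first->vm_flags &= ~VM_PAD_MASK is replaced by vma_set_pad_pages(first, 0),
so every update of the pad field now goes through a single helper. The
case analysis, as suggested by the code (padding always occupies the
tail of the VMA, per VMA_PAD_START):

/*
 * Illustration (inferred, not part of the patch), for nr_pad_pages == 3
 * and a split into |first|second|, where second holds the tail:
 *
 *   nr_vma2_pages >= 3 (Cases 1 & 3): the split lands at or before the
 *       padding, so all of it falls in second: first gets 0 pad pages,
 *       second gets all 3.
 *
 *   nr_vma2_pages == 2 (Case 2): the split lands inside the padding;
 *       first keeps 3 - 2 == 1 pad page, and second is entirely padding.
 */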
@@ -405,18 +405,6 @@ void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
 	}
 }
 
-/*
- * Sets the correct padding bits / flags for a VMA split.
- */
-unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
-				  unsigned long newflags)
-{
-	if (newflags & VM_PAD_MASK)
-		return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK);
-	else
-		return newflags;
-}
-
 /*
  * Merging of padding VMAs is uncommon, as padding is only allowed
  * from the linker context.