ANDROID: 16K: Handle pad VMA splits and merges

In some cases a VMA with padding representation may be split, and
therefore the padding flags must be updated accordingly.

There are 3 cases to handle:

Given:
    | DDDDPPPP |

where:
    - D represents 1 page of data;
    - P represents 1 page of padding;
    - | represents the boundaries (start/end) of the VMA

1) Split exactly at the padding boundary

    | DDDDPPPP | --> | DDDD | PPPP |

    - Remove padding flags from the first VMA.
    - The second VMA is all padding

2) Split within the padding area

    | DDDDPPPP | --> | DDDDPP | PP |

    - Subtract the length of the second VMA from the first VMA's
      padding.
    - The second VMA is all padding, adjust its padding length (flags)

3) Split within the data area

    | DDDDPPPP | --> | DD | DDPPPP |

    - Remove padding flags from the first VMA.
    - The second VMA has the same padding as before the split.

To simplify the semantics merging of padding VMAs is not allowed.

If a split produces a VMA that is entirely padding, show_[s]maps()
only outputs the padding VMA entry (as the data entry is of length 0).

Bug: 330117029
Bug: 327600007
Bug: 330767927
Bug: 328266487
Bug: 329803029
Change-Id: Ie2628ced5512e2c7f8af25fabae1f38730c8bb1a
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
This commit is contained in:
Kalesh Singh 2024-04-22 14:24:59 -07:00
parent 162de86e24
commit 09da1d141d
6 changed files with 117 additions and 6 deletions

View File

@ -394,6 +394,7 @@ static int show_map(struct seq_file *m, void *v)
struct vm_area_struct *pad_vma = get_pad_vma(v);
struct vm_area_struct *vma = get_data_vma(v);
if (vma_pages(vma))
show_map_vma(m, vma);
show_map_pad_vma(vma, pad_vma, m, show_map_vma);
@ -900,6 +901,9 @@ static int show_smap(struct seq_file *m, void *v)
memset(&mss, 0, sizeof(mss));
if (!vma_pages(vma))
goto show_pad;
smap_gather_stats(vma, &mss, 0);
show_map_vma(m, vma);
@ -923,6 +927,7 @@ static int show_smap(struct seq_file *m, void *v)
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
show_smap_vma_flags(m, vma);
show_pad:
show_map_pad_vma(vma, pad_vma, m, (show_pad_vma_fn)show_smap);
return 0;

View File

@ -61,6 +61,9 @@ extern struct vm_area_struct *get_data_vma(struct vm_area_struct *vma);
extern void show_map_pad_vma(struct vm_area_struct *vma,
struct vm_area_struct *pad,
struct seq_file *m, show_pad_vma_fn func);
extern void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
unsigned long addr, int new_below);
#else /* PAGE_SIZE != SZ_4K || !defined(CONFIG_64BIT) */
static inline void vma_set_pad_pages(struct vm_area_struct *vma,
unsigned long nr_pages)
@ -92,10 +95,41 @@ static inline void show_map_pad_vma(struct vm_area_struct *vma,
struct seq_file *m, show_pad_vma_fn func)
{
}
/*
 * No-op stub for configurations without page-size-migration padding
 * (PAGE_SIZE != SZ_4K or !CONFIG_64BIT); keeps callers unconditional.
 */
static inline void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
unsigned long addr, int new_below)
{
}
#endif /* PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT) */
/*
 * Number of non-padding (data) pages in the VMA: total pages minus
 * the padding page count encoded in the VMA's pad flags.
 */
static inline unsigned long vma_data_pages(struct vm_area_struct *vma)
{
return vma_pages(vma) - vma_pad_pages(vma);
}
/*
 * Compute the flags for a VMA update while preserving its padding bits:
 * if @newflags carries any VM_PAD_MASK bits, replace them with the
 * padding bits currently held by @vma so a split/fixup cannot alter
 * the recorded padding length.
 */
static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
						unsigned long newflags)
{
	if (!(newflags & VM_PAD_MASK))
		return newflags;

	return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK);
}
/*
 * Padding VMAs only ever originate from the linker context, so merges
 * involving them are rare.  To keep the split/merge semantics simple,
 * a VMA carrying padding bits is never allowed to merge with any
 * neighbour, padding or not.
 */
static inline bool is_mergable_pad_vma(struct vm_area_struct *vma,
				       unsigned long vm_flags)
{
	/* Mergeable only when neither side carries VM_PAD_MASK bits */
	return ((vma->vm_flags | vm_flags) & VM_PAD_MASK) == 0;
}
#endif /* _LINUX_PAGE_SIZE_MIGRATION_H */

View File

@ -13,6 +13,7 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pgsize_migration.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
@ -586,7 +587,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
*/
if (lock) {
vm_write_begin(vma);
WRITE_ONCE(vma->vm_flags, newflags);
WRITE_ONCE(vma->vm_flags, vma_pad_fixup_flags(vma, newflags));
vm_write_end(vma);
} else
munlock_vma_pages_range(vma, start, end);

View File

@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pgsize_migration.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
@ -1103,6 +1104,8 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
return 0;
if (vma_get_anon_name(vma) != anon_name)
return 0;
if (!is_mergable_pad_vma(vma, vm_flags))
return 0;
return 1;
}
@ -2902,8 +2905,10 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
/* Success. */
if (!err)
if (!err) {
split_pad_vma(vma, new, addr, new_below);
return 0;
}
/* Clean everything up if vma_adjust failed. */
if (new->vm_ops && new->vm_ops->close)

View File

@ -17,6 +17,7 @@
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/pgsize_migration.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
@ -481,7 +482,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
* held in write mode.
*/
vm_write_begin(vma);
WRITE_ONCE(vma->vm_flags, newflags);
WRITE_ONCE(vma->vm_flags, vma_pad_fixup_flags(vma, newflags));
dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
vma_set_page_prot(vma);

View File

@ -113,6 +113,7 @@ void vma_set_pad_pages(struct vm_area_struct *vma,
if (!is_pgsize_migration_enabled())
return;
vma->vm_flags &= ~VM_PAD_MASK;
vma->vm_flags |= (nr_pages << VM_PAD_SHIFT);
}
@ -268,10 +269,10 @@ struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma)
pad->vm_start = VMA_PAD_START(pad);
/* Make the pad vma PROT_NONE */
pad->vm_flags = pad->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
pad->vm_flags &= ~(VM_READ|VM_WRITE|VM_EXEC);
/* Remove padding bits */
pad->vm_flags = pad->vm_flags & ~VM_PAD_MASK;
pad->vm_flags &= ~VM_PAD_MASK;
return pad;
}
@ -324,5 +325,69 @@ void show_map_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *pad,
kfree(pad);
kfree(vma);
}
/*
* When splitting a padding VMA there are a couple of cases to handle.
*
* Given:
*
* | DDDDPPPP |
*
* where:
* - D represents 1 page of data;
* - P represents 1 page of padding;
* - | represents the boundaries (start/end) of the VMA
*
*
* 1) Split exactly at the padding boundary
*
* | DDDDPPPP | --> | DDDD | PPPP |
*
* - Remove padding flags from the first VMA.
* - The second VMA is all padding
*
* 2) Split within the padding area
*
* | DDDDPPPP | --> | DDDDPP | PP |
*
* - Subtract the length of the second VMA from the first VMA's padding.
* - The second VMA is all padding, adjust its padding length (flags)
*
* 3) Split within the data area
*
* | DDDDPPPP | --> | DD | DDPPPP |
*
* - Remove padding flags from the first VMA.
 * - The second VMA has the same padding as before the split.
*/
void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
		   unsigned long addr, int new_below)
{
	unsigned long pad_pages = vma_pad_pages(vma);
	unsigned long upper_pages;
	struct vm_area_struct *lower;
	struct vm_area_struct *upper;

	/* Original VMA carried no padding: nothing to fix up */
	if (!pad_pages)
		return;

	/* Identify which VMA ended up covering the lower/upper address range */
	if (new_below) {
		lower = new;
		upper = vma;
	} else {
		lower = vma;
		upper = new;
	}

	upper_pages = vma_pages(upper);

	if (upper_pages >= pad_pages) {
		/* Cases 1 & 3: split at or below the padding boundary —
		 * all padding lands in the upper VMA.
		 */
		lower->vm_flags &= ~VM_PAD_MASK;
		vma_set_pad_pages(upper, pad_pages);
	} else {
		/* Case 2: split lands inside the padding region —
		 * padding is distributed across both VMAs.
		 */
		vma_set_pad_pages(lower, pad_pages - upper_pages);
		vma_set_pad_pages(upper, upper_pages);
	}
}
#endif /* PAGE_SIZE == SZ_4K */
#endif /* CONFIG_64BIT */