android_kernel_samsung_sm8650/arch/arm64/mm/mteswap.c
Catalin Marinas 96122e776f arm64: mte: Fix/clarify the PG_mte_tagged semantics
commit e059853d14ca4ed0f6a190d7109487918a22a976 upstream.

Currently the PG_mte_tagged page flag mostly means the page contains
valid tags and it should be set after the tags have been cleared or
restored. However, in mte_sync_tags() it is set before setting the tags
to avoid, in theory, a race with concurrent mprotect(PROT_MTE) for
shared pages. Even so, a concurrent mprotect(PROT_MTE) racing with a
copy-on-write in another thread can leave the new page with stale tags.
Similarly, tag reading via ptrace() can read stale tags if the
PG_mte_tagged flag is set before the tags are actually cleared/restored.

Fix the PG_mte_tagged semantics so that it is only set after the tags
have been cleared or restored. This is safe for swap restoring into a
MAP_SHARED or CoW page since the core code takes the page lock. Add two
functions to test and set the PG_mte_tagged flag with acquire and
release semantics. The downside is that concurrent mprotect(PROT_MTE) on
a MAP_SHARED page may cause tag loss. This is already the case for KVM
guests if a VMM changes the page protection while the guest triggers a
user_mem_abort().

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[pcc@google.com: fix build with CONFIG_ARM64_MTE disabled]
Signed-off-by: Peter Collingbourne <pcc@google.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Peter Collingbourne <pcc@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221104011041.290951-3-pcc@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2023-03-11 13:55:44 +01:00
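The "two functions ... with acquire and release semantics" mentioned in the
commit message are page_mte_tagged() and set_page_mte_tagged(), added to
arch/arm64/include/asm/pgtable.h. Simplified from the upstream commit, they
look essentially like this (the real definitions are gated behind
CONFIG_ARM64_MTE):

#define PG_mte_tagged	PG_arch_2

static inline void set_page_mte_tagged(struct page *page)
{
	/*
	 * Ensure that the tags written prior to this call are visible
	 * before the page flag update (release-style ordering).
	 */
	smp_wmb();
	set_bit(PG_mte_tagged, &page->flags);
}

static inline bool page_mte_tagged(struct page *page)
{
	bool ret = test_bit(PG_mte_tagged, &page->flags);

	/*
	 * If the page is tagged, order the flag read before a likely
	 * subsequent read of the tags (acquire-style ordering).
	 */
	if (ret)
		smp_rmb();
	return ret;
}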


// SPDX-License-Identifier: GPL-2.0-only

#include <linux/pagemap.h>
#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/mte.h>

/* Saved tag storage for swapped-out pages, indexed by swap entry value. */
static DEFINE_XARRAY(mte_pages);

void *mte_allocate_tag_storage(void)
{
	/* tags granule is 16 bytes, 2 tags stored per byte */
	return kmalloc(MTE_PAGE_TAG_STORAGE, GFP_KERNEL);
}

void mte_free_tag_storage(char *storage)
{
	kfree(storage);
}

/* Preserve a tagged page's tags when it is written out to swap. */
int mte_save_tags(struct page *page)
{
	void *tag_storage, *ret;

	if (!page_mte_tagged(page))
		return 0;

	tag_storage = mte_allocate_tag_storage();
	if (!tag_storage)
		return -ENOMEM;

	mte_save_page_tags(page_address(page), tag_storage);

	/* page_private contains the swap entry.val set in do_swap_page */
	ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (ret) {
		/* Entry is being replaced, free the old entry */
		mte_free_tag_storage(ret);
	}

	return 0;
}

/*
 * Restore a page's tags on swap-in. Returns true if saved tags were found
 * for @entry.
 */
bool mte_restore_tags(swp_entry_t entry, struct page *page)
{
	void *tags = xa_load(&mte_pages, entry.val);

	if (!tags)
		return false;

	/*
	 * Test PG_mte_tagged again in case it was racing with another
	 * set_pte_at().
	 */
	if (!test_and_set_bit(PG_mte_tagged, &page->flags))
		mte_restore_page_tags(page_address(page), tags);

	return true;
}

/* Drop the saved tags for a single freed swap slot. */
void mte_invalidate_tags(int type, pgoff_t offset)
{
	swp_entry_t entry = swp_entry(type, offset);
	void *tags = xa_erase(&mte_pages, entry.val);

	mte_free_tag_storage(tags);
}

/* Drop all tags saved for a whole swap area, e.g. on swapoff. */
void mte_invalidate_tags_area(int type)
{
	swp_entry_t entry = swp_entry(type, 0);
	swp_entry_t last_entry = swp_entry(type + 1, 0);
	void *tags;

	XA_STATE(xa_state, &mte_pages, entry.val);

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, last_entry.val - 1) {
		__xa_erase(&mte_pages, xa_state.xa_index);
		mte_free_tag_storage(tags);
	}
	xa_unlock(&mte_pages);
}
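For context, the functions in this file are not called directly by the core
mm code; they are wired in through arch hooks in
arch/arm64/include/asm/pgtable.h. A simplified sketch of that wiring, under
the assumption of a 6.1-era tree (arch_swap_restore() in particular takes a
struct folio rather than a struct page in some kernel versions):

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	/* Swap-out: save the page's tags; may fail with -ENOMEM. */
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	/* A swap slot was freed: drop its saved tags. */
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	/* swapoff: drop every tag saved for this swap device. */
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
{
	/* Swap-in: restore the saved tags before the page is mapped. */
	if (system_supports_mte())
		mte_restore_tags(entry, page);
}

#endif /* CONFIG_ARM64_MTE */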