BACKPORT: arm64: mte: move register initialization to C

If FEAT_MTE2 is disabled via the arm64.nomte command line argument on a
CPU that claims to support FEAT_MTE2, the kernel will use Tagged Normal
in the MAIR. If we interpret arm64.nomte to mean that the CPU does not
in fact implement FEAT_MTE2, setting the system register like this may
lead to UNSPECIFIED behavior. Fix it by arranging for MAIR to be set
in the C function cpu_enable_mte which is called based on the sanitized
version of the system register.
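
To make the hazard concrete, here is a minimal user-space sketch of the
clear-and-set that now happens in mte_cpu_setup() via
sysreg_clear_set(mair_el1, ...). The attribute encodings (0xff Normal
Write-Back, 0xf0 Tagged Normal Write-Back) are architectural; the
MT_NORMAL_TAGGED index below is only an assumed value for illustration
and is not taken from this patch.

/*
 * Sketch only: mirrors the MAIR_EL1 update done by mte_cpu_setup(),
 * with the index value assumed for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define MAIR_ATTR_MASK		0xffULL
#define MAIR_ATTR_NORMAL	0xffULL	/* Normal memory, Write-Back */
#define MAIR_ATTR_NORMAL_TAGGED	0xf0ULL	/* Tagged Normal memory */
#define MT_NORMAL_TAGGED	1	/* assumed index, illustration only */

/* place an 8-bit attribute at MAIR index idx */
#define MAIR_ATTRIDX(attr, idx)	((attr) << ((idx) * 8))

int main(void)
{
	/* boot-time MAIR: MT_NORMAL_TAGGED mapped as plain Normal memory */
	uint64_t mair = MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED);

	/* clear-and-set, as sysreg_clear_set(mair_el1, ...) does */
	mair &= ~MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED);
	mair |= MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED, MT_NORMAL_TAGGED);

	printf("MAIR_EL1 after MTE enable: 0x%016llx\n",
	       (unsigned long long)mair);
	return 0;
}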

There is no need for the rest of the MTE-related system register
initialization to happen from assembly, with the exception of TCR_EL1,
which must be set to include at least TBI1 because the secondary CPUs
access KASan-allocated data structures early. Therefore, make the TCR_EL1
initialization unconditional and move the rest of the initialization to
cpu_enable_mte so that we no longer have a dependency on the unsanitized
ID register value.
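
As background for the TBI1 requirement: with KASAN_HW_TAGS the kernel
stores an allocation tag in the top byte of its pointers, and
TCR_EL1.TBI1 makes the MMU ignore that byte when translating TTBR1
(kernel) addresses. The sketch below is plain bit math and runs in user
space; the TCR bit positions are written from the architecture as I
recall them and should be checked against the kernel's TCR_*
definitions rather than treated as authoritative.

#include <stdint.h>
#include <stdio.h>

#define TCR_TBI1	(1ULL << 38)	/* top-byte ignore for TTBR1 addresses */
#define TCR_TBID1	(1ULL << 52)	/* TBI applies to data accesses only   */

static uint64_t set_pointer_tag(uint64_t addr, uint8_t tag)
{
	/* place an allocation tag in bits [59:56], as MTE/KASAN do */
	return (addr & ~(0xfULL << 56)) | ((uint64_t)(tag & 0xf) << 56);
}

static uint64_t untagged_addr(uint64_t addr)
{
	/* what translation effectively sees once TBI1 is enabled */
	return addr | (0xffULL << 56);
}

int main(void)
{
	uint64_t tcr_mte_flags = TCR_TBI1 | TCR_TBID1;
	uint64_t ptr = 0xffff000012345678ULL;
	uint64_t tagged = set_pointer_tag(ptr, 0x5);

	printf("TCR_MTE_FLAGS = 0x%016llx\n", (unsigned long long)tcr_mte_flags);
	printf("tagged ptr    = 0x%016llx\n", (unsigned long long)tagged);
	printf("translated as = 0x%016llx\n", (unsigned long long)untagged_addr(tagged));
	return 0;
}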

Signed-off-by: Peter Collingbourne <pcc@google.com>
Signed-off-by: Evgenii Stepanov <eugenis@google.com>
Signed-off-by: Qun-Wei Lin <qun-wei.lin@mediatek.corp-partner.google.com>
Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: kernel test robot <lkp@intel.com>
Link: https://lore.kernel.org/linux-arm-kernel/20220907003630.1115439-1-eugenis@google.com/
Bug: 239834217
Change-Id: I7bd247ae5802295c198bac42df86fab9e9e9cefd
(cherry picked from commit 6a2905d35289caeeecbafa93ae4ff7cefafd02ed)
commit a13e8447e8 (parent eddac45546)
Author:    Evgenii Stepanov <eugenis@google.com>  2022-09-07 15:34:08 -07:00
Committer: Suren Baghdasaryan
5 files changed, 66 insertions(+), 49 deletions(-)

diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h

@@ -40,7 +40,9 @@ void mte_sync_tags(pte_t *ptep, pte_t pte);
 void mte_copy_page_tags(void *kto, const void *kfrom);
 void mte_thread_init_user(void);
 void mte_thread_switch(struct task_struct *next);
+void mte_cpu_setup(void);
 void mte_suspend_enter(void);
+void mte_suspend_exit(void);
 long set_mte_ctrl(struct task_struct *task, unsigned long arg);
 long get_mte_ctrl(struct task_struct *task);
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
@@ -69,6 +71,9 @@ static inline void mte_thread_switch(struct task_struct *next)
 static inline void mte_suspend_enter(void)
 {
 }
+static inline void mte_suspend_exit(void)
+{
+}
 static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
 	return 0;
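
The empty stub added in this hunk follows the usual kernel pattern:
callers such as __cpu_suspend_exit() stay free of #ifdefs and the call
compiles away when CONFIG_ARM64_MTE is not set. A stand-alone
illustration of that pattern (not kernel code) might look like:

#include <stdio.h>

#ifdef CONFIG_ARM64_MTE
static void mte_suspend_exit(void)
{
	/* stand-in for the real re-initialization path */
	printf("mte_cpu_setup(): reprogram MAIR/GCR/RGSR/TFSR\n");
}
#else
static inline void mte_suspend_exit(void) { }	/* compiles away */
#endif

static void cpu_suspend_exit(void)
{
	mte_suspend_exit();	/* no #ifdef needed at the call site */
	printf("resumed\n");
}

int main(void)
{
	cpu_suspend_exit();
	return 0;
}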

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

@@ -1870,7 +1870,8 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
 static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
 {
 	sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
 	isb();
+	mte_cpu_setup();
 	/*
 	 * Clear the tags in the zero page. This needs to be done via the
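
For context, cpu_enable_mte() is invoked by the cpufeature framework
only after the ID registers have been sanitised (minimum value across
CPUs, with command-line overrides such as arm64.nomte already folded
in). Below is a rough user-space sketch of that gating, using the field
layout noted in the removed assembly (MTE in ID_AA64PFR1_EL1[11:8],
value greater than 1 meaning FEAT_MTE2); the function names are
illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define ID_AA64PFR1_MTE_SHIFT	8
#define ID_AA64PFR1_MTE		2	/* FEAT_MTE2 */

static unsigned int feature_field(uint64_t pfr1, unsigned int shift)
{
	return (pfr1 >> shift) & 0xf;	/* 4-bit ID register field */
}

static void maybe_enable_mte(uint64_t sanitised_pfr1)
{
	if (feature_field(sanitised_pfr1, ID_AA64PFR1_MTE_SHIFT) >= ID_AA64PFR1_MTE)
		printf("cpu_enable_mte(): would call mte_cpu_setup()\n");
	else
		printf("MTE not advertised after sanitisation: MAIR left alone\n");
}

int main(void)
{
	uint64_t raw_pfr1 = 2ULL << ID_AA64PFR1_MTE_SHIFT;	/* CPU claims MTE2 */
	uint64_t overridden = 0;	/* arm64.nomte clears the field */

	maybe_enable_mte(raw_pfr1);
	maybe_enable_mte(overridden);
	return 0;
}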

diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c

@@ -242,6 +242,49 @@ void mte_thread_switch(struct task_struct *next)
 	mte_check_tfsr_el1();
 }
 
+void mte_cpu_setup(void)
+{
+	u64 rgsr;
+
+	/*
+	 * CnP must be enabled only after the MAIR_EL1 register has been set
+	 * up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
+	 * lead to the wrong memory type being used for a brief window during
+	 * CPU power-up.
+	 *
+	 * CnP is not a boot feature so MTE gets enabled before CnP, but let's
+	 * make sure that is the case.
+	 */
+	BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
+	BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);
+
+	/* Normal Tagged memory type at the corresponding MAIR index */
+	sysreg_clear_set(mair_el1,
+			 MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
+			 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
+				      MT_NORMAL_TAGGED));
+
+	write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);
+
+	/*
+	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
+	 * RGSR_EL1.SEED must be non-zero for IRG to produce
+	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
+	 * must initialize it.
+	 */
+	rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
+	       SYS_RGSR_EL1_SEED_SHIFT;
+	if (rgsr == 0)
+		rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
+	write_sysreg_s(rgsr, SYS_RGSR_EL1);
+
+	/* clear any pending tag check faults in TFSR*_EL1 */
+	write_sysreg_s(0, SYS_TFSR_EL1);
+	write_sysreg_s(0, SYS_TFSRE0_EL1);
+
+	local_flush_tlb_all();
+}
+
 void mte_suspend_enter(void)
 {
 	if (!system_supports_mte())
@@ -258,6 +301,14 @@ void mte_suspend_enter(void)
 	mte_check_tfsr_el1();
 }
 
+void mte_suspend_exit(void)
+{
+	if (!system_supports_mte())
+		return;
+
+	mte_cpu_setup();
+}
+
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
 	u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
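
A worked example of the RGSR_EL1.SEED initialisation above: take the
low bits of the counter, keep the 16-bit SEED field, and substitute 1
if the result would otherwise be zero (a zero seed would make IRG
degenerate when RRND=1 behaves like RRND=0). The mask and shift mirror
what SYS_RGSR_EL1_SEED_MASK/_SHIFT are expected to be (a 16-bit field
at bit 8); treat them as assumptions and check sysreg.h if the exact
values matter.

#include <stdint.h>
#include <stdio.h>

#define SYS_RGSR_EL1_SEED_MASK	0xffffULL
#define SYS_RGSR_EL1_SEED_SHIFT	8

static uint64_t rgsr_from_counter(uint64_t cntvct)
{
	uint64_t rgsr = (cntvct & SYS_RGSR_EL1_SEED_MASK) << SYS_RGSR_EL1_SEED_SHIFT;

	if (rgsr == 0)
		rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;	/* never leave SEED == 0 */
	return rgsr;
}

int main(void)
{
	printf("RGSR_EL1 for CNTVCT=0x1234abcd: 0x%llx\n",
	       (unsigned long long)rgsr_from_counter(0x1234abcdULL));
	printf("RGSR_EL1 for CNTVCT=0x10000:    0x%llx\n",
	       (unsigned long long)rgsr_from_counter(0x10000ULL));
	return 0;
}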

diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c

@@ -42,6 +42,8 @@ void notrace __cpu_suspend_exit(void)
 {
 	unsigned int cpu = smp_processor_id();
 
+	mte_suspend_exit();
+
 	/*
 	 * We are resuming from reset with the idmap active in TTBR0_EL1.
 	 * We must uninstall the idmap and restore the expected MMU

diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S

@@ -47,17 +47,19 @@
 #ifdef CONFIG_KASAN_HW_TAGS
 #define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
-#else
+#elif defined(CONFIG_ARM64_MTE)
 /*
  * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
  * TBI being enabled at EL1.
  */
 #define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
+#else
+#define TCR_MTE_FLAGS	0
 #endif
 
 /*
  * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
- * changed during __cpu_setup to Normal Tagged if the system supports MTE.
+ * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
  */
 #define MAIR_EL1_SET \
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
@@ -427,47 +429,6 @@ SYM_FUNC_START(__cpu_setup)
	 * Memory region attributes
	 */
	mov_q	x5, MAIR_EL1_SET
-#ifdef CONFIG_ARM64_MTE
-	mte_tcr	.req	x20
-	mov	mte_tcr, #0
-	/*
-	 * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
-	 * (ID_AA64PFR1_EL1[11:8] > 1).
-	 */
-	mrs	x10, ID_AA64PFR1_EL1
-	ubfx	x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
-	cmp	x10, #ID_AA64PFR1_MTE
-	b.lt	1f
-	/* Normal Tagged memory type at the corresponding MAIR index */
-	mov	x10, #MAIR_ATTR_NORMAL_TAGGED
-	bfi	x5, x10, #(8 * MT_NORMAL_TAGGED), #8
-	mov	x10, #KERNEL_GCR_EL1
-	msr_s	SYS_GCR_EL1, x10
-	/*
-	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
-	 * RGSR_EL1.SEED must be non-zero for IRG to produce
-	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
-	 * must initialize it.
-	 */
-	mrs	x10, CNTVCT_EL0
-	ands	x10, x10, #SYS_RGSR_EL1_SEED_MASK
-	csinc	x10, x10, xzr, ne
-	lsl	x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
-	msr_s	SYS_RGSR_EL1, x10
-	/* clear any pending tag check faults in TFSR*_EL1 */
-	msr_s	SYS_TFSR_EL1, xzr
-	msr_s	SYS_TFSRE0_EL1, xzr
-	/* set the TCR_EL1 bits */
-	mov_q	mte_tcr, TCR_MTE_FLAGS
-1:
-#endif
	msr	mair_el1, x5
	/*
	 * Set/prepare TCR and TTBR. TCR_EL1.T1SZ gets further
@@ -475,11 +436,8 @@ SYM_FUNC_START(__cpu_setup)
	 */
	mov_q	x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
-			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
-#ifdef CONFIG_ARM64_MTE
-	orr	x10, x10, mte_tcr
-	.unreq	mte_tcr
-#endif
+			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
	tcr_clear_errata_bits x10, x9, x5
 
 #ifdef CONFIG_ARM64_VA_BITS_52
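
Finally, a build-time sketch of what the new TCR_MTE_FLAGS selection
amounts to: the MTE-related TCR bits are now chosen by Kconfig at
compile time and OR'd into TCR_EL1 unconditionally, rather than being
patched in at boot from the raw ID_AA64PFR1_EL1 value. Compile with
-DCONFIG_KASAN_HW_TAGS or -DCONFIG_ARM64_MTE to see the three cases;
the bit positions are the same illustrative values used above, to be
checked against the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define TCR_TBI1		(1ULL << 38)
#define TCR_TBID1		(1ULL << 52)
#define SYS_TCR_EL1_TCMA1	(1ULL << 58)

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	(SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1)
#elif defined(CONFIG_ARM64_MTE)
#define TCR_MTE_FLAGS	(TCR_TBI1 | TCR_TBID1)
#else
#define TCR_MTE_FLAGS	0
#endif

int main(void)
{
	/* stand-in for the other TCR fields set up by __cpu_setup */
	uint64_t tcr_base = 0;

	printf("TCR_EL1 |= 0x%016llx\n",
	       (unsigned long long)(tcr_base | TCR_MTE_FLAGS));
	return 0;
}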