riscv/mm: Convert to using lock_mm_and_find_vma()

commit 7267ef7b0b77f4ed23b7b3c87d8eca7bd9c2d007 upstream.

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[6.1: Kconfig context]
Signed-off-by: Samuel Mendoza-Jonas <samjonas@amazon.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Ben Hutchings 2023-06-22 20:18:18 +02:00 committed by Greg Kroah-Hartman
parent 7227d70acc
commit ac764deea7
2 changed files with 14 additions and 18 deletions

--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -114,6 +114,7 @@ config RISCV
 	select HAVE_RSEQ
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA if MODULES
 	select MODULE_SECTIONS if MODULES
 	select OF
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -83,13 +83,13 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
 	BUG();
 }
 
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void
+bad_area_nosemaphore(struct pt_regs *regs, int code, unsigned long addr)
 {
 	/*
 	 * Something tried to access memory that isn't in our memory map.
 	 * Fix it, but check if it's kernel or user first.
 	 */
-	mmap_read_unlock(mm);
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
 		do_trap(regs, SIGSEGV, code, addr);
@@ -99,6 +99,15 @@ static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code
 	no_context(regs, addr);
 }
 
+static inline void
+bad_area(struct pt_regs *regs, struct mm_struct *mm, int code,
+	 unsigned long addr)
+{
+	mmap_read_unlock(mm);
+
+	bad_area_nosemaphore(regs, code, addr);
+}
+
 static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
 {
 	pgd_t *pgd, *pgd_k;
@@ -281,23 +290,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	else if (cause == EXC_INST_PAGE_FAULT)
 		flags |= FAULT_FLAG_INSTRUCTION;
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, addr);
+	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
 		tsk->thread.bad_cause = cause;
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (likely(vma->vm_start <= addr))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		tsk->thread.bad_cause = cause;
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (unlikely(expand_stack(vma, addr))) {
-		tsk->thread.bad_cause = cause;
-		bad_area(regs, mm, code, addr);
+		bad_area_nosemaphore(regs, code, addr);
 		return;
 	}
 
@@ -305,7 +301,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it.
 	 */
-good_area:
 	code = SEGV_ACCERR;
 
 	if (unlikely(access_error(cause, vma))) {