From a51271d99cdd04910227060936d0598ba49fb1cc Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 25 Aug 2020 18:48:01 +0300
Subject: [PATCH] riscv/mm/fault: Move bad area handling to bad_area()

This patch moves the bad area handling in do_page_fault() to a new
bad_area() function and converts the gotos into calls to that function.

Signed-off-by: Pekka Enberg
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/mm/fault.c | 67 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 40 insertions(+), 27 deletions(-)

diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 1612552478c5..ac9a99255365 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -37,6 +37,22 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
         do_exit(SIGKILL);
 }
 
+static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+{
+        /*
+         * Something tried to access memory that isn't in our memory map.
+         * Fix it, but check if it's kernel or user first.
+         */
+        mmap_read_unlock(mm);
+        /* User mode accesses just cause a SIGSEGV */
+        if (user_mode(regs)) {
+                do_trap(regs, SIGSEGV, code, addr);
+                return;
+        }
+
+        no_context(regs, addr);
+}
+
 /*
  * This routine handles page faults. It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
@@ -90,14 +106,20 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 retry:
         mmap_read_lock(mm);
         vma = find_vma(mm, addr);
-        if (unlikely(!vma))
-                goto bad_area;
+        if (unlikely(!vma)) {
+                bad_area(regs, mm, code, addr);
+                return;
+        }
         if (likely(vma->vm_start <= addr))
                 goto good_area;
-        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
-                goto bad_area;
-        if (unlikely(expand_stack(vma, addr)))
-                goto bad_area;
+        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
+                bad_area(regs, mm, code, addr);
+                return;
+        }
+        if (unlikely(expand_stack(vma, addr))) {
+                bad_area(regs, mm, code, addr);
+                return;
+        }
 
         /*
          * Ok, we have a good vm_area for this memory access, so
@@ -108,16 +130,22 @@ good_area:
 
         switch (cause) {
         case EXC_INST_PAGE_FAULT:
-                if (!(vma->vm_flags & VM_EXEC))
-                        goto bad_area;
+                if (!(vma->vm_flags & VM_EXEC)) {
+                        bad_area(regs, mm, code, addr);
+                        return;
+                }
                 break;
         case EXC_LOAD_PAGE_FAULT:
-                if (!(vma->vm_flags & VM_READ))
-                        goto bad_area;
+                if (!(vma->vm_flags & VM_READ)) {
+                        bad_area(regs, mm, code, addr);
+                        return;
+                }
                 break;
         case EXC_STORE_PAGE_FAULT:
-                if (!(vma->vm_flags & VM_WRITE))
-                        goto bad_area;
+                if (!(vma->vm_flags & VM_WRITE)) {
+                        bad_area(regs, mm, code, addr);
+                        return;
+                }
                 flags |= FAULT_FLAG_WRITE;
                 break;
         default:
@@ -162,21 +190,6 @@ good_area:
         return;
 
         /*
-         * Something tried to access memory that isn't in our memory map.
-         * Fix it, but check if it's kernel or user first.
-         */
-bad_area:
-        mmap_read_unlock(mm);
-        /* User mode accesses just cause a SIGSEGV */
-        if (user_mode(regs)) {
-                do_trap(regs, SIGSEGV, code, addr);
-                return;
-        }
-
-        no_context(regs, addr);
-        return;
-
-        /*
          * We ran out of memory, call the OOM killer, and return the userspace
          * (which will retry the fault, or kill us if we got oom-killed).
          */
-- 
2.11.0
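
For readers who want to see the goto-to-helper conversion in isolation, the
sketch below reproduces the same pattern in a small standalone C program. It
is illustrative only and not kernel code: fault_ctx, report_bad_area() and
handle_fault() are made-up names standing in for the pt_regs/mm_struct
arguments, bad_area() and do_page_fault() used in the diff above.

/*
 * Standalone sketch of the conversion done above: a shared error label
 * reached via "goto bad_area" becomes a small helper function, and each
 * former goto site calls the helper and returns early instead.
 */
#include <stdbool.h>
#include <stdio.h>

struct fault_ctx {
        unsigned long addr;
        bool user_mode;
};

/* Plays the role of bad_area(): unwind state, then report the fault. */
static void report_bad_area(const struct fault_ctx *ctx, int code)
{
        /* In the kernel this is mmap_read_unlock() plus SIGSEGV or no_context(). */
        printf("bad area at 0x%lx (code %d, %s mode)\n",
               ctx->addr, code, ctx->user_mode ? "user" : "kernel");
}

/* Plays the role of do_page_fault(): every failure path returns early. */
static void handle_fault(const struct fault_ctx *ctx, bool vma_found,
                         bool perms_ok)
{
        if (!vma_found) {
                report_bad_area(ctx, 1);
                return;
        }
        if (!perms_ok) {
                report_bad_area(ctx, 2);
                return;
        }
        printf("fault at 0x%lx handled\n", ctx->addr);
}

int main(void)
{
        struct fault_ctx ctx = { .addr = 0x1000, .user_mode = true };

        handle_fault(&ctx, false, true);   /* no VMA        -> bad area */
        handle_fault(&ctx, true, false);   /* no permission -> bad area */
        handle_fault(&ctx, true, true);    /* good area     -> handled  */
        return 0;
}

The upside of the helper over the label, as the diff shows, is that each
error path unlocks and reports in one call and returns immediately, so the
tail of the fault handler no longer needs the shared bad_area block.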