Diffstat (limited to 'arch/arm64/mm/fault.c'):
 arch/arm64/mm/fault.c | 57 +++++++++++----------------------------------------------------
 1 file changed, 11 insertions(+), 46 deletions(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index f4ab44e058b2..e5e07212126d 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -502,27 +502,14 @@ static void do_bad_area(unsigned long far, unsigned long esr,
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
-static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static vm_fault_t __do_page_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long addr,
unsigned int mm_flags, unsigned long vm_flags,
struct pt_regs *regs)
{
- struct vm_area_struct *vma = find_vma(mm, addr);
-
- if (unlikely(!vma))
- return VM_FAULT_BADMAP;
-
/*
* Ok, we have a good vm_area for this memory access, so we can handle
* it.
- */
- if (unlikely(vma->vm_start > addr)) {
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return VM_FAULT_BADMAP;
- if (expand_stack(vma, addr))
- return VM_FAULT_BADMAP;
- }
-
- /*
* Check that the permissions on the VMA allow for the fault which
* occurred.
*/
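
Note: this hunk moves VMA lookup and stack expansion out of __do_page_fault(); the caller must now hand in a VMA already known to cover addr. A minimal sketch of the resulting function, with the permission check and handle_mm_fault() call paraphrased from the unchanged remainder of the file (they are context here, not part of the hunk):

	static vm_fault_t __do_page_fault(struct mm_struct *mm,
					  struct vm_area_struct *vma, unsigned long addr,
					  unsigned int mm_flags, unsigned long vm_flags,
					  struct pt_regs *regs)
	{
		/*
		 * The caller already found a VMA covering addr (and expanded
		 * the stack if necessary); only the access check is left.
		 */
		if (!(vma->vm_flags & vm_flags))
			return VM_FAULT_BADACCESS;
		return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
	}
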
@@ -554,9 +541,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
unsigned long vm_flags;
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
unsigned long addr = untagged_addr(far);
-#ifdef CONFIG_PER_VMA_LOCK
struct vm_area_struct *vma;
-#endif
if (kprobe_page_fault(regs, esr))
return 0;
@@ -614,7 +599,6 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-#ifdef CONFIG_PER_VMA_LOCK
if (!(mm_flags & FAULT_FLAG_USER))
goto lock_mmap;
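
Note: with the #ifdef gone, the per-VMA-lock fast path is compiled unconditionally; kernel-mode faults (no FAULT_FLAG_USER) still bypass it at runtime, since only user faults are handled under the per-VMA lock. The lookup itself sits in unchanged context just after this hunk and, paraphrased, looks like the sketch below (when CONFIG_PER_VMA_LOCK is disabled, lock_vma_under_rcu() is presumably a stub returning NULL, so everything falls through to the mmap_lock path):

	vma = lock_vma_under_rcu(mm, addr);	/* RCU-walk lookup; takes the per-VMA read lock */
	if (!vma)
		goto lock_mmap;			/* no locked VMA: fall back to mmap_lock */
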
@@ -626,9 +610,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
vma_end_read(vma);
goto lock_mmap;
}
- fault = handle_mm_fault(vma, addr & PAGE_MASK,
- mm_flags | FAULT_FLAG_VMA_LOCK, regs);
- vma_end_read(vma);
+ fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
+ if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+ vma_end_read(vma);
if (!(fault & VM_FAULT_RETRY)) {
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
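
Note: the conditional vma_end_read() is the substantive fix in this hunk. With FAULT_FLAG_VMA_LOCK, handle_mm_fault() can release the per-VMA lock itself before returning VM_FAULT_RETRY or VM_FAULT_COMPLETED, so unlocking unconditionally would be a double unlock. Sketched with the reasoning spelled out:

	fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
	/*
	 * RETRY and COMPLETED both mean handle_mm_fault() already dropped
	 * the VMA lock on our behalf; unlock only if neither bit is set.
	 */
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

The call also drops the addr & PAGE_MASK masking, handing handle_mm_fault() the exact faulting address.
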
@@ -643,32 +627,15 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
return 0;
}
lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
- /*
- * As per x86, we may deadlock here. However, since the kernel only
- * validly references user space from well defined areas of the code,
- * we can bug out early if this is from code which shouldn't.
- */
- if (!mmap_read_trylock(mm)) {
- if (!user_mode(regs) && !search_exception_tables(regs->pc))
- goto no_context;
+
retry:
- mmap_read_lock(mm);
- } else {
- /*
- * The above mmap_read_trylock() might have succeeded in which
- * case, we'll have missed the might_sleep() from down_read().
- */
- might_sleep();
-#ifdef CONFIG_DEBUG_VM
- if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
- mmap_read_unlock(mm);
- goto no_context;
- }
-#endif
+ vma = lock_mm_and_find_vma(mm, addr, regs);
+ if (unlikely(!vma)) {
+ fault = VM_FAULT_BADMAP;
+ goto done;
}
- fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
+ fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
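
Note: lock_mm_and_find_vma() subsumes everything deleted above: the mmap_read_trylock()/exception-table dance, find_vma(), and the VM_GROWSDOWN stack expansion removed from __do_page_fault() in the first hunk. Its contract, paraphrased from the core-mm side of this series (mm/memory.c, not part of this diff):

	/*
	 * Success: returns the VMA covering addr, with mmap_lock held for
	 * read (stack already expanded if that was needed and permitted).
	 * Failure: returns NULL with mmap_lock NOT held.
	 */
	struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
						    unsigned long addr,
						    struct pt_regs *regs);

That contract is why the !vma branch can jump straight to done, past mmap_read_unlock(), with VM_FAULT_BADMAP standing in for both the not-found and failed-expansion cases.
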
@@ -687,9 +654,7 @@ retry:
}
mmap_read_unlock(mm);
-#ifdef CONFIG_PER_VMA_LOCK
done:
-#endif
/*
* Handle the "normal" (no error) case first.
*/
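
Note: after done:, the VMA-lock fast path and the mmap_lock slow path converge. The arm64-private VM_FAULT_BADMAP / VM_FAULT_BADACCESS bits defined at the top of this diff are filtered in the unchanged lines that follow, roughly (paraphrased from the file, not part of the hunk):

	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
		return 0;	/* the common, no-error case */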