rtime.felk.cvut.cz Git - hercules2020/nv-tegra/linux-4.4.git/commitdiff
mm: memory: Handle VM_NONE protected page faults
author Sri Krishna chowdary <schowdary@nvidia.com>
Wed, 6 Apr 2016 05:14:16 +0000 (10:44 +0530)
committer Sri Krishna Chowdary <schowdary@nvidia.com>
Wed, 25 May 2016 05:29:02 +0000 (22:29 -0700)
Allow driver to decide if page protections can be fixed.
This allows us to replace zapping of VMAs with mprotect calls.
The zap-based solution is considerably slower than the mprotect-based
one, hence we deviate from upstream here.

JIRA TMM-59

Change-Id: I3d362deda2866d5db9f1cfb037a2a35e646ced8a
Signed-off-by: Sri Krishna chowdary <schowdary@nvidia.com>
Reviewed-on: http://git-master/r/1150586
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
include/linux/mm.h
mm/memory.c

index 5fe9099e6a9572ad98326c047c02d0920a0d23ab..2ee899717e98449b46368805d7e07b26f99e3805 100644 (file)
@@ -318,6 +318,9 @@ struct vm_operations_struct {
         */
        struct page *(*find_special_page)(struct vm_area_struct *vma,
                                          unsigned long addr);
+       /* called when driver allows fixing ptes with none protections */
+       bool (*fixup_prot)(struct vm_area_struct *vma, unsigned long addr,
+                         pgoff_t pgoff);
 };
 
 struct mmu_gather;
index 2a35d3599069b2adbb7aef0aceadd407adfd8731..e252a70a9f076587ed6463da7b0b3e1dd4f2af9e 100644 (file)
@@ -3287,6 +3287,7 @@ static int handle_pte_fault(struct mm_struct *mm,
 {
        pte_t entry;
        spinlock_t *ptl;
+       bool fix_prot = false;
 
        /*
         * some architectures can have larger ptes than wordsize,
@@ -3314,10 +3315,24 @@ static int handle_pte_fault(struct mm_struct *mm,
        if (pte_protnone(entry))
                return do_numa_page(mm, vma, address, entry, pte, pmd);
 
+       if (vma->vm_ops && vma->vm_ops->fixup_prot && vma->vm_ops->fault &&
+               (entry == pte_modify(entry, vm_get_page_prot(VM_NONE)))) {
+               pgoff_t pgoff = (((address & PAGE_MASK)
+                               - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+               if (!vma->vm_ops->fixup_prot(vma, address & PAGE_MASK, pgoff))
+                       return VM_FAULT_SIGSEGV; /* access not granted */
+               fix_prot = true;
+       }
+
        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        if (unlikely(!pte_same(*pte, entry)))
                goto unlock;
+       if (fix_prot) {
+               entry = pte_modify(entry, vma->vm_page_prot);
+               vm_stat_account(mm, VM_NONE, vma->vm_file, -1);
+               vm_stat_account(mm, vma->vm_flags, vma->vm_file, 1);
+       }
        if (flags & FAULT_FLAG_WRITE) {
                if (!pte_write(entry))
                        return do_wp_page(mm, vma, address,