Fix bug
diff --git a/mm/rmap.c b/mm/rmap.c
index 2ee1ef0f317b7487bfb21b7a6717b1e12d1f7ef4..face808a489e7765ecbaa57041e4690b47c9d177 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -562,6 +562,27 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
        return address;
 }
 
+pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd = NULL;
+
+       pgd = pgd_offset(mm, address);
+       if (!pgd_present(*pgd))
+               goto out;
+
+       pud = pud_offset(pgd, address);
+       if (!pud_present(*pud))
+               goto out;
+
+       pmd = pmd_offset(pud, address);
+       if (!pmd_present(*pmd))
+               pmd = NULL;
+out:
+       return pmd;
+}
+
 /*
  * Check that @page is mapped at @address into @mm.
  *
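
The hunk above introduces mm_find_pmd(), which the following hunks use to
replace three open-coded pgd/pud/pmd walks. A minimal sketch of the calling
pattern it enables is shown below; it is illustrative only and not part of
this patch, example_lookup_pte is a made-up name, and it assumes the caller
already holds whatever locks keep the page tables stable (mmap_sem or the
relevant rmap lock), as the walkers in this file do.

        /* Sketch of a typical mm_find_pmd() caller (illustrative only). */
        static pte_t *example_lookup_pte(struct mm_struct *mm,
                                         unsigned long address,
                                         spinlock_t **ptlp)
        {
                pmd_t *pmd;
                pte_t *pte;

                pmd = mm_find_pmd(mm, address);
                if (!pmd)                       /* no pmd mapped at this address */
                        return NULL;
                if (pmd_trans_huge(*pmd))       /* pte-level walk does not apply */
                        return NULL;

                pte = pte_offset_map_lock(mm, pmd, address, ptlp);
                /* caller inspects *pte, then calls pte_unmap_unlock(pte, *ptlp) */
                return pte;
        }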
@@ -574,8 +595,6 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
                          unsigned long address, spinlock_t **ptlp, int sync)
 {
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;
@@ -586,17 +605,10 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
                goto check;
        }
 
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                return NULL;
 
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
-               return NULL;
-
-       pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd))
-               return NULL;
        if (pmd_trans_huge(*pmd))
                return NULL;
 
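
With the helper in place, __page_check_address() keeps only the pte-level
work: give up on a transparent huge pmd (those are handled by the pmd-level
rmap code) and otherwise map the pte under its lock. Callers normally reach
it through the page_check_address() wrapper in include/linux/rmap.h; a rough
sketch of the usual pattern in the walkers of this file (illustrative, not
part of this patch):

        pte_t *pte;
        spinlock_t *ptl;

        pte = page_check_address(page, mm, address, &ptl, 0);
        if (!pte)
                return SWAP_AGAIN;      /* page not (or no longer) mapped here */

        /* ... inspect or modify the pte ... */

        pte_unmap_unlock(pte, ptl);
        return SWAP_AGAIN;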
@@ -1139,9 +1151,11 @@ void page_remove_rmap(struct page *page)
         * containing the swap entry, but page not yet written to swap.
         *
         * And we can skip it on file pages, so long as the filesystem
-        * participates in dirty tracking; but need to catch shm and tmpfs
-        * and ramfs pages which have been modified since creation by read
-        * fault.
+        * participates in dirty tracking (note that this is not only an
+        * optimization but also solves problems caused by dirty flag in
+        * storage key getting set by a write from inside kernel); but need to
+        * catch shm and tmpfs and ramfs pages which have been modified since
+        * creation by read fault.
         *
         * Note that mapping must be decided above, before decrementing
         * mapcount (which luckily provides a barrier): once page is unmapped,
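
The reworded comment above documents the s390 storage-key dirty transfer that
page_remove_rmap() performs once the last mapping of the page is gone. In
kernels of this vintage the check it describes looks roughly like the sketch
below (not part of this hunk, and the exact condition in this tree may
differ); anon and mapping are the locals the function computes earlier from
PageAnon(page) and page_mapping(page):

        /*
         * Sketch: move the s390 storage-key dirty bit into the struct page,
         * skipping file pages whose filesystem does its own dirty tracking;
         * shm, tmpfs and ramfs pages do not, so they are still caught here.
         */
        if ((!anon || PageSwapCache(page)) &&
            (!mapping || !mapping_cap_account_dirty(mapping)) &&
            page_test_and_clear_dirty(page_to_pfn(page), 1))
                set_page_dirty(page);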
@@ -1235,12 +1249,14 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        update_hiwater_rss(mm);
 
        if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
-               if (PageAnon(page))
-                       dec_mm_counter(mm, MM_ANONPAGES);
-               else
-                       dec_mm_counter(mm, MM_FILEPAGES);
+               if (!PageHuge(page)) {
+                       if (PageAnon(page))
+                               dec_mm_counter(mm, MM_ANONPAGES);
+                       else
+                               dec_mm_counter(mm, MM_FILEPAGES);
+               }
                set_pte_at(mm, address, pte,
-                               swp_entry_to_pte(make_hwpoison_entry(page)));
+                          swp_entry_to_pte(make_hwpoison_entry(page)));
        } else if (PageAnon(page)) {
                swp_entry_t entry = { .val = page_private(page) };
 
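
In the hwpoison branch above, the pte is replaced by a poison swap entry so
that any later access to the address faults instead of touching the bad page;
the RSS counters are now decremented only for non-huge pages, since hugetlb
pages are not accounted in MM_ANONPAGES/MM_FILEPAGES and decrementing them
here would unbalance the counters. For illustration, a later fault recognizes
the marker roughly as follows (a sketch in the style of do_swap_page(), using
the <linux/swapops.h> helpers; orig_pte stands for the faulting pte value):

        swp_entry_t entry = pte_to_swp_entry(orig_pte);

        if (non_swap_entry(entry) && is_hwpoison_entry(entry))
                return VM_FAULT_HWPOISON;       /* the access gets a SIGBUS */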
@@ -1345,8 +1361,6 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                struct vm_area_struct *vma, struct page *check_page)
 {
        struct mm_struct *mm = vma->vm_mm;
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t pteval;
@@ -1366,16 +1380,8 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
        if (end > vma->vm_end)
                end = vma->vm_end;
 
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
-               return ret;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
-               return ret;
-
-       pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                return ret;
 
        mmun_start = address;