thp: do_huge_pmd_wp_page(): handle huge zero page
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40f17c34b4153fab93b4f1a2685dee0b8cac4da8..a959b3a4ddd5471dc2e73609917bfa7acd0fc705 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -47,6 +47,7 @@ static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
 /* during fragmentation poll the hugepage allocator once every minute */
 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
 static struct task_struct *khugepaged_thread __read_mostly;
+static unsigned long huge_zero_pfn __read_mostly;
 static DEFINE_MUTEX(khugepaged_mutex);
 static DEFINE_SPINLOCK(khugepaged_mm_lock);
 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
@@ -159,6 +160,29 @@ static int start_khugepaged(void)
        return err;
 }
 
+static int __init init_huge_zero_page(void)
+{
+       struct page *hpage;
+
+       hpage = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
+                       HPAGE_PMD_ORDER);
+       if (!hpage)
+               return -ENOMEM;
+
+       huge_zero_pfn = page_to_pfn(hpage);
+       return 0;
+}
+
+static inline bool is_huge_zero_pfn(unsigned long pfn)
+{
+       return pfn == huge_zero_pfn;
+}
+
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+       return is_huge_zero_pfn(pmd_pfn(pmd));
+}
+
 #ifdef CONFIG_SYSFS
 
 static ssize_t double_flag_show(struct kobject *kobj,
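The hunk above reserves a single huge zero page, allocated once at init and identified from then on purely by its pfn; in this version of the code there is no reference counting against it. Every path below that copies or tears down a huge pmd must therefore branch on the identity test before touching rmap or page refcounts. A minimal sketch of that pattern (mine, not part of the patch):

        if (is_huge_zero_pmd(orig_pmd)) {
                /* no rmap entry and no refcount to drop: just clear the pmd */
        } else {
                struct page *page = pmd_page(orig_pmd);
                /* ordinary THP teardown: page_remove_rmap(page), put_page(page) */
        }

The zap_huge_pmd() hunk further down has exactly this shape.
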
@@ -540,6 +564,10 @@ static int __init hugepage_init(void)
        if (err)
                return err;
 
+       err = init_huge_zero_page();
+       if (err)
+               goto out;
+
        err = khugepaged_slab_init();
        if (err)
                goto out;
@@ -562,6 +590,8 @@ static int __init hugepage_init(void)
 
        return 0;
 out:
+       if (huge_zero_pfn)
+               __free_page(pfn_to_page(huge_zero_pfn));
        hugepage_exit_sysfs(hugepage_kobj);
        return err;
 }
@@ -606,6 +636,15 @@ static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
        return pmd;
 }
 
+static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
+{
+       pmd_t entry;
+       entry = mk_pmd(page, vma->vm_page_prot);
+       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+       entry = pmd_mkhuge(entry);
+       return entry;
+}
+
 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long haddr, pmd_t *pmd,
@@ -629,9 +668,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                pte_free(mm, pgtable);
        } else {
                pmd_t entry;
-               entry = mk_pmd(page, vma->vm_page_prot);
-               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               entry = pmd_mkhuge(entry);
+               entry = mk_huge_pmd(page, vma);
                /*
                 * The spinlocking to take the lru_lock inside
                 * page_add_new_anon_rmap() acts as a full memory
@@ -671,6 +708,18 @@ static inline struct page *alloc_hugepage(int defrag)
 }
 #endif
 
+static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+               struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd)
+{
+       pmd_t entry;
+       entry = pfn_pmd(huge_zero_pfn, vma->vm_page_prot);
+       entry = pmd_wrprotect(entry);
+       entry = pmd_mkhuge(entry);
+       set_pmd_at(mm, haddr, pmd, entry);
+       pgtable_trans_huge_deposit(mm, pgtable);
+       mm->nr_ptes++;
+}
+
 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               unsigned int flags)
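set_huge_zero_page() installs a write-protected huge pmd over the shared zero page and only bumps nr_ptes; no anonymous rss is charged. A hypothetical userspace probe (my code; assumes x86_64 with 2 MiB pmds, THP enabled, and a pmd-aligned mapping) that walks the new read and write paths:

        #include <stdlib.h>
        #include <sys/mman.h>

        #define SZ (2UL << 20)          /* one pmd-sized region */

        int main(void)
        {
                char *p = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (p == MAP_FAILED)
                        return 1;
                volatile char c = p[0]; /* read fault: huge zero pmd, no rss charge */
                (void)c;
                p[0] = 1;               /* write fault: do_huge_pmd_wp_page() breaks COW */
                munmap(p, SZ);
                return 0;
        }
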
@@ -748,6 +797,16 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pte_free(dst_mm, pgtable);
                goto out_unlock;
        }
+       /*
+        * mm->page_table_lock is enough to be sure that the huge zero pmd
+        * is not under splitting, since we never split the zero page
+        * itself, only the pmd into a page table.
+        */
+       if (is_huge_zero_pmd(pmd)) {
+               set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd);
+               ret = 0;
+               goto out_unlock;
+       }
        if (unlikely(pmd_trans_splitting(pmd))) {
                /* split huge page running from under us */
                spin_unlock(&src_mm->page_table_lock);
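For fork(), the zero pmd is the easy case handled first: the child simply gets its own pmd entry pointing at the same global page, with no page reference taken, and, as the comment notes, page_table_lock suffices because only the pmd, never the zero page itself, can be split. Continuing the hypothetical probe above:

        /* needs <unistd.h>; child read-shares the zero pmd via copy_huge_pmd() */
        if (fork() == 0) {
                volatile char c = p[0];
                (void)c;
                _exit(0);               /* nothing to unpin on exit */
        }
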
@@ -777,6 +836,92 @@ out:
        return ret;
 }
 
+void huge_pmd_set_accessed(struct mm_struct *mm,
+                          struct vm_area_struct *vma,
+                          unsigned long address,
+                          pmd_t *pmd, pmd_t orig_pmd,
+                          int dirty)
+{
+       pmd_t entry;
+       unsigned long haddr;
+
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(*pmd, orig_pmd)))
+               goto unlock;
+
+       entry = pmd_mkyoung(orig_pmd);
+       haddr = address & HPAGE_PMD_MASK;
+       if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
+               update_mmu_cache_pmd(vma, address, pmd);
+
+unlock:
+       spin_unlock(&mm->page_table_lock);
+}
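huge_pmd_set_accessed() comes from a neighbouring commit folded into this blobdiff: it resolves a fault on a present huge pmd by setting the young (and possibly dirty) bit. A sketch of the assumed call site in the generic fault path, simplified and not quoted from the source:

        /* handle_mm_fault(), roughly (my reconstruction): */
        if (pmd_trans_huge(orig_pmd)) {
                if ((flags & FAULT_FLAG_WRITE) && !pmd_write(orig_pmd))
                        return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd);
                huge_pmd_set_accessed(mm, vma, address, pmd, orig_pmd,
                                      flags & FAULT_FLAG_WRITE);
                return 0;
        }
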
+
+static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
+               struct vm_area_struct *vma, unsigned long address,
+               pmd_t *pmd, unsigned long haddr)
+{
+       pgtable_t pgtable;
+       pmd_t _pmd;
+       struct page *page;
+       int i, ret = 0;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
+
+       page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+       if (!page) {
+               ret |= VM_FAULT_OOM;
+               goto out;
+       }
+
+       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
+               put_page(page);
+               ret |= VM_FAULT_OOM;
+               goto out;
+       }
+
+       clear_user_highpage(page, address);
+       __SetPageUptodate(page);
+
+       mmun_start = haddr;
+       mmun_end   = haddr + HPAGE_PMD_SIZE;
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
+       spin_lock(&mm->page_table_lock);
+       pmdp_clear_flush(vma, haddr, pmd);
+       /* leave pmd empty until pte is filled */
+
+       pgtable = pgtable_trans_huge_withdraw(mm);
+       pmd_populate(mm, &_pmd, pgtable);
+
+       for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+               pte_t *pte, entry;
+               if (haddr == (address & PAGE_MASK)) {
+                       entry = mk_pte(page, vma->vm_page_prot);
+                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+                       page_add_new_anon_rmap(page, vma, haddr);
+               } else {
+                       entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
+                       entry = pte_mkspecial(entry);
+               }
+               pte = pte_offset_map(&_pmd, haddr);
+               VM_BUG_ON(!pte_none(*pte));
+               set_pte_at(mm, haddr, pte, entry);
+               pte_unmap(pte);
+       }
+       smp_wmb(); /* make pte visible before pmd */
+       pmd_populate(mm, pmd, pgtable);
+       spin_unlock(&mm->page_table_lock);
+       inc_mm_counter(mm, MM_ANONPAGES);
+
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+
+       ret |= VM_FAULT_WRITE;
+out:
+       return ret;
+}
+
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
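When no huge page can be allocated for the copy, do_huge_pmd_wp_zero_page_fallback() degrades the mapping to pte granularity instead of failing: one real 4k page at the faulting address, and pte_special() entries over the ordinary 4k zero page everywhere else. The resulting layout, in my notation:

        pmd ──► new page table (HPAGE_PMD_NR ptes)
                pte[fault slot]  ──► freshly cleared 4k page, writable, rmap'ed
                pte[all others]  ──► shared 4k zero page (my_zero_pfn()),
                                     read-only, pte_mkspecial(): no rmap, no refcount

Only the one real page is charged, hence the single inc_mm_counter(mm, MM_ANONPAGES).
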
@@ -883,19 +1028,21 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
 {
        int ret = 0;
-       struct page *page, *new_page;
+       struct page *page = NULL, *new_page;
        unsigned long haddr;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
 
        VM_BUG_ON(!vma->anon_vma);
+       haddr = address & HPAGE_PMD_MASK;
+       if (is_huge_zero_pmd(orig_pmd))
+               goto alloc;
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_unlock;
 
        page = pmd_page(orig_pmd);
        VM_BUG_ON(!PageCompound(page) || !PageHead(page));
-       haddr = address & HPAGE_PMD_MASK;
        if (page_mapcount(page) == 1) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
@@ -907,7 +1054,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
        get_page(page);
        spin_unlock(&mm->page_table_lock);
-
+alloc:
        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow())
                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
@@ -917,24 +1064,34 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        if (unlikely(!new_page)) {
                count_vm_event(THP_FAULT_FALLBACK);
-               ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
-                                                  pmd, orig_pmd, page, haddr);
-               if (ret & VM_FAULT_OOM)
-                       split_huge_page(page);
-               put_page(page);
+               if (is_huge_zero_pmd(orig_pmd)) {
+                       ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
+                                       address, pmd, haddr);
+               } else {
+                       ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
+                                       pmd, orig_pmd, page, haddr);
+                       if (ret & VM_FAULT_OOM)
+                               split_huge_page(page);
+                       put_page(page);
+               }
                goto out;
        }
        count_vm_event(THP_FAULT_ALLOC);
 
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
-               split_huge_page(page);
-               put_page(page);
+               if (page) {
+                       split_huge_page(page);
+                       put_page(page);
+               }
                ret |= VM_FAULT_OOM;
                goto out;
        }
 
-       copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
+       if (is_huge_zero_pmd(orig_pmd))
+               clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
+       else
+               copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
        __SetPageUptodate(new_page);
 
        mmun_start = haddr;
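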
@@ -942,7 +1099,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
        spin_lock(&mm->page_table_lock);
-       put_page(page);
+       if (page)
+               put_page(page);
        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
                spin_unlock(&mm->page_table_lock);
                mem_cgroup_uncharge_page(new_page);
@@ -950,16 +1108,18 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_mn;
        } else {
                pmd_t entry;
-               VM_BUG_ON(!PageHead(page));
-               entry = mk_pmd(new_page, vma->vm_page_prot);
-               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               entry = pmd_mkhuge(entry);
+               entry = mk_huge_pmd(new_page, vma);
                pmdp_clear_flush(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
                update_mmu_cache_pmd(vma, address, pmd);
-               page_remove_rmap(page);
-               put_page(page);
+               if (is_huge_zero_pmd(orig_pmd))
+                       add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+               else {
+                       VM_BUG_ON(!PageHead(page));
+                       page_remove_rmap(page);
+                       put_page(page);
+               }
                ret |= VM_FAULT_WRITE;
        }
        spin_unlock(&mm->page_table_lock);
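Taken together, the do_huge_pmd_wp_page() hunks restructure the COW fault around page == NULL as the zero-pmd marker. My summary of the resulting control flow:

        do_huge_pmd_wp_page(orig_pmd):
                zero pmd?  goto alloc       (no page to pin or reuse in place)
                else       lock, revalidate, reuse in place if mapcount == 1,
                           otherwise get_page(page) and unlock
        alloc:
                allocate a huge page
                  none:    zero pmd -> do_huge_pmd_wp_zero_page_fallback()
                           else     -> do_huge_pmd_wp_page_fallback()
                charge fails: split and drop the old page if any, return OOM
                zero pmd ? clear_huge_page(new) : copy_user_huge_page(new, old)
                lock, revalidate, install the new pmd
                zero pmd ? add HPAGE_PMD_NR to MM_ANONPAGES (never charged before)
                         : page_remove_rmap(old), put_page(old)   (rss-neutral)
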
@@ -1028,15 +1188,20 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                pmd_t orig_pmd;
                pgtable = pgtable_trans_huge_withdraw(tlb->mm);
                orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
-               page = pmd_page(orig_pmd);
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
-               page_remove_rmap(page);
-               VM_BUG_ON(page_mapcount(page) < 0);
-               add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-               VM_BUG_ON(!PageHead(page));
-               tlb->mm->nr_ptes--;
-               spin_unlock(&tlb->mm->page_table_lock);
-               tlb_remove_page(tlb, page);
+               if (is_huge_zero_pmd(orig_pmd)) {
+                       tlb->mm->nr_ptes--;
+                       spin_unlock(&tlb->mm->page_table_lock);
+               } else {
+                       page = pmd_page(orig_pmd);
+                       page_remove_rmap(page);
+                       VM_BUG_ON(page_mapcount(page) < 0);
+                       add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+                       VM_BUG_ON(!PageHead(page));
+                       tlb->mm->nr_ptes--;
+                       spin_unlock(&tlb->mm->page_table_lock);
+                       tlb_remove_page(tlb, page);
+               }
                pte_free(tlb->mm, pgtable);
                ret = 1;
        }
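zap_huge_pmd() mirrors the same split: for a zero pmd there is no page to unmap or free, only nr_ptes and the deposited page table to release. Continuing the hypothetical probe, this path can be driven from userspace with:

        /* needs <sys/mman.h> */
        madvise(p, SZ, MADV_DONTNEED);  /* zap: the zero pmd is simply cleared;
                                           the next read fault maps it again */
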
@@ -1146,22 +1311,14 @@ pmd_t *page_check_address_pmd(struct page *page,
                              unsigned long address,
                              enum page_check_address_pmd_flag flag)
 {
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd, *ret = NULL;
 
        if (address & ~HPAGE_PMD_MASK)
                goto out;
 
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
-               goto out;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                goto out;
-
-       pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                goto out;
        if (pmd_page(*pmd) != page)
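From here on the diff replaces several open-coded pgd/pud/pmd walks with mm_find_pmd(), which is introduced elsewhere in this series and not shown in this blobdiff. Reconstructed from the walks being deleted (an assumption, not a quote of the real mm/rmap.c helper):

        pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
        {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd = NULL;

                pgd = pgd_offset(mm, address);
                if (!pgd_present(*pgd))
                        goto out;
                pud = pud_offset(pgd, address);
                if (!pud_present(*pud))
                        goto out;
                pmd = pmd_offset(pud, address);
                if (!pmd_present(*pmd))
                        pmd = NULL;
        out:
                return pmd;
        }

Note that the callers below still check pmd_trans_huge() or pmd_none() themselves; only the presence walk is centralized.
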
@@ -1701,64 +1858,49 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte)
        }
 }
 
-static void release_all_pte_pages(pte_t *pte)
-{
-       release_pte_pages(pte, pte + HPAGE_PMD_NR);
-}
-
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                                        unsigned long address,
                                        pte_t *pte)
 {
        struct page *page;
        pte_t *_pte;
-       int referenced = 0, isolated = 0, none = 0;
+       int referenced = 0, none = 0;
        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
             _pte++, address += PAGE_SIZE) {
                pte_t pteval = *_pte;
                if (pte_none(pteval)) {
                        if (++none <= khugepaged_max_ptes_none)
                                continue;
-                       else {
-                               release_pte_pages(pte, _pte);
+                       else
                                goto out;
-                       }
                }
-               if (!pte_present(pteval) || !pte_write(pteval)) {
-                       release_pte_pages(pte, _pte);
+               if (!pte_present(pteval) || !pte_write(pteval))
                        goto out;
-               }
                page = vm_normal_page(vma, address, pteval);
-               if (unlikely(!page)) {
-                       release_pte_pages(pte, _pte);
+               if (unlikely(!page))
                        goto out;
-               }
+
                VM_BUG_ON(PageCompound(page));
                BUG_ON(!PageAnon(page));
                VM_BUG_ON(!PageSwapBacked(page));
 
                /* cannot use mapcount: can't collapse if there's a gup pin */
-               if (page_count(page) != 1) {
-                       release_pte_pages(pte, _pte);
+               if (page_count(page) != 1)
                        goto out;
-               }
                /*
                 * We can do it before isolate_lru_page because the
                 * page can't be freed from under us. NOTE: PG_lock
                 * is needed to serialize against split_huge_page
                 * when invoked from the VM.
                 */
-               if (!trylock_page(page)) {
-                       release_pte_pages(pte, _pte);
+               if (!trylock_page(page))
                        goto out;
-               }
                /*
                 * Isolate the page to avoid collapsing a hugepage
                 * currently in use by the VM.
                 */
                if (isolate_lru_page(page)) {
                        unlock_page(page);
-                       release_pte_pages(pte, _pte);
                        goto out;
                }
                /* 0 stands for page_is_file_cache(page) == false */
@@ -1771,12 +1913,11 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                    mmu_notifier_test_young(vma->vm_mm, address))
                        referenced = 1;
        }
-       if (unlikely(!referenced))
-               release_all_pte_pages(pte);
-       else
-               isolated = 1;
+       if (likely(referenced))
+               return 1;
 out:
-       return isolated;
+       release_pte_pages(pte, _pte);
+       return 0;
 }
 
 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
@@ -1918,14 +2059,26 @@ static struct page
 }
 #endif
 
+static bool hugepage_vma_check(struct vm_area_struct *vma)
+{
+       if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+           (vma->vm_flags & VM_NOHUGEPAGE))
+               return false;
+
+       if (!vma->anon_vma || vma->vm_ops)
+               return false;
+       if (is_vma_temporary_stack(vma))
+               return false;
+       VM_BUG_ON(vma->vm_flags & VM_NO_THP);
+       return true;
+}
+
 static void collapse_huge_page(struct mm_struct *mm,
                                   unsigned long address,
                                   struct page **hpage,
                                   struct vm_area_struct *vma,
                                   int node)
 {
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd, _pmd;
        pte_t *pte;
        pgtable_t pgtable;
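hugepage_vma_check() centralizes the vma eligibility test that was previously duplicated between collapse_huge_page() and khugepaged_scan_mm_slot(); both call sites in the hunks below reduce to:

        if (!hugepage_vma_check(vma))
                goto out;       /* or, in the scan loop: goto skip */
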
@@ -1960,28 +2113,12 @@ static void collapse_huge_page(struct mm_struct *mm,
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
                goto out;
-
-       if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
-           (vma->vm_flags & VM_NOHUGEPAGE))
-               goto out;
-
-       if (!vma->anon_vma || vma->vm_ops)
+       if (!hugepage_vma_check(vma))
                goto out;
-       if (is_vma_temporary_stack(vma))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                goto out;
-       VM_BUG_ON(vma->vm_flags & VM_NO_THP);
-
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
-               goto out;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
-               goto out;
-
-       pmd = pmd_offset(pud, address);
-       /* pmd can't go away or become huge under us */
-       if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
+       if (pmd_trans_huge(*pmd))
                goto out;
 
        anon_vma_lock(vma->anon_vma);
@@ -2028,9 +2165,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        __SetPageUptodate(new_page);
        pgtable = pmd_pgtable(_pmd);
 
-       _pmd = mk_pmd(new_page, vma->vm_page_prot);
-       _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-       _pmd = pmd_mkhuge(_pmd);
+       _pmd = mk_huge_pmd(new_page, vma);
 
        /*
         * spin_lock() below is not the equivalent of smp_wmb(), so
@@ -2064,8 +2199,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                               unsigned long address,
                               struct page **hpage)
 {
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, *_pte;
        int ret = 0, referenced = 0, none = 0;
@@ -2076,16 +2209,10 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                goto out;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
-               goto out;
-
-       pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
+       if (pmd_trans_huge(*pmd))
                goto out;
 
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2193,20 +2320,11 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
                        progress++;
                        break;
                }
-
-               if ((!(vma->vm_flags & VM_HUGEPAGE) &&
-                    !khugepaged_always()) ||
-                   (vma->vm_flags & VM_NOHUGEPAGE)) {
-               skip:
+               if (!hugepage_vma_check(vma)) {
+skip:
                        progress++;
                        continue;
                }
-               if (!vma->anon_vma || vma->vm_ops)
-                       goto skip;
-               if (is_vma_temporary_stack(vma))
-                       goto skip;
-               VM_BUG_ON(vma->vm_flags & VM_NO_THP);
-
                hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
                hend = vma->vm_end & HPAGE_PMD_MASK;
                if (hstart >= hend)
@@ -2379,22 +2497,12 @@ void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
 static void split_huge_page_address(struct mm_struct *mm,
                                    unsigned long address)
 {
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd;
 
        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
 
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
-               return;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
-               return;
-
-       pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                return;
        /*
         * Caller holds the mmap_sem write mode, so a huge pmd cannot