thp: do_huge_pmd_wp_page(): handle huge zero page
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ea5fb93a53a9a0d33f9a2851ee2ab008fe6a8a9e..a959b3a4ddd5471dc2e73609917bfa7acd0fc705 100644
@@ -47,6 +47,7 @@ static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
 /* during fragmentation poll the hugepage allocator once every minute */
 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
 static struct task_struct *khugepaged_thread __read_mostly;
+static unsigned long huge_zero_pfn __read_mostly;
 static DEFINE_MUTEX(khugepaged_mutex);
 static DEFINE_SPINLOCK(khugepaged_mm_lock);
 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
@@ -159,6 +160,29 @@ static int start_khugepaged(void)
        return err;
 }
 
+static int __init init_huge_zero_page(void)
+{
+       struct page *hpage;
+
+       hpage = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
+                       HPAGE_PMD_ORDER);
+       if (!hpage)
+               return -ENOMEM;
+
+       huge_zero_pfn = page_to_pfn(hpage);
+       return 0;
+}
+
+static inline bool is_huge_zero_pfn(unsigned long pfn)
+{
+       return pfn == huge_zero_pfn;
+}
+
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+       return is_huge_zero_pfn(pmd_pfn(pmd));
+}
+
 #ifdef CONFIG_SYSFS
 
 static ssize_t double_flag_show(struct kobject *kobj,
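The hunk above sets up the scheme: init_huge_zero_page() allocates one machine-wide, fully zeroed huge page at init (~__GFP_MOVABLE keeps it out of the movable zone, since the page is identified by pfn for the lifetime of the system), and is_huge_zero_pfn()/is_huge_zero_pmd() recognize it with a bare pfn compare, so the fault paths never consult page flags or a refcount. A minimal user-space probe of the intended behaviour (hypothetical zero_thp_read.c; it assumes x86-64 2 MiB huge pages and a kernel carrying the complete huge-zero-page series with THP enabled): read faults on an untouched anonymous region should be satisfied by the shared zero page, leaving RSS flat.

/* zero_thp_read.c: hypothetical probe, not part of the patch. Assumes
 * 2 MiB huge pages and a kernel with the full huge-zero-page series.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE (2UL * 1024 * 1024)       /* assumption: 2 MiB huge pages */

static long rss_kb(void)
{
        long size, rss;
        FILE *f = fopen("/proc/self/statm", "r");

        if (!f || fscanf(f, "%ld %ld", &size, &rss) != 2)
                exit(1);
        fclose(f);
        return rss * (sysconf(_SC_PAGESIZE) / 1024);
}

int main(void)
{
        size_t len = 64 * HPAGE;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        volatile char sink = 0;

        if (p == MAP_FAILED)
                return 1;
        madvise(p, len, MADV_HUGEPAGE);

        printf("before read: %ld kB\n", rss_kb());
        for (size_t off = 0; off < len; off += HPAGE)
                sink += p[off];         /* read fault -> shared zero pmd */
        printf("after read : %ld kB\n", rss_kb());  /* should stay flat */
        (void)sink;
        return 0;
}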
@@ -540,6 +564,10 @@ static int __init hugepage_init(void)
        if (err)
                return err;
 
+       err = init_huge_zero_page();
+       if (err)
+               goto out;
+
        err = khugepaged_slab_init();
        if (err)
                goto out;
@@ -562,6 +590,8 @@ static int __init hugepage_init(void)
 
        return 0;
 out:
+       if (huge_zero_pfn)
+               __free_page(pfn_to_page(huge_zero_pfn));
        hugepage_exit_sysfs(hugepage_kobj);
        return err;
 }
@@ -678,6 +708,18 @@ static inline struct page *alloc_hugepage(int defrag)
 }
 #endif
 
+static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+               struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd)
+{
+       pmd_t entry;
+       entry = pfn_pmd(huge_zero_pfn, vma->vm_page_prot);
+       entry = pmd_wrprotect(entry);
+       entry = pmd_mkhuge(entry);
+       set_pmd_at(mm, haddr, pmd, entry);
+       pgtable_trans_huge_deposit(mm, pgtable);
+       mm->nr_ptes++;
+}
+
 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               unsigned int flags)
@@ -755,6 +797,16 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pte_free(dst_mm, pgtable);
                goto out_unlock;
        }
+       /*
+        * mm->page_table_lock is enough to be sure that huge zero pmd is not
+        * under splitting since we don't split the page itself, only pmd to
+        * a page table.
+        */
+       if (is_huge_zero_pmd(pmd)) {
+               set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd);
+               ret = 0;
+               goto out_unlock;
+       }
        if (unlikely(pmd_trans_splitting(pmd))) {
                /* split huge page running from under us */
                spin_unlock(&src_mm->page_table_lock);
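The comment in that hunk is load-bearing: a huge zero pmd is never split as a page, only downgraded to a page table, so holding page_table_lock is sufficient, and copying it at fork is just set_huge_zero_page() into the child with no get_page(), because the zero page is global and never freed in this version of the series. Sketched from user space (hypothetical zero_thp_fork.c, same assumptions as above):

/* zero_thp_fork.c: hypothetical demo of the copy_huge_pmd() path. The
 * parent's zero pmd is re-established in the child with no reference
 * taken; both processes read zeros through one shared page.
 */
#include <assert.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#define HPAGE (2UL * 1024 * 1024)       /* assumption: 2 MiB huge pages */

int main(void)
{
        char *p = mmap(NULL, HPAGE, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        volatile char c;
        pid_t pid;

        if (p == MAP_FAILED)
                return 1;
        madvise(p, HPAGE, MADV_HUGEPAGE);

        c = p[0];               /* read fault: parent maps the zero pmd */
        pid = fork();           /* copy_huge_pmd() re-creates it in the child */
        if (pid == 0) {
                assert(p[HPAGE / 2] == 0);      /* child sees zeros, no copy */
                _exit(0);
        }
        waitpid(pid, NULL, 0);
        puts("parent and child shared one zero page");
        (void)c;
        return 0;
}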
@@ -784,6 +836,92 @@ out:
        return ret;
 }
 
+void huge_pmd_set_accessed(struct mm_struct *mm,
+                          struct vm_area_struct *vma,
+                          unsigned long address,
+                          pmd_t *pmd, pmd_t orig_pmd,
+                          int dirty)
+{
+       pmd_t entry;
+       unsigned long haddr;
+
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(*pmd, orig_pmd)))
+               goto unlock;
+
+       entry = pmd_mkyoung(orig_pmd);
+       haddr = address & HPAGE_PMD_MASK;
+       if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
+               update_mmu_cache_pmd(vma, address, pmd);
+
+unlock:
+       spin_unlock(&mm->page_table_lock);
+}
+
+static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
+               struct vm_area_struct *vma, unsigned long address,
+               pmd_t *pmd, unsigned long haddr)
+{
+       pgtable_t pgtable;
+       pmd_t _pmd;
+       struct page *page;
+       int i, ret = 0;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
+
+       page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+       if (!page) {
+               ret |= VM_FAULT_OOM;
+               goto out;
+       }
+
+       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
+               put_page(page);
+               ret |= VM_FAULT_OOM;
+               goto out;
+       }
+
+       clear_user_highpage(page, address);
+       __SetPageUptodate(page);
+
+       mmun_start = haddr;
+       mmun_end   = haddr + HPAGE_PMD_SIZE;
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
+       spin_lock(&mm->page_table_lock);
+       pmdp_clear_flush(vma, haddr, pmd);
+       /* leave pmd empty until pte is filled */
+
+       pgtable = pgtable_trans_huge_withdraw(mm);
+       pmd_populate(mm, &_pmd, pgtable);
+
+       for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+               pte_t *pte, entry;
+               if (haddr == (address & PAGE_MASK)) {
+                       entry = mk_pte(page, vma->vm_page_prot);
+                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+                       page_add_new_anon_rmap(page, vma, haddr);
+               } else {
+                       entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
+                       entry = pte_mkspecial(entry);
+               }
+               pte = pte_offset_map(&_pmd, haddr);
+               VM_BUG_ON(!pte_none(*pte));
+               set_pte_at(mm, haddr, pte, entry);
+               pte_unmap(pte);
+       }
+       smp_wmb(); /* make pte visible before pmd */
+       pmd_populate(mm, pmd, pgtable);
+       spin_unlock(&mm->page_table_lock);
+       inc_mm_counter(mm, MM_ANONPAGES);
+
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+
+       ret |= VM_FAULT_WRITE;
+out:
+       return ret;
+}
+
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
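When no huge page can be allocated, do_huge_pmd_wp_zero_page_fallback() above downgrades the zero pmd to a page table instead: the single 4 KiB slot covering the faulting address gets a freshly zeroed real page, and the remaining HPAGE_PMD_NR - 1 slots get pte_mkspecial() entries on the small zero page via my_zero_pfn(). A standalone sketch of just the slot arithmetic driving that loop (hypothetical which_slot.c; 4 KiB base pages and a 2 MiB pmd are assumptions):

/* which_slot.c: hypothetical sketch of the loop above. For a fault at
 * `address`, only the pte covering address & PAGE_MASK receives a real
 * page; every other slot keeps pointing at the 4 KiB zero page.
 */
#include <stdio.h>

#define PAGE_SIZE      4096UL                   /* assumption: 4 KiB pages */
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define HPAGE_PMD_SIZE (2UL * 1024 * 1024)      /* assumption: 2 MiB pmd */
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
#define HPAGE_PMD_NR   (HPAGE_PMD_SIZE / PAGE_SIZE)

int main(void)
{
        unsigned long address = 0x7f0000123456UL;      /* example fault */
        unsigned long haddr   = address & HPAGE_PMD_MASK;
        unsigned long slot    = ((address & PAGE_MASK) - haddr) / PAGE_SIZE;

        printf("pmd covers %#lx..%#lx\n", haddr, haddr + HPAGE_PMD_SIZE - 1);
        printf("real page in pte slot %lu of %lu; the rest stay zero ptes\n",
               slot, HPAGE_PMD_NR);
        return 0;
}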
@@ -890,19 +1028,21 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
 {
        int ret = 0;
-       struct page *page, *new_page;
+       struct page *page = NULL, *new_page;
        unsigned long haddr;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
 
        VM_BUG_ON(!vma->anon_vma);
+       haddr = address & HPAGE_PMD_MASK;
+       if (is_huge_zero_pmd(orig_pmd))
+               goto alloc;
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_unlock;
 
        page = pmd_page(orig_pmd);
        VM_BUG_ON(!PageCompound(page) || !PageHead(page));
-       haddr = address & HPAGE_PMD_MASK;
        if (page_mapcount(page) == 1) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
@@ -914,7 +1054,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
        get_page(page);
        spin_unlock(&mm->page_table_lock);
-
+alloc:
        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow())
                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
@@ -924,24 +1064,34 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        if (unlikely(!new_page)) {
                count_vm_event(THP_FAULT_FALLBACK);
-               ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
-                                                  pmd, orig_pmd, page, haddr);
-               if (ret & VM_FAULT_OOM)
-                       split_huge_page(page);
-               put_page(page);
+               if (is_huge_zero_pmd(orig_pmd)) {
+                       ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
+                                       address, pmd, haddr);
+               } else {
+                       ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
+                                       pmd, orig_pmd, page, haddr);
+                       if (ret & VM_FAULT_OOM)
+                               split_huge_page(page);
+                       put_page(page);
+               }
                goto out;
        }
        count_vm_event(THP_FAULT_ALLOC);
 
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
-               split_huge_page(page);
-               put_page(page);
+               if (page) {
+                       split_huge_page(page);
+                       put_page(page);
+               }
                ret |= VM_FAULT_OOM;
                goto out;
        }
 
-       copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
+       if (is_huge_zero_pmd(orig_pmd))
+               clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
+       else
+               copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
        __SetPageUptodate(new_page);
 
        mmun_start = haddr;
@@ -949,7 +1099,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
        spin_lock(&mm->page_table_lock);
-       put_page(page);
+       if (page)
+               put_page(page);
        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
                spin_unlock(&mm->page_table_lock);
                mem_cgroup_uncharge_page(new_page);
@@ -957,14 +1108,18 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_mn;
        } else {
                pmd_t entry;
-               VM_BUG_ON(!PageHead(page));
                entry = mk_huge_pmd(new_page, vma);
                pmdp_clear_flush(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
                update_mmu_cache_pmd(vma, address, pmd);
-               page_remove_rmap(page);
-               put_page(page);
+               if (is_huge_zero_pmd(orig_pmd))
+                       add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+               else {
+                       VM_BUG_ON(!PageHead(page));
+                       page_remove_rmap(page);
+                       put_page(page);
+               }
                ret |= VM_FAULT_WRITE;
        }
        spin_unlock(&mm->page_table_lock);
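The net effect of the wp-fault changes, observable from user space: the first store into a zero-pmd region allocates a real huge page, zero-fills it with clear_huge_page() rather than copying from the old mapping, and only then accounts HPAGE_PMD_NR anonymous pages. RSS should therefore stay flat across the read and jump by a full huge page on the first write (hypothetical zero_thp_cow.c, same assumptions as above):

/* zero_thp_cow.c: hypothetical demo of the write path above. The first
 * store lands in do_huge_pmd_wp_page(), which installs a real huge page
 * and bumps MM_ANONPAGES by HPAGE_PMD_NR, so RSS grows by ~2048 kB.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE (2UL * 1024 * 1024)       /* assumption: 2 MiB huge pages */

static long rss_kb(void)
{
        long size, rss;
        FILE *f = fopen("/proc/self/statm", "r");

        if (!f || fscanf(f, "%ld %ld", &size, &rss) != 2)
                exit(1);
        fclose(f);
        return rss * (sysconf(_SC_PAGESIZE) / 1024);
}

int main(void)
{
        char *p = mmap(NULL, HPAGE, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        volatile char c;

        if (p == MAP_FAILED)
                return 1;
        madvise(p, HPAGE, MADV_HUGEPAGE);

        c = p[0];               /* read: zero pmd, RSS unchanged */
        printf("after read : %ld kB\n", rss_kb());
        p[0] = 1;               /* write: COW into a real huge page */
        printf("after write: %ld kB\n", rss_kb());      /* ~2048 kB higher */
        (void)c;
        return 0;
}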
@@ -1033,15 +1188,20 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                pmd_t orig_pmd;
                pgtable = pgtable_trans_huge_withdraw(tlb->mm);
                orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
-               page = pmd_page(orig_pmd);
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
-               page_remove_rmap(page);
-               VM_BUG_ON(page_mapcount(page) < 0);
-               add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-               VM_BUG_ON(!PageHead(page));
-               tlb->mm->nr_ptes--;
-               spin_unlock(&tlb->mm->page_table_lock);
-               tlb_remove_page(tlb, page);
+               if (is_huge_zero_pmd(orig_pmd)) {
+                       tlb->mm->nr_ptes--;
+                       spin_unlock(&tlb->mm->page_table_lock);
+               } else {
+                       page = pmd_page(orig_pmd);
+                       page_remove_rmap(page);
+                       VM_BUG_ON(page_mapcount(page) < 0);
+                       add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+                       VM_BUG_ON(!PageHead(page));
+                       tlb->mm->nr_ptes--;
+                       spin_unlock(&tlb->mm->page_table_lock);
+                       tlb_remove_page(tlb, page);
+               }
                pte_free(tlb->mm, pgtable);
                ret = 1;
        }
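The zap side stays symmetric: tearing down a zero pmd is just nr_ptes-- plus freeing the deposited page table, with no rmap update, no MM_ANONPAGES adjustment, and no tlb_remove_page(), because the zero page is global and outlives every mapping in this version of the series. A hypothetical zero_thp_zap.c exercising that teardown repeatedly (same assumptions):

/* zero_thp_zap.c: hypothetical demo of the zap path above. Unmapping a
 * zero-pmd region frees only the deposited page table; the huge zero
 * page survives, so this loop neither leaks nor frees it per iteration.
 */
#include <stdio.h>
#include <sys/mman.h>

#define HPAGE (2UL * 1024 * 1024)       /* assumption: 2 MiB huge pages */

int main(void)
{
        for (int i = 0; i < 1000; i++) {
                char *p = mmap(NULL, HPAGE, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                volatile char c;

                if (p == MAP_FAILED)
                        return 1;
                madvise(p, HPAGE, MADV_HUGEPAGE);
                c = p[0];               /* map the shared zero page */
                (void)c;
                munmap(p, HPAGE);       /* zap_huge_pmd(): cheap teardown */
        }
        puts("1000 map/read/unmap cycles; zero page never freed");
        return 0;
}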