hwpoison, hugetlbfs: fix "bad pmd" warning in unmapping hwpoisoned hugepage
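
The final hunk below is the fix named in the title: in the hugetlb unmap loop, an hwpoisoned hugepage has already been unmapped and its reference dropped, but the hwpoison entry is still present in the page table. Skipping it without clearing the PTE leaves a non-empty entry behind, and the later page-table teardown then trips over it and prints a "bad pmd" warning; the change clears the entry before continuing. The remaining hunks are a mechanical switch from the N_HIGH_MEMORY node-state mask to N_MEMORY, plus dropping the address-of when looking up node_devices[nid], consistent with node_devices becoming an array of pointers.

As a minimal user-space sketch of the pattern (not kernel code; the names toy_pte, unmap_range and teardown are hypothetical and only illustrate why a skipped entry must still be cleared):

    /* Toy "page table": entries are empty, mapped, or poisoned. */
    #include <stdio.h>

    enum toy_pte { TOY_EMPTY, TOY_MAPPED, TOY_POISONED };

    /* Unmap pass: poisoned entries are skipped; with the fix applied
     * they are also cleared so no stale entry survives the loop. */
    static void unmap_range(enum toy_pte *tbl, int n, int clear_poisoned)
    {
            for (int i = 0; i < n; i++) {
                    if (tbl[i] == TOY_POISONED) {
                            if (clear_poisoned)
                                    tbl[i] = TOY_EMPTY;
                            continue;
                    }
                    tbl[i] = TOY_EMPTY;
            }
    }

    /* Teardown pass: expects every slot to be empty -- the analogue of
     * the pmd-level check that prints the "bad pmd" warning. */
    static void teardown(const enum toy_pte *tbl, int n)
    {
            for (int i = 0; i < n; i++)
                    if (tbl[i] != TOY_EMPTY)
                            printf("bad entry at %d\n", i);
    }

    int main(void)
    {
            enum toy_pte tbl[3] = { TOY_MAPPED, TOY_POISONED, TOY_MAPPED };

            unmap_range(tbl, 3, 0);   /* old behaviour: slot 1 stays set */
            teardown(tbl, 3);         /* prints "bad entry at 1" */

            unmap_range(tbl, 3, 1);   /* with the fix: poisoned slot cleared */
            teardown(tbl, 3);         /* silent */
            return 0;
    }
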
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 59a0059b39e27e8eb6d8dbaea784318dbde97333..e53f39cd67dba964cc9c651d32d82e55e32f5377 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1057,7 +1057,7 @@ static void return_unused_surplus_pages(struct hstate *h,
         * on-line nodes with memory and will handle the hstate accounting.
         */
        while (nr_pages--) {
-               if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
+               if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
                        break;
        }
 }
@@ -1180,14 +1180,14 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
        struct huge_bootmem_page *m;
-       int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
+       int nr_nodes = nodes_weight(node_states[N_MEMORY]);
 
        while (nr_nodes) {
                void *addr;
 
                addr = __alloc_bootmem_node_nopanic(
                                NODE_DATA(hstate_next_node_to_alloc(h,
-                                               &node_states[N_HIGH_MEMORY])),
+                                               &node_states[N_MEMORY])),
                                huge_page_size(h), huge_page_size(h), 0);
 
                if (addr) {
@@ -1259,7 +1259,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
                        if (!alloc_bootmem_huge_page(h))
                                break;
                } else if (!alloc_fresh_huge_page(h,
-                                        &node_states[N_HIGH_MEMORY]))
+                                        &node_states[N_MEMORY]))
                        break;
        }
        h->max_huge_pages = i;
@@ -1527,7 +1527,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
                if (!(obey_mempolicy &&
                                init_nodemask_of_mempolicy(nodes_allowed))) {
                        NODEMASK_FREE(nodes_allowed);
-                       nodes_allowed = &node_states[N_HIGH_MEMORY];
+                       nodes_allowed = &node_states[N_MEMORY];
                }
        } else if (nodes_allowed) {
                /*
@@ -1537,11 +1537,11 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
                count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
                init_nodemask_of_node(nodes_allowed, nid);
        } else
-               nodes_allowed = &node_states[N_HIGH_MEMORY];
+               nodes_allowed = &node_states[N_MEMORY];
 
        h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
 
-       if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+       if (nodes_allowed != &node_states[N_MEMORY])
                NODEMASK_FREE(nodes_allowed);
 
        return len;
@@ -1800,7 +1800,7 @@ static void hugetlb_unregister_all_nodes(void)
         * remove hstate attributes from any nodes that have them.
         */
        for (nid = 0; nid < nr_node_ids; nid++)
-               hugetlb_unregister_node(&node_devices[nid]);
+               hugetlb_unregister_node(node_devices[nid]);
 }
 
 /*
@@ -1844,8 +1844,8 @@ static void hugetlb_register_all_nodes(void)
 {
        int nid;
 
-       for_each_node_state(nid, N_HIGH_MEMORY) {
-               struct node *node = &node_devices[nid];
+       for_each_node_state(nid, N_MEMORY) {
+               struct node *node = node_devices[nid];
                if (node->dev.id == nid)
                        hugetlb_register_node(node);
        }
@@ -1939,8 +1939,8 @@ void __init hugetlb_add_hstate(unsigned order)
        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
        INIT_LIST_HEAD(&h->hugepage_activelist);
-       h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
-       h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
+       h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
+       h->next_nid_to_free = first_node(node_states[N_MEMORY]);
        snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
                                        huge_page_size(h)/1024);
        /*
@@ -2035,11 +2035,11 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
                if (!(obey_mempolicy &&
                               init_nodemask_of_mempolicy(nodes_allowed))) {
                        NODEMASK_FREE(nodes_allowed);
-                       nodes_allowed = &node_states[N_HIGH_MEMORY];
+                       nodes_allowed = &node_states[N_MEMORY];
                }
                h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
 
-               if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+               if (nodes_allowed != &node_states[N_MEMORY])
                        NODEMASK_FREE(nodes_allowed);
        }
 out:
@@ -2386,8 +2386,10 @@ again:
                /*
                 * HWPoisoned hugepage is already unmapped and dropped reference
                 */
-               if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+               if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+                       pte_clear(mm, address, ptep);
                        continue;
+               }
 
                page = pte_page(pte);
                /*