rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
x86/mm/cpa: Use flush_tlb_all()
author Peter Zijlstra <peterz@infradead.org>
Wed, 19 Sep 2018 08:50:17 +0000 (10:50 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Thu, 27 Sep 2018 18:39:40 +0000 (20:39 +0200)
Instead of open-coding it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Cc: Bin Yang <bin.yang@intel.com>
Cc: Mark Gross <mark.gross@intel.com>
Link: https://lkml.kernel.org/r/20180919085947.831102058@infradead.org
arch/x86/mm/pageattr.c

index 4e55ded01be556990bfcc3251b216f15e7e4c462..a22f6b71a308473e45253dabfa65aaa439b1bd0d 100644 (file)
@@ -285,16 +285,6 @@ static void cpa_flush_all(unsigned long cache)
        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
-static void __cpa_flush_range(void *arg)
-{
-       /*
-        * We could optimize that further and do individual per page
-        * tlb invalidates for a low number of pages. Caveat: we must
-        * flush the high aliases on 64bit as well.
-        */
-       __flush_tlb_all();
-}
-
 static void cpa_flush_range(unsigned long start, int numpages, int cache)
 {
        unsigned int i, level;
@@ -303,7 +293,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
        BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
        WARN_ON(PAGE_ALIGN(start) != start);
 
-       on_each_cpu(__cpa_flush_range, NULL, 1);
+       flush_tlb_all();
 
        if (!cache)
                return;