rtime.felk.cvut.cz Git - linux-imx.git/commitdiff
Merge branch 'security-fixes' into fixes
author	Russell King <rmk+kernel@arm.linux.org.uk>
Sat, 3 Aug 2013 09:49:38 +0000 (10:49 +0100)
committer	Russell King <rmk+kernel@arm.linux.org.uk>
Sat, 3 Aug 2013 09:49:38 +0000 (10:49 +0100)
25 files changed:
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/Makefile
arch/arm/include/asm/a.out-core.h [deleted file]
arch/arm/include/asm/cputype.h
arch/arm/include/asm/mmu.h
arch/arm/include/asm/mmu_context.h
arch/arm/include/asm/processor.h
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/tlbflush.h
arch/arm/include/asm/virt.h
arch/arm/include/uapi/asm/Kbuild
arch/arm/include/uapi/asm/a.out.h [deleted file]
arch/arm/kernel/entry-v7m.S
arch/arm/kernel/head-nommu.S
arch/arm/kernel/head.S
arch/arm/kernel/hyp-stub.S
arch/arm/kernel/process.c
arch/arm/kernel/setup.c
arch/arm/kernel/smp_tlb.c
arch/arm/mm/context.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7-2level.S
arch/arm/mm/proc-v7-3level.S
arch/arm/mm/proc-v7.S

index 123b7924904fe0209ffa01ae204f7c667b30bbbd..995b55311c55c37c7627f5ed126743ec689b2d5e 100644 (file)
@@ -20,7 +20,6 @@ config ARM
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
        select HARDIRQS_SW_RESEND
-       select HAVE_AOUT
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_SECCOMP_FILTER
index e401a766c0bdaf633f9dd14977eb231d2b2656e9..583f4a00ec3210bf6cb3cf58dac4ae7ac8a2807c 100644 (file)
@@ -804,9 +804,19 @@ config DEBUG_LL_INCLUDE
 
 config DEBUG_UNCOMPRESS
        bool
-       default y if ARCH_MULTIPLATFORM && DEBUG_LL && \
-                    !DEBUG_OMAP2PLUS_UART && \
+       depends on ARCH_MULTIPLATFORM
+       default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
                     !DEBUG_TEGRA_UART
+       help
+         This option influences the normal decompressor output for
+         multiplatform kernels.  Normally, multiplatform kernels disable
+         decompressor output because it is not possible to know where to
+         send the decompressor output.
+
+         When this option is set, the selected DEBUG_LL output method
+         will be re-used for normal decompressor output on multiplatform
+         kernels.
+         
 
 config UNCOMPRESS_INCLUDE
        string
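
For reference, the decompressor picks up its output routines from the header
selected by CONFIG_UNCOMPRESS_INCLUDE. A minimal sketch of what a
multiplatform uncompress include provides (illustrative only, not part of this
patch; the real header lives under arch/arm/include/debug/):

    #ifdef CONFIG_DEBUG_UNCOMPRESS
    extern void putc(int c);                /* routed to the DEBUG_LL UART */
    #else
    static inline void putc(int c) {}       /* output silently discarded */
    #endif
    static inline void flush(void) {}
    static inline void arch_decomp_setup(void) {}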
index c0ac0f5e5e5c0900c5e0e081390e0af900005738..6fd2ceae305a6a5cf6c5d2e766e4c91848b147f7 100644 (file)
@@ -153,6 +153,7 @@ machine-$(CONFIG_ARCH_DAVINCI)              += davinci
 machine-$(CONFIG_ARCH_DOVE)            += dove
 machine-$(CONFIG_ARCH_EBSA110)         += ebsa110
 machine-$(CONFIG_ARCH_EP93XX)          += ep93xx
+machine-$(CONFIG_ARCH_EXYNOS)          += exynos
 machine-$(CONFIG_ARCH_GEMINI)          += gemini
 machine-$(CONFIG_ARCH_HIGHBANK)                += highbank
 machine-$(CONFIG_ARCH_INTEGRATOR)      += integrator
@@ -160,15 +161,16 @@ machine-$(CONFIG_ARCH_IOP13XX)            += iop13xx
 machine-$(CONFIG_ARCH_IOP32X)          += iop32x
 machine-$(CONFIG_ARCH_IOP33X)          += iop33x
 machine-$(CONFIG_ARCH_IXP4XX)          += ixp4xx
+machine-$(CONFIG_ARCH_KEYSTONE)                += keystone
 machine-$(CONFIG_ARCH_KIRKWOOD)                += kirkwood
 machine-$(CONFIG_ARCH_KS8695)          += ks8695
 machine-$(CONFIG_ARCH_LPC32XX)         += lpc32xx
 machine-$(CONFIG_ARCH_MMP)             += mmp
 machine-$(CONFIG_ARCH_MSM)             += msm
 machine-$(CONFIG_ARCH_MV78XX0)         += mv78xx0
+machine-$(CONFIG_ARCH_MVEBU)           += mvebu
 machine-$(CONFIG_ARCH_MXC)             += imx
 machine-$(CONFIG_ARCH_MXS)             += mxs
-machine-$(CONFIG_ARCH_MVEBU)           += mvebu
 machine-$(CONFIG_ARCH_NETX)            += netx
 machine-$(CONFIG_ARCH_NOMADIK)         += nomadik
 machine-$(CONFIG_ARCH_NSPIRE)          += nspire
@@ -176,7 +178,6 @@ machine-$(CONFIG_ARCH_OMAP1)                += omap1
 machine-$(CONFIG_ARCH_OMAP2PLUS)       += omap2
 machine-$(CONFIG_ARCH_ORION5X)         += orion5x
 machine-$(CONFIG_ARCH_PICOXCELL)       += picoxcell
-machine-$(CONFIG_ARCH_SIRF)            += prima2
 machine-$(CONFIG_ARCH_PXA)             += pxa
 machine-$(CONFIG_ARCH_REALVIEW)                += realview
 machine-$(CONFIG_ARCH_ROCKCHIP)                += rockchip
@@ -186,25 +187,24 @@ machine-$(CONFIG_ARCH_S3C64XX)            += s3c64xx
 machine-$(CONFIG_ARCH_S5P64X0)         += s5p64x0
 machine-$(CONFIG_ARCH_S5PC100)         += s5pc100
 machine-$(CONFIG_ARCH_S5PV210)         += s5pv210
-machine-$(CONFIG_ARCH_EXYNOS)          += exynos
 machine-$(CONFIG_ARCH_SA1100)          += sa1100
 machine-$(CONFIG_ARCH_SHARK)           += shark
 machine-$(CONFIG_ARCH_SHMOBILE)        += shmobile
+machine-$(CONFIG_ARCH_SIRF)            += prima2
+machine-$(CONFIG_ARCH_SOCFPGA)         += socfpga
+machine-$(CONFIG_ARCH_STI)             += sti
+machine-$(CONFIG_ARCH_SUNXI)           += sunxi
 machine-$(CONFIG_ARCH_TEGRA)           += tegra
 machine-$(CONFIG_ARCH_U300)            += u300
 machine-$(CONFIG_ARCH_U8500)           += ux500
 machine-$(CONFIG_ARCH_VERSATILE)       += versatile
 machine-$(CONFIG_ARCH_VEXPRESS)                += vexpress
+machine-$(CONFIG_ARCH_VIRT)            += virt
 machine-$(CONFIG_ARCH_VT8500)          += vt8500
 machine-$(CONFIG_ARCH_W90X900)         += w90x900
+machine-$(CONFIG_ARCH_ZYNQ)            += zynq
 machine-$(CONFIG_FOOTBRIDGE)           += footbridge
-machine-$(CONFIG_ARCH_SOCFPGA)         += socfpga
 machine-$(CONFIG_PLAT_SPEAR)           += spear
-machine-$(CONFIG_ARCH_STI)             += sti
-machine-$(CONFIG_ARCH_VIRT)            += virt
-machine-$(CONFIG_ARCH_ZYNQ)            += zynq
-machine-$(CONFIG_ARCH_SUNXI)           += sunxi
-machine-$(CONFIG_ARCH_KEYSTONE)                += keystone
 
 # Platform directory name.  This list is sorted alphanumerically
 # by CONFIG_* macro name.
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
deleted file mode 100644 (file)
index 92f10cb..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _ASM_A_OUT_CORE_H
-#define _ASM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-#include <linux/elfcore.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
-{
-       struct task_struct *tsk = current;
-
-       dump->magic = CMAGIC;
-       dump->start_code = tsk->mm->start_code;
-       dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
-
-       dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
-       dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       dump->u_ssize = 0;
-
-       memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
-
-       if (dump->start_stack < 0x04000000)
-               dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
-
-       dump->regs = *regs;
-       dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_A_OUT_CORE_H */
index 8c25dc4e98514d85db8ce77f4d9e7fa8f828262d..9672e978d50df67d94c3dd86d23f3bcdd187c54d 100644 (file)
@@ -89,13 +89,18 @@ extern unsigned int processor_id;
                __val;                                                  \
        })
 
+/*
+ * The memory clobber prevents gcc 4.5 from reordering the mrc before
+ * any is_smp() tests, which can cause undefined instruction aborts on
+ * ARM1136 r0 due to the missing extended CP15 registers.
+ */
 #define read_cpuid_ext(ext_reg)                                                \
        ({                                                              \
                unsigned int __val;                                     \
                asm("mrc        p15, 0, %0, c0, " ext_reg               \
                    : "=r" (__val)                                      \
                    :                                                   \
-                   : "cc");                                            \
+                   : "memory");                                        \
                __val;                                                  \
        })
 
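A hypothetical usage sketch of the barrier's effect (CPUID_EXT_MMFR0 is one of
the extended register selectors defined in this header; the surrounding
is_smp() check is assumed for illustration):

    /* The "memory" clobber stops the compiler from hoisting the mrc above
     * the is_smp() test, so cores without the extended CP15 ID space
     * (e.g. ARM1136 r0) never execute it. */
    if (is_smp()) {
            unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
            /* ... probe memory model features from mmfr0 ... */
    }
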
index 7345e37155d55a91ec9113b72fd1dfed8773a1cf..6f18da09668b5f324ad8c32e6fd6058e5b837e2a 100644 (file)
@@ -6,6 +6,8 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
        atomic64_t      id;
+#else
+       int             switch_pending;
 #endif
        unsigned int    vmalloc_seq;
        unsigned long   sigpage;
index b5792b7fd8d3149c94c87f5650410b3ece3a2bd2..9b32f76bb0ddaa6d6e485d79cc9f2bdfeb24d776 100644 (file)
@@ -56,7 +56,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
                 * on non-ASID CPUs, the old mm will remain valid until the
                 * finish_arch_post_lock_switch() call.
                 */
-               set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+               mm->context.switch_pending = 1;
        else
                cpu_switch_mm(mm->pgd, mm);
 }
@@ -65,9 +65,21 @@ static inline void check_and_switch_context(struct mm_struct *mm,
        finish_arch_post_lock_switch
 static inline void finish_arch_post_lock_switch(void)
 {
-       if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
-               struct mm_struct *mm = current->mm;
-               cpu_switch_mm(mm->pgd, mm);
+       struct mm_struct *mm = current->mm;
+
+       if (mm && mm->context.switch_pending) {
+               /*
+                * Preemption must be disabled during cpu_switch_mm() as we
+                * have some stateful cache flush implementations. Check
+                * switch_pending again in case we were preempted and the
+                * switch to this mm was already done.
+                */
+               preempt_disable();
+               if (mm->context.switch_pending) {
+                       mm->context.switch_pending = 0;
+                       cpu_switch_mm(mm->pgd, mm);
+               }
+               preempt_enable_no_resched();
        }
 }
 
index 06e7d509eaac218864cc9d089ce9e6f4177c22f9..413f3876341cd6fd2e7bc4b1c6a71873cadaa887 100644 (file)
@@ -54,7 +54,6 @@ struct thread_struct {
 
 #define start_thread(regs,pc,sp)                                       \
 ({                                                                     \
-       unsigned long *stack = (unsigned long *)sp;                     \
        memset(regs->uregs, 0, sizeof(regs->uregs));                    \
        if (current->personality & ADDR_LIMIT_32BIT)                    \
                regs->ARM_cpsr = USR_MODE;                              \
@@ -65,9 +64,6 @@ struct thread_struct {
        regs->ARM_cpsr |= PSR_ENDSTATE;                                 \
        regs->ARM_pc = pc & ~1;         /* pc */                        \
        regs->ARM_sp = sp;              /* sp */                        \
-       regs->ARM_r2 = stack[2];        /* r2 (envp) */                 \
-       regs->ARM_r1 = stack[1];        /* r1 (argv) */                 \
-       regs->ARM_r0 = stack[0];        /* r0 (argc) */                 \
        nommu_start_thread(regs);                                       \
 })
 
index 214d4158089afce9c04102604fe07c5257c454e5..2b8114fcba09a3c5b9d6aceefbc6e456a8c9050a 100644 (file)
@@ -156,7 +156,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK    20
-#define TIF_SWITCH_MM          22      /* deferred switch_mm */
 
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
index fdbb9e369745c4b09d776a4568caa52702b7e9a7..f467e9b3f8d5d5b35c1d6fce386c718eb1b21e8f 100644 (file)
@@ -443,7 +443,18 @@ static inline void local_flush_bp_all(void)
                isb();
 }
 
+#include <asm/cputype.h>
 #ifdef CONFIG_ARM_ERRATA_798181
+static inline int erratum_a15_798181(void)
+{
+       unsigned int midr = read_cpuid_id();
+
+       /* Cortex-A15 r0p0..r3p2 affected */
+       if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+               return 0;
+       return 1;
+}
+
 static inline void dummy_flush_tlb_a15_erratum(void)
 {
        /*
@@ -453,6 +464,11 @@ static inline void dummy_flush_tlb_a15_erratum(void)
        dsb();
 }
 #else
+static inline int erratum_a15_798181(void)
+{
+       return 0;
+}
+
 static inline void dummy_flush_tlb_a15_erratum(void)
 {
 }
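
A worked example of the MIDR test (hypothetical value; field layout per the
ARM ARM: implementer [31:24], variant [23:20], part number [15:4],
revision [3:0]):

    /* 0x412fc0f1 = implementer 0x41 (ARM), part 0xc0f (Cortex-A15),
     * variant 2, revision 1, i.e. r2p1. */
    unsigned int midr = 0x412fc0f1;
    int affected = (midr & 0xff0ffff0) == 0x410fc0f0 && midr <= 0x413fc0f2;
    /* (midr & 0xff0ffff0) == 0x410fc0f0 and 0x412fc0f1 <= 0x413fc0f2 (r3p2),
     * so erratum_a15_798181() would return 1 here. */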
index 50af92bac7373eb7fbc01c8f38c4d7dfd6ccb05d..4371f45c578401c7f233e565dbb5fd36d9d59d52 100644 (file)
@@ -29,6 +29,7 @@
 #define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
 
 #ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
 
 #ifdef CONFIG_ARM_VIRT_EXT
 /*
  */
 extern int __boot_cpu_mode;
 
+static inline void sync_boot_mode(void)
+{
+       /*
+        * As secondaries write to __boot_cpu_mode with caches disabled, we
+        * must flush the corresponding cache entries to ensure the visibility
+        * of their writes.
+        */
+       sync_cache_r(&__boot_cpu_mode);
+}
+
 void __hyp_set_vectors(unsigned long phys_vector_base);
 unsigned long __hyp_get_vectors(void);
 #else
 #define __boot_cpu_mode        (SVC_MODE)
+#define sync_boot_mode()
 #endif
 
 #ifndef ZIMAGE
index 47bcb2d254af8e4fdb3bacee96131d88a0e4f37e..18d76fd5a2afb2bf27b91980f29c77094e6638c0 100644 (file)
@@ -1,7 +1,6 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
-header-y += a.out.h
 header-y += byteorder.h
 header-y += fcntl.h
 header-y += hwcap.h
diff --git a/arch/arm/include/uapi/asm/a.out.h b/arch/arm/include/uapi/asm/a.out.h
deleted file mode 100644 (file)
index 083894b..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ARM_A_OUT_H__
-#define __ARM_A_OUT_H__
-
-#include <linux/personality.h>
-#include <linux/types.h>
-
-struct exec
-{
-  __u32 a_info;                /* Use macros N_MAGIC, etc for access */
-  __u32 a_text;                /* length of text, in bytes */
-  __u32 a_data;                /* length of data, in bytes */
-  __u32 a_bss;         /* length of uninitialized data area for file, in bytes */
-  __u32 a_syms;                /* length of symbol table data in file, in bytes */
-  __u32 a_entry;       /* start address */
-  __u32 a_trsize;      /* length of relocation info for text, in bytes */
-  __u32 a_drsize;      /* length of relocation info for data, in bytes */
-};
-
-/*
- * This is always the same
- */
-#define N_TXTADDR(a)   (0x00008000)
-
-#define N_TRSIZE(a)    ((a).a_trsize)
-#define N_DRSIZE(a)    ((a).a_drsize)
-#define N_SYMSIZE(a)   ((a).a_syms)
-
-#define M_ARM 103
-
-#ifndef LIBRARY_START_TEXT
-#define LIBRARY_START_TEXT     (0x00c00000)
-#endif
-
-#endif /* __A_OUT_GNU_H__ */
index e00621f1403f7c44765103591cbefd6e3315702f..52b26432c9a9941e8281a4483dd7aed148a99995 100644 (file)
@@ -49,7 +49,7 @@ __irq_entry:
        mov     r1, sp
        stmdb   sp!, {lr}
        @ routine called with r0 = irq number, r1 = struct pt_regs *
-       bl      nvic_do_IRQ
+       bl      nvic_handle_irq
 
        pop     {lr}
        @
index b361de143756d4a3412f4512d794eebc173e25e8..14235ba64a90736ecebad575f5d88ea4568e8eaf 100644 (file)
@@ -87,6 +87,7 @@ ENTRY(stext)
 ENDPROC(stext)
 
 #ifdef CONFIG_SMP
+       .text
 ENTRY(secondary_startup)
        /*
         * Common entry point for secondary CPUs.
index 9cf6063020ae99ce68afb617bf88905ec5f6212f..2c7cc1e03473aee9463e86d7dbedfc999f6e51f7 100644 (file)
@@ -343,6 +343,7 @@ __turn_mmu_on_loc:
        .long   __turn_mmu_on_end
 
 #if defined(CONFIG_SMP)
+       .text
 ENTRY(secondary_startup)
        /*
         * Common entry point for secondary CPUs.
index 4910232c48330d4dc857d6bf32754bfd781008b5..797b1a6a4906da0f8ca3f942186ac0e033250ce6 100644 (file)
@@ -56,8 +56,8 @@ ENTRY(__boot_cpu_mode)
        ldr     \reg3, [\reg2]
        ldr     \reg1, [\reg2, \reg3]
        cmp     \mode, \reg1            @ matches primary CPU boot mode?
-       orrne   r7, r7, #BOOT_CPU_MODE_MISMATCH
-       strne   r7, [r5, r6]            @ record what happened and give up
+       orrne   \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
+       strne   \reg1, [\reg2, \reg3]   @ record what happened and give up
        .endm
 
 #else  /* ZIMAGE */
index d03b5bd889c5d0c3101ea82fde6cbd6f06d76f0a..536c85fe72a838aafe3371e0280175b8cda2b452 100644 (file)
@@ -197,6 +197,7 @@ void machine_shutdown(void)
  */
 void machine_halt(void)
 {
+       local_irq_disable();
        smp_send_stop();
 
        local_irq_disable();
@@ -211,6 +212,7 @@ void machine_halt(void)
  */
 void machine_power_off(void)
 {
+       local_irq_disable();
        smp_send_stop();
 
        if (pm_power_off)
@@ -230,6 +232,7 @@ void machine_power_off(void)
  */
 void machine_restart(char *cmd)
 {
+       local_irq_disable();
        smp_send_stop();
 
        arm_pm_restart(reboot_mode, cmd);
index 63af9a7ae5124f82484feb8b33a88114b915753d..afc2489ee13bc098523f92549baa4a69728c1ced 100644 (file)
@@ -836,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
 void __init hyp_mode_check(void)
 {
 #ifdef CONFIG_ARM_VIRT_EXT
+       sync_boot_mode();
+
        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
@@ -971,6 +973,7 @@ static const char *hwcap_str[] = {
        "vfpv4",
        "idiva",
        "idivt",
+       "vfpd32",
        "lpae",
        NULL
 };
index a98b62dca2faf9bbce7fbcb786d4c325513f7231..c2edfff573c2c9e2e68193cf729a98f460ac1a2d 100644 (file)
@@ -70,23 +70,6 @@ static inline void ipi_flush_bp_all(void *ignored)
        local_flush_bp_all();
 }
 
-#ifdef CONFIG_ARM_ERRATA_798181
-static int erratum_a15_798181(void)
-{
-       unsigned int midr = read_cpuid_id();
-
-       /* Cortex-A15 r0p0..r3p2 affected */
-       if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
-               return 0;
-       return 1;
-}
-#else
-static int erratum_a15_798181(void)
-{
-       return 0;
-}
-#endif
-
 static void ipi_flush_tlb_a15_erratum(void *arg)
 {
        dmb();
index b55b1015724b56931c57ad7b2920d760b80529e8..4a0544492f10e4cd63d490f61d3cff3e54adbd19 100644 (file)
@@ -245,7 +245,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
                local_flush_bp_all();
                local_flush_tlb_all();
-               dummy_flush_tlb_a15_erratum();
+               if (erratum_a15_798181())
+                       dummy_flush_tlb_a15_erratum();
        }
 
        atomic64_set(&per_cpu(active_asids, cpu), asid);
index ca46f413d867e9b90b18ef85001be758023211f5..53cdbd39ec8e23c1facab667f56fa9441aa870c6 100644 (file)
@@ -989,6 +989,7 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
+       phys_addr_t memblock_limit = 0;
        int i, j, highmem = 0;
        phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
 
@@ -1052,9 +1053,32 @@ void __init sanity_check_meminfo(void)
                        bank->size = size_limit;
                }
 #endif
-               if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
-                       arm_lowmem_limit = bank->start + bank->size;
+               if (!bank->highmem) {
+                       phys_addr_t bank_end = bank->start + bank->size;
 
+                       if (bank_end > arm_lowmem_limit)
+                               arm_lowmem_limit = bank_end;
+
+                       /*
+                        * Find the first non-section-aligned page, and point
+                        * memblock_limit at it. This relies on rounding the
+                        * limit down to be section-aligned, which happens at
+                        * the end of this function.
+                        *
+                        * With this algorithm, the start or end of almost any
+                        * bank can be non-section-aligned. The only exception
+                        * is that the start of the bank 0 must be section-
+                        * aligned, since otherwise memory would need to be
+                        * allocated when mapping the start of bank 0, which
+                        * occurs before any free memory is mapped.
+                        */
+                       if (!memblock_limit) {
+                               if (!IS_ALIGNED(bank->start, SECTION_SIZE))
+                                       memblock_limit = bank->start;
+                               else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
+                                       memblock_limit = bank_end;
+                       }
+               }
                j++;
        }
 #ifdef CONFIG_HIGHMEM
@@ -1079,7 +1103,18 @@ void __init sanity_check_meminfo(void)
 #endif
        meminfo.nr_banks = j;
        high_memory = __va(arm_lowmem_limit - 1) + 1;
-       memblock_set_current_limit(arm_lowmem_limit);
+
+       /*
+        * Round the memblock limit down to a section size.  This
+        * helps to ensure that we will allocate memory from the
+        * last full section, which should be mapped.
+        */
+       if (memblock_limit)
+               memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+       if (!memblock_limit)
+               memblock_limit = arm_lowmem_limit;
+
+       memblock_set_current_limit(memblock_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -1288,8 +1323,6 @@ void __init paging_init(struct machine_desc *mdesc)
 {
        void *zero_page;
 
-       memblock_set_current_limit(arm_lowmem_limit);
-
        build_mem_type_table();
        prepare_page_table();
        map_lowmem();
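
A worked example of the limit calculation above (illustrative addresses,
assuming the classic 1MiB SECTION_SIZE rather than the 2MiB LPAE one):

    /* Single lowmem bank at 0x80000000, size 0x3fe80000. */
    phys_addr_t bank_start = 0x80000000;
    phys_addr_t bank_end   = 0x80000000 + 0x3fe80000;     /* 0xbfe80000 */
    phys_addr_t limit = 0;

    if (!IS_ALIGNED(bank_start, SECTION_SIZE))            /* aligned: skip */
            limit = bank_start;
    else if (!IS_ALIGNED(bank_end, SECTION_SIZE))         /* 0xbfe80000: hit */
            limit = bank_end;

    limit = round_down(limit, SECTION_SIZE);              /* 0xbfe00000 */
    /* Early memblock allocations are thus kept inside memory that can be
     * section-mapped without first allocating extra page tables. */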
index f64afb9f1bd595a9fb1b9c0cd8bb442c9fda0337..bdd3be4be77aa50c93dcc20c8afdaa3af3c0cf5f 100644 (file)
@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
  ARM(  str     r3, [r0, #2048]! )
  THUMB(        add     r0, r0, #2048 )
  THUMB(        str     r3, [r0] )
-       ALT_SMP(mov     pc,lr)
+       ALT_SMP(W(nop))
        ALT_UP (mcr     p15, 0, r0, c7, c10, 1)         @ flush_pte
 #endif
        mov     pc, lr
index c36ac69488c8fa60b17d91d085d1f8efcae42cb6..01a719e18bb047c655694c336d5a42e1388d1062 100644 (file)
@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext)
        tst     r3, #1 << (55 - 32)             @ L_PTE_DIRTY
        orreq   r2, #L_PTE_RDONLY
 1:     strd    r2, r3, [r0]
-       ALT_SMP(mov     pc, lr)
+       ALT_SMP(W(nop))
        ALT_UP (mcr     p15, 0, r0, c7, c10, 1)         @ flush_pte
 #endif
        mov     pc, lr
index 5c6d5a3050eac65f1f2f89465d8398c971f866e4..73398bcf9bd8ea8d7293803828cb837306742fb7 100644 (file)
@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle)
 ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
-       ALT_SMP(mov     pc, lr)                 @ MP extensions imply L1 PTW
-       ALT_UP(W(nop))
-       dcache_line_size r2, r3
-1:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
+       ALT_SMP(W(nop))                 @ MP extensions imply L1 PTW
+       ALT_UP_B(1f)
+       mov     pc, lr
+1:     dcache_line_size r2, r3
+2:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, r2
        subs    r1, r1, r2
-       bhi     1b
+       bhi     2b
        dsb
        mov     pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)