rtime.felk.cvut.cz Git - mcf548x/linux.git/commitdiff
Various compilation fixes.
authorMartin <meloumar@cmp.felk.cvut.cz>
Thu, 26 May 2011 00:07:56 +0000 (02:07 +0200)
committerMartin <meloumar@cmp.felk.cvut.cz>
Thu, 26 May 2011 00:07:56 +0000 (02:07 +0200)
24 files changed:
arch/m68k/coldfire/config.c
arch/m68k/coldfire/ints.c
arch/m68k/coldfire/signal.c
arch/m68k/include/asm/atomic.h
arch/m68k/include/asm/m548xgpt.h
arch/m68k/include/asm/m548xsim.h
arch/m68k/include/asm/machdep.h
arch/m68k/include/asm/mcf_pgalloc.h
arch/m68k/include/asm/mcf_pgtable.h
arch/m68k/include/asm/mcfsim.h
arch/m68k/include/asm/mmu_context.h
arch/m68k/include/asm/pgalloc.h
arch/m68k/include/asm/pgtable_mm.h
arch/m68k/include/asm/setup.h
arch/m68k/include/asm/signal.h
arch/m68k/include/asm/tlbflush.h
arch/m68k/include/asm/uaccess.h
arch/m68k/include/asm/uaccess_coldfire.h [new file with mode: 0644]
arch/m68k/include/asm/uaccess_mm.h
arch/m68k/include/asm/virtconvert.h
arch/m68k/kernel/setup.c
arch/m68k/kernel/sys_m68k.c
arch/m68k/mm/cf-mmu.c
arch/m68k/mm/memory.c

index 23be9d43586514d2778b682f023a8981235ca09c..4f5f9edf9aca5732748515d5f0ba5c5f301040b7 100644 (file)
@@ -55,7 +55,7 @@ extern char _etext, _edata, __init_begin, __init_end;
 extern struct console mcfrs_console;
 extern unsigned long availmem;
 
-#if CONFIG_UBOOT
+#ifdef CONFIG_UBOOT
 extern char m68k_command_line[CL_SIZE];
 struct mem_info m68k_ramdisk;
 #endif
@@ -380,7 +380,7 @@ void coldfire_reboot(void)
 
 static void coldfire_get_model(char *model)
 {
-       sprintf(model, "Version 4 ColdFire");
+       sprintf(model, "Version 4e ColdFire");
 }
 
 static void __init
@@ -462,7 +462,7 @@ void __init config_coldfire(void)
 }
 
 //no special boot record
-int coldfire_parse_bootinfo(const struct bi_record *)
+int coldfire_parse_bootinfo(const struct bi_record * record)
 {
        return 1;
 }
index 00d5dd637aa6873634497bf874c35736a786c9e4..abadadc33f08b264fe454f7f397ace5c9fb50567 100644 (file)
@@ -40,9 +40,9 @@
 /*
  * IRQ Handler lists.
  */
-static struct irq_node *irq_list[SYS_IRQS];
-static struct irq_controller *irq_controller[SYS_IRQS];
-static int irq_depth[SYS_IRQS];
+static struct irq_node *irq_list[NR_IRQS];
+static struct irq_controller *irq_controller[NR_IRQS];
+static int irq_depth[NR_IRQS];
 
 /*
  * IRQ Controller
@@ -69,7 +69,7 @@ static struct irq_controller m547x_8x_irq_controller = {
 # error No IRQ controller defined
 #endif
 
-#define        POOL_SIZE       SYS_IRQS
+#define        POOL_SIZE       NR_IRQS
 static struct irq_node  pool[POOL_SIZE];
 static struct irq_node *get_irq_node(void);
 
@@ -88,10 +88,10 @@ void __init init_IRQ(void)
        int i;
 
 #if defined(CONFIG_M5445X)
-       for (i = 0; i < SYS_IRQS; i++)
+       for (i = 0; i < NR_IRQS; i++)
                irq_controller[i] = &m5445x_irq_controller;
 #elif defined(CONFIG_M547X_8X)
-       for (i = 0; i < SYS_IRQS; i++)
+       for (i = 0; i < NR_IRQS; i++)
                irq_controller[i] = &m547x_8x_irq_controller;
 #endif
 }
index 38671c44a665c4cb8f924ccc29f61f27c960f9e5..900113afefaf4fb9ce25c82340d3b0bda1d5f013 100644 (file)
@@ -33,8 +33,8 @@
 #include <linux/binfmts.h>
 
 #include <asm/setup.h>
-#include <asm/cf_uaccess.h>
-#include <asm/cf_pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
 #include <asm/traps.h>
 #include <asm/ucontext.h>
 #include <asm/cacheflush.h>
index 3269a1305d23c3895deed1285159fe7c8d5d76df..6c6e0c3374bfa9d50a075bab704ff3740a48ee56 100644 (file)
@@ -55,6 +55,13 @@ static inline int atomic_dec_and_test(atomic_t *v)
        return c != 0;
 }
 
+static inline int atomic_dec_and_test_lt(volatile atomic_t *v)
+{      /* Decrement *v; return true iff the result is negative (< 0). */
+       char c;
+       __asm__ __volatile__("subql #1,%1; slt %0" : "=d" (c), "=m" (*v)); /* slt = set-on-less-than */
+       return c != 0;
+}
+
 static inline int atomic_inc_and_test(atomic_t *v)
 {
        char c;
@@ -62,6 +69,13 @@ static inline int atomic_inc_and_test(atomic_t *v)
        return c != 0;
 }
 
+static inline int atomic_inc_and_test_lt(volatile atomic_t *v)
+{      /* Increment *v; return true iff the result is negative (< 0). */
+       char c;
+       __asm__ __volatile__("addql #1,%1; slt %0" : "=d" (c), "=m" (*v)); /* slt = set-on-less-than */
+       return c != 0;
+}
+
 #ifdef CONFIG_RMW_INSNS
 
 static inline int atomic_add_return(int i, atomic_t *v)
index b68273bcb5b287e6a007266c6da06c0452f94a15..42d2e43524b77de89756dfedac87e8e0a39c4fcc 100644 (file)
 *********************************************************************/
 
 /* Register read/write macros */
-#define MCF_GPT_GMS0       0x000800
-#define MCF_GPT_GCIR0      0x000804
-#define MCF_GPT_GPWM0      0x000808
-#define MCF_GPT_GSR0       0x00080C
-#define MCF_GPT_GMS1       0x000810
-#define MCF_GPT_GCIR1      0x000814
-#define MCF_GPT_GPWM1      0x000818
-#define MCF_GPT_GSR1       0x00081C
-#define MCF_GPT_GMS2       0x000820
-#define MCF_GPT_GCIR2      0x000824
-#define MCF_GPT_GPWM2      0x000828
-#define MCF_GPT_GSR2       0x00082C
-#define MCF_GPT_GMS3       0x000830
-#define MCF_GPT_GCIR3      0x000834
-#define MCF_GPT_GPWM3      0x000838
-#define MCF_GPT_GSR3       0x00083C
-#define MCF_GPT_GMS(x)     (0x000800+((x)*0x010))
-#define MCF_GPT_GCIR(x)    (0x000804+((x)*0x010))
-#define MCF_GPT_GPWM(x)    (0x000808+((x)*0x010))
-#define MCF_GPT_GSR(x)     (0x00080C+((x)*0x010))
+#define MCF_GPT_GMS0       MCF_REG32(0x000800)
+#define MCF_GPT_GCIR0      MCF_REG32(0x000804)
+#define MCF_GPT_GPWM0      MCF_REG32(0x000808)
+#define MCF_GPT_GSR0       MCF_REG32(0x00080C)
+#define MCF_GPT_GMS1       MCF_REG32(0x000810)
+#define MCF_GPT_GCIR1      MCF_REG32(0x000814)
+#define MCF_GPT_GPWM1      MCF_REG32(0x000818)
+#define MCF_GPT_GSR1       MCF_REG32(0x00081C)
+#define MCF_GPT_GMS2       MCF_REG32(0x000820)
+#define MCF_GPT_GCIR2      MCF_REG32(0x000824)
+#define MCF_GPT_GPWM2      MCF_REG32(0x000828)
+#define MCF_GPT_GSR2       MCF_REG32(0x00082C)
+#define MCF_GPT_GMS3       MCF_REG32(0x000830)
+#define MCF_GPT_GCIR3      MCF_REG32(0x000834)
+#define MCF_GPT_GPWM3      MCF_REG32(0x000838)
+#define MCF_GPT_GSR3       MCF_REG32(0x00083C)
+#define MCF_GPT_GMS(x)     MCF_REG32(0x000800+((x)*0x010))
+#define MCF_GPT_GCIR(x)    MCF_REG32(0x000804+((x)*0x010))
+#define MCF_GPT_GPWM(x)    MCF_REG32(0x000808+((x)*0x010))
+#define MCF_GPT_GSR(x)     MCF_REG32(0x00080C+((x)*0x010))
 
 /* Bit definitions and macros for MCF_GPT_GMS */
 #define MCF_GPT_GMS_TMS(x)         (((x)&0x00000007)<<0)
index 149135ef30d23ea3b8318f13ee6c7f8075b4206d..6b34701db4f555b3ea26a8d8939d3052e0fd1df5 100644 (file)
 #define MCF_PAR_PSC_RTS_RTS    (0x30)
 #define MCF_PAR_PSC_CANRX      (0x40)
 
+/*
+ *      System Integration Unit Registers
+ */
+#define MCF_SDRAMDS    MCF_REG32(0x000004)     /* SDRAM Drive Strength         */
+#define MCF_SBCR               MCF_REG32(0x000010)     /* System Breakpoint Control    */
+#define MCF_CSnCFG(x)  MCF_REG32(0x000020+(x*4))/* SDRAM Chip Select X         */
+#define MCF_SECSACR     MCF_REG32(0x000038)    /* Sequential Access Control    */      
+#define MCF_RSR         MCF_REG32(0x000044)    /* Reset Status                 */
+#define MCF_JTAGID      MCF_REG32(0x000050)    /* JTAG Device Identification   */
+
+/*
+ *      FlexBus Chip Selects Registers
+ */
+#define MCF_CSARn(x)    MCF_REG32(0x000500+(x*0xC))
+#define MCF_CSMRn(x)    MCF_REG32(0x000504+(x*0xC))
+#define MCF_CSCRn(x)    MCF_REG32(0x000508+(x*0xC))
+
+/*
+ *      Interrupt Controller Registers
+ */
+#define MCF_IPRH       MCF_REG32(0x000700)
+#define MCF_IPRL       MCF_REG32(0x000704)
+#define MCF_IMRH       MCF_REG32(0x000708)
+#define MCF_IMRL       MCF_REG32(0x00070C)
+#define MCF_INTFRCH    MCF_REG32(0x000710)
+#define MCF_INTFRCL    MCF_REG32(0x000714)
+#define MCF_IRLR       MCF_REG08(0x000718)
+#define MCF_IACKLPR    MCF_REG08(0x000719)
+#define MCF_SWIACK     MCF_REG08(0x0007E0)
+#define MCF_LnIACK(x)  MCF_REG08(0x0007E4+((x)*0x004))
+#define MCF_ICR(x)     MCF_REG08(0x000740+((x)*0x001))
+
+/*
+ *     Slice Timers Registers
+ */
+#define MCF_SLTCNT(x)   MCF_REG32(0x000900+((x)*0x010))
+#define MCF_SCR(x)      MCF_REG32(0x000904+((x)*0x010))
+#define MCF_SCNT(x)     MCF_REG32(0x000908+((x)*0x010))
+#define MCF_SSR(x)      MCF_REG32(0x00090C+((x)*0x010))
+
+/*
+ *     Interrupt sources
+ */
+#define ISC_EPORT_Fn(x)                (x)             /* EPORT Interrupts     */
+#define ISC_USB_EPn(x)         (15+(x))        /* USB Endpoint         */
+#define ISC_USB_ISR            (22)            /* USB General source   */
+#define ISC_USB_AISR           (22)            /* USB core source      */
+#define ISC_DSPI_OVRFW         (25)            /* DSPI overflow        */
+#define ISC_DSPI_RFOF          (26)            
+#define ISC_DSPI_RFDF          (27)            
+#define ISC_DSPI_TFUF          (28)            
+#define ISC_DSPI_TCF           (29)            
+#define ISC_DSPI_TFFF          (30)            
+#define ISC_DSPI_EOQF          (31)            
+#define ISC_PSCn(x)            (35-(x))                
+#define ISC_COMM_TIM           (36)            
+#define ISC_SEC                        (37)            
+#define ISC_FEC1               (38)            
+#define ISC_FEC0               (39)            
+#define ISC_I2C                        (40)            
+#define ISC_PCI_ARB            (41)            
+#define ISC_PCI_CB             (42)            
+#define ISC_PCI_XLB            (43)            
+#define ISC_DMA                        (48)            
+#define ISC_CANn_ERR(x)                (49+(6*(x)))            
+#define ISC_CANn_BUSOFF(x)     (50+(6*(x)))            
+#define ISC_CANn_MBOR(x)       (51+(6*(x)))            
+#define ISC_CAN0_WAKEIN                (52)            
+#define ISC_SLTn(x)            (54-(x))                
+#define ISC_GPTn(x)            (62-(x))                
+
+/*
+ *     Interrupt level and priorities
+ */
+#define ILP_TOP                        (MCF_ICR_IL(5) | MCF_ICR_IP(3))
+#define ILP_SLT0               (MCF_ICR_IL(5) | MCF_ICR_IP(2))
+#define ILP_SLT1               (MCF_ICR_IL(5) | MCF_ICR_IP(1))
+#define ILP_DMA                        (MCF_ICR_IL(5) | MCF_ICR_IP(0))
+#define ILP_SEC                        (MCF_ICR_IL(4) | MCF_ICR_IP(7))
+#define ILP_FEC0               (MCF_ICR_IL(4) | MCF_ICR_IP(6))
+#define ILP_FEC1               (MCF_ICR_IL(4) | MCF_ICR_IP(5))
+#define ILP_PCI_XLB            (MCF_ICR_IL(4) | MCF_ICR_IP(4))
+#define ILP_PCI_ARB            (MCF_ICR_IL(4) | MCF_ICR_IP(3))
+#define ILP_PCI_CB             (MCF_ICR_IL(4) | MCF_ICR_IP(2))
+#define ILP_I2C                        (MCF_ICR_IL(4) | MCF_ICR_IP(1))
+
+#define ILP_USB_EPn(x)         (MCF_ICR_IL(3) | MCF_ICR_IP(7-(x)))
+#define ILP_USB_EP0            (MCF_ICR_IL(3) | MCF_ICR_IP(7))
+#define ILP_USB_EP1            (MCF_ICR_IL(3) | MCF_ICR_IP(6))
+#define ILP_USB_EP2            (MCF_ICR_IL(3) | MCF_ICR_IP(5))
+#define ILP_USB_EP3            (MCF_ICR_IL(3) | MCF_ICR_IP(4))
+#define ILP_USB_EP4            (MCF_ICR_IL(3) | MCF_ICR_IP(3))
+#define ILP_USB_EP5            (MCF_ICR_IL(3) | MCF_ICR_IP(2))
+#define ILP_USB_EP6            (MCF_ICR_IL(3) | MCF_ICR_IP(1))
+#define ILP_USB_ISR            (MCF_ICR_IL(3) | MCF_ICR_IP(0))
+
+#define ILP_USB_AISR           (MCF_ICR_IL(2) | MCF_ICR_IP(7))
+#define ILP_DSPI_OVRFW         (MCF_ICR_IL(2) | MCF_ICR_IP(6))
+#define ILP_DSPI_RFOF          (MCF_ICR_IL(2) | MCF_ICR_IP(5))
+#define ILP_DSPI_RFDF          (MCF_ICR_IL(2) | MCF_ICR_IP(4))
+#define ILP_DSPI_TFUF          (MCF_ICR_IL(2) | MCF_ICR_IP(3))
+#define ILP_DSPI_TCF           (MCF_ICR_IL(2) | MCF_ICR_IP(2))
+#define ILP_DSPI_TFFF          (MCF_ICR_IL(2) | MCF_ICR_IP(1))
+#define ILP_DSPI_EOQF          (MCF_ICR_IL(2) | MCF_ICR_IP(0))
+
+#define ILP_COMM_TIM           (MCF_ICR_IL(1) | MCF_ICR_IP(7))
+#define ILP_PSCn(x)            (MCF_ICR_IL(1) | MCF_ICR_IP(3-((x)&3)))
+#define ILP_PSC0               (MCF_ICR_IL(1) | MCF_ICR_IP(3))
+#define ILP_PSC1               (MCF_ICR_IL(1) | MCF_ICR_IP(2))
+#define ILP_PSC2               (MCF_ICR_IL(1) | MCF_ICR_IP(1))
+#define ILP_PSC3               (MCF_ICR_IL(1) | MCF_ICR_IP(0))
+
+
+
+
+
+/********************************************************************/
+
+/*
+ *      System Integration Unit Bitfields
+ */
+/* SBCR */
+#define MCF_SBCR_PIN2DSPI       (0x08000000)
+#define MCF_SBCR_DMA2CPU        (0x10000000)
+#define MCF_SBCR_CPU2DMA        (0x20000000)
+#define MCF_SBCR_PIN2DMA        (0x40000000)
+#define MCF_SBCR_PIN2CPU        (0x80000000)
+
+/* SECSACR */
+#define MCF_SECSACR_SEQEN       (0x00000001)
+
+/* RSR */
+#define MCF_RSR_RST             (0x00000001)
+#define MCF_RSR_RSTWD           (0x00000002)
+#define MCF_RSR_RSTJTG          (0x00000008)
+
+/* JTAGID */
+#define MCF_JTAGID_REV          (0xF0000000)
+#define MCF_JTAGID_PROCESSOR    (0x0FFFFFFF)
+#define MCF_JTAGID_MCF5485      (0x0800C01D)
+#define MCF_JTAGID_MCF5484      (0x0800D01D)
+#define MCF_JTAGID_MCF5483      (0x0800E01D)
+#define MCF_JTAGID_MCF5482      (0x0800F01D)
+#define MCF_JTAGID_MCF5481      (0x0801001D)
+#define MCF_JTAGID_MCF5480      (0x0801101D)
+#define MCF_JTAGID_MCF5475      (0x0801201D)
+#define MCF_JTAGID_MCF5474      (0x0801301D)
+#define MCF_JTAGID_MCF5473      (0x0801401D)
+#define MCF_JTAGID_MCF5472      (0x0801501D)
+#define MCF_JTAGID_MCF5471      (0x0801601D)
+#define MCF_JTAGID_MCF5470      (0x0801701D)
+
+
+/*
+ *      Interrupt Controller Bitfields
+ */
+#define MCF_IRLR_IRQ(x)         (((x)&0x7F)<<1)
+#define MCF_IACKLPR_PRI(x)      (((x)&0x0F)<<0)
+#define MCF_IACKLPR_LEVEL(x)    (((x)&0x07)<<4)
+#define MCF_ICR_IP(x)           (((x)&0x07)<<0)
+#define MCF_ICR_IL(x)           (((x)&0x07)<<3)
+
+/*
+ *      Slice Timers Bitfields
+ */
+#define MCF_SCR_TEN                    (0x01000000)
+#define MCF_SCR_IEN                    (0x02000000)
+#define MCF_SCR_RUN                    (0x04000000)
+#define MCF_SSR_ST                     (0x01000000)
+#define MCF_SSR_BE                     (0x02000000)
+
+
+/*
+ * Some needed coldfire registers
+ */
+#define MCF_PAR_PCIBG             MCF_REG16(0x000A48)
+#define MCF_PAR_PCIBR             MCF_REG16(0x000A4A)
+#define MCF_PAR_PSCn(x)            MCF_REG08(0x000A4F-((x)&0x3))
+#define MCF_PAR_FECI2CIRQ         MCF_REG16(0x000A44)
+#define MCF_PAR_DSPI               MCF_REG16(0x000A50)
+#define MCF_PAR_TIMER              MCF_REG08(0X000A52)
+#define MCF_EPPAR                 MCF_REG16(0x000F00)
+#define MCF_EPIER                 MCF_REG08(0x000F05)
+#define MCF_EPFR                          MCF_REG08(0x000F0C)
+
+/*
+ * Some GPIO bitfields
+ */
+#define MCF_PAR_SDA                    (0x0008)
+#define MCF_PAR_SCL                    (0x0004)
+#define MCF_PAR_PSC_TXD                (0x04)
+#define MCF_PAR_PSC_RXD                (0x08)
+#define MCF_PAR_PSC_RTS(x)             (((x)&0x03)<<4)
+#define MCF_PAR_PSC_CTS(x)             (((x)&0x03)<<6)
+#define MCF_PAR_PSC_CTS_GPIO           (0x00)
+#define MCF_PAR_PSC_CTS_BCLK           (0x80)
+#define MCF_PAR_PSC_CTS_CTS            (0xC0)
+#define MCF_PAR_PSC_RTS_GPIO           (0x00)
+#define MCF_PAR_PSC_RTS_FSYNC          (0x20)
+#define MCF_PAR_PSC_RTS_RTS            (0x30)
+#define MCF_PAR_PSC_CANRX              (0x40)
+
+
+/*
+ * Some used coldfire values
+ */
+#define MCF_EPIER_EPIE(x)          (0x01 << (x))
+#define MCF_EPPAR_EPPAx_FALLING    (2)
+#define MCF_EPPAR_EPPA(n,x)        (((x)&0x0003) << (2*n))
+
 #endif /* m548xsim_h */
index 415d5484916c4d9e6ec1981e380cfe9a0f1ab4de..69aef077c7c4f564d5f439db07ceea5fcc4d6327 100644 (file)
@@ -42,4 +42,11 @@ extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
 extern void config_BSP(char *command, int len);
 extern void do_IRQ(int irq, struct pt_regs *fp);
 
+#ifdef CONFIG_COLDFIRE
+extern void __init config_coldfire(void);
+extern void __init mmu_context_init(void);
+extern irq_handler_t mach_default_handler;
+extern void (*mach_tick)(void);
+#endif
+
 #endif /* _M68K_MACHDEP_H */
index 88089c72280ac21e53eaede950e1479a059938e0..0932ecf5538a748d7d80d319b01340e1231072f6 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <asm/coldfire.h>
 #include <asm/page.h>
-#include <asm/mcf_tlbflush.h>
+#include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 
 extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
index e649803ee1a2619f2ebf9b0ab936b1fe5ca49af3..0b63fb5b3f6ecd134e6b73892c4505853494313a 100644 (file)
@@ -176,7 +176,7 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #define pmd_set(pmdp, ptep) do {} while (0)
 
-extern inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
+static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
 {
        pgd_val(*pgdp) = virt_to_phys(pmdp);
 }
index f98ffc2fe7c5d5f5567cbddd84eadfdb89e9cb46..f72ae67936cc67d4993e316bc5e1b2bfa2511472 100644 (file)
 #include <asm/m548xgpt.h>
 #endif
 
+/*
+ *     Define the base address of the SIM within the MBAR address space.
+ */
+#define        MCFSIM_BASE             0x0             /* Base address of SIM */
+
+/*
+ *     Bit definitions for the ICR family of registers.
+ */
+#define        MCFSIM_ICR_AUTOVEC      0x80            /* Auto-vectored intr */
+#define        MCFSIM_ICR_LEVEL0       0x00            /* Level 0 intr */
+#define        MCFSIM_ICR_LEVEL1       0x04            /* Level 1 intr */
+#define        MCFSIM_ICR_LEVEL2       0x08            /* Level 2 intr */
+#define        MCFSIM_ICR_LEVEL3       0x0c            /* Level 3 intr */
+#define        MCFSIM_ICR_LEVEL4       0x10            /* Level 4 intr */
+#define        MCFSIM_ICR_LEVEL5       0x14            /* Level 5 intr */
+#define        MCFSIM_ICR_LEVEL6       0x18            /* Level 6 intr */
+#define        MCFSIM_ICR_LEVEL7       0x1c            /* Level 7 intr */
+
+#define        MCFSIM_ICR_PRI0         0x00            /* Priority 0 intr */
+#define        MCFSIM_ICR_PRI1         0x01            /* Priority 1 intr */
+#define        MCFSIM_ICR_PRI2         0x02            /* Priority 2 intr */
+#define        MCFSIM_ICR_PRI3         0x03            /* Priority 3 intr */
+
+/*
+ *     Bit definitions for the Interrupt Mask register (IMR).
+ */
+#define        MCFSIM_IMR_EINT1        0x0002          /* External intr # 1 */
+#define        MCFSIM_IMR_EINT2        0x0004          /* External intr # 2 */
+#define        MCFSIM_IMR_EINT3        0x0008          /* External intr # 3 */
+#define        MCFSIM_IMR_EINT4        0x0010          /* External intr # 4 */
+#define        MCFSIM_IMR_EINT5        0x0020          /* External intr # 5 */
+#define        MCFSIM_IMR_EINT6        0x0040          /* External intr # 6 */
+#define        MCFSIM_IMR_EINT7        0x0080          /* External intr # 7 */
+
+#define        MCFSIM_IMR_SWD          0x0100          /* Software Watchdog intr */
+#define        MCFSIM_IMR_TIMER1       0x0200          /* TIMER 1 intr */
+#define        MCFSIM_IMR_TIMER2       0x0400          /* TIMER 2 intr */
+#define MCFSIM_IMR_MBUS                0x0800          /* MBUS intr    */
+#define        MCFSIM_IMR_UART1        0x1000          /* UART 1 intr */
+#define        MCFSIM_IMR_UART2        0x2000          /* UART 2 intr */
+
+/*
+ *     Mask for all of the SIM devices. Some parts have more or less
+ *     SIM devices. This is a catchall for the standard set.
+ */
+#ifndef MCFSIM_IMR_MASKALL
+#define        MCFSIM_IMR_MASKALL      0x3ffe          /* All intr sources */
+#endif
+
+
+/*
+ *     PIT interrupt settings, if not found in mXXXXsim.h file.
+ */
+#ifndef        ICR_INTRCONF
+#define        ICR_INTRCONF            0x2b            /* PIT1 level 5, priority 3 */
+#endif
+#ifndef        MCFPIT_IMR
+#define        MCFPIT_IMR              MCFINTC_IMRH
+#endif
+#ifndef        MCFPIT_IMR_IBIT
+#define        MCFPIT_IMR_IBIT         (1 << (MCFINT_PIT1 - 32))
+#endif
+
+
+#ifndef __ASSEMBLY__
+/*
+ *     Definition for the interrupt auto-vectoring support.
+ */
+extern void    mcf_autovector(unsigned int vec);
+#endif /* __ASSEMBLY__ */
+
 /****************************************************************************/
 #endif /* mcfsim_h */
index 3ef8f88c9f899f733b02e7c34ea62e8a04c99b94..db551a8cdd0be45657ec325dd52134b63ed18dc0 100644 (file)
@@ -8,7 +8,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 }
 
 #ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3 && !defined(CONFIG_COLDFIRE)
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -229,7 +229,7 @@ static inline void activate_mm(struct mm_struct *active_mm,
 #define deactivate_mm(tsk, mm) do { } while (0)
 
 extern void mmu_context_init(void);
-#if defined(CONFIG_M547X_8X)
+#ifdef CONFIG_M547X_8X
 #define prepare_arch_switch(next) load_ksp_mmu(next)
 
 //FIXME: Don't use TLB here for kernel stacks
@@ -307,6 +307,8 @@ end:
 
 #endif /* CONFIG_M547X_8X */
 
+#endif /* CONFIG_COLDFIRE */
+
 #else /* !CONFIG_MMU */
 
 static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
index c294aad8a9000bd9d891d5a0a35b9987e5ed9c6b..bfa4295375427f5afaca1f7532f98b86ac7697ff 100644 (file)
@@ -9,6 +9,8 @@
 #include <asm/virtconvert.h>
 #ifdef CONFIG_SUN3
 #include <asm/sun3_pgalloc.h>
+#elif defined CONFIG_COLDFIRE
+#include <asm/mcf_pgalloc.h>
 #else
 #include <asm/motorola_pgalloc.h>
 #endif
index c0c51463303c3db3825d79fcb3e596b859fc0f70..82878af9f45e706c34d11677a171366592fc8c04 100644 (file)
@@ -136,7 +136,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #ifdef CONFIG_SUN3
 #include <asm/sun3_pgtable.h>
-#elif CONFIG_COLDFIRE
+#elif defined(CONFIG_COLDFIRE)
 #include <asm/mcf_pgtable.h>
 #else
 #include <asm/motorola_pgtable.h>
index bed5b186ac6a3b510b3954a00087ac356fdd900b..951ce266173ab67aef8bef24039289e84bee0d89 100644 (file)
@@ -40,6 +40,7 @@
 #define MACH_HP300    9
 #define MACH_Q40     10
 #define MACH_SUN3X   11
+#define MACH_CFMMU   12
 
 #define COMMAND_LINE_SIZE 256
 
index 25275377aee638a96a0d963ff65ffe4840bd2027..f728e069f097f783031b7809d676901d1cc773d7 100644 (file)
@@ -151,6 +151,7 @@ typedef struct sigaltstack {
 #include <asm/sigcontext.h>
 
 #if !defined(__uClinux__)
+#ifndef CONFIG_COLDFIRE
 #define __HAVE_ARCH_SIG_BITOPS
 
 static inline void sigaddset(sigset_t *set, int _sig)
@@ -206,6 +207,7 @@ extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
 
 #undef __HAVE_ARCH_SIG_BITOPS
 #define ptrace_signal_deliver(regs, cookie) do { } while (0)
+#endif /* CONFIG_COLDFIRE */
 
 #endif /* __uClinux__ */
 #endif /* __KERNEL__ */
index a6b4ed4fc90faf9acdb31262fca3623f1f09cc40..a06262bd2dcb11dc304086e591460ba7992f982a 100644 (file)
@@ -2,7 +2,7 @@
 #define _M68K_TLBFLUSH_H
 
 #ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 
 #include <asm/current.h>
 
@@ -92,7 +92,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
        flush_tlb_all();
 }
 
-#else
+#elif defined(CONFIG_SUN3)
 
 
 /* Reserved PMEGs. */
@@ -214,6 +214,8 @@ static inline void flush_tlb_kernel_page (unsigned long addr)
        sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
 }
 
+#else /* CONFIG_COLDFIRE */
+#include <asm/mcf_tlbflush.h>
 #endif
 
 #else /* !CONFIG_MMU */
@@ -262,6 +264,7 @@ static inline void flush_tlb_kernel_page(unsigned long addr)
        BUG();
 }
 
+
 #endif /* CONFIG_MMU */
 
 #endif /* _M68K_TLBFLUSH_H */
index 38f92dbb9a45003dc0ccd802d4d29a2ac6b11de1..acd8096d5210502b36d1e9221516eced4b3c6044 100644 (file)
@@ -1,5 +1,7 @@
 #ifdef __uClinux__
 #include "uaccess_no.h"
+#elif defined(CONFIG_COLDFIRE)
+#include "uaccess_coldfire.h"
 #else
 #include "uaccess_mm.h"
 #endif
diff --git a/arch/m68k/include/asm/uaccess_coldfire.h b/arch/m68k/include/asm/uaccess_coldfire.h
new file mode 100644 (file)
index 0000000..7bea1f5
--- /dev/null
@@ -0,0 +1,376 @@
+#ifndef __M68K_UACCESS_H
+#define __M68K_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+
+/* The "moves" command is not available in the CF instruction set. */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <asm/segment.h>
+
+#define VERIFY_READ    0
+#define VERIFY_WRITE   1
+
+/* We let the MMU do all checking */
+#define access_ok(type, addr, size) 1
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+       unsigned long insn, fixup;
+};
+
+extern int __put_user_bad(void);
+extern int __get_user_bad(void);
+
+#define __put_user_asm(res, x, ptr, bwl, reg, err)     \
+asm volatile ("\n"                                     \
+       "1:     move."#bwl"     %2,%1\n"                \
+       "2:\n"                                          \
+       "       .section .fixup,\"ax\"\n"               \
+       "       .even\n"                                \
+       "10:    moveq.l %3,%0\n"                        \
+       "       jra 2b\n"                               \
+       "       .previous\n"                            \
+       "\n"                                            \
+       "       .section __ex_table,\"a\"\n"            \
+       "       .align  4\n"                            \
+       "       .long   1b,10b\n"                       \
+       "       .long   2b,10b\n"                       \
+       "       .previous"                              \
+       : "+d" (res), "=m" (*(ptr))                     \
+       : #reg (x), "i" (err))
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ */
+
+#define __put_user(x, ptr)                                             \
+({                                                                     \
+       typeof(*(ptr)) __pu_val = (x);                                  \
+       int __pu_err = 0;                                               \
+       __chk_user_ptr(ptr);                                            \
+       switch (sizeof (*(ptr))) {                                      \
+       case 1:                                                         \
+               __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
+               break;                                                  \
+       case 2:                                                         \
+               __put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \
+               break;                                                  \
+       case 4:                                                         \
+               __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
+               break;                                                  \
+       case 8:                                                         \
+               {                                                       \
+               const void __user *__pu_ptr = (ptr);                    \
+               asm volatile ("\n"                                      \
+                       "1:     move.l  %2,(%1)+\n"                     \
+                       "2:     move.l  %R2,(%1)\n"                     \
+                       "3:\n"                                          \
+                       "       .section .fixup,\"ax\"\n"               \
+                       "       .even\n"                                \
+                       "10:    movel %3,%0\n"                          \
+                       "       jra 3b\n"                               \
+                       "       .previous\n"                            \
+                       "\n"                                            \
+                       "       .section __ex_table,\"a\"\n"            \
+                       "       .align 4\n"                             \
+                       "       .long 1b,10b\n"                         \
+                       "       .long 2b,10b\n"                         \
+                       "       .long 3b,10b\n"                         \
+                       "       .previous"                              \
+                       : "+d" (__pu_err), "+a" (__pu_ptr)              \
+                       : "r" (__pu_val), "i" (-EFAULT)                 \
+                       : "memory");                                    \
+               break;                                                  \
+           }                                                           \
+       default:                                                        \
+               __pu_err = __put_user_bad();                            \
+               break;                                                  \
+       }                                                               \
+       __pu_err;                                                       \
+})
+#define put_user(x, ptr)       __put_user(x, ptr)
+
+
+#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({    \
+       type __gu_val;                                          \
+       asm volatile ("\n"                                      \
+               "1:     move."#bwl"     %2,%1\n"                \
+               "2:\n"                                          \
+               "       .section .fixup,\"ax\"\n"               \
+               "       .even\n"                                \
+               "10:    move.l  %3,%0\n"                        \
+               "       subl    %1,%1\n"                        \
+               "       jra     2b\n"                           \
+               "       .previous\n"                            \
+               "\n"                                            \
+               "       .section __ex_table,\"a\"\n"            \
+               "       .align  4\n"                            \
+               "       .long   1b,10b\n"                       \
+               "       .previous"                              \
+               : "+d" (res), "=&" #reg (__gu_val)              \
+               : "m" (*(ptr)), "i" (err));                     \
+       (x) = (typeof(*(ptr)))(unsigned long)__gu_val;          \
+})
+
+#define __get_user(x, ptr)                                             \
+({                                                                     \
+       int __gu_err = 0;                                               \
+       __chk_user_ptr(ptr);                                            \
+       switch (sizeof(*(ptr))) {                                       \
+       case 1:                                                         \
+               __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);    \
+               break;                                                  \
+       case 2:                                                         \
+               __get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT);   \
+               break;                                                  \
+       case 4:                                                         \
+               __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);   \
+               break;                                                  \
+/*     case 8: disabled because gcc-4.1 has a broken typeof            \
+               {                                                       \
+               const void *__gu_ptr = (ptr);                           \
+               u64 __gu_val;                                           \
+               asm volatile ("\n"                                      \
+                       "1:     move.l  (%2)+,%1\n"                     \
+                       "2:     move.l  (%2),%R1\n"                     \
+                       "3:\n"                                          \
+                       "       .section .fixup,\"ax\"\n"               \
+                       "       .even\n"                                \
+                       "10:    move.l  %3,%0\n"                        \
+                       "       subl    %1,%1\n"                        \
+                       "       subl    %R1,%R1\n"                      \
+                       "       jra     3b\n"                           \
+                       "       .previous\n"                            \
+                       "\n"                                            \
+                       "       .section __ex_table,\"a\"\n"            \
+                       "       .align  4\n"                            \
+                       "       .long   1b,10b\n"                       \
+                       "       .long   2b,10b\n"                       \
+                       "       .previous"                              \
+                       : "+d" (__gu_err), "=&r" (__gu_val),            \
+                         "+a" (__gu_ptr)                               \
+                       : "i" (-EFAULT)                                 \
+                       : "memory");                                    \
+               (x) = (typeof(*(ptr)))__gu_val;                         \
+               break;                                                  \
+           }   */                                                      \
+       default:                                                        \
+               __gu_err = __get_user_bad();                            \
+               break;                                                  \
+       }                                                               \
+       __gu_err;                                                       \
+})
+#define get_user(x, ptr) __get_user(x, ptr)
+
+unsigned long __generic_copy_from_user(void *to, const void __user *from,
+       unsigned long n);
+unsigned long __generic_copy_to_user(void __user *to, const void *from,
+       unsigned long n);
+
+#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
+       asm volatile ("\n"                                              \
+               "1:     move."#s1"      (%2)+,%3\n"                     \
+               "       move."#s1"      %3,(%1)+\n"                     \
+               "2:     move."#s2"      (%2)+,%3\n"                     \
+               "       move."#s2"      %3,(%1)+\n"                     \
+               "       .ifnc   \""#s3"\",\"\"\n"                       \
+               "3:     move."#s3"      (%2)+,%3\n"                     \
+               "       move."#s3"      %3,(%1)+\n"                     \
+               "       .endif\n"                                       \
+               "4:\n"                                                  \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  4\n"                                    \
+               "       .long   1b,10f\n"                               \
+               "       .long   2b,20f\n"                               \
+               "       .ifnc   \""#s3"\",\"\"\n"                       \
+               "       .long   3b,30f\n"                               \
+               "       .endif\n"                                       \
+               "       .previous\n"                                    \
+               "\n"                                                    \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .even\n"                                        \
+               "10:    clr."#s1"       (%1)+\n"                        \
+               "20:    clr."#s2"       (%1)+\n"                        \
+               "       .ifnc   \""#s3"\",\"\"\n"                       \
+               "30:    clr."#s3"       (%1)+\n"                        \
+               "       .endif\n"                                       \
+               "       moveq.l #"#n",%0\n"                             \
+               "       jra     4b\n"                                   \
+               "       .previous\n"                                    \
+               : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)      \
+               : : "memory")
+
+static __always_inline unsigned long
+__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       unsigned long res = 0, tmp;
+
+       switch (n) {
+       case 1:
+               __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
+               break;
+       case 2:
+               __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w,
+                       d, 2);
+               break;
+       case 3:
+               __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
+               break;
+       case 4:
+               __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l,
+                       r, 4);
+               break;
+       case 5:
+               __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
+               break;
+       case 6:
+               __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
+               break;
+       case 7:
+               __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
+               break;
+       case 8:
+               __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
+               break;
+       case 9:
+               __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
+               break;
+       case 10:
+               __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
+               break;
+       case 12:
+               __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
+               break;
+       default:
+               /* we limit the inlined version to 3 moves */
+               return __generic_copy_from_user(to, from, n);
+       }
+
+       return res;
+}
+
+#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
+       asm volatile ("\n"                                              \
+               "       move."#s1"      (%2)+,%3\n"                     \
+               "11:    move."#s1"      %3,(%1)+\n"                     \
+               "12:    move."#s2"      (%2)+,%3\n"                     \
+               "21:    move."#s2"      %3,(%1)+\n"                     \
+               "22:\n"                                                 \
+               "       .ifnc   \""#s3"\",\"\"\n"                       \
+               "       move."#s3"      (%2)+,%3\n"                     \
+               "31:    move."#s3"      %3,(%1)+\n"                     \
+               "32:\n"                                                 \
+               "       .endif\n"                                       \
+               "4:\n"                                                  \
+               "\n"                                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  4\n"                                    \
+               "       .long   11b,5f\n"                               \
+               "       .long   12b,5f\n"                               \
+               "       .long   21b,5f\n"                               \
+               "       .long   22b,5f\n"                               \
+               "       .ifnc   \""#s3"\",\"\"\n"                       \
+               "       .long   31b,5f\n"                               \
+               "       .long   32b,5f\n"                               \
+               "       .endif\n"                                       \
+               "       .previous\n"                                    \
+               "\n"                                                    \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .even\n"                                        \
+               "5:     moveq.l #"#n",%0\n"                             \
+               "       jra     4b\n"                                   \
+               "       .previous\n"                                    \
+               : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)       \
+               : : "memory")
+
+static __always_inline unsigned long
+__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       unsigned long res = 0, tmp;
+
+       switch (n) {
+       case 1:
+               __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
+               break;
+       case 2:
+               __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
+               break;
+       case 3:
+               __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
+               break;
+       case 4:
+               __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
+               break;
+       case 5:
+               __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
+               break;
+       case 6:
+               __constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
+               break;
+       case 7:
+               __constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
+               break;
+       case 8:
+               __constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
+               break;
+       case 9:
+               __constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
+               break;
+       case 10:
+               __constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
+               break;
+       case 12:
+               __constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
+               break;
+       default:
+               /* limit the inlined version to 3 moves */
+               return __generic_copy_to_user(to, from, n);
+       }
+
+       return res;
+}
+
+#define __copy_from_user(to, from, n)          \
+(__builtin_constant_p(n) ?                     \
+ __constant_copy_from_user(to, from, n) :      \
+ __generic_copy_from_user(to, from, n))
+
+#define __copy_to_user(to, from, n)            \
+(__builtin_constant_p(n) ?                     \
+ __constant_copy_to_user(to, from, n) :                \
+ __generic_copy_to_user(to, from, n))
+
+#define __copy_to_user_inatomic                __copy_to_user
+#define __copy_from_user_inatomic      __copy_from_user
+
+#define copy_from_user(to, from, n)    __copy_from_user(to, from, n)
+#define copy_to_user(to, from, n)      __copy_to_user(to, from, n)
+
+long strncpy_from_user(char *dst, const char __user *src, long count);
+long strnlen_user(const char __user *src, long n);
+unsigned long __clear_user(void __user *to, unsigned long n);
+
+#define clear_user __clear_user
+
+#define strlen_user(str) strnlen_user(str, 32767)
+
+#endif /* _M68K_UACCESS_H */
index 20692e3f4fb26a412a824866fdc42e574e584e30..7107f3fbdbb66885bd000ec206648b5013bbcaf9 100644 (file)
 #define VERIFY_READ    0
 #define VERIFY_WRITE   1
 
-/* Coldifre doesn't have moves instruction; use move. */
-#ifdef CONFIG_COLDFIRE
-#define moves move
-#endif
-
 /* We let the MMU do all checking */
 static inline int access_ok(int type, const void __user *addr,
                            unsigned long size)
index f35229b8651dcff3744b7b55010f53b615d70678..3fdd894cf8b3905ad2fd14a5290034fc77e75e22 100644 (file)
@@ -15,6 +15,7 @@
 /*
  * Change virtual addresses to physical addresses and vv.
  */
 static inline unsigned long virt_to_phys(void *address)
 {
        return __pa(address);
index 9e1b0e92dcce45a02169202941d600288bcac939..1abe3412c7f61adc528f075847b8ba2bdcb0087b 100644 (file)
@@ -131,6 +131,10 @@ extern int hp300_parse_bootinfo(const struct bi_record *);
 extern int apollo_parse_bootinfo(const struct bi_record *);
 extern int coldfire_parse_bootinfo(const struct bi_record *);
 
+#ifdef CONFIG_COLDFIRE
+void coldfire_sort_memrec(void);
+#endif
+
 extern void config_amiga(void);
 extern void config_atari(void);
 extern void config_mac(void);
@@ -209,7 +213,7 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
                                unknown = apollo_parse_bootinfo(record);
                        else if (MACH_IS_COLDFIRE)
                                unknown = coldfire_parse_bootinfo(record);
-                       else if
+                       else 
                                unknown = 1;
                }
                if (unknown)
@@ -342,7 +346,7 @@ void __init setup_arch(char **cmdline_p)
                break;
 #endif
 #ifdef CONFIG_COLDFIRE
-       case MACH_COLDFIRE:
+       case MACH_CFMMU:
                config_coldfire();
                break;
 #endif
@@ -408,6 +412,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 #define LOOP_CYCLES_68030      (8)
 #define LOOP_CYCLES_68040      (3)
 #define LOOP_CYCLES_68060      (1)
+#define LOOP_CYCLES_COLDFIRE   (2)
 
        if (CPU_IS_020) {
                cpu = "68020";
index ad75780b31066b6ed8d6e27637c7f5d04443db6d..06f18d2029f3d0b2140551523b9134db9eabe0e0 100644 (file)
@@ -459,7 +459,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
        else
                flush_dcache(); 
        
-       ret = 0;        
+       ret = 0; 
+#endif 
 out:
        return ret;
 }
index 311f265ae1b3a69ae9ccc0180a619098a6b1e5f1..629e7a2cab57b46935f1c3cc99ddf6e03cd71f8b 100644 (file)
@@ -30,7 +30,7 @@
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
-#include <asm/mcf_pgalloc.h>
+#include <asm/pgalloc.h>
 
 #include <asm/coldfire.h>
 #include <asm/tlbflush.h>
index 34c77ce24fba5b54a46c63449620259a2a9fdd93..f47b635d6af051b38db8f9a960b9f91223b17531 100644 (file)
@@ -127,6 +127,7 @@ int free_pointer_table (pmd_t *ptable)
        return 0;
 }
 
+#ifndef CONFIG_COLDFIRE 
 /* invalidate page in both caches */
 static inline void clear040(unsigned long paddr)
 {
@@ -293,3 +294,20 @@ void cache_push (unsigned long paddr, int len)
 }
 EXPORT_SYMBOL(cache_push);
 
+#else
+
+//Not the best idea ...
+void cache_clear (unsigned long paddr, int len)
+{
+       flush_bcache(); 
+}
+EXPORT_SYMBOL(cache_clear);
+
+void cache_push (unsigned long paddr, int len)
+{
+       flush_bcache(); 
+}
+EXPORT_SYMBOL(cache_push);
+
+#endif /* CONFIG_COLDFIRE */ 
+