rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
Merge tag 'csky-for-linus-4.21' of git://github.com/c-sky/csky-linux
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Jan 2019 17:50:07 +0000 (09:50 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Jan 2019 17:50:07 +0000 (09:50 -0800)
Pull arch/csky updates from Guo Ren:
 "Here are three main features (cpu_hotplug, basic ftrace, basic perf)
  and some bugfixes:

  Features:
   - Add CPU-hotplug support for SMP
   - Add ftrace with function trace and function graph trace
   - Add Perf support
   - Add EM_CSKY_OLD 39
   - optimize kernel panic print.
   - remove syscall_exit_work

  Bugfixes:
   - fix abiv2 mmap(... O_SYNC) failure
   - fix gdb coredump error
   - remove vdsp implement for kernel
   - fix qemu failure to bootup sometimes
   - fix ftrace call-graph panic
   - fix device tree node reference leak
   - remove meaningless header-y
   - fix save hi,lo,dspcr regs in switch_stack
   - remove unused members in processor.h"

* tag 'csky-for-linus-4.21' of git://github.com/c-sky/csky-linux:
  csky: Add perf support for C-SKY
  csky: Add EM_CSKY_OLD 39
  clocksource/drivers/c-sky: fixup ftrace call-graph panic
  csky: ftrace call graph supported.
  csky: basic ftrace supported
  csky: remove unused members in processor.h
  csky: optimize kernel panic print.
  csky: stacktrace supported.
  csky: CPU-hotplug supported for SMP
  clocksource/drivers/c-sky: fixup qemu fail to bootup sometimes.
  csky: fixup save hi,lo,dspcr regs in switch_stack.
  csky: remove syscall_exit_work
  csky: fixup remove vdsp implement for kernel.
  csky: bugfix gdb coredump error.
  csky: fixup abiv2 mmap(... O_SYNC) failed.
  csky: define syscall_get_arch()
  elf-em.h: add EM_CSKY
  csky: remove meaningless header-y
  csky: Don't leak device tree node reference

36 files changed:
arch/csky/Kconfig
arch/csky/Makefile
arch/csky/abiv1/inc/abi/pgtable-bits.h
arch/csky/abiv1/inc/abi/switch_context.h [new file with mode: 0644]
arch/csky/abiv2/Makefile
arch/csky/abiv2/inc/abi/entry.h
arch/csky/abiv2/inc/abi/pgtable-bits.h
arch/csky/abiv2/inc/abi/switch_context.h [new file with mode: 0644]
arch/csky/abiv2/mcount.S [new file with mode: 0644]
arch/csky/abiv2/memcpy.S
arch/csky/include/asm/elf.h
arch/csky/include/asm/ftrace.h [new file with mode: 0644]
arch/csky/include/asm/perf_event.h [new file with mode: 0644]
arch/csky/include/asm/processor.h
arch/csky/include/asm/smp.h
arch/csky/include/asm/syscall.h
arch/csky/include/asm/thread_info.h
arch/csky/include/uapi/asm/Kbuild
arch/csky/include/uapi/asm/ptrace.h
arch/csky/kernel/Makefile
arch/csky/kernel/asm-offsets.c
arch/csky/kernel/dumpstack.c
arch/csky/kernel/entry.S
arch/csky/kernel/ftrace.c [new file with mode: 0644]
arch/csky/kernel/perf_event.c [new file with mode: 0644]
arch/csky/kernel/process.c
arch/csky/kernel/ptrace.c
arch/csky/kernel/signal.c
arch/csky/kernel/smp.c
arch/csky/kernel/stacktrace.c [new file with mode: 0644]
arch/csky/kernel/traps.c
arch/csky/mm/fault.c
arch/csky/mm/ioremap.c
drivers/clocksource/timer-mp-csky.c
include/uapi/linux/audit.h
include/uapi/linux/elf-em.h

index 37bed8aadf95b395f72b9e5c6550593e68e1c170..398113c845f56c0d14a3d367af4d515b1586f7ad 100644 (file)
@@ -28,10 +28,13 @@ config CSKY
        select GENERIC_SCHED_CLOCK
        select GENERIC_SMP_IDLE_THREAD
        select HAVE_ARCH_TRACEHOOK
+       select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZO
        select HAVE_KERNEL_LZMA
+       select HAVE_PERF_EVENTS
        select HAVE_C_RECORDMCOUNT
        select HAVE_DMA_API_DEBUG
        select HAVE_DMA_CONTIGUOUS
@@ -40,7 +43,7 @@ config CSKY
        select OF
        select OF_EARLY_FLATTREE
        select OF_RESERVED_MEM
-       select PERF_USE_VMALLOC
+       select PERF_USE_VMALLOC if CPU_CK610
        select RTC_LIB
        select TIMER_OF
        select USB_ARCH_HAS_EHCI
@@ -93,6 +96,9 @@ config MMU
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
+config STACKTRACE_SUPPORT
+       def_bool y
+
 config TIME_LOW_RES
        def_bool y
 
@@ -144,6 +150,19 @@ config CPU_CK860
        select CPU_HAS_FPUV2
 endchoice
 
+choice
+       prompt "C-SKY PMU type"
+       depends on PERF_EVENTS
+       depends on CPU_CK807 || CPU_CK810 || CPU_CK860
+
+config CPU_PMU_NONE
+       bool "None"
+
+config CSKY_PMU_V1
+       bool "Performance Monitoring Unit Ver.1"
+
+endchoice
+
 choice
        prompt "Power Manager Instruction (wait/doze/stop)"
        default CPU_PM_NONE
@@ -197,6 +216,15 @@ config RAM_BASE
        hex "DRAM start addr (the same with memory-section in dts)"
        default 0x0
 
+config HOTPLUG_CPU
+       bool "Support for hot-pluggable CPUs"
+       select GENERIC_IRQ_MIGRATION
+       depends on SMP
+       help
+         Say Y here to allow turning CPUs off and on. CPUs can be
+         controlled through /sys/devices/system/cpu/cpu1/hotplug/target.
+
+         Say N if you want to disable CPU hotplug.
 endmenu
 
 source "kernel/Kconfig.hz"
index c639fc167895d7a2f00909bf079e5ea2e6b0558c..3607a6e8f66cbd7883caf995589d8d51d9245738 100644 (file)
@@ -47,6 +47,10 @@ ifeq ($(CSKYABI),abiv2)
 KBUILD_CFLAGS += -mno-stack-size
 endif
 
+ifdef CONFIG_STACKTRACE
+KBUILD_CFLAGS += -mbacktrace
+endif
+
 abidirs := $(patsubst %,arch/csky/%/,$(CSKYABI))
 KBUILD_CFLAGS += $(patsubst %,-I$(srctree)/%inc,$(abidirs))
 
index 455075b5db0da5391a8d50b7d1d18409b613ab11..d605445aad9ad4362f51d3ad41f47707c1946a07 100644 (file)
@@ -26,6 +26,7 @@
 
 #define _PAGE_CACHE            (3<<9)
 #define _PAGE_UNCACHE          (2<<9)
+#define _PAGE_SO               _PAGE_UNCACHE
 
 #define _CACHE_MASK            (7<<9)
 
diff --git a/arch/csky/abiv1/inc/abi/switch_context.h b/arch/csky/abiv1/inc/abi/switch_context.h
new file mode 100644 (file)
index 0000000..17c8268
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ABI_CSKY_PTRACE_H
+#define __ABI_CSKY_PTRACE_H
+
+struct switch_stack {
+       unsigned long r8;
+       unsigned long r9;
+       unsigned long r10;
+       unsigned long r11;
+       unsigned long r12;
+       unsigned long r13;
+       unsigned long r14;
+       unsigned long r15;
+};
+#endif /* __ABI_CSKY_PTRACE_H */
index 069ca7276b99ac1a13c168d835d5db2be0c204ad..b1d44f6fbcbdd90841bb5fffaca498476ead8f5e 100644 (file)
@@ -8,3 +8,4 @@ obj-y                           += strcmp.o
 obj-y                          += strcpy.o
 obj-y                          += strlen.o
 obj-y                          += strksyms.o
+obj-$(CONFIG_FUNCTION_TRACER)  += mcount.o
index acd05214d4e3adabcc406c4d28d7f059b945705d..edc5cc04c4de59621121c4031ba68d9ebcb92af9 100644 (file)
@@ -57,6 +57,8 @@
        stw     lr, (sp, 60)
        mflo    lr
        stw     lr, (sp, 64)
+       mfcr    lr, cr14
+       stw     lr, (sp, 68)
 #endif
        subi    sp, 80
 .endm
@@ -77,6 +79,8 @@
        mthi    a0
        ldw     a0, (sp, 144)
        mtlo    a0
+       ldw     a0, (sp, 148)
+       mtcr    a0, cr14
 #endif
 
        ldw     a0, (sp, 24)
@@ -93,9 +97,9 @@
 .endm
 
 .macro SAVE_SWITCH_STACK
-       subi    sp, 64
+       subi    sp, 64
        stm     r4-r11, (sp)
-       stw     r15, (sp, 32)
+       stw     lr,  (sp, 32)
        stw     r16, (sp, 36)
        stw     r17, (sp, 40)
        stw     r26, (sp, 44)
        stw     r28, (sp, 52)
        stw     r29, (sp, 56)
        stw     r30, (sp, 60)
+#ifdef CONFIG_CPU_HAS_HILO
+       subi    sp, 16
+       mfhi    lr
+       stw     lr, (sp, 0)
+       mflo    lr
+       stw     lr, (sp, 4)
+       mfcr    lr, cr14
+       stw     lr, (sp, 8)
+#endif
 .endm
 
 .macro RESTORE_SWITCH_STACK
+#ifdef CONFIG_CPU_HAS_HILO
+       ldw     lr, (sp, 0)
+       mthi    lr
+       ldw     lr, (sp, 4)
+       mtlo    lr
+       ldw     lr, (sp, 8)
+       mtcr    lr, cr14
+       addi    sp, 16
+#endif
        ldm     r4-r11, (sp)
-       ldw     r15, (sp, 32)
+       ldw     lr,  (sp, 32)
        ldw     r16, (sp, 36)
        ldw     r17, (sp, 40)
        ldw     r26, (sp, 44)
index b20ae19702e3c5fd1153f3215a3a052da09427bc..137f7932c83b3be191e9971da02bce7270b9ad62 100644 (file)
@@ -32,6 +32,6 @@
 #define _CACHE_MASK            _PAGE_CACHE
 
 #define _CACHE_CACHED          (_PAGE_VALID | _PAGE_CACHE | _PAGE_BUF)
-#define _CACHE_UNCACHED                (_PAGE_VALID | _PAGE_SO)
+#define _CACHE_UNCACHED                (_PAGE_VALID)
 
 #endif /* __ASM_CSKY_PGTABLE_BITS_H */
diff --git a/arch/csky/abiv2/inc/abi/switch_context.h b/arch/csky/abiv2/inc/abi/switch_context.h
new file mode 100644 (file)
index 0000000..73a8124
--- /dev/null
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ABI_CSKY_PTRACE_H
+#define __ABI_CSKY_PTRACE_H
+
+struct switch_stack {
+#ifdef CONFIG_CPU_HAS_HILO
+       unsigned long rhi;
+       unsigned long rlo;
+       unsigned long cr14;
+       unsigned long pad;
+#endif
+       unsigned long r4;
+       unsigned long r5;
+       unsigned long r6;
+       unsigned long r7;
+       unsigned long r8;
+       unsigned long r9;
+       unsigned long r10;
+       unsigned long r11;
+
+       unsigned long r15;
+       unsigned long r16;
+       unsigned long r17;
+       unsigned long r26;
+       unsigned long r27;
+       unsigned long r28;
+       unsigned long r29;
+       unsigned long r30;
+};
+#endif /* __ABI_CSKY_PTRACE_H */
diff --git a/arch/csky/abiv2/mcount.S b/arch/csky/abiv2/mcount.S
new file mode 100644 (file)
index 0000000..c633379
--- /dev/null
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+/*
+ * csky-gcc with -pg will put the following asm after prologue:
+ *      push   r15
+ *      jsri   _mcount
+ *
+ * stack layout after mcount_enter in _mcount():
+ *
+ * current sp => 0:+-------+
+ *                 | a0-a3 | -> must save all argument regs
+ *             +16:+-------+
+ *                 | lr    | -> _mcount lr (instrumente function's pc)
+ *             +20:+-------+
+ *                 | fp=r8 | -> instrumented function fp
+ *             +24:+-------+
+ *                 | plr   | -> instrumented function lr (parent's pc)
+ *                 +-------+
+ */
+
+.macro mcount_enter
+       subi    sp, 24
+       stw     a0, (sp, 0)
+       stw     a1, (sp, 4)
+       stw     a2, (sp, 8)
+       stw     a3, (sp, 12)
+       stw     lr, (sp, 16)
+       stw     r8, (sp, 20)
+.endm
+
+.macro mcount_exit
+       ldw     a0, (sp, 0)
+       ldw     a1, (sp, 4)
+       ldw     a2, (sp, 8)
+       ldw     a3, (sp, 12)
+       ldw     t1, (sp, 16)
+       ldw     r8, (sp, 20)
+       ldw     lr, (sp, 24)
+       addi    sp, 28
+       jmp     t1
+.endm
+
+.macro save_return_regs
+       subi    sp, 16
+       stw     a0, (sp, 0)
+       stw     a1, (sp, 4)
+       stw     a2, (sp, 8)
+       stw     a3, (sp, 12)
+.endm
+
+.macro restore_return_regs
+       mov     lr, a0
+       ldw     a0, (sp, 0)
+       ldw     a1, (sp, 4)
+       ldw     a2, (sp, 8)
+       ldw     a3, (sp, 12)
+       addi    sp, 16
+.endm
+
+ENTRY(ftrace_stub)
+       jmp     lr
+END(ftrace_stub)
+
+ENTRY(_mcount)
+       mcount_enter
+
+       /* r26 is link register, only used with jsri translation */
+       lrw     r26, ftrace_trace_function
+       ldw     r26, (r26, 0)
+       lrw     a1, ftrace_stub
+       cmpne   r26, a1
+       bf      skip_ftrace
+
+       mov     a0, lr
+       subi    a0, MCOUNT_INSN_SIZE
+       ldw     a1, (sp, 24)
+
+       jsr     r26
+
+#ifndef CONFIG_FUNCTION_GRAPH_TRACER
+skip_ftrace:
+       mcount_exit
+#else
+skip_ftrace:
+       lrw     a0, ftrace_graph_return
+       ldw     a0, (a0, 0)
+       lrw     a1, ftrace_stub
+       cmpne   a0, a1
+       bt      ftrace_graph_caller
+
+       lrw     a0, ftrace_graph_entry
+       ldw     a0, (a0, 0)
+       lrw     a1, ftrace_graph_entry_stub
+       cmpne   a0, a1
+       bt      ftrace_graph_caller
+
+       mcount_exit
+#endif
+END(_mcount)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+       mov     a0, sp
+       addi    a0, 24
+       ldw     a1, (sp, 16)
+       subi    a1, MCOUNT_INSN_SIZE
+       mov     a2, r8
+       lrw     r26, prepare_ftrace_return
+       jsr     r26
+       mcount_exit
+END(ftrace_graph_caller)
+
+ENTRY(return_to_handler)
+       save_return_regs
+       mov     a0, r8
+       jsri    ftrace_return_to_handler
+       restore_return_regs
+       jmp     lr
+END(return_to_handler)
+#endif
index 987fec60ab97d03d446f6392adf94db4a76dbf1f..145bf3a9360ee18636a4c66862ef34d4591e14fa 100644 (file)
@@ -27,13 +27,7 @@ ENTRY(memcpy)
 
        LABLE_ALIGN
 .L_len_larger_16bytes:
-#if defined(__CSKY_VDSPV2__)
-       vldx.8  vr0, (r1), r19
-       PRE_BNEZAD (r18)
-       addi    r1, 16
-       vstx.8  vr0, (r0), r19
-       addi    r0, 16
-#elif defined(__CK860__)
+#if defined(__CK860__)
        ldw     r3, (r1, 0)
        stw     r3, (r0, 0)
        ldw     r3, (r1, 4)
index 773b133ca2972bbae32c90aefbfb481966b7e83c..e1ec558278bc9bba3511d198aa3344415824ffc5 100644 (file)
@@ -7,7 +7,8 @@
 #include <asm/ptrace.h>
 #include <abi/regdef.h>
 
-#define ELF_ARCH 252
+#define ELF_ARCH EM_CSKY
+#define EM_CSKY_OLD 39
 
 /* CSKY Relocations */
 #define R_CSKY_NONE               0
@@ -31,14 +32,20 @@ typedef unsigned long elf_greg_t;
 
 typedef struct user_fp elf_fpregset_t;
 
-#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
+/*
+ * In gdb/bfd elf32-csky.c, csky_elf_grok_prstatus() use fixed size of
+ * elf_prstatus. It's 148 for abiv1 and 220 for abiv2, the size is enough
+ * for coredump and no need full sizeof(struct pt_regs).
+ */
+#define ELF_NGREG ((sizeof(struct pt_regs) / sizeof(elf_greg_t)) - 2)
 
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
-#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
+#define elf_check_arch(x) (((x)->e_machine == ELF_ARCH) || \
+                          ((x)->e_machine == EM_CSKY_OLD))
 
 /*
  * These are used to set parameters in the core dumps.
diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..7547c45
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_FTRACE_H
+#define __ASM_CSKY_FTRACE_H
+
+#define MCOUNT_INSN_SIZE 4
+
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
+#endif /* __ASM_CSKY_FTRACE_H */
diff --git a/arch/csky/include/asm/perf_event.h b/arch/csky/include/asm/perf_event.h
new file mode 100644 (file)
index 0000000..ea81931
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_PERF_EVENT_H
+#define __ASM_CSKY_PERF_EVENT_H
+
+#endif /* __ASM_PERF_EVENT_ELF_H */
index b1748659b2e9578ab11d7342ea202639863a83cb..8f454810514f21be1356bd9d763f28e68f8d70b5 100644 (file)
 #include <asm/cache.h>
 #include <abi/reg_ops.h>
 #include <abi/regdef.h>
+#include <abi/switch_context.h>
 #ifdef CONFIG_CPU_HAS_FPU
 #include <abi/fpu.h>
 #endif
 
 struct cpuinfo_csky {
-       unsigned long udelay_val;
        unsigned long asid_cache;
-       /*
-        * Capability and feature descriptor structure for CSKY CPU
-        */
-       unsigned long options;
-       unsigned int processor_id[4];
-       unsigned int fpu_id;
 } __aligned(SMP_CACHE_BYTES);
 
 extern struct cpuinfo_csky cpu_data[];
@@ -49,13 +43,6 @@ extern struct cpuinfo_csky cpu_data[];
 struct thread_struct {
        unsigned long  ksp;       /* kernel stack pointer */
        unsigned long  sr;        /* saved status register */
-       unsigned long  esp0;      /* points to SR of stack frame */
-       unsigned long  hi;
-       unsigned long  lo;
-
-       /* Other stuff associated with the thread. */
-       unsigned long address;      /* Last user fault */
-       unsigned long error_code;
 
        /* FPU regs */
        struct user_fp __aligned(16) user_fp;
index 4a929c4d6437fd43fc97a72db8b1fa6a6cda483b..668b79ce29ea111d700f66d4ccf348acd85a3f7b 100644 (file)
@@ -21,6 +21,10 @@ void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq);
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
+int __cpu_disable(void);
+
+void __cpu_die(unsigned int cpu);
+
 #endif /* CONFIG_SMP */
 
 #endif /* __ASM_CSKY_SMP_H */
index 926a64a8b4eee8465e822ec426aa24e33b214036..d637445737b78fd5c78c9994173a1e7c73eb3d1f 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <abi/regdef.h>
+#include <uapi/linux/audit.h>
 
 static inline int
 syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
@@ -68,4 +69,10 @@ syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
        memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
 }
 
+static inline int
+syscall_get_arch(void)
+{
+       return AUDIT_ARCH_CSKY;
+}
+
 #endif /* __ASM_SYSCALL_H */
index a2c69a7836f700f6698d785368f997d7d993d616..0e9d035d712b675a96353408a426bd118fb14528 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/types.h>
 #include <asm/page.h>
 #include <asm/processor.h>
+#include <abi/switch_context.h>
 
 struct thread_info {
        struct task_struct      *task;
@@ -36,6 +37,9 @@ struct thread_info {
 
 #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
 
+#define thread_saved_fp(tsk) \
+       ((unsigned long)(((struct switch_stack *)(tsk->thread.ksp))->r8))
+
 static inline struct thread_info *current_thread_info(void)
 {
        unsigned long sp;
index e02fd44e6447865e57ed57cb126eeadf876f63c0..7449fdeb973dbc88b5c66d72cc89506c5f50b52b 100644 (file)
@@ -1,7 +1,5 @@
 include include/uapi/asm-generic/Kbuild.asm
 
-header-y += cachectl.h
-
 generic-y += auxvec.h
 generic-y += param.h
 generic-y += bpf_perf_event.h
index f10d02c8b09e037bb83443e1fd4757360c805e7e..a4eaa8ddf0b1d070e58548a4a0c40d2c931942aa 100644 (file)
@@ -36,7 +36,7 @@ struct pt_regs {
 
        unsigned long   rhi;
        unsigned long   rlo;
-       unsigned long   pad; /* reserved */
+       unsigned long   dcsr;
 #endif
 };
 
@@ -48,43 +48,6 @@ struct user_fp {
        unsigned long   reserved;
 };
 
-/*
- * Switch stack for switch_to after push pt_regs.
- *
- * ABI_CSKYV2: r4 ~ r11, r15 ~ r17, r26 ~ r30;
- * ABI_CSKYV1: r8 ~ r14, r15;
- */
-struct  switch_stack {
-#if defined(__CSKYABIV2__)
-       unsigned long   r4;
-       unsigned long   r5;
-       unsigned long   r6;
-       unsigned long   r7;
-       unsigned long   r8;
-       unsigned long   r9;
-       unsigned long   r10;
-       unsigned long   r11;
-#else
-       unsigned long   r8;
-       unsigned long   r9;
-       unsigned long   r10;
-       unsigned long   r11;
-       unsigned long   r12;
-       unsigned long   r13;
-       unsigned long   r14;
-#endif
-       unsigned long   r15;
-#if defined(__CSKYABIV2__)
-       unsigned long   r16;
-       unsigned long   r17;
-       unsigned long   r26;
-       unsigned long   r27;
-       unsigned long   r28;
-       unsigned long   r29;
-       unsigned long   r30;
-#endif
-};
-
 #ifdef __KERNEL__
 
 #define PS_S   0x80000000 /* Supervisor Mode */
index 4422de756cdefc5470d74bb0c33d9aa1db6cd3ec..484e6d3a364719b521c6586a4a06df9d57d12b82 100644 (file)
@@ -6,3 +6,10 @@ obj-y += process.o cpu-probe.o ptrace.o dumpstack.o
 
 obj-$(CONFIG_MODULES)                  += module.o
 obj-$(CONFIG_SMP)                      += smp.o
+obj-$(CONFIG_FUNCTION_TRACER)          += ftrace.o
+obj-$(CONFIG_STACKTRACE)               += stacktrace.o
+obj-$(CONFIG_CSKY_PMU_V1)              += perf_event.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+endif
index 8d3ed811321fd1095589351e090ba6e57f907778..9b48b1b1a61b57cee9f37de3336cfb6f2deffb3d 100644 (file)
@@ -20,12 +20,9 @@ int main(void)
        /* offsets into the thread struct */
        DEFINE(THREAD_KSP,        offsetof(struct thread_struct, ksp));
        DEFINE(THREAD_SR,         offsetof(struct thread_struct, sr));
-       DEFINE(THREAD_ESP0,       offsetof(struct thread_struct, esp0));
        DEFINE(THREAD_FESR,       offsetof(struct thread_struct, user_fp.fesr));
        DEFINE(THREAD_FCR,        offsetof(struct thread_struct, user_fp.fcr));
        DEFINE(THREAD_FPREG,      offsetof(struct thread_struct, user_fp.vr));
-       DEFINE(THREAD_DSPHI,      offsetof(struct thread_struct, hi));
-       DEFINE(THREAD_DSPLO,      offsetof(struct thread_struct, lo));
 
        /* offsets into the thread_info struct */
        DEFINE(TINFO_FLAGS,       offsetof(struct thread_info, flags));
index a9a03ac57ec58cea5d3de1b90b58874552efd744..659253e9989cb062e84f38c04ce9d4219ebf1d2e 100644 (file)
@@ -7,60 +7,39 @@ int kstack_depth_to_print = 48;
 
 void show_trace(unsigned long *stack)
 {
-       unsigned long *endstack;
+       unsigned long *stack_end;
+       unsigned long *stack_start;
+       unsigned long *fp;
        unsigned long addr;
-       int i;
 
-       pr_info("Call Trace:\n");
-       addr = (unsigned long)stack + THREAD_SIZE - 1;
-       endstack = (unsigned long *)(addr & -THREAD_SIZE);
-       i = 0;
-       while (stack + 1 <= endstack) {
-               addr = *stack++;
-               /*
-                * If the address is either in the text segment of the
-                * kernel, or in the region which contains vmalloc'ed
-                * memory, it *may* be the address of a calling
-                * routine; if so, print it so that someone tracing
-                * down the cause of the crash will be able to figure
-                * out the call path that was taken.
-                */
-               if (__kernel_text_address(addr)) {
-#ifndef CONFIG_KALLSYMS
-                       if (i % 5 == 0)
-                               pr_cont("\n       ");
+       addr = (unsigned long) stack & THREAD_MASK;
+       stack_start = (unsigned long *) addr;
+       stack_end = (unsigned long *) (addr + THREAD_SIZE);
+
+       fp = stack;
+       pr_info("\nCall Trace:");
+
+       while (fp > stack_start && fp < stack_end) {
+#ifdef CONFIG_STACKTRACE
+               addr    = fp[1];
+               fp      = (unsigned long *) fp[0];
+#else
+               addr    = *fp++;
 #endif
-                       pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr);
-                       i++;
-               }
+               if (__kernel_text_address(addr))
+                       pr_cont("\n[<%08lx>] %pS", addr, (void *)addr);
        }
        pr_cont("\n");
 }
 
 void show_stack(struct task_struct *task, unsigned long *stack)
 {
-       unsigned long *p;
-       unsigned long *endstack;
-       int i;
-
        if (!stack) {
                if (task)
-                       stack = (unsigned long *)task->thread.esp0;
+                       stack = (unsigned long *)thread_saved_fp(task);
                else
                        stack = (unsigned long *)&stack;
        }
-       endstack = (unsigned long *)
-               (((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
 
-       pr_info("Stack from %08lx:", (unsigned long)stack);
-       p = stack;
-       for (i = 0; i < kstack_depth_to_print; i++) {
-               if (p + 1 > endstack)
-                       break;
-               if (i % 8 == 0)
-                       pr_cont("\n       ");
-               pr_cont(" %08lx", *p++);
-       }
-       pr_cont("\n");
        show_trace(stack);
 }
index 79f92b8606c8a11df17cf57e2e4d0f5e5e73e2ce..5137ed9062bdc1448e0ccb491ef67f3abc2d734d 100644 (file)
@@ -122,16 +122,6 @@ ENTRY(csky_systemcall)
 
        psrset  ee, ie
 
-       /* Stack frame for syscall, origin call set_esp0 */
-       mov     r12, sp
-
-       bmaski  r11, 13
-       andn    r12, r11
-       bgeni   r11, 9
-       addi    r11, 32
-       addu    r12, r11
-       st      sp, (r12, 0)
-
        lrw     r11, __NR_syscalls
        cmphs   syscallid, r11          /* Check nr of syscall */
        bt      ret_from_exception
@@ -183,18 +173,10 @@ ENTRY(csky_systemcall)
 #endif
        stw     a0, (sp, LSAVE_A0)      /* Save return value */
 
-       movi    a0, 1                   /* leave system call */
-       mov     a1, sp                  /* sp = pt_regs pointer */
-       jbsr    syscall_trace
-
-syscall_exit_work:
-       ld      syscallid, (sp, LSAVE_PSR)
-       btsti   syscallid, 31
-       bt      2f
-
-       jmpi    resume_userspace
-
-2:      RESTORE_ALL
+       movi    a0, 1                   /* leave system call */
+       mov     a1, sp                  /* right now, sp --> pt_regs */
+       jbsr    syscall_trace
+       br      ret_from_exception
 
 ENTRY(ret_from_kernel_thread)
        jbsr    schedule_tail
@@ -238,8 +220,6 @@ resume_userspace:
 1:  RESTORE_ALL
 
 exit_work:
-       mov     a0, sp                  /* Stack address is arg[0] */
-       jbsr    set_esp0                /* Call C level */
        btsti   r8, TIF_NEED_RESCHED
        bt      work_resched
        /* If thread_info->flag is empty, RESTORE_ALL */
@@ -354,34 +334,12 @@ ENTRY(__switch_to)
 
        stw     sp, (a3, THREAD_KSP)
 
-#ifdef CONFIG_CPU_HAS_HILO
-       lrw     r10, THREAD_DSPHI
-       add     r10, a3
-       mfhi    r6
-       mflo    r7
-       stw     r6, (r10, 0)            /* THREAD_DSPHI */
-       stw     r7, (r10, 4)            /* THREAD_DSPLO */
-       mfcr    r6, cr14
-       stw     r6, (r10, 8)            /* THREAD_DSPCSR */
-#endif
-
        /* Set up next process to run */
        lrw     a3, TASK_THREAD
        addu    a3, a1
 
        ldw     sp, (a3, THREAD_KSP)    /* Set next kernel sp */
 
-#ifdef CONFIG_CPU_HAS_HILO
-       lrw     r10, THREAD_DSPHI
-       add     r10, a3
-       ldw     r6, (r10, 8)            /* THREAD_DSPCSR */
-       mtcr    r6, cr14
-       ldw     r6, (r10, 0)            /* THREAD_DSPHI */
-       ldw     r7, (r10, 4)            /* THREAD_DSPLO */
-       mthi    r6
-       mtlo    r7
-#endif
-
        ldw     a2, (a3, THREAD_SR)     /* Set next PSR */
        mtcr    a2, psr
 
diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c
new file mode 100644 (file)
index 0000000..274c431
--- /dev/null
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+                          unsigned long frame_pointer)
+{
+       unsigned long return_hooker = (unsigned long)&return_to_handler;
+       unsigned long old;
+
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return;
+
+       old = *parent;
+
+       if (!function_graph_enter(old, self_addr,
+                       *(unsigned long *)frame_pointer, parent)) {
+               /*
+                * For csky-gcc function has sub-call:
+                * subi sp,     sp, 8
+                * stw  r8,     (sp, 0)
+                * mov  r8,     sp
+                * st.w r15,    (sp, 0x4)
+                * push r15
+                * jl   _mcount
+                * We only need set *parent for resume
+                *
+                * For csky-gcc function has no sub-call:
+                * subi sp,     sp, 4
+                * stw  r8,     (sp, 0)
+                * mov  r8,     sp
+                * push r15
+                * jl   _mcount
+                * We need set *parent and *(frame_pointer + 4) for resume,
+                * because lr is resumed twice.
+                */
+               *parent = return_hooker;
+               frame_pointer += 4;
+               if (*(unsigned long *)frame_pointer == old)
+                       *(unsigned long *)frame_pointer = return_hooker;
+       }
+}
+#endif
+
+/* _mcount is defined in abi's mcount.S */
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c
new file mode 100644 (file)
index 0000000..376c972
--- /dev/null
@@ -0,0 +1,1031 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+#define CSKY_PMU_MAX_EVENTS 32
+
+#define HPCR           "<0, 0x0>"      /* PMU Control reg */
+#define HPCNTENR       "<0, 0x4>"      /* Count Enable reg */
+
+/*
+ * Raw-event accessor tables, indexed by hardware event id (hwc->idx).
+ * A NULL read slot means that event id is not implemented on this PMU.
+ */
+static uint64_t (*hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])(void);
+static void (*hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])(uint64_t val);
+
+/* Singleton PMU; hpcr caches the HPCR value programmed on pmu_enable(). */
+struct csky_pmu_t {
+       struct pmu      pmu;
+       uint32_t        hpcr;
+} csky_pmu;
+
+/* Read a PMU general (counter) register of coprocessor 0; reg is a
+ * string literal like "<0, 0x2>" spliced into the asm template. */
+#define cprgr(reg)                             \
+({                                             \
+       unsigned int tmp;                       \
+       asm volatile("cprgr %0, "reg"\n"        \
+                    : "=r"(tmp)                \
+                    :                          \
+                    : "memory");               \
+       tmp;                                    \
+})
+
+/* Write a PMU general (counter) register of coprocessor 0. */
+#define cpwgr(reg, val)                \
+({                             \
+       asm volatile(           \
+       "cpwgr %0, "reg"\n"     \
+       :                       \
+       : "r"(val)              \
+       : "memory");            \
+})
+
+/* Read a PMU control register (HPCR/HPCNTENR) of coprocessor 0. */
+#define cprcr(reg)                             \
+({                                             \
+       unsigned int tmp;                       \
+       asm volatile("cprcr %0, "reg"\n"        \
+                    : "=r"(tmp)                \
+                    :                          \
+                    : "memory");               \
+       tmp;                                    \
+})
+
+/* Write a PMU control register (HPCR/HPCNTENR) of coprocessor 0. */
+#define cpwcr(reg, val)                \
+({                             \
+       asm volatile(           \
+       "cpwcr %0, "reg"\n"     \
+       :                       \
+       : "r"(val)              \
+       : "memory");            \
+})
+
+/* cycle counter */
+static uint64_t csky_pmu_read_cc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x3>");
+               lo  = cprgr("<0, 0x2>");
+               hi  = cprgr("<0, 0x3>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_cc(uint64_t val)
+{
+       cpwgr("<0, 0x2>", (uint32_t)  val);
+       cpwgr("<0, 0x3>", (uint32_t) (val >> 32));
+}
+
+/* instruction counter */
+static uint64_t csky_pmu_read_ic(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x5>");
+               lo  = cprgr("<0, 0x4>");
+               hi  = cprgr("<0, 0x5>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_ic(uint64_t val)
+{
+       cpwgr("<0, 0x4>", (uint32_t)  val);
+       cpwgr("<0, 0x5>", (uint32_t) (val >> 32));
+}
+
+/* l1 icache access counter */
+static uint64_t csky_pmu_read_icac(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x7>");
+               lo  = cprgr("<0, 0x6>");
+               hi  = cprgr("<0, 0x7>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_icac(uint64_t val)
+{
+       cpwgr("<0, 0x6>", (uint32_t)  val);
+       cpwgr("<0, 0x7>", (uint32_t) (val >> 32));
+}
+
+/* l1 icache miss counter */
+static uint64_t csky_pmu_read_icmc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x9>");
+               lo  = cprgr("<0, 0x8>");
+               hi  = cprgr("<0, 0x9>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_icmc(uint64_t val)
+{
+       cpwgr("<0, 0x8>", (uint32_t)  val);
+       cpwgr("<0, 0x9>", (uint32_t) (val >> 32));
+}
+
+/* l1 dcache access counter */
+static uint64_t csky_pmu_read_dcac(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0xb>");
+               lo  = cprgr("<0, 0xa>");
+               hi  = cprgr("<0, 0xb>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_dcac(uint64_t val)
+{
+       cpwgr("<0, 0xa>", (uint32_t)  val);
+       cpwgr("<0, 0xb>", (uint32_t) (val >> 32));
+}
+
+/* l1 dcache miss counter */
+static uint64_t csky_pmu_read_dcmc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0xd>");
+               lo  = cprgr("<0, 0xc>");
+               hi  = cprgr("<0, 0xd>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_dcmc(uint64_t val)
+{
+       cpwgr("<0, 0xc>", (uint32_t)  val);
+       cpwgr("<0, 0xd>", (uint32_t) (val >> 32));
+}
+
+/* l2 cache access counter */
+static uint64_t csky_pmu_read_l2ac(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0xf>");
+               lo  = cprgr("<0, 0xe>");
+               hi  = cprgr("<0, 0xf>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_l2ac(uint64_t val)
+{
+       cpwgr("<0, 0xe>", (uint32_t)  val);
+       cpwgr("<0, 0xf>", (uint32_t) (val >> 32));
+}
+
+/* l2 cache miss counter */
+static uint64_t csky_pmu_read_l2mc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x11>");
+               lo  = cprgr("<0, 0x10>");
+               hi  = cprgr("<0, 0x11>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_l2mc(uint64_t val)
+{
+       cpwgr("<0, 0x10>", (uint32_t)  val);
+       cpwgr("<0, 0x11>", (uint32_t) (val >> 32));
+}
+
+/* I-UTLB miss counter */
+static uint64_t csky_pmu_read_iutlbmc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x15>");
+               lo  = cprgr("<0, 0x14>");
+               hi  = cprgr("<0, 0x15>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_iutlbmc(uint64_t val)
+{
+       cpwgr("<0, 0x14>", (uint32_t)  val);
+       cpwgr("<0, 0x15>", (uint32_t) (val >> 32));
+}
+
+/* D-UTLB miss counter */
+static uint64_t csky_pmu_read_dutlbmc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x17>");
+               lo  = cprgr("<0, 0x16>");
+               hi  = cprgr("<0, 0x17>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_dutlbmc(uint64_t val)
+{
+       cpwgr("<0, 0x16>", (uint32_t)  val);
+       cpwgr("<0, 0x17>", (uint32_t) (val >> 32));
+}
+
+/* JTLB miss counter */
+static uint64_t csky_pmu_read_jtlbmc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x19>");
+               lo  = cprgr("<0, 0x18>");
+               hi  = cprgr("<0, 0x19>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_jtlbmc(uint64_t val)
+{
+       cpwgr("<0, 0x18>", (uint32_t)  val);
+       cpwgr("<0, 0x19>", (uint32_t) (val >> 32));
+}
+
+/* software counter */
+static uint64_t csky_pmu_read_softc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x1b>");
+               lo  = cprgr("<0, 0x1a>");
+               hi  = cprgr("<0, 0x1b>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_softc(uint64_t val)
+{
+       cpwgr("<0, 0x1a>", (uint32_t)  val);
+       cpwgr("<0, 0x1b>", (uint32_t) (val >> 32));
+}
+
+/* conditional branch mispredict counter */
+static uint64_t csky_pmu_read_cbmc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x1d>");
+               lo  = cprgr("<0, 0x1c>");
+               hi  = cprgr("<0, 0x1d>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_cbmc(uint64_t val)
+{
+       cpwgr("<0, 0x1c>", (uint32_t)  val);
+       cpwgr("<0, 0x1d>", (uint32_t) (val >> 32));
+}
+
+/* conditional branch instruction counter */
+static uint64_t csky_pmu_read_cbic(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x1f>");
+               lo  = cprgr("<0, 0x1e>");
+               hi  = cprgr("<0, 0x1f>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_cbic(uint64_t val)
+{
+       cpwgr("<0, 0x1e>", (uint32_t)  val);
+       cpwgr("<0, 0x1f>", (uint32_t) (val >> 32));
+}
+
+/* indirect branch mispredict counter */
+static uint64_t csky_pmu_read_ibmc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x21>");
+               lo  = cprgr("<0, 0x20>");
+               hi  = cprgr("<0, 0x21>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_ibmc(uint64_t val)
+{
+       cpwgr("<0, 0x20>", (uint32_t)  val);
+       cpwgr("<0, 0x21>", (uint32_t) (val >> 32));
+}
+
+/* indirect branch instruction counter */
+static uint64_t csky_pmu_read_ibic(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x23>");
+               lo  = cprgr("<0, 0x22>");
+               hi  = cprgr("<0, 0x23>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_ibic(uint64_t val)
+{
+       cpwgr("<0, 0x22>", (uint32_t)  val);
+       cpwgr("<0, 0x23>", (uint32_t) (val >> 32));
+}
+
+/* LSU spec fail counter */
+static uint64_t csky_pmu_read_lsfc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x25>");
+               lo  = cprgr("<0, 0x24>");
+               hi  = cprgr("<0, 0x25>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_lsfc(uint64_t val)
+{
+       cpwgr("<0, 0x24>", (uint32_t)  val);
+       cpwgr("<0, 0x25>", (uint32_t) (val >> 32));
+}
+
+/* store instruction counter */
+static uint64_t csky_pmu_read_sic(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x27>");
+               lo  = cprgr("<0, 0x26>");
+               hi  = cprgr("<0, 0x27>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_sic(uint64_t val)
+{
+       cpwgr("<0, 0x26>", (uint32_t)  val);
+       cpwgr("<0, 0x27>", (uint32_t) (val >> 32));
+}
+
+/* dcache read access counter */
+static uint64_t csky_pmu_read_dcrac(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x29>");
+               lo  = cprgr("<0, 0x28>");
+               hi  = cprgr("<0, 0x29>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_dcrac(uint64_t val)
+{
+       cpwgr("<0, 0x28>", (uint32_t)  val);
+       cpwgr("<0, 0x29>", (uint32_t) (val >> 32));
+}
+
+/* dcache read miss counter */
+static uint64_t csky_pmu_read_dcrmc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x2b>");
+               lo  = cprgr("<0, 0x2a>");
+               hi  = cprgr("<0, 0x2b>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_dcrmc(uint64_t val)
+{
+       cpwgr("<0, 0x2a>", (uint32_t)  val);
+       cpwgr("<0, 0x2b>", (uint32_t) (val >> 32));
+}
+
+/* dcache write access counter */
+static uint64_t csky_pmu_read_dcwac(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x2d>");
+               lo  = cprgr("<0, 0x2c>");
+               hi  = cprgr("<0, 0x2d>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_dcwac(uint64_t val)
+{
+       cpwgr("<0, 0x2c>", (uint32_t)  val);
+       cpwgr("<0, 0x2d>", (uint32_t) (val >> 32));
+}
+
+/* dcache write miss counter */
+static uint64_t csky_pmu_read_dcwmc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x2f>");
+               lo  = cprgr("<0, 0x2e>");
+               hi  = cprgr("<0, 0x2f>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_dcwmc(uint64_t val)
+{
+       cpwgr("<0, 0x2e>", (uint32_t)  val);
+       cpwgr("<0, 0x2f>", (uint32_t) (val >> 32));
+}
+
+/* l2cache read access counter */
+static uint64_t csky_pmu_read_l2rac(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x31>");
+               lo  = cprgr("<0, 0x30>");
+               hi  = cprgr("<0, 0x31>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_l2rac(uint64_t val)
+{
+       cpwgr("<0, 0x30>", (uint32_t)  val);
+       cpwgr("<0, 0x31>", (uint32_t) (val >> 32));
+}
+
+/* l2cache read miss counter */
+static uint64_t csky_pmu_read_l2rmc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x33>");
+               lo  = cprgr("<0, 0x32>");
+               hi  = cprgr("<0, 0x33>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_l2rmc(uint64_t val)
+{
+       cpwgr("<0, 0x32>", (uint32_t)  val);
+       cpwgr("<0, 0x33>", (uint32_t) (val >> 32));
+}
+
+/* l2cache write access counter */
+static uint64_t csky_pmu_read_l2wac(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x35>");
+               lo  = cprgr("<0, 0x34>");
+               hi  = cprgr("<0, 0x35>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_l2wac(uint64_t val)
+{
+       cpwgr("<0, 0x34>", (uint32_t)  val);
+       cpwgr("<0, 0x35>", (uint32_t) (val >> 32));
+}
+
+/* l2cache write miss counter */
+static uint64_t csky_pmu_read_l2wmc(void)
+{
+       uint32_t lo, hi, tmp;
+       uint64_t result;
+
+       do {
+               tmp = cprgr("<0, 0x37>");
+               lo  = cprgr("<0, 0x36>");
+               hi  = cprgr("<0, 0x37>");
+       } while (hi != tmp);
+
+       result = (uint64_t) (hi) << 32;
+       result |= lo;
+
+       return result;
+}
+
+static void csky_pmu_write_l2wmc(uint64_t val)
+{
+       cpwgr("<0, 0x36>", (uint32_t)  val);
+       cpwgr("<0, 0x37>", (uint32_t) (val >> 32));
+}
+
+#define HW_OP_UNSUPPORTED      0xffff
+/*
+ * Generic PERF_TYPE_HARDWARE event -> raw counter id.  The values index
+ * hw_raw_read_mapping[]; HW_OP_UNSUPPORTED marks events this PMU cannot
+ * count.
+ */
+static const int csky_pmu_hw_map[PERF_COUNT_HW_MAX] = {
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0x1,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x2,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]            = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0xf,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0xe,
+       [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_REF_CPU_CYCLES]          = HW_OP_UNSUPPORTED,
+};
+
+#define C(_x)                  PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED   0xffff
+/*
+ * (cache, op, result) triple -> raw counter id, consumed by
+ * csky_pmu_cache_event().  CACHE_OP_UNSUPPORTED marks combinations the
+ * hardware has no counter for.
+ */
+static const int csky_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+       [C(L1D)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = 0x14,
+                       [C(RESULT_MISS)]        = 0x15,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = 0x16,
+                       [C(RESULT_MISS)]        = 0x17,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = 0x5,
+                       [C(RESULT_MISS)]        = 0x6,
+               },
+       },
+       [C(L1I)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = 0x3,
+                       [C(RESULT_MISS)]        = 0x4,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(LL)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = 0x18,
+                       [C(RESULT_MISS)]        = 0x19,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = 0x1a,
+                       [C(RESULT_MISS)]        = 0x1b,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = 0x7,
+                       [C(RESULT_MISS)]        = 0x8,
+               },
+       },
+       [C(DTLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = 0x5,
+                       [C(RESULT_MISS)]        = 0xb,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(ITLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = 0x3,
+                       [C(RESULT_MISS)]        = 0xa,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(BPU)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(NODE)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+};
+
+/*
+ * Fold the hardware counter's progress since the last snapshot into the
+ * event count.  The delta is computed in 64 bits, so wrap-around of the
+ * raw counter between reads is handled by two's-complement arithmetic.
+ */
+static void csky_perf_event_update(struct perf_event *event,
+                                  struct hw_perf_event *hwc)
+{
+       uint64_t prev_raw_count = local64_read(&hwc->prev_count);
+       uint64_t new_raw_count = hw_raw_read_mapping[hwc->idx]();
+       int64_t delta = new_raw_count - prev_raw_count;
+
+       /*
+        * We aren't afraid of hwc->prev_count changing beneath our feet
+        * because there's no way for us to re-enter this function anytime.
+        */
+       local64_set(&hwc->prev_count, new_raw_count);
+       local64_add(delta, &event->count);
+       local64_sub(delta, &hwc->period_left);
+}
+
+/* pmu::read callback: refresh the event's count from the raw counter. */
+static void csky_pmu_read(struct perf_event *event)
+{
+       csky_perf_event_update(event, &event->hw);
+}
+
+/*
+ * Decode a PERF_TYPE_HW_CACHE config word (type | op << 8 | result << 16)
+ * into a raw counter id.  Returns the id, CACHE_OP_UNSUPPORTED for a
+ * valid-but-uncountable combination, or -EINVAL for out-of-range fields.
+ */
+static int csky_pmu_cache_event(u64 config)
+{
+       unsigned int type   = (config >>  0) & 0xff;
+       unsigned int op     = (config >>  8) & 0xff;
+       unsigned int result = (config >> 16) & 0xff;
+
+       if (type >= PERF_COUNT_HW_CACHE_MAX ||
+           op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+           result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+               return -EINVAL;
+
+       return csky_pmu_cache_map[type][op][result];
+}
+
+/*
+ * pmu::event_init callback: translate the perf attr into a raw counter
+ * index (hwc->idx) and record the user/kernel filter in csky_pmu.hpcr.
+ * Returns 0 on success or -ENOENT when the event cannot be counted.
+ */
+static int csky_pmu_event_init(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       int ret;
+
+       /* Select which privilege levels are counted (HPCR bits 2/3). */
+       if (event->attr.exclude_user)
+               csky_pmu.hpcr = BIT(2);
+       else if (event->attr.exclude_kernel)
+               csky_pmu.hpcr = BIT(3);
+       else
+               csky_pmu.hpcr = BIT(2) | BIT(3);
+
+       csky_pmu.hpcr |= BIT(1) | BIT(0);
+
+       switch (event->attr.type) {
+       case PERF_TYPE_HARDWARE:
+               if (event->attr.config >= PERF_COUNT_HW_MAX)
+                       return -ENOENT;
+               ret = csky_pmu_hw_map[event->attr.config];
+               if (ret == HW_OP_UNSUPPORTED)
+                       return -ENOENT;
+               hwc->idx = ret;
+               return 0;
+       case PERF_TYPE_HW_CACHE:
+               ret = csky_pmu_cache_event(event->attr.config);
+               /*
+                * csky_pmu_cache_event() returns -EINVAL for malformed
+                * configs; reject those as well as unsupported combos so
+                * a negative value never ends up in hwc->idx.
+                */
+               if (ret < 0 || ret == CACHE_OP_UNSUPPORTED)
+                       return -ENOENT;
+               hwc->idx = ret;
+               return 0;
+       case PERF_TYPE_RAW:
+               /*
+                * attr.config is user-controlled: bound-check it before
+                * indexing hw_raw_read_mapping[] (out-of-bounds read
+                * otherwise).
+                */
+               if (event->attr.config >= CSKY_PMU_MAX_EVENTS)
+                       return -ENOENT;
+               if (hw_raw_read_mapping[event->attr.config] == NULL)
+                       return -ENOENT;
+               hwc->idx = event->attr.config;
+               return 0;
+       default:
+               return -ENOENT;
+       }
+}
+
+/* starts all counters */
+static void csky_pmu_enable(struct pmu *pmu)
+{
+       cpwcr(HPCR, csky_pmu.hpcr);
+}
+
+/* stops all counters */
+static void csky_pmu_disable(struct pmu *pmu)
+{
+       /* NOTE(review): only BIT(1) is left set here; presumably that is
+        * the HPCR enable-gate kept for later re-enable — confirm against
+        * the C-SKY PMU manual. */
+       cpwcr(HPCR, BIT(1));
+}
+
+/*
+ * pmu::start callback: clear the stopped state and set this event's bit
+ * in the count-enable register without disturbing other counters.
+ */
+static void csky_pmu_start(struct perf_event *event, int flags)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
+
+       if (WARN_ON_ONCE(idx == -1))
+               return;
+
+       /* Reload is only legal when the count is up to date. */
+       if (flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+       hwc->state = 0;
+
+       cpwcr(HPCNTENR, BIT(idx) | cprcr(HPCNTENR));
+}
+
+/*
+ * pmu::stop callback: clear this event's count-enable bit, then (if
+ * PERF_EF_UPDATE) fold the final counter value into the event count.
+ */
+static void csky_pmu_stop(struct perf_event *event, int flags)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
+
+       if (!(event->hw.state & PERF_HES_STOPPED)) {
+               cpwcr(HPCNTENR, ~BIT(idx) & cprcr(HPCNTENR));
+               event->hw.state |= PERF_HES_STOPPED;
+       }
+
+       if ((flags & PERF_EF_UPDATE) &&
+           !(event->hw.state & PERF_HES_UPTODATE)) {
+               csky_perf_event_update(event, &event->hw);
+               event->hw.state |= PERF_HES_UPTODATE;
+       }
+}
+
+/*
+ * pmu::del callback: stop counting and publish the final count.  No
+ * counter bookkeeping to release — each event id owns a fixed counter.
+ */
+static void csky_pmu_del(struct perf_event *event, int flags)
+{
+       csky_pmu_stop(event, PERF_EF_UPDATE);
+
+       perf_event_update_userpage(event);
+}
+
+/* allocate hardware counter and optionally start counting */
+static int csky_pmu_add(struct perf_event *event, int flags)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       local64_set(&hwc->prev_count, 0);
+
+       if (hw_raw_write_mapping[hwc->idx] != NULL)
+               hw_raw_write_mapping[hwc->idx](0);
+
+       hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+       if (flags & PERF_EF_START)
+               csky_pmu_start(event, PERF_EF_RELOAD);
+
+       perf_event_update_userpage(event);
+
+       return 0;
+}
+
+/*
+ * Register the C-SKY PMU: populate the raw-event accessor tables, mark
+ * the PMU interrupt-less (no sampling), and reset the control register.
+ */
+int __init init_hw_perf_events(void)
+{
+       csky_pmu.pmu = (struct pmu) {
+               .pmu_enable     = csky_pmu_enable,
+               .pmu_disable    = csky_pmu_disable,
+               .event_init     = csky_pmu_event_init,
+               .add            = csky_pmu_add,
+               .del            = csky_pmu_del,
+               .start          = csky_pmu_start,
+               .stop           = csky_pmu_stop,
+               .read           = csky_pmu_read,
+       };
+
+       /*
+        * Clear the whole table.  The original code passed
+        * sizeof(hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS]) — the size of
+        * ONE element — so only the first slot was cleared.  Static
+        * storage is zero-initialized anyway, but make the memset match
+        * its intent.
+        */
+       memset(hw_raw_read_mapping, 0, sizeof(hw_raw_read_mapping));
+
+       hw_raw_read_mapping[0x1]  = csky_pmu_read_cc;
+       hw_raw_read_mapping[0x2]  = csky_pmu_read_ic;
+       hw_raw_read_mapping[0x3]  = csky_pmu_read_icac;
+       hw_raw_read_mapping[0x4]  = csky_pmu_read_icmc;
+       hw_raw_read_mapping[0x5]  = csky_pmu_read_dcac;
+       hw_raw_read_mapping[0x6]  = csky_pmu_read_dcmc;
+       hw_raw_read_mapping[0x7]  = csky_pmu_read_l2ac;
+       hw_raw_read_mapping[0x8]  = csky_pmu_read_l2mc;
+       hw_raw_read_mapping[0xa]  = csky_pmu_read_iutlbmc;
+       hw_raw_read_mapping[0xb]  = csky_pmu_read_dutlbmc;
+       hw_raw_read_mapping[0xc]  = csky_pmu_read_jtlbmc;
+       hw_raw_read_mapping[0xd]  = csky_pmu_read_softc;
+       hw_raw_read_mapping[0xe]  = csky_pmu_read_cbmc;
+       hw_raw_read_mapping[0xf]  = csky_pmu_read_cbic;
+       hw_raw_read_mapping[0x10] = csky_pmu_read_ibmc;
+       hw_raw_read_mapping[0x11] = csky_pmu_read_ibic;
+       hw_raw_read_mapping[0x12] = csky_pmu_read_lsfc;
+       hw_raw_read_mapping[0x13] = csky_pmu_read_sic;
+       hw_raw_read_mapping[0x14] = csky_pmu_read_dcrac;
+       hw_raw_read_mapping[0x15] = csky_pmu_read_dcrmc;
+       hw_raw_read_mapping[0x16] = csky_pmu_read_dcwac;
+       hw_raw_read_mapping[0x17] = csky_pmu_read_dcwmc;
+       hw_raw_read_mapping[0x18] = csky_pmu_read_l2rac;
+       hw_raw_read_mapping[0x19] = csky_pmu_read_l2rmc;
+       hw_raw_read_mapping[0x1a] = csky_pmu_read_l2wac;
+       hw_raw_read_mapping[0x1b] = csky_pmu_read_l2wmc;
+
+       /* Same single-element-sizeof fix as above. */
+       memset(hw_raw_write_mapping, 0, sizeof(hw_raw_write_mapping));
+
+       hw_raw_write_mapping[0x1]  = csky_pmu_write_cc;
+       hw_raw_write_mapping[0x2]  = csky_pmu_write_ic;
+       hw_raw_write_mapping[0x3]  = csky_pmu_write_icac;
+       hw_raw_write_mapping[0x4]  = csky_pmu_write_icmc;
+       hw_raw_write_mapping[0x5]  = csky_pmu_write_dcac;
+       hw_raw_write_mapping[0x6]  = csky_pmu_write_dcmc;
+       hw_raw_write_mapping[0x7]  = csky_pmu_write_l2ac;
+       hw_raw_write_mapping[0x8]  = csky_pmu_write_l2mc;
+       hw_raw_write_mapping[0xa]  = csky_pmu_write_iutlbmc;
+       hw_raw_write_mapping[0xb]  = csky_pmu_write_dutlbmc;
+       hw_raw_write_mapping[0xc]  = csky_pmu_write_jtlbmc;
+       hw_raw_write_mapping[0xd]  = csky_pmu_write_softc;
+       hw_raw_write_mapping[0xe]  = csky_pmu_write_cbmc;
+       hw_raw_write_mapping[0xf]  = csky_pmu_write_cbic;
+       hw_raw_write_mapping[0x10] = csky_pmu_write_ibmc;
+       hw_raw_write_mapping[0x11] = csky_pmu_write_ibic;
+       hw_raw_write_mapping[0x12] = csky_pmu_write_lsfc;
+       hw_raw_write_mapping[0x13] = csky_pmu_write_sic;
+       hw_raw_write_mapping[0x14] = csky_pmu_write_dcrac;
+       hw_raw_write_mapping[0x15] = csky_pmu_write_dcrmc;
+       hw_raw_write_mapping[0x16] = csky_pmu_write_dcwac;
+       hw_raw_write_mapping[0x17] = csky_pmu_write_dcwmc;
+       hw_raw_write_mapping[0x18] = csky_pmu_write_l2rac;
+       hw_raw_write_mapping[0x19] = csky_pmu_write_l2rmc;
+       hw_raw_write_mapping[0x1a] = csky_pmu_write_l2wac;
+       hw_raw_write_mapping[0x1b] = csky_pmu_write_l2wmc;
+
+       /* No overflow interrupt: the core treats this PMU as non-sampling. */
+       csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
+       cpwcr(HPCR, BIT(31) | BIT(30) | BIT(1));
+
+       return perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW);
+}
+arch_initcall(init_hw_perf_events);
index 8ed20028b1609e129918c3415e884543affdc108..e555740c0be5768d001c81fe8ff91e98e71e0e4b 100644 (file)
@@ -93,26 +93,31 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
 
 unsigned long get_wchan(struct task_struct *p)
 {
-       unsigned long esp, pc;
-       unsigned long stack_page;
+       unsigned long lr;
+       unsigned long *fp, *stack_start, *stack_end;
        int count = 0;
 
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
 
-       stack_page = (unsigned long)p;
-       esp = p->thread.esp0;
+       stack_start = (unsigned long *)end_of_stack(p);
+       stack_end = (unsigned long *)(task_stack_page(p) + THREAD_SIZE);
+
+       fp = (unsigned long *) thread_saved_fp(p);
        do {
-               if (esp < stack_page+sizeof(struct task_struct) ||
-                   esp >= 8184+stack_page)
+               if (fp < stack_start || fp > stack_end)
                        return 0;
-               /*FIXME: There's may be error here!*/
-               pc = ((unsigned long *)esp)[1];
-               /* FIXME: This depends on the order of these functions. */
-               if (!in_sched_functions(pc))
-                       return pc;
-               esp = *(unsigned long *) esp;
+#ifdef CONFIG_STACKTRACE
+               lr = fp[1];
+               fp = (unsigned long *)fp[0];
+#else
+               lr = *fp++;
+#endif
+               if (!in_sched_functions(lr) &&
+                   __kernel_text_address(lr))
+                       return lr;
        } while (count++ < 16);
+
        return 0;
 }
 EXPORT_SYMBOL(get_wchan);
index 34b30257298f80e2a2645ed12c94b9aca0b0350b..57f1afe19a52cb7896021a47691fc17c37d0b4bb 100644 (file)
@@ -50,15 +50,11 @@ static void singlestep_enable(struct task_struct *tsk)
  */
 void user_enable_single_step(struct task_struct *child)
 {
-       if (child->thread.esp0 == 0)
-               return;
        singlestep_enable(child);
 }
 
 void user_disable_single_step(struct task_struct *child)
 {
-       if (child->thread.esp0 == 0)
-               return;
        singlestep_disable(child);
 }
 
@@ -95,7 +91,9 @@ static int gpr_set(struct task_struct *target,
                return ret;
 
        regs.sr = task_pt_regs(target)->sr;
-
+#ifdef CONFIG_CPU_HAS_HILO
+       regs.dcsr = task_pt_regs(target)->dcsr;
+#endif
        task_thread_info(target)->tp_value = regs.tls;
 
        *task_pt_regs(target) = regs;
@@ -239,6 +237,7 @@ asmlinkage void syscall_trace(int why, struct pt_regs *regs)
        regs->regs[SYSTRACE_SAVENUM] = saved_why;
 }
 
+extern void show_stack(struct task_struct *task, unsigned long *stack);
 void show_regs(struct pt_regs *fp)
 {
        unsigned long   *sp;
@@ -261,35 +260,37 @@ void show_regs(struct pt_regs *fp)
                       (int) (((unsigned long) current) + 2 * PAGE_SIZE));
        }
 
-       pr_info("PC: 0x%08lx\n", (long)fp->pc);
+       pr_info("PC: 0x%08lx (%pS)\n", (long)fp->pc, (void *)fp->pc);
+       pr_info("LR: 0x%08lx (%pS)\n", (long)fp->lr, (void *)fp->lr);
+       pr_info("SP: 0x%08lx\n", (long)fp);
        pr_info("orig_a0: 0x%08lx\n", fp->orig_a0);
        pr_info("PSR: 0x%08lx\n", (long)fp->sr);
 
-       pr_info("a0: 0x%08lx  a1: 0x%08lx  a2: 0x%08lx  a3: 0x%08lx\n",
-              fp->a0, fp->a1, fp->a2, fp->a3);
+       pr_info(" a0: 0x%08lx   a1: 0x%08lx   a2: 0x%08lx   a3: 0x%08lx\n",
+               fp->a0, fp->a1, fp->a2, fp->a3);
 #if defined(__CSKYABIV2__)
-       pr_info("r4: 0x%08lx  r5: 0x%08lx    r6: 0x%08lx    r7: 0x%08lx\n",
+       pr_info(" r4: 0x%08lx   r5: 0x%08lx   r6: 0x%08lx   r7: 0x%08lx\n",
                fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]);
-       pr_info("r8: 0x%08lx  r9: 0x%08lx   r10: 0x%08lx   r11: 0x%08lx\n",
+       pr_info(" r8: 0x%08lx   r9: 0x%08lx  r10: 0x%08lx  r11: 0x%08lx\n",
                fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]);
-       pr_info("r12 0x%08lx  r13: 0x%08lx   r15: 0x%08lx\n",
+       pr_info("r12: 0x%08lx  r13: 0x%08lx  r15: 0x%08lx\n",
                fp->regs[8], fp->regs[9], fp->lr);
-       pr_info("r16:0x%08lx   r17: 0x%08lx   r18: 0x%08lx    r19: 0x%08lx\n",
+       pr_info("r16: 0x%08lx  r17: 0x%08lx  r18: 0x%08lx  r19: 0x%08lx\n",
                fp->exregs[0], fp->exregs[1], fp->exregs[2], fp->exregs[3]);
-       pr_info("r20 0x%08lx   r21: 0x%08lx   r22: 0x%08lx    r23: 0x%08lx\n",
+       pr_info("r20: 0x%08lx  r21: 0x%08lx  r22: 0x%08lx  r23: 0x%08lx\n",
                fp->exregs[4], fp->exregs[5], fp->exregs[6], fp->exregs[7]);
-       pr_info("r24 0x%08lx   r25: 0x%08lx   r26: 0x%08lx    r27: 0x%08lx\n",
+       pr_info("r24: 0x%08lx  r25: 0x%08lx  r26: 0x%08lx  r27: 0x%08lx\n",
                fp->exregs[8], fp->exregs[9], fp->exregs[10], fp->exregs[11]);
-       pr_info("r28 0x%08lx   r29: 0x%08lx   r30: 0x%08lx    tls: 0x%08lx\n",
+       pr_info("r28: 0x%08lx  r29: 0x%08lx  r30: 0x%08lx  tls: 0x%08lx\n",
                fp->exregs[12], fp->exregs[13], fp->exregs[14], fp->tls);
-       pr_info("hi 0x%08lx    lo: 0x%08lx\n",
+       pr_info(" hi: 0x%08lx   lo: 0x%08lx\n",
                fp->rhi, fp->rlo);
 #else
-       pr_info("r6: 0x%08lx   r7: 0x%08lx   r8: 0x%08lx   r9: 0x%08lx\n",
+       pr_info(" r6: 0x%08lx   r7: 0x%08lx   r8: 0x%08lx   r9: 0x%08lx\n",
                fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]);
-       pr_info("r10: 0x%08lx   r11: 0x%08lx   r12: 0x%08lx   r13: 0x%08lx\n",
+       pr_info("r10: 0x%08lx  r11: 0x%08lx  r12: 0x%08lx  r13: 0x%08lx\n",
                fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]);
-       pr_info("r14 0x%08lx   r1: 0x%08lx   r15: 0x%08lx\n",
+       pr_info("r14: 0x%08lx   r1: 0x%08lx  r15: 0x%08lx\n",
                fp->regs[8], fp->regs[9], fp->lr);
 #endif
 
@@ -311,4 +312,7 @@ void show_regs(struct pt_regs *fp)
                pr_cont("%08x ", (int) *sp++);
        }
        pr_cont("\n");
+
+       show_stack(NULL, (unsigned long *)fp->regs[4]);
+       return;
 }
index 9967c10eee2bff00f7bab79ef4ab3fd1b6023430..207a891479d26e63100ea88c7665a673a6574f62 100644 (file)
@@ -238,8 +238,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
        if (!user_mode(regs))
                return;
 
-       current->thread.esp0 = (unsigned long)regs;
-
        /*
         * If we were from a system call, check for system call restarting...
         */
index 36ebaf9834e1c4221747a24605ba5288b8ad815b..ddc4dd79f2826f837b3557a239043f7b0aa1c162 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/of.h>
 #include <linux/sched/task_stack.h>
 #include <linux/sched/mm.h>
+#include <linux/sched/hotplug.h>
 #include <asm/irq.h>
 #include <asm/traps.h>
 #include <asm/sections.h>
@@ -112,12 +113,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 }
 
-static void __init enable_smp_ipi(void)
-{
-       enable_percpu_irq(ipi_irq, 0);
-}
-
 static int ipi_dummy_dev;
+
 void __init setup_smp_ipi(void)
 {
        int rc;
@@ -130,7 +127,7 @@ void __init setup_smp_ipi(void)
        if (rc)
                panic("%s IRQ request failed\n", __func__);
 
-       enable_smp_ipi();
+       enable_percpu_irq(ipi_irq, 0);
 }
 
 void __init setup_smp(void)
@@ -138,7 +135,7 @@ void __init setup_smp(void)
        struct device_node *node = NULL;
        int cpu;
 
-       while ((node = of_find_node_by_type(node, "cpu"))) {
+       for_each_of_cpu_node(node) {
                if (!of_device_is_available(node))
                        continue;
 
@@ -161,12 +158,10 @@ volatile unsigned int secondary_stack;
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-       unsigned int tmp;
-
-       secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE;
+       unsigned long mask = 1 << cpu;
 
+       secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8;
        secondary_hint = mfcr("cr31");
-
        secondary_ccr  = mfcr("cr18");
 
        /*
@@ -176,10 +171,13 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
         */
        mtcr("cr17", 0x22);
 
-       /* Enable cpu in SMP reset ctrl reg */
-       tmp = mfcr("cr<29, 0>");
-       tmp |= 1 << cpu;
-       mtcr("cr<29, 0>", tmp);
+       if (mask & mfcr("cr<29, 0>")) {
+               send_arch_ipi(cpumask_of(cpu));
+       } else {
+               /* Enable cpu in SMP reset ctrl reg */
+               mask |= mfcr("cr<29, 0>");
+               mtcr("cr<29, 0>", mask);
+       }
 
        /* Wait for the cpu online */
        while (!cpu_online(cpu));
@@ -219,7 +217,7 @@ void csky_start_secondary(void)
        init_fpu();
 #endif
 
-       enable_smp_ipi();
+       enable_percpu_irq(ipi_irq, 0);
 
        mmget(mm);
        mmgrab(mm);
@@ -235,3 +233,46 @@ void csky_start_secondary(void)
        preempt_disable();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+int __cpu_disable(void)
+{
+       unsigned int cpu = smp_processor_id();
+
+       set_cpu_online(cpu, false);
+
+       irq_migrate_all_off_this_cpu();
+
+       clear_tasks_mm_cpumask(cpu);
+
+       return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       if (!cpu_wait_death(cpu, 5)) {
+               pr_crit("CPU%u: shutdown failed\n", cpu);
+               return;
+       }
+       pr_notice("CPU%u: shutdown\n", cpu);
+}
+
+void arch_cpu_idle_dead(void)
+{
+       idle_task_exit();
+
+       cpu_report_death();
+
+       while (!secondary_stack)
+               arch_cpu_idle();
+
+       local_irq_disable();
+
+       asm volatile(
+               "mov    sp, %0\n"
+               "mov    r8, %0\n"
+               "jmpi   csky_start_secondary"
+               :
+               : "r" (secondary_stack));
+}
+#endif
diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c
new file mode 100644 (file)
index 0000000..fec777a
--- /dev/null
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */
+
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+#include <linux/ftrace.h>
+
+void save_stack_trace(struct stack_trace *trace)
+{
+       save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+       unsigned long *fp, *stack_start, *stack_end;
+       unsigned long addr;
+       int skip = trace->skip;
+       int savesched;
+       int graph_idx = 0;
+
+       if (tsk == current) {
+               asm volatile("mov %0, r8\n":"=r"(fp));
+               savesched = 1;
+       } else {
+               fp = (unsigned long *)thread_saved_fp(tsk);
+               savesched = 0;
+       }
+
+       addr = (unsigned long) fp & THREAD_MASK;
+       stack_start = (unsigned long *) addr;
+       stack_end = (unsigned long *) (addr + THREAD_SIZE);
+
+       while (fp > stack_start && fp < stack_end) {
+               unsigned long lpp, fpp;
+
+               fpp = fp[0];
+               lpp = fp[1];
+               if (!__kernel_text_address(lpp))
+                       break;
+               else
+                       lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
+
+               if (savesched || !in_sched_functions(lpp)) {
+                       if (skip) {
+                               skip--;
+                       } else {
+                               trace->entries[trace->nr_entries++] = lpp;
+                               if (trace->nr_entries >= trace->max_entries)
+                                       break;
+                       }
+               }
+               fp = (unsigned long *)fpp;
+       }
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
index a8368ed43517c3d403bcad793bb426fd59b9ac90..f487a9b996ae1e4e23b33d34db193dcbf2205279 100644 (file)
@@ -106,7 +106,6 @@ void buserr(struct pt_regs *regs)
        pr_err("User mode Bus Error\n");
        show_regs(regs);
 
-       current->thread.esp0 = (unsigned long) regs;
        force_sig_fault(SIGSEGV, 0, (void __user *)regs->pc, current);
 }
 
@@ -162,8 +161,3 @@ asmlinkage void trap_c(struct pt_regs *regs)
        }
        send_sig(sig, current, 0);
 }
-
-asmlinkage void set_esp0(unsigned long ssp)
-{
-       current->thread.esp0 = ssp;
-}
index 7df57f90b52ccc948acea7a7bdcc94e0d012329d..d6f4b66b93e21c8ede70e2cc4070861d6df7730d 100644 (file)
@@ -172,8 +172,6 @@ bad_area:
 bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
-               tsk->thread.address = address;
-               tsk->thread.error_code = write;
                force_sig_fault(SIGSEGV, si_code, (void __user *)address, current);
                return;
        }
@@ -188,8 +186,8 @@ no_context:
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);
-       pr_alert("Unable to %s at vaddr: %08lx, epc: %08lx\n",
-                __func__, address, regs->pc);
+       pr_alert("Unable to handle kernel paging request at virtual "
+                "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc);
        die_if_kernel("Oops", regs, write);
 
 out_of_memory:
@@ -207,6 +205,5 @@ do_sigbus:
        if (!user_mode(regs))
                goto no_context;
 
-       tsk->thread.address = address;
        force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current);
 }
index 7ad3ff103f4a865ab48f533c1d721daef2d2ccf9..cb7c03e5cd218a4236d1cb686a2cc36f0e4f91ff 100644 (file)
@@ -30,7 +30,7 @@ void __iomem *ioremap(phys_addr_t addr, size_t size)
        vaddr = (unsigned long)area->addr;
 
        prot = __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE |
-                       _PAGE_GLOBAL | _CACHE_UNCACHED);
+                       _PAGE_GLOBAL | _CACHE_UNCACHED | _PAGE_SO);
 
        if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
                free_vm_area(area);
index a8acc431a774c61cd526abe4c945acd2b7f301cb..183a9955160a80dd89bdc2d2c82cd358a487c6e9 100644 (file)
@@ -79,11 +79,11 @@ static int csky_mptimer_starting_cpu(unsigned int cpu)
 
        to->clkevt.cpumask = cpumask_of(cpu);
 
+       enable_percpu_irq(csky_mptimer_irq, 0);
+
        clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
                                        2, ULONG_MAX);
 
-       enable_percpu_irq(csky_mptimer_irq, 0);
-
        return 0;
 }
 
@@ -97,7 +97,7 @@ static int csky_mptimer_dying_cpu(unsigned int cpu)
 /*
  * clock source
  */
-static u64 sched_clock_read(void)
+static u64 notrace sched_clock_read(void)
 {
        return (u64)mfcr(PTIM_CCVR);
 }
index 9e67fd359d589934b331679ae08bf6aa8157bb11..36a7e3f18e6999b56d02585d6f774463ee0d41d6 100644 (file)
@@ -378,6 +378,7 @@ enum {
 #define AUDIT_ARCH_ARM         (EM_ARM|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARMEB       (EM_ARM)
 #define AUDIT_ARCH_CRIS                (EM_CRIS|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_CSKY                (EM_CSKY|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_FRV         (EM_FRV)
 #define AUDIT_ARCH_I386                (EM_386|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_IA64                (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
index d2fb964432f335f95c27b9574ea3c22980c2200b..0c3000faedbae3f831c5e62c4cd0676e1a71ead5 100644 (file)
@@ -44,6 +44,7 @@
 #define EM_TILEGX      191     /* Tilera TILE-Gx */
 #define EM_RISCV       243     /* RISC-V */
 #define EM_BPF         247     /* Linux BPF - in-kernel virtual machine */
+#define EM_CSKY                252     /* C-SKY */
 #define EM_FRV         0x5441  /* Fujitsu FR-V */
 
 /*