/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#ifndef _JAILHOUSE_ASM_PERCPU_H
#define _JAILHOUSE_ASM_PERCPU_H

#include <jailhouse/types.h>
#include <asm/paging.h>

#define NUM_ENTRY_REGS                  13

/* Keep in sync with struct per_cpu! */
#define PERCPU_SIZE_SHIFT               13
#define PERCPU_STACK_END                PAGE_SIZE
#define PERCPU_LINUX_SP                 PERCPU_STACK_END
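
/*
 * Layout note, derived from the checks at the bottom of this file: each
 * per-CPU region is 1 << PERCPU_SIZE_SHIFT bytes (8 KiB), the first
 * PAGE_SIZE bytes of it hold the stack, and linux_sp sits right behind
 * the stack, which is why PERCPU_LINUX_SP equals PERCPU_STACK_END.
 */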

#ifndef __ASSEMBLY__

#include <asm/cell.h>
#include <asm/psci.h>
#include <asm/spinlock.h>
#include <asm/sysregs.h>

struct pending_irq;

struct per_cpu {
        /* Keep these two in sync with defines above! */
        u8 stack[PAGE_SIZE];
        unsigned long linux_sp;
        unsigned long linux_ret;
        unsigned long linux_flags;
        unsigned long linux_reg[NUM_ENTRY_REGS];

        unsigned int cpu_id;
        unsigned int virt_id;

        /* Other CPUs can insert SGIs into the pending array */
        spinlock_t gic_lock;
        struct pending_irq *pending_irqs;
        struct pending_irq *first_pending;
        /* Only GICv3: redistributor base */
        void *gicr_base;

        struct cell *cell;

        u32 stats[JAILHOUSE_NUM_CPU_STATS];

        bool initialized;

        /* The mbox will be accessed with an ldrd, which requires alignment */
        __attribute__((aligned(8))) struct psci_mbox psci_mbox;

        bool flush_vcpu_caches;
        int shutdown_state;
        bool failed;
} __attribute__((aligned(PAGE_SIZE)));
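
/*
 * Note: virt_id holds the virtual CPU id this CPU has within its current
 * cell; arm_cpu_phys2virt() and arm_cpu_virt2phys() near the end of this
 * file translate between it and the physical cpu_id.
 */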

static inline struct per_cpu *this_cpu_data(void)
{
        struct per_cpu *cpu_data;

        arm_read_sysreg(TPIDR_EL2, cpu_data);
        return cpu_data;
}
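
/*
 * this_cpu_data() relies on the per-CPU setup code having stored the
 * address of this CPU's per_cpu structure in TPIDR_EL2; once that has
 * happened, the accessor works from any hypervisor context.
 */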

#define DEFINE_PER_CPU_ACCESSOR(field)                                  \
static inline typeof(((struct per_cpu *)0)->field) this_##field(void)   \
{                                                                       \
        return this_cpu_data()->field;                                  \
}

DEFINE_PER_CPU_ACCESSOR(cpu_id)
DEFINE_PER_CPU_ACCESSOR(cell)
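
/*
 * For illustration, DEFINE_PER_CPU_ACCESSOR(cell) expands to
 *
 *   static inline typeof(((struct per_cpu *)0)->cell) this_cell(void)
 *   {
 *           return this_cpu_data()->cell;
 *   }
 *
 * so this_cell() returns the current CPU's cell and this_cpu_id() its
 * cpu_id.
 */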

static inline struct per_cpu *per_cpu(unsigned int cpu)
{
        extern u8 __page_pool[];

        return (struct per_cpu *)(__page_pool + (cpu << PERCPU_SIZE_SHIFT));
}
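
/*
 * The per-CPU regions occupy the start of __page_pool at strides of
 * 1 << PERCPU_SIZE_SHIFT bytes, so e.g. per_cpu(2) resolves to
 * __page_pool + 0x4000.
 */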

static inline struct registers *guest_regs(struct per_cpu *cpu_data)
{
        /* Assumes that the trap handler is entered with an empty stack */
        return (struct registers *)(cpu_data->stack + PERCPU_STACK_END
                        - sizeof(struct registers));
}
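
/*
 * The exception entry path is expected to push the guest's struct
 * registers onto the (then empty) per-CPU stack, so the saved frame ends
 * at cpu_data->stack + PERCPU_STACK_END; guest_regs() just recomputes
 * where that frame starts.
 */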

static inline unsigned int arm_cpu_phys2virt(unsigned int cpu_id)
{
        return per_cpu(cpu_id)->virt_id;
}

unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id);
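
/*
 * The phys-to-virt direction is a trivial field read and stays inline.
 * Virtual ids are only meaningful relative to a cell, hence the reverse
 * lookup takes the cell as an argument and is only declared here and
 * defined elsewhere.
 */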

/* Validate defines */
#define CHECK_ASSUMPTION(assume)        ((void)sizeof(char[1 - 2*!(assume)]))
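
/*
 * CHECK_ASSUMPTION() is a compile-time assertion: if "assume" is false,
 * the array type becomes char[-1] and compilation fails; if it is true,
 * the expression is (void)sizeof(char[1]) and produces no code.
 * __check_assumptions() below does not need to be called; compiling it
 * is enough to have the checks evaluated.
 */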

static inline void __check_assumptions(void)
{
        struct per_cpu cpu_data;

        CHECK_ASSUMPTION(sizeof(struct per_cpu) == (1 << PERCPU_SIZE_SHIFT));
        CHECK_ASSUMPTION(sizeof(cpu_data.stack) == PERCPU_STACK_END);
        CHECK_ASSUMPTION(__builtin_offsetof(struct per_cpu, linux_sp) ==
                         PERCPU_LINUX_SP);
}
#endif /* !__ASSEMBLY__ */

#endif /* !_JAILHOUSE_ASM_PERCPU_H */