Merge remote-tracking branch 'kiszka/master'
diff --git a/hypervisor/arch/arm/include/asm/percpu.h b/hypervisor/arch/arm/include/asm/percpu.h
index 128cb9f104faba1c2bc78e524397851a7b92a3c2..0e9eac87abb767801ae271663c057b0abc80b829 100644
--- a/hypervisor/arch/arm/include/asm/percpu.h
+++ b/hypervisor/arch/arm/include/asm/percpu.h
 
 #define NUM_ENTRY_REGS                 13
 
-/* Keep in sync with struct per_cpu! */
-#define PERCPU_SIZE_SHIFT              13
-#define PERCPU_STACK_END               PAGE_SIZE
-#define PERCPU_LINUX_SP                        PERCPU_STACK_END
-
 #ifndef __ASSEMBLY__
 
-#include <asm/cell.h>
+#include <jailhouse/cell.h>
+#include <asm/irqchip.h>
+#include <asm/psci.h>
+#include <asm/spinlock.h>
+#include <asm/sysregs.h>
+
+/* Round up sizeof(struct per_cpu) to the next power of two. */
+#define PERCPU_SIZE_SHIFT \
+       (BITS_PER_LONG - __builtin_clzl(sizeof(struct per_cpu) - 1))
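+/*
+ * Worked example (assuming 32-bit longs and a hypothetical
+ * sizeof(struct per_cpu) of 0x1340): __builtin_clzl(0x133f) == 19,
+ * so the shift is 32 - 19 == 13 and each per-CPU region occupies
+ * 1 << 13 == 8 KiB.
+ */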
 
 struct per_cpu {
-       /* Keep these two in sync with defines above! */
        u8 stack[PAGE_SIZE];
        unsigned long linux_sp;
        unsigned long linux_ret;
        unsigned long linux_flags;
        unsigned long linux_reg[NUM_ENTRY_REGS];
 
-       struct per_cpu *cpu_data;
        unsigned int cpu_id;
-//     u32 apic_id;
+       unsigned int virt_id;           /* cell-local (virtual) CPU id */
+
+       /* synchronizes parallel insertions of SGIs into the pending ring */
+       spinlock_t pending_irqs_lock;
+       u16 pending_irqs[MAX_PENDING_IRQS];
+       unsigned int pending_irqs_head;
+       /* removal from the ring happens locklessly, thus tail is volatile */
+       volatile unsigned int pending_irqs_tail;
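+       /*
+        * Consumption sketch (illustrative only, not part of this header):
+        * the owning CPU drains the ring without taking the lock:
+        *
+        *   while (cpu_data->pending_irqs_tail != cpu_data->pending_irqs_head) {
+        *           irq_id = cpu_data->pending_irqs[cpu_data->pending_irqs_tail];
+        *           ...inject irq_id...
+        *           cpu_data->pending_irqs_tail =
+        *                   (cpu_data->pending_irqs_tail + 1) % MAX_PENDING_IRQS;
+        *   }
+        */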
+       /* GICv3 only: redistributor base address */
+       void *gicr_base;
+
        struct cell *cell;
 
        u32 stats[JAILHOUSE_NUM_CPU_STATS];
 
        bool initialized;
 
-       bool flush_caches;
-       bool shutdown_cpu;
+       /* The mbox will be accessed with an ldrd, which requires alignment */
+       __attribute__((aligned(8))) struct psci_mbox psci_mbox;
+       struct psci_mbox guest_mbox;
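+       /*
+        * Naming-based assumption: psci_mbox holds the entry/context used
+        * by the hypervisor's own PSCI handling, while guest_mbox stores
+        * the wake-up entry point the guest registered via PSCI.
+        */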
+
+       bool flush_vcpu_caches;
        int shutdown_state;
+       bool shutdown;
+       unsigned long mpidr;            /* cached value of this CPU's MPIDR */
        bool failed;
 } __attribute__((aligned(PAGE_SIZE)));
 
-#define DEFINE_PER_CPU_ACCESSOR(field)                                     \
-static inline typeof(((struct per_cpu *)0)->field) this_##field(void)      \
-{                                                                          \
-       typeof(((struct per_cpu *)0)->field) tmp = 0;                       \
-                                                                           \
-       return tmp;                                                         \
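+/*
+ * TPIDR_EL2 is assumed to be programmed with the address of this CPU's
+ * per_cpu region during early initialization, so reading it back yields
+ * the cpu_data pointer.
+ */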
+static inline struct per_cpu *this_cpu_data(void)
+{
+       struct per_cpu *cpu_data;
+
+       arm_read_sysreg(TPIDR_EL2, cpu_data);
+       return cpu_data;
+}
+
+#define DEFINE_PER_CPU_ACCESSOR(field)                                 \
+static inline typeof(((struct per_cpu *)0)->field) this_##field(void)  \
+{                                                                      \
+       return this_cpu_data()->field;                                  \
 }
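+/*
+ * For instance, DEFINE_PER_CPU_ACCESSOR(cell) expands to
+ *
+ *   static inline struct cell *this_cell(void)
+ *   {
+ *           return this_cpu_data()->cell;
+ *   }
+ */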
 
-DEFINE_PER_CPU_ACCESSOR(cpu_data)
 DEFINE_PER_CPU_ACCESSOR(cpu_id)
 DEFINE_PER_CPU_ACCESSOR(cell)
 
@@ -69,18 +90,19 @@ static inline struct per_cpu *per_cpu(unsigned int cpu)
        return (struct per_cpu *)(__page_pool + (cpu << PERCPU_SIZE_SHIFT));
 }
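+/*
+ * Example: with PERCPU_SIZE_SHIFT == 13 (8 KiB slots), per_cpu(2)
+ * resolves to __page_pool + 0x4000.
+ */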
 
-/* Validate defines */
-#define CHECK_ASSUMPTION(assume)       ((void)sizeof(char[1 - 2*!(assume)]))
-
-static inline void __check_assumptions(void)
+static inline struct registers *guest_regs(struct per_cpu *cpu_data)
 {
-       struct per_cpu cpu_data;
+       /* Assumes that the trap handler is entered with an empty stack */
+       return (struct registers *)(cpu_data->stack + sizeof(cpu_data->stack)
+                       - sizeof(struct registers));
+}
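+/*
+ * Usage sketch: a trap handler can reach the saved guest context via
+ *
+ *   struct registers *regs = guest_regs(this_cpu_data());
+ */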
 
-       CHECK_ASSUMPTION(sizeof(struct per_cpu) == (1 << PERCPU_SIZE_SHIFT));
-       CHECK_ASSUMPTION(sizeof(cpu_data.stack) == PERCPU_STACK_END);
-       CHECK_ASSUMPTION(__builtin_offsetof(struct per_cpu, linux_sp) ==
-                        PERCPU_LINUX_SP);
+static inline unsigned int arm_cpu_phys2virt(unsigned int cpu_id)
+{
+       return per_cpu(cpu_id)->virt_id;
 }
+
+unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id);
 #endif /* !__ASSEMBLY__ */
 
 #endif /* !_JAILHOUSE_ASM_PERCPU_H */