Merge remote-tracking branch 'kiszka/master'

diff --git a/hypervisor/arch/arm/include/asm/percpu.h b/hypervisor/arch/arm/include/asm/percpu.h
index 81fa18015d072256aa886cf42c3862e3d9c186b8..0e9eac87abb767801ae271663c057b0abc80b829 100644
--- a/hypervisor/arch/arm/include/asm/percpu.h
+++ b/hypervisor/arch/arm/include/asm/percpu.h
@@ -21,22 +21,19 @@
 
 #define NUM_ENTRY_REGS                 13
 
-/* Keep in sync with struct per_cpu! */
-#define PERCPU_SIZE_SHIFT              13
-#define PERCPU_STACK_END               PAGE_SIZE
-#define PERCPU_LINUX_SP                        PERCPU_STACK_END
-
 #ifndef __ASSEMBLY__
 
-#include <asm/cell.h>
+#include <jailhouse/cell.h>
+#include <asm/irqchip.h>
 #include <asm/psci.h>
 #include <asm/spinlock.h>
 #include <asm/sysregs.h>
 
-struct pending_irq;
+/* Round up sizeof(struct per_cpu) to the next power of two. */
+#define PERCPU_SIZE_SHIFT \
+       (BITS_PER_LONG - __builtin_clzl(sizeof(struct per_cpu) - 1))
 
 struct per_cpu {
-       /* Keep these two in sync with defines above! */
        u8 stack[PAGE_SIZE];
        unsigned long linux_sp;
        unsigned long linux_ret;
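
Note: PERCPU_SIZE_SHIFT no longer hard-codes 13; it now computes ceil(log2(sizeof(struct per_cpu))) at compile time. For n > 1, __builtin_clzl(n - 1) counts the leading zero bits of n - 1, so BITS_PER_LONG - __builtin_clzl(n - 1) is one past the highest set bit of n - 1, i.e. the exponent of the smallest power of two >= n. The forward reference to struct per_cpu is harmless because the macro is only expanded after the struct is fully defined. A minimal standalone sketch of the identity (helper name and test values are illustrative, not part of the patch):

#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(long))

/* same expression as PERCPU_SIZE_SHIFT, parameterized over n (n > 1) */
static unsigned int size_shift(unsigned long n)
{
	return BITS_PER_LONG - __builtin_clzl(n - 1);
}

int main(void)
{
	assert(size_shift(4096) == 12);	/* already a power of two */
	assert(size_shift(4097) == 13);	/* anything above rounds up */
	assert(size_shift(8192) == 13);	/* the formerly hard-coded value */
	printf("1 << %u\n", size_shift(5000));	/* prints "1 << 13" */
	return 0;
}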
@@ -43,10 +40,12 @@ struct per_cpu {
        unsigned int cpu_id;
        unsigned int virt_id;
 
-       /* Other CPUs can insert sgis into the pending array */
-       spinlock_t gic_lock;
-       struct pending_irq *pending_irqs;
-       struct pending_irq *first_pending;
+       /* synchronizes parallel insertions of SGIs into the pending ring */
+       spinlock_t pending_irqs_lock;
+       u16 pending_irqs[MAX_PENDING_IRQS];
+       unsigned int pending_irqs_head;
+       /* removal from the ring happens locklessly, thus tail is volatile */
+       volatile unsigned int pending_irqs_tail;
        /* Only GICv3: redistributor base */
        void *gicr_base;
 
@@ -63,6 +62,7 @@ struct per_cpu {
        bool flush_vcpu_caches;
        int shutdown_state;
        bool shutdown;
+       unsigned long mpidr;
        bool failed;
 } __attribute__((aligned(PAGE_SIZE)));
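
Note: the struct also gains an mpidr field, caching the CPU's Multiprocessor Affinity Register value so the affinity bits need not be re-read from the coprocessor on every use. Purely as an illustration, a raw MPIDR read on AArch32 looks like this (jailhouse routes such accesses through its asm/sysregs.h helpers; the mrc below is just a sketch):

/* illustrative raw read of MPIDR (CP15 c0, c0, 5) on AArch32 */
static inline unsigned long read_mpidr(void)
{
	unsigned long mpidr;

	asm volatile("mrc p15, 0, %0, c0, c0, 5" : "=r" (mpidr));
	return mpidr;
}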
 
@@ -93,7 +93,7 @@ static inline struct per_cpu *per_cpu(unsigned int cpu)
 static inline struct registers *guest_regs(struct per_cpu *cpu_data)
 {
        /* Assumes that the trap handler is entered with an empty stack */
-       return (struct registers *)(cpu_data->stack + PERCPU_STACK_END
+       return (struct registers *)(cpu_data->stack + sizeof(cpu_data->stack)
                        - sizeof(struct registers));
 }
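
Note: with PERCPU_STACK_END gone, guest_regs() derives the stack top from sizeof(cpu_data->stack) directly, so the computation stays correct even if the stack array is ever resized. The invariant it relies on is unchanged: the trap handler enters on an empty per-CPU stack and pushes the full struct registers frame first, leaving it at the very top. Assuming PAGE_SIZE == 4096, the layout looks like this (offsets illustrative):

/*
 * offsets within struct per_cpu, assuming PAGE_SIZE == 4096:
 *
 *   +0x0000                              stack[0]            <- stack base
 *            ...
 *   +0x1000 - sizeof(struct registers)   saved register frame
 *                                          = guest_regs(cpu_data)
 *   +0x1000                              stack top at trap entry
 *   +0x1000                              linux_sp and later fields
 */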
 
@@ -103,19 +103,6 @@ static inline unsigned int arm_cpu_phys2virt(unsigned int cpu_id)
 }
 
 unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id);
-
-/* Validate defines */
-#define CHECK_ASSUMPTION(assume)       ((void)sizeof(char[1 - 2*!(assume)]))
-
-static inline void __check_assumptions(void)
-{
-       struct per_cpu cpu_data;
-
-       CHECK_ASSUMPTION(sizeof(struct per_cpu) == (1 << PERCPU_SIZE_SHIFT));
-       CHECK_ASSUMPTION(sizeof(cpu_data.stack) == PERCPU_STACK_END);
-       CHECK_ASSUMPTION(__builtin_offsetof(struct per_cpu, linux_sp) ==
-                        PERCPU_LINUX_SP);
-}
 #endif /* !__ASSEMBLY__ */
 
 #endif /* !_JAILHOUSE_ASM_PERCPU_H */
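
Note: the dropped __check_assumptions() block existed only to keep the old hand-maintained constants honest. CHECK_ASSUMPTION() evaluates sizeof(char[1 - 2*!(assume)]), which degenerates to char[-1] and thus a compile error whenever the assumption is false. With PERCPU_SIZE_SHIFT now derived from sizeof(struct per_cpu) and the stack top taken from sizeof(cpu_data->stack), those constants can no longer drift out of sync, so the checks became redundant. For reference, the same trick next to its C11 spelling (a sketch, not part of the patch):

/* classic pre-C11 compile-time assertion, as used by the removed code */
#define CHECK_ASSUMPTION(assume)	((void)sizeof(char[1 - 2*!(assume)]))

static inline void check_assumptions_demo(void)
{
	CHECK_ASSUMPTION(sizeof(long) >= 4);	/* true: compiles */
	/* CHECK_ASSUMPTION(sizeof(long) == 2);	   false: char[-1] error */

	/* C11 equivalent, with a proper diagnostic message */
	_Static_assert(sizeof(long) >= 4, "long must be at least 32 bits");
}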