#define NUM_ENTRY_REGS 13
-/* Keep in sync with struct per_cpu! */
-#define PERCPU_SIZE_SHIFT 13
-#define PERCPU_STACK_END PAGE_SIZE
-#define PERCPU_LINUX_SP PERCPU_STACK_END
-
#ifndef __ASSEMBLY__
-#include <asm/cell.h>
+#include <jailhouse/cell.h>
+#include <asm/irqchip.h>
#include <asm/psci.h>
#include <asm/spinlock.h>
#include <asm/sysregs.h>
-struct pending_irq;
+/* Round up sizeof(struct per_cpu) to the next power of two. */
+#define PERCPU_SIZE_SHIFT \
+ (BITS_PER_LONG - __builtin_clzl(sizeof(struct per_cpu) - 1))
struct per_cpu {
- /* Keep these two in sync with defines above! */
u8 stack[PAGE_SIZE];
unsigned long linux_sp;
unsigned long linux_ret;
unsigned int cpu_id;
unsigned int virt_id;
- /* Other CPUs can insert sgis into the pending array */
- spinlock_t gic_lock;
- struct pending_irq *pending_irqs;
- struct pending_irq *first_pending;
+ /* synchronizes parallel insertions of SGIs into the pending ring */
+ spinlock_t pending_irqs_lock;
+ u16 pending_irqs[MAX_PENDING_IRQS];
+ unsigned int pending_irqs_head;
+	/* removal from the ring happens locklessly, thus tail is volatile */
+ volatile unsigned int pending_irqs_tail;
/* Only GICv3: redistributor base */
void *gicr_base;
/* The mbox will be accessed with a ldrd, which requires alignment */
__attribute__((aligned(8))) struct psci_mbox psci_mbox;
+ struct psci_mbox guest_mbox;
bool flush_vcpu_caches;
int shutdown_state;
+ bool shutdown;
+ unsigned long mpidr;
bool failed;
} __attribute__((aligned(PAGE_SIZE)));
static inline struct registers *guest_regs(struct per_cpu *cpu_data)
{
/* Assumes that the trap handler is entered with an empty stack */
- return (struct registers *)(cpu_data->stack + PERCPU_STACK_END
+ return (struct registers *)(cpu_data->stack + sizeof(cpu_data->stack)
- sizeof(struct registers));
}
}
unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id);
-
-/* Validate defines */
-#define CHECK_ASSUMPTION(assume) ((void)sizeof(char[1 - 2*!(assume)]))
-
-static inline void __check_assumptions(void)
-{
- struct per_cpu cpu_data;
-
- CHECK_ASSUMPTION(sizeof(struct per_cpu) == (1 << PERCPU_SIZE_SHIFT));
- CHECK_ASSUMPTION(sizeof(cpu_data.stack) == PERCPU_STACK_END);
- CHECK_ASSUMPTION(__builtin_offsetof(struct per_cpu, linux_sp) ==
- PERCPU_LINUX_SP);
-}
#endif /* !__ASSEMBLY__ */
#endif /* !_JAILHOUSE_ASM_PERCPU_H */