/*
 * Source: rtime.felk.cvut.cz git mirror — jailhouse.git,
 * hypervisor/arch/arm/include/asm/percpu.h
 * Commit: "arm: Convert software queue of pending interrupts into a ring"
 */
1 /*
2  * Jailhouse, a Linux-based partitioning hypervisor
3  *
4  * Copyright (c) Siemens AG, 2013
5  *
6  * Authors:
7  *  Jan Kiszka <jan.kiszka@siemens.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  */
12
13 #ifndef _JAILHOUSE_ASM_PERCPU_H
14 #define _JAILHOUSE_ASM_PERCPU_H
15
16 #include <jailhouse/types.h>
17 #include <asm/paging.h>
18
19 #define NUM_ENTRY_REGS                  13
20
21 #ifndef __ASSEMBLY__
22
23 #include <jailhouse/cell.h>
24 #include <asm/irqchip.h>
25 #include <asm/psci.h>
26 #include <asm/spinlock.h>
27 #include <asm/sysregs.h>
28
/*
 * Round up sizeof(struct per_cpu) to the next power of two, expressed as a
 * shift count: BITS_PER_LONG - clzl(size - 1) equals ceil(log2(size)).
 * The macro body is only expanded at its use sites, so the forward
 * reference to struct per_cpu (defined below) is fine.
 * NOTE(review): __builtin_clzl(0) is undefined, so this requires
 * sizeof(struct per_cpu) >= 2 — always true here (the struct embeds a
 * full page of stack).
 */
#define PERCPU_SIZE_SHIFT \
	(BITS_PER_LONG - __builtin_clzl(sizeof(struct per_cpu) - 1))
32
/*
 * Per-CPU state of the hypervisor. One instance per physical CPU, located
 * in __page_pool at (cpu << PERCPU_SIZE_SHIFT) — see per_cpu() below.
 *
 * NOTE(review): field order matters. guest_regs() assumes `stack` is the
 * first member, and the linux_* fields are presumably accessed at fixed
 * offsets from entry/exit assembly — confirm against the entry code before
 * reordering anything here.
 */
struct per_cpu {
	/* Hypervisor stack for this CPU; the trap handler's register frame
	 * lives at its top end (see guest_regs()). */
	u8 stack[PAGE_SIZE];
	/* Saved Linux context, restored when returning to the root cell. */
	unsigned long linux_sp;
	unsigned long linux_ret;
	unsigned long linux_flags;
	unsigned long linux_reg[NUM_ENTRY_REGS];

	/* Physical CPU id (index into the per-CPU array, see per_cpu()). */
	unsigned int cpu_id;
	/* CPU id as seen by the cell — presumably dense per cell; see
	 * arm_cpu_phys2virt()/arm_cpu_virt2phys(). */
	unsigned int virt_id;

	/* synchronizes parallel insertions of SGIs into the pending ring */
	spinlock_t pending_irqs_lock;
	/* Ring buffer of pending interrupt numbers. */
	u16 pending_irqs[MAX_PENDING_IRQS];
	/* NOTE(review): head appears to be the insertion index (guarded by
	 * pending_irqs_lock per the comment above) — confirm in irqchip code. */
	unsigned int pending_irqs_head;
	/* removal from the ring happens lockless, thus tail is volatile */
	volatile unsigned int pending_irqs_tail;
	/* Only GICv3: redistributor base */
	void *gicr_base;

	/* Cell this CPU currently belongs to. */
	struct cell *cell;

	/* Per-CPU statistic counters exported to the driver. */
	u32 stats[JAILHOUSE_NUM_CPU_STATS];

	/* Set once this CPU finished hypervisor initialization. */
	bool initialized;

	/* The mbox will be accessed with a ldrd, which requires alignment */
	__attribute__((aligned(8))) struct psci_mbox psci_mbox;
	struct psci_mbox guest_mbox;

	/* Request flag: flush this vCPU's caches on next VM entry. */
	bool flush_vcpu_caches;
	/* Shutdown handshake state — semantics defined by the control code. */
	int shutdown_state;
	bool shutdown;
	/* Set when hypervisor initialization failed on this CPU. */
	bool failed;
} __attribute__((aligned(PAGE_SIZE)));
67
68 static inline struct per_cpu *this_cpu_data(void)
69 {
70         struct per_cpu *cpu_data;
71
72         arm_read_sysreg(TPIDR_EL2, cpu_data);
73         return cpu_data;
74 }
75
/*
 * Generate an inline accessor this_<field>() that returns the named field
 * of the current CPU's per_cpu structure, obtained via this_cpu_data().
 * typeof on a null per_cpu pointer is only used for the return type; the
 * pointer is never dereferenced at runtime.
 */
#define DEFINE_PER_CPU_ACCESSOR(field)                                  \
static inline typeof(((struct per_cpu *)0)->field) this_##field(void)   \
{                                                                       \
	return this_cpu_data()->field;                                  \
}

DEFINE_PER_CPU_ACCESSOR(cpu_id)	/* this_cpu_id(): physical id of this CPU */
DEFINE_PER_CPU_ACCESSOR(cell)	/* this_cell(): cell this CPU belongs to */
84
85 static inline struct per_cpu *per_cpu(unsigned int cpu)
86 {
87         extern u8 __page_pool[];
88
89         return (struct per_cpu *)(__page_pool + (cpu << PERCPU_SIZE_SHIFT));
90 }
91
92 static inline struct registers *guest_regs(struct per_cpu *cpu_data)
93 {
94         /* Assumes that the trap handler is entered with an empty stack */
95         return (struct registers *)(cpu_data->stack + sizeof(cpu_data->stack)
96                         - sizeof(struct registers));
97 }
98
99 static inline unsigned int arm_cpu_phys2virt(unsigned int cpu_id)
100 {
101         return per_cpu(cpu_id)->virt_id;
102 }
103
104 unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id);
105 #endif /* !__ASSEMBLY__ */
106
107 #endif /* !_JAILHOUSE_ASM_PERCPU_H */