#ifndef _JAILHOUSE_ASM_IRQCHIP_H
#define _JAILHOUSE_ASM_IRQCHIP_H
+/*
+ * Since there is no finer-grained allocation than page-alloc for the moment,
+ * and it is very complicated to predict the total size needed at
+ * initialisation, each cpu is allocated one page of pending irqs.
+ * This allows for 256 pending IRQs, which should be sufficient.
+ */
+#define MAX_PENDING_IRQS (PAGE_SIZE / sizeof(struct pending_irq))
+
#include <asm/percpu.h>
#ifndef __ASSEMBLY__
int (*send_sgi)(struct sgi *sgi);
void (*handle_irq)(struct per_cpu *cpu_data);
+ int (*inject_irq)(struct per_cpu *cpu_data, struct pending_irq *irq);
};
/*
 * Virtual interrupts waiting to be injected.
 * One page of these is allocated per CPU (see MAX_PENDING_IRQS); free slots
 * are zeroed, used slots are linked into a per-CPU list headed by
 * cpu_data->first_pending.
 */
struct pending_irq {
	/* Interrupt id presented to the cell (virtual id) */
	u32 virt_id;

	/* NOTE(review): presumably the guest-visible priority — confirm
	 * against the inject implementation. */
	u8 priority;
	/* Non-zero when this virtual irq is backed by a physical one */
	u8 hw;
	union {
		/* Physical id, when hw is 1 */
		u16 irq;
		struct {
			/* GICv2 needs cpuid for SGIs */
			u16 cpuid : 15;
			/* EOI generates a maintenance irq */
			u16 maintenance : 1;
		} sgi __attribute__((packed));
	} type;

	/*
	 * Doubly-linked list of pending interrupts. Must remain the LAST two
	 * members: irqchip_insert_pending copies everything up to, but not
	 * including, these pointers.
	 */
	struct pending_irq *next;
	struct pending_irq *prev;
} __attribute__((packed));
+
int irqchip_init(void);
int irqchip_cpu_init(struct per_cpu *cpu_data);
int irqchip_send_sgi(struct sgi *sgi);
void irqchip_handle_irq(struct per_cpu *cpu_data);
+int irqchip_inject_pending(struct per_cpu *cpu_data);
+int irqchip_insert_pending(struct per_cpu *cpu_data, struct pending_irq *irq);
+int irqchip_remove_pending(struct per_cpu *cpu_data, struct pending_irq *irq);
+
#endif /* __ASSEMBLY__ */
#endif /* _JAILHOUSE_ASM_IRQCHIP_H */
static bool irqchip_is_init;
static struct irqchip_ops irqchip;
+static int irqchip_init_pending(struct per_cpu *cpu_data)
+{
+ struct pending_irq *pend_array = page_alloc(&mem_pool, 1);
+
+ if (pend_array == NULL)
+ return -ENOMEM;
+ memset(pend_array, 0, PAGE_SIZE);
+
+ cpu_data->pending_irqs = pend_array;
+ cpu_data->first_pending = NULL;
+
+ return 0;
+}
+
+/*
+ * Find the first available pending struct for insertion. The `prev' pointer is
+ * set to the previous pending interrupt, if any, to help inserting the new one
+ * into the list.
+ * Returns NULL when no slot is available
+ */
+static struct pending_irq* get_pending_slot(struct per_cpu *cpu_data,
+ struct pending_irq **prev)
+{
+ u32 i, pending_idx;
+ struct pending_irq *pending = cpu_data->first_pending;
+
+ *prev = NULL;
+
+ for (i = 0; i < MAX_PENDING_IRQS; i++) {
+ pending_idx = pending - cpu_data->pending_irqs;
+ if (pending == NULL || i < pending_idx)
+ return cpu_data->pending_irqs + i;
+
+ *prev = pending;
+ pending = pending->next;
+ }
+
+ return NULL;
+}
+
/*
 * Copy `irq' into a free per-CPU pending slot and link it into the pending
 * list, keeping the list sorted by slot index (relied upon by
 * get_pending_slot).
 * Returns 0 on success, -ENOMEM when all MAX_PENDING_IRQS slots are in use.
 */
int irqchip_insert_pending(struct per_cpu *cpu_data, struct pending_irq *irq)
{
	struct pending_irq *prev = NULL;
	struct pending_irq *slot;

	spin_lock(&cpu_data->gic_lock);

	slot = get_pending_slot(cpu_data, &prev);
	if (slot == NULL) {
		spin_unlock(&cpu_data->gic_lock);
		return -ENOMEM;
	}

	/*
	 * Don't override the pointers yet, they may be read by the injection
	 * loop. Odds are astronomically low, but hey.
	 */
	/*
	 * NOTE(review): copies every payload field but not next/prev —
	 * relies on those being the last two members of struct pending_irq;
	 * offsetof(struct pending_irq, next) would be less fragile.
	 */
	memcpy(slot, irq, sizeof(struct pending_irq) - 2 * sizeof(void *));
	slot->prev = prev;
	if (prev) {
		/* Insert after `prev', in the middle or at the tail. */
		slot->next = prev->next;
		prev->next = slot;
	} else {
		/* Empty list or lowest index: insert at the head. */
		slot->next = cpu_data->first_pending;
		cpu_data->first_pending = slot;
	}
	if (slot->next)
		slot->next->prev = slot;

	spin_unlock(&cpu_data->gic_lock);

	return 0;
}
+
+/*
+ * Only executed by `irqchip_inject_pending' on a CPU to inject its own stuff.
+ */
+int irqchip_remove_pending(struct per_cpu *cpu_data, struct pending_irq *irq)
+{
+ spin_lock(&cpu_data->gic_lock);
+
+ if (cpu_data->first_pending == irq)
+ cpu_data->first_pending = irq->next;
+ if (irq->prev)
+ irq->prev->next = irq->next;
+ if (irq->next)
+ irq->next->prev = irq->prev;
+
+ spin_unlock(&cpu_data->gic_lock);
+
+ return 0;
+}
+
/*
 * Walk this CPU's pending list and hand each entry to the irqchip's
 * inject_irq callback. Stops early when the hardware list registers are
 * full (-EBUSY); any other result — success or failure — drops the entry
 * from the list.
 * Always returns 0; entries left behind on -EBUSY stay queued for a later
 * call.
 */
int irqchip_inject_pending(struct per_cpu *cpu_data)
{
	int err;
	/*
	 * NOTE(review): the list head and `pending->next' below are read
	 * without holding gic_lock; the comments inside rely on removal
	 * leaving the node's own pointers intact.
	 */
	struct pending_irq *pending = cpu_data->first_pending;

	while (pending != NULL) {
		err = irqchip.inject_irq(cpu_data, pending);
		if (err == -EBUSY)
			/* The list registers are full. */
			break;
		else
			/*
			 * Removal only changes the pointers, but does not
			 * deallocate anything.
			 * Concurrent accesses are avoided with the spinlock,
			 * but the `next' pointer of the current pending object
			 * may be rewritten by an external insert before or
			 * after this removal, which isn't an issue.
			 */
			irqchip_remove_pending(cpu_data, pending);

		pending = pending->next;
	}

	return 0;
}
+
void irqchip_handle_irq(struct per_cpu *cpu_data)
{
irqchip.handle_irq(cpu_data);
int irqchip_cpu_init(struct per_cpu *cpu_data)
{
+ int err;
+
+ err = irqchip_init_pending(cpu_data);
+ if (err)
+ return err;
+
if (irqchip.cpu_init)
return irqchip.cpu_init(cpu_data);