else
reset_address = 0;
+ /* Set the new MPIDR */
+ arm_write_sysreg(VMPIDR_EL2, cpu_data->virt_id | MPIDR_MP_BIT);
+
/* Restore an empty context */
arch_reset_el1(regs);
}
}
/*
 * Translate a cell-local virtual CPU id to its physical CPU id by
 * scanning the cell's CPU set for a matching per-CPU virt_id.
 */
+unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cell->cpu_set) {
+		if (per_cpu(cpu)->virt_id == virt_id)
+			return cpu;
+	}
+
/* Not found: -1 wraps to UINT_MAX, serving as the "invalid CPU" sentinel */
+	return -1;
+}
+
/*
 * Arch hook for cell creation: initialize the cell's stage-2 MMU
 * context, then assign each CPU in the cell a dense virtual id
 * (0..N-1) determined by its position in the cell's cpu_set.
 * Returns 0 on success or the error from arch_mmu_cell_init().
 */
int arch_cell_create(struct cell *cell)
{
	int err;
+	unsigned int cpu;
+	unsigned int virt_id = 0;
	err = arch_mmu_cell_init(cell);
	if (err)
		return err;
+	/*
+	 * Generate a virtual CPU id according to the position of each CPU in
+	 * the cell set
+	 */
+	for_each_cpu(cpu, cell->cpu_set) {
+		per_cpu(cpu)->virt_id = virt_id;
+		virt_id++;
+	}
/*
 * NOTE(review): assumes the cell's cpu_set is non-empty; with zero CPUs
 * virt_id - 1 would wrap to UINT_MAX — presumably config validation
 * elsewhere guarantees at least one CPU per cell; confirm.
 */
+	cell->arch.last_virt_id = virt_id - 1;
+
	return 0;
}
/*
 * Arch hook for cell destruction: tear down the cell's stage-2 MMU
 * context, then return each CPU to the root cell, restoring the
 * identity mapping virt_id == cpu_id before resetting the CPU.
 */
void arch_cell_destroy(struct cell *cell)
{
	unsigned int cpu;
+	struct per_cpu *percpu;
	arch_mmu_cell_destroy(cell);
-	for_each_cpu(cpu, cell->cpu_set)
+	for_each_cpu(cpu, cell->cpu_set) {
+		percpu = per_cpu(cpu);
+		/* Re-assign the physical IDs for the root cell */
+		percpu->virt_id = percpu->cpu_id;
		arch_reset_cpu(cpu);
+	}
}
/* Note: only supports synchronous flushing as triggered by config_commit! */
{
struct sgi sgi;
struct cell *cell = cpu_data->cell;
- unsigned int cpu;
+ unsigned int cpu, virt_id;
unsigned long this_cpu = cpu_data->cpu_id;
unsigned long routing_mode = !!(sgir & ICC_SGIR_ROUTING_BIT);
unsigned long targets = sgir & ICC_SGIR_TARGET_MASK;
sgi.id = SGI_INJECT;
for_each_cpu_except(cpu, cell->cpu_set, this_cpu) {
- if (routing_mode == 0 && !test_bit(cpu, &targets))
+ virt_id = arm_cpu_phys2virt(cpu);
+
+ if (routing_mode == 0 && !test_bit(virt_id, &targets))
continue;
else if (routing_mode == 1 && cpu == this_cpu)
continue;
spinlock_t caches_lock;
bool needs_flush;
+
+ unsigned int last_virt_id;
};
struct cell {
unsigned long linux_reg[NUM_ENTRY_REGS];
unsigned int cpu_id;
+ unsigned int virt_id;
/* Other CPUs can insert sgis into the pending array */
spinlock_t gic_lock;
- sizeof(struct registers));
}
/*
 * Look up the cell-local virtual id of a physical CPU. O(1), header
 * inline; the reverse mapping (arm_cpu_virt2phys) requires a scan.
 */
+static inline unsigned int arm_cpu_phys2virt(unsigned int cpu_id)
+{
+	return per_cpu(cpu_id)->virt_id;
+}
+
+unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id);
+
/* Validate defines */
#define CHECK_ASSUMPTION(assume) ((void)sizeof(char[1 - 2*!(assume)]))
| PSR_32_BIT)
#define MPIDR_CPUID_MASK 0x00ffffff
+#define MPIDR_MP_BIT (1 << 31)
+#define MPIDR_U_BIT (1 << 30)
#define PFR1_VIRT(pfr) ((pfr) >> 12 & 0xf)
| SCTLR_UWXN_BIT | SCTLR_FI_BIT | SCTLR_EE_BIT \
| SCTLR_TRE_BIT | SCTLR_AFE_BIT | SCTLR_TE_BIT)
-
#define HCR_TRVM_BIT (1 << 30)
#define HCR_TVM_BIT (1 << 26)
#define HCR_HDC_BIT (1 << 29)
#define CSSIDR_EL1 SYSREG_32(1, c0, c0, 0)
#define CLIDR_EL1 SYSREG_32(1, c0, c0, 1)
#define CSSELR_EL1 SYSREG_32(2, c0, c0, 0)
+#define VMPIDR_EL2 SYSREG_32(4, c0, c0, 5)
#define SCTLR_EL2 SYSREG_32(4, c1, c0, 0)
#define ESR_EL2 SYSREG_32(4, c5, c2, 0)
#define TPIDR_EL2 SYSREG_32(4, c13, c0, 2)
unsigned long hcr = HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT;
cpu_data->psci_mbox.entry = 0;
+ cpu_data->virt_id = cpu_data->cpu_id;
/*
* Copy the registers to restore from the linux stack here, because we