/* For SGIs or PPIs, let the caller do the mmio access */
if (!is_spi(first_irq)) {
- arm_mmio_perform_access((unsigned long)gicd_base, mmio);
+ mmio_perform_access(gicd_base, mmio);
return MMIO_HANDLED;
}
if (!mmio->is_write) {
/* Restrict the read value */
- arm_mmio_perform_access((unsigned long)gicd_base, mmio);
+ mmio_perform_access(gicd_base, mmio);
mmio->value &= access_mask;
return MMIO_HANDLED;
}
spin_lock(&dist_lock);
mmio->is_write = false;
- arm_mmio_perform_access((unsigned long)gicd_base, mmio);
+ mmio_perform_access(gicd_base, mmio);
mmio->is_write = true;
/* Clear 0 bits */
mmio->value &= ~(access_mask & ~access_val);
mmio->value |= access_val;
- arm_mmio_perform_access((unsigned long)gicd_base, mmio);
+ mmio_perform_access(gicd_base, mmio);
spin_unlock(&dist_lock);
} else {
mmio->value &= access_mask;
- arm_mmio_perform_access((unsigned long)gicd_base, mmio);
+ mmio_perform_access(gicd_base, mmio);
}
return MMIO_HANDLED;
}
printk("Attempt to route IRQ%d outside of cell\n", irq);
return MMIO_ERROR;
}
- arm_mmio_perform_access((unsigned long)gicd_base, mmio);
+ mmio_perform_access(gicd_base, mmio);
} else {
cpu = mmio_read32(gicd_base + GICD_IROUTER + 8 * irq);
mmio->value = arm_cpu_phys2virt(cpu);
* fill its CPU interface map.
*/
if (!is_spi(reg)) {
- arm_mmio_perform_access((unsigned long)gicd_base, mmio);
+ mmio_perform_access(gicd_base, mmio);
return MMIO_HANDLED;
}
/* Combine with external SPIs */
mmio->value |= (itargetsr & ~access_mask);
/* And do the access */
- arm_mmio_perform_access((unsigned long)gicd_base, mmio);
+ mmio_perform_access(gicd_base, mmio);
spin_unlock(&dist_lock);
} else {
- arm_mmio_perform_access((unsigned long)gicd_base, mmio);
+ mmio_perform_access(gicd_base, mmio);
mmio->value &= access_mask;
}
case REG_RANGE(GICD_CIDR0, 4, 4):
/* Allow read access, ignore write */
if (!mmio->is_write)
- arm_mmio_perform_access((unsigned long)gicd_base, mmio);
+ mmio_perform_access(gicd_base, mmio);
/* fall through */
default:
/* Ignore access. */
arm_write_sysreg(DFAR, addr);
}
-void arm_mmio_perform_access(unsigned long base, struct mmio_access *mmio)
-{
- void *addr = (void *)(base + mmio->address);
-
- if (mmio->is_write)
- switch (mmio->size) {
- case 1:
- mmio_write8(addr, mmio->value);
- return;
- case 2:
- mmio_write16(addr, mmio->value);
- return;
- case 4:
- mmio_write32(addr, mmio->value);
- return;
- }
- else
- switch (mmio->size) {
- case 1:
- mmio->value = mmio_read8(addr);
- return;
- case 2:
- mmio->value = mmio_read16(addr);
- return;
- case 4:
- mmio->value = mmio_read32(addr);
- return;
- }
-}
-
int arch_handle_dabt(struct trap_context *ctx)
{
enum mmio_result mmio_result;
(sizeof(struct mmio_region_location) +
sizeof(struct mmio_region_handler))));
}
+
+/*
+ * Perform a single MMIO access described by @mmio against the device
+ * region mapped at @base.
+ *
+ * @base:	virtual base address of the MMIO region
+ * @mmio:	access descriptor: ->address is the byte offset from @base,
+ *		->size the access width in bytes (1, 2, 4, or — on 64-bit
+ *		builds — 8), ->is_write selects the direction, and ->value
+ *		carries the data: it is written out for writes and updated
+ *		with the result for reads.
+ *
+ * Unsupported sizes are silently ignored; a read with an unsupported
+ * size leaves ->value unchanged (same behavior as the function this
+ * replaces).
+ */
+void mmio_perform_access(void *base, struct mmio_access *mmio)
+{
+	/* void-pointer arithmetic is a GCC extension (acts like char *) */
+	void *addr = base + mmio->address;
+
+	if (mmio->is_write)
+		switch (mmio->size) {
+		case 1:
+			mmio_write8(addr, mmio->value);
+			break;
+		case 2:
+			mmio_write16(addr, mmio->value);
+			break;
+		case 4:
+			mmio_write32(addr, mmio->value);
+			break;
+#if BITS_PER_LONG == 64
+		/* 8-byte accesses only exist on 64-bit targets */
+		case 8:
+			mmio_write64(addr, mmio->value);
+			break;
+#endif
+		}
+	else
+		switch (mmio->size) {
+		case 1:
+			mmio->value = mmio_read8(addr);
+			break;
+		case 2:
+			mmio->value = mmio_read16(addr);
+			break;
+		case 4:
+			mmio->value = mmio_read32(addr);
+			break;
+#if BITS_PER_LONG == 64
+		/* 8-byte accesses only exist on 64-bit targets */
+		case 8:
+			mmio->value = mmio_read64(addr);
+			break;
+#endif
+		}
+}