Arguments: 1. Logical ID of CPU to be queried
2. Information type:
0 - CPU state
+ 1000 - Total number of VM exits
+ 1001 - VM exits due to MMIO access
+ 1002 - VM exits due to PIO access
+ 1003 - VM exits due to IPI submissions
+ 1004 - VM exits due to management events
+ 1005 - VM exits due to hypercalls
+
+Statistic counters are reset when a CPU is assigned to a different cell. The
+total number of VM exits may exceed the sum of all specific VM exit counters
+because some exit reasons have no dedicated counter.
Return code: Requested value (>=0) or negative error code
#define JAILHOUSE_CALL_ARG1 "r1"
#define JAILHOUSE_CALL_ARG2 "r2"
+/* CPU statistics */
+/* This architecture defines no counters beyond the generic set. */
+#define JAILHOUSE_NUM_CPU_STATS JAILHOUSE_GENERIC_CPU_STATS
+
#ifndef __asmeq
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
#endif
// u32 apic_id;
struct cell *cell;
+	/* per-CPU event counters, indexed by JAILHOUSE_CPU_STAT_*;
+	 * cleared when the CPU is (re)assigned to a cell */
+	u32 stats[JAILHOUSE_NUM_CPU_STATS];
+
unsigned long linux_reg[NUM_ENTRY_REGS];
// unsigned long linux_ip;
bool initialized;
#define JAILHOUSE_CALL_ARG1 "D" (arg1)
#define JAILHOUSE_CALL_ARG2 "S" (arg2)
+/* CPU statistics */
+/* Architecture-specific counters continue after the generic set. The
+ * replacement lists are parenthesized so the macros stay correct inside
+ * arbitrary expressions (operator precedence, CERT PRE02-C). */
+#define JAILHOUSE_CPU_STAT_VMEXITS_PIO JAILHOUSE_GENERIC_CPU_STATS
+#define JAILHOUSE_CPU_STAT_VMEXITS_XAPIC (JAILHOUSE_GENERIC_CPU_STATS + 1)
+#define JAILHOUSE_CPU_STAT_VMEXITS_CR (JAILHOUSE_GENERIC_CPU_STATS + 2)
+#define JAILHOUSE_CPU_STAT_VMEXITS_MSR (JAILHOUSE_GENERIC_CPU_STATS + 3)
+#define JAILHOUSE_CPU_STAT_VMEXITS_CPUID (JAILHOUSE_GENERIC_CPU_STATS + 4)
+#define JAILHOUSE_CPU_STAT_VMEXITS_XSETBV (JAILHOUSE_GENERIC_CPU_STATS + 5)
+#define JAILHOUSE_NUM_CPU_STATS (JAILHOUSE_GENERIC_CPU_STATS + 6)
+
#ifndef __ASSEMBLY__
static inline __u32 jailhouse_call(__u32 num)
#include <asm/paging.h>
#include <asm/processor.h>
+#include <jailhouse/hypercall.h>
+
#define NUM_ENTRY_REGS 6
/* Keep in sync with struct per_cpu! */
u32 apic_id;
struct cell *cell;
+	/* per-CPU event counters, indexed by JAILHOUSE_CPU_STAT_*;
+	 * cleared when the CPU is (re)assigned to a cell */
+	u32 stats[JAILHOUSE_NUM_CPU_STATS];
+
struct desc_table_reg linux_gdtr;
struct desc_table_reg linux_idtr;
unsigned long linux_reg[NUM_ENTRY_REGS];
u32 reason = vmcs_read32(VM_EXIT_REASON);
int sipi_vector;
+ cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
+
switch (reason) {
case EXIT_REASON_EXCEPTION_NMI:
asm volatile("int %0" : : "i" (NMI_VECTOR));
/* fall through */
case EXIT_REASON_PREEMPTION_TIMER:
+ cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
vmx_disable_preemption_timer();
sipi_vector = x86_handle_events(cpu_data);
if (sipi_vector >= 0) {
vmx_handle_hypercall(guest_regs, cpu_data);
return;
case EXIT_REASON_CR_ACCESS:
+ cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
if (vmx_handle_cr(guest_regs, cpu_data))
return;
break;
case EXIT_REASON_MSR_READ:
+ cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
if (guest_regs->rcx >= MSR_X2APIC_BASE &&
guest_regs->rcx <= MSR_X2APIC_END) {
vmx_skip_emulated_instruction(X86_INST_LEN_RDMSR);
guest_regs->rcx);
break;
case EXIT_REASON_MSR_WRITE:
+ cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
if (guest_regs->rcx == MSR_X2APIC_ICR) {
if (!apic_handle_icr_write(cpu_data, guest_regs->rax,
guest_regs->rdx))
guest_regs->rcx);
break;
case EXIT_REASON_APIC_ACCESS:
+ cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
if (vmx_handle_apic_access(guest_regs, cpu_data))
return;
break;
case EXIT_REASON_XSETBV:
+ cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XSETBV]++;
if (guest_regs->rax & X86_XCR0_FP &&
(guest_regs->rax & ~cpuid_eax(0x0d)) == 0 &&
guest_regs->rcx == 0 && guest_regs->rdx == 0) {
guest_regs->rdx, guest_regs->rax);
break;
case EXIT_REASON_IO_INSTRUCTION:
+ cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
if (vmx_handle_io_access(guest_regs, cpu_data))
return;
break;
case EXIT_REASON_EPT_VIOLATION:
+ cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
if (vmx_handle_ept_violation(guest_regs, cpu_data))
return;
break;
#include <jailhouse/paging.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
+#include <jailhouse/utils.h>
#include <asm/bitops.h>
#include <asm/spinlock.h>
set_bit(cpu, root_cell.cpu_set->bitmap);
per_cpu(cpu)->cell = &root_cell;
per_cpu(cpu)->failed = false;
+ memset(per_cpu(cpu)->stats, 0, sizeof(per_cpu(cpu)->stats));
}
for (n = 0; n < cell->config->num_memory_regions; n++, mem++) {
clear_bit(cpu, root_cell.cpu_set->bitmap);
per_cpu(cpu)->cell = cell;
+ memset(per_cpu(cpu)->stats, 0, sizeof(per_cpu(cpu)->stats));
}
/*
if (type == JAILHOUSE_CPU_INFO_STATE) {
return per_cpu(cpu_id)->failed ? JAILHOUSE_CPU_FAILED :
JAILHOUSE_CPU_RUNNING;
+ } else if (type >= JAILHOUSE_CPU_INFO_STAT_BASE &&
+ type - JAILHOUSE_CPU_INFO_STAT_BASE < JAILHOUSE_NUM_CPU_STATS) {
+ type -= JAILHOUSE_CPU_INFO_STAT_BASE;
+ return per_cpu(cpu_id)->stats[type] & BIT_MASK(30, 0);
} else
return -EINVAL;
}
long hypercall(struct per_cpu *cpu_data, unsigned long code,
unsigned long arg1, unsigned long arg2)
{
+ cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_HYPERCALL]++;
+
switch (code) {
case JAILHOUSE_HC_DISABLE:
return shutdown(cpu_data);
/* Hypervisor information type */
#define JAILHOUSE_CPU_INFO_STATE 0
+/* info types >= this base select stats[type - base] of the queried CPU */
+#define JAILHOUSE_CPU_INFO_STAT_BASE 1000
/* CPU state */
#define JAILHOUSE_CPU_RUNNING 0
#define JAILHOUSE_CPU_FAILED 2 /* terminal state */
+/* CPU statistics, generic part: indices into the per-CPU stats array,
+ * queried via info type JAILHOUSE_CPU_INFO_STAT_BASE + index */
+#define JAILHOUSE_CPU_STAT_VMEXITS_TOTAL 0
+#define JAILHOUSE_CPU_STAT_VMEXITS_MMIO 1
+#define JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT 2
+#define JAILHOUSE_CPU_STAT_VMEXITS_HYPERCALL 3
+/* first index available for architecture-specific counters */
+#define JAILHOUSE_GENERIC_CPU_STATS 4
+
#define JAILHOUSE_MSG_NONE 0
/* messages to cell */
#define JAILHOUSE_CELL_SHUT_DOWN 2 /* terminal state */
#define JAILHOUSE_CELL_FAILED 3 /* terminal state */
+#ifndef __ASSEMBLY__
+
struct jailhouse_comm_region {
volatile __u32 msg_to_cell;
volatile __u32 reply_from_cell;
/* errors etc. */
};
+#endif /* !__ASSEMBLY__ */
+
#include <asm/jailhouse_hypercall.h>
#endif /* !_JAILHOUSE_HYPERCALL_H */