struct list_head list;
};
+#define QUADD_ARCH_NAME_MAX 64
+
+struct quadd_arch_info {
+ int type;
+ int ver;
+
+ char name[QUADD_ARCH_NAME_MAX];
+};
+
struct quadd_pmu_ctx {
- int arch;
- char arch_name[64];
+ struct quadd_arch_info arch;
u32 counters_mask;
* so currently we are devided by two
*/
if (pmu_ctx.l1_cache_rw &&
- (pmu_ctx.arch == QUADD_ARM_CPU_TYPE_CORTEX_A8 ||
- pmu_ctx.arch == QUADD_ARM_CPU_TYPE_CORTEX_A9) &&
+ (pmu_ctx.arch.type == QUADD_ARM_CPU_TYPE_CORTEX_A8 ||
+ pmu_ctx.arch.type == QUADD_ARM_CPU_TYPE_CORTEX_A9) &&
(event_id == QUADD_EVENT_TYPE_L1_DCACHE_READ_MISSES ||
event_id == QUADD_EVENT_TYPE_L1_DCACHE_WRITE_MISSES)) {
return value / 2;
return i;
}
+/* Return the CPU architecture info (type/ver/name) detected at init. */
+static struct quadd_arch_info *get_arch(void)
+{
+ return &pmu_ctx.arch;
+}
+
static struct quadd_event_source_interface pmu_armv7_int = {
.enable = pmu_enable,
.disable = pmu_disable,
.set_events = set_events,
.get_supported_events = get_supported_events,
.get_current_events = get_current_events,
+ .get_arch = get_arch,
};
struct quadd_event_source_interface *quadd_armv7_pmu_init(void)
cpu_implementer = cpu_id >> 24;
part_number = cpu_id & 0xFFF0;
+ pmu_ctx.arch.type = QUADD_ARM_CPU_TYPE_UNKNOWN;
+ pmu_ctx.arch.ver = 0;
+ strncpy(pmu_ctx.arch.name, "Unknown",
+ sizeof(pmu_ctx.arch.name));
+
if (cpu_implementer == ARM_CPU_IMP_ARM) {
switch (part_number) {
case ARM_CPU_PART_CORTEX_A9:
- pmu_ctx.arch = QUADD_ARM_CPU_TYPE_CORTEX_A9;
- strcpy(pmu_ctx.arch_name, "Cortex A9");
+ pmu_ctx.arch.type = QUADD_ARM_CPU_TYPE_CORTEX_A9;
+ strncpy(pmu_ctx.arch.name, "Cortex A9",
+ sizeof(pmu_ctx.arch.name));
+
pmu_ctx.counters_mask =
QUADD_ARMV7_COUNTERS_MASK_CORTEX_A9;
pmu_ctx.current_map = quadd_armv7_a9_events_map;
break;
case ARM_CPU_PART_CORTEX_A15:
- pmu_ctx.arch = QUADD_ARM_CPU_TYPE_CORTEX_A15;
- strcpy(pmu_ctx.arch_name, "Cortex A15");
+ pmu_ctx.arch.type = QUADD_ARM_CPU_TYPE_CORTEX_A15;
+ strncpy(pmu_ctx.arch.name, "Cortex A15",
+ sizeof(pmu_ctx.arch.name));
+
pmu_ctx.counters_mask =
QUADD_ARMV7_COUNTERS_MASK_CORTEX_A15;
pmu_ctx.current_map = quadd_armv7_a15_events_map;
break;
default:
- pmu_ctx.arch = QUADD_ARM_CPU_TYPE_UNKNOWN;
- strcpy(pmu_ctx.arch_name, "Unknown");
+ pmu_ctx.arch.type = QUADD_ARM_CPU_TYPE_UNKNOWN;
pmu_ctx.current_map = NULL;
break;
}
INIT_LIST_HEAD(&pmu_ctx.used_events);
- pr_info("arch: %s\n", pmu_ctx.arch_name);
+ pmu_ctx.arch.name[sizeof(pmu_ctx.arch.name) - 1] = '\0';
+ pr_info("arch: %s, type: %d, ver: %d\n",
+ pmu_ctx.arch.name, pmu_ctx.arch.type, pmu_ctx.arch.ver);
return pmu;
}
#define QUADD_ARMV8_PMCR_LC (1 << 6)
/* Number of event counters */
-#define QUADD_ARMV8_PMCR_N_SHIFT 16
+/* PMCR_EL0.N occupies bits [15:11] per the ARMv8 ARM */
+#define QUADD_ARMV8_PMCR_N_SHIFT 11
#define QUADD_ARMV8_PMCR_N_MASK 0x1f
/* Identification code */
-#define QUADD_ARMV8_PMCR_IDCODE_SHIFT 11
+/* PMCR_EL0.IDCODE occupies bits [23:16] per the ARMv8 ARM */
+#define QUADD_ARMV8_PMCR_IDCODE_SHIFT 16
#define QUADD_ARMV8_PMCR_IDCODE_MASK 0xff
/* Implementer code */
#define QUADD_ARMV8_COUNTERS_MASK_PMUV3 0x3f
+/* NVIDIA PMU extension version, extracted from ID_AFR0_EL1 */
+#define QUADD_ARMV8_PMU_NVEXT_SHIFT 4
+#define QUADD_ARMV8_PMU_NVEXT_MASK 0x0f
+
+
/*
* ARMv8 PMUv3 Performance Events handling code.
* Common event types.
asm volatile("msr pmovsclr_el0, %0" : : "r" (BIT(idx)));
}
-/*********************************************************************/
-
+/*
+ * Read the Auxiliary Feature Register 0 (ID_AFR0_EL1).
+ *
+ * The AArch64 MRS instruction requires a 64-bit Xn destination; reading
+ * into a u32 would make the compiler pick a Wn register and the
+ * assembler would reject "mrs wN, ...".  Read into u64 and narrow.
+ * Architecturally only the low 32 bits of ID_AFR0_EL1 are defined.
+ */
+static inline u32
+armv8_id_afr0_el1_read(void)
+{
+ u64 val;
+
+ asm volatile("mrs %0, id_afr0_el1" : "=r" (val));
+ return (u32)val;
+}
static void enable_counter(int idx)
{
return i;
}
-/*********************************************************************/
+/* Return the CPU architecture info (type/ver/name) detected at init. */
+static struct quadd_arch_info *get_arch(void)
+{
+ return &pmu_ctx.arch;
+}
static struct quadd_event_source_interface pmu_armv8_int = {
.enable = pmu_enable,
.set_events = set_events,
.get_supported_events = get_supported_events,
.get_current_events = get_current_events,
+ .get_arch = get_arch,
};
struct quadd_event_source_interface *quadd_armv8_pmu_init(void)
u64 aa64_dfr = read_cpuid(ID_AA64DFR0_EL1);
aa64_dfr = (aa64_dfr >> 8) & 0x0f;
- pmu_ctx.arch = QUADD_AA64_CPU_TYPE_UNKNOWN;
+ strncpy(pmu_ctx.arch.name, "Unknown", sizeof(pmu_ctx.arch.name));
+ pmu_ctx.arch.type = QUADD_AA64_CPU_TYPE_UNKNOWN;
+ pmu_ctx.arch.ver = 0;
switch (aa64_dfr) {
case QUADD_AA64_PMUVER_PMUV3:
- strcpy(pmu_ctx.arch_name, "AA64 PmuV3");
+ strncpy(pmu_ctx.arch.name, "AA64 PmuV3",
+ sizeof(pmu_ctx.arch.name));
+ pmu_ctx.arch.name[sizeof(pmu_ctx.arch.name) - 1] = '\0';
+
pmu_ctx.counters_mask =
QUADD_ARMV8_COUNTERS_MASK_PMUV3;
pmu_ctx.current_map = quadd_armv8_pmuv3_events_map;
pr_info("imp: %#x, idcode: %#x\n", imp, idcode);
if (imp == ARM_CPU_IMP_ARM) {
- strcat(pmu_ctx.arch_name, " ARM");
+ /*
+ * strncat(dst, src, n) writes up to n chars PLUS a
+ * terminating NUL, so the bound must reserve one byte
+ * (sizeof - strlen - 1) or it can overflow by one.
+ * The corrected bound also guarantees termination, so
+ * no explicit terminator is needed afterwards.
+ */
+ strncat(pmu_ctx.arch.name, " ARM",
+ sizeof(pmu_ctx.arch.name) -
+ strlen(pmu_ctx.arch.name) - 1);
+
if (idcode == QUADD_AA64_CPU_IDCODE_CORTEX_A57) {
- pmu_ctx.arch = QUADD_AA64_CPU_TYPE_CORTEX_A57;
- strcat(pmu_ctx.arch_name, " CORTEX_A57");
+ pmu_ctx.arch.type =
+ QUADD_AA64_CPU_TYPE_CORTEX_A57;
+ strncat(pmu_ctx.arch.name, " CORTEX_A57",
+ sizeof(pmu_ctx.arch.name) -
+ strlen(pmu_ctx.arch.name) - 1);
} else {
- pmu_ctx.arch = QUADD_AA64_CPU_TYPE_ARM;
+ pmu_ctx.arch.type = QUADD_AA64_CPU_TYPE_ARM;
}
} else if (imp == QUADD_AA64_CPU_IMP_NVIDIA) {
- strcat(pmu_ctx.arch_name, " Nvidia");
- pmu_ctx.arch = QUADD_AA64_CPU_TYPE_DENVER;
+ /* NVIDIA PMU extension version lives in ID_AFR0_EL1 */
+ u32 ext_ver = armv8_id_afr0_el1_read();
+ ext_ver = (ext_ver >> QUADD_ARMV8_PMU_NVEXT_SHIFT) &
+ QUADD_ARMV8_PMU_NVEXT_MASK;
+
+ strncat(pmu_ctx.arch.name, " NVIDIA (Denver)",
+ sizeof(pmu_ctx.arch.name) -
+ strlen(pmu_ctx.arch.name) - 1);
+ pmu_ctx.arch.type = QUADD_AA64_CPU_TYPE_DENVER;
+ pmu_ctx.arch.ver = ext_ver;
} else {
- strcat(pmu_ctx.arch_name, " Unknown");
- pmu_ctx.arch = QUADD_AA64_CPU_TYPE_UNKNOWN_IMP;
+ strncat(pmu_ctx.arch.name, " Unknown implementor code",
+ sizeof(pmu_ctx.arch.name) -
+ strlen(pmu_ctx.arch.name) - 1);
+ pmu_ctx.arch.type = QUADD_AA64_CPU_TYPE_UNKNOWN_IMP;
}
pmu = &pmu_armv8_int;
INIT_LIST_HEAD(&pmu_ctx.used_events);
- pr_info("arch: %s\n", pmu_ctx.arch_name);
+ pmu_ctx.arch.name[sizeof(pmu_ctx.arch.name) - 1] = '\0';
+ pr_info("arch: %s, type: %d, ver: %d\n",
+ pmu_ctx.arch.name, pmu_ctx.arch.type, pmu_ctx.arch.ver);
return pmu;
}
struct quadd_comm_data_interface;
struct quadd_hrt_ctx;
struct quadd_module_state;
+/* defined by the PMU backends (quadd_arch_info holds type/ver/name) */
+struct quadd_arch_info;
struct quadd_event_source_interface {
 int (*enable)(void);
 int (*set_events)(int *events, int size);
 int (*get_supported_events)(int *events, int max_events);
 int (*get_current_events)(int *events, int max_events);
+ /* report the detected CPU/PMU architecture info */
+ struct quadd_arch_info * (*get_arch)(void);
};
struct source_info {
#include "quadd.h"
#include "version.h"
#include "quadd_proc.h"
+#include "arm_pmu.h"
#define YES_NO(x) ((x) ? "yes" : "no")
struct quadd_comm_cap *cap = &ctx->cap;
struct quadd_events_cap *event = &cap->events_cap;
unsigned int extra = cap->reserved[QUADD_COMM_CAP_IDX_EXTRA];
+ struct quadd_arch_info *arch = NULL;
+
+ /*
+ * get_arch is a newly added optional hook: guard against event
+ * sources that leave it NULL, not just against a missing pmu.
+ */
+ if (ctx->pmu && ctx->pmu->get_arch)
+ arch = ctx->pmu->get_arch();
seq_printf(f, "pmu: %s\n",
YES_NO(cap->pmu));
seq_printf(f, "l2 cache: %s\n",
YES_NO(cap->l2_cache));
if (cap->l2_cache) {
- seq_printf(f, "multiple l2 events: %s\n",
+ seq_printf(f, "multiple l2 events: %s\n",
YES_NO(cap->l2_multiple_events));
}
seq_printf(f, "information about unwind entry: %s\n",
YES_NO(extra & QUADD_COMM_CAP_EXTRA_UNW_ENTRY_TYPE));
+ seq_puts(f, "\n");
+
+ if (arch) {
+ seq_printf(f, "pmu arch: %s\n",
+ arch->name);
+ seq_printf(f, "pmu arch version: %d\n",
+ arch->ver);
+ }
+
seq_puts(f, "\n");
seq_puts(f, "Supported events:\n");
seq_printf(f, "cpu_cycles: %s\n",
#ifndef __QUADD_VERSION_H
#define __QUADD_VERSION_H
-#define QUADD_MODULE_VERSION "1.71"
+#define QUADD_MODULE_VERSION "1.72"
#define QUADD_MODULE_BRANCH "Dev"
#endif /* __QUADD_VERSION_H */