2 * drivers/misc/tegra-profiler/armv8_pmu.c
4 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 #include <linux/printk.h>
20 #include <linux/types.h>
21 #include <linux/string.h>
23 #include <linux/version.h>
24 #include <linux/err.h>
25 #include <linux/bitmap.h>
26 #include <linux/slab.h>
28 #include <asm/cputype.h>
31 #include "armv8_pmu.h"
32 #include "armv8_events.h"
/*
 * Per-CPU PMU bookkeeping state plus module-wide context.
 * NOTE(review): this listing is truncated (interior lines/braces are
 * missing) — member lists below may be incomplete; verify against the
 * full source before editing.
 */
36 struct quadd_pmu_info {
/* bitmap of hardware counters claimed by this profiler on this CPU */
37 DECLARE_BITMAP(used_cntrs, QUADD_MAX_PMU_COUNTERS);
/* previous raw counter readings, used to compute per-sample deltas */
38 u32 prev_vals[QUADD_MAX_PMU_COUNTERS];
/* nonzero if the PMU was already running when we attached (don't reset it) */
39 int is_already_active;
/* cross-CPU summary of free counters, filled via on_each_cpu(); body not visible here */
42 struct quadd_cntrs_info {
49 static DEFINE_PER_CPU(struct quadd_pmu_info, cpu_pmu_info);
/* module-wide context: counters_mask, current event map, used_events list, arch info */
51 static struct quadd_pmu_ctx pmu_ctx;
/*
 * Generic ARM PMUv3 event map: quadd logical event id -> architectural
 * PMUv3 event number.  Used as the fallback for unknown ARM cores.
 * NOTE(review): the declarator line (presumably "static const u32") is
 * missing from this truncated listing — confirm against the full source.
 * PMUv3 has no per-access-type L1D refill events, so READ and WRITE
 * misses both map to the combined L1_DCACHE_REFILL event.
 */
54 quadd_armv8_pmuv3_arm_events_map[QUADD_EVENT_TYPE_MAX] = {
55 [QUADD_EVENT_TYPE_INSTRUCTIONS] =
56 QUADD_ARMV8_HW_EVENT_INSTR_EXECUTED,
57 [QUADD_EVENT_TYPE_BRANCH_INSTRUCTIONS] =
58 QUADD_ARMV8_UNSUPPORTED_EVENT,
59 [QUADD_EVENT_TYPE_BRANCH_MISSES] =
60 QUADD_ARMV8_HW_EVENT_PC_BRANCH_MIS_PRED,
61 [QUADD_EVENT_TYPE_BUS_CYCLES] =
62 QUADD_ARMV8_UNSUPPORTED_EVENT,
64 [QUADD_EVENT_TYPE_L1_DCACHE_READ_MISSES] =
65 QUADD_ARMV8_HW_EVENT_L1_DCACHE_REFILL,
66 [QUADD_EVENT_TYPE_L1_DCACHE_WRITE_MISSES] =
67 QUADD_ARMV8_HW_EVENT_L1_DCACHE_REFILL,
68 [QUADD_EVENT_TYPE_L1_ICACHE_MISSES] =
69 QUADD_ARMV8_HW_EVENT_L1_ICACHE_REFILL,
71 [QUADD_EVENT_TYPE_L2_DCACHE_READ_MISSES] =
72 QUADD_ARMV8_HW_EVENT_L2_CACHE_REFILL,
73 [QUADD_EVENT_TYPE_L2_DCACHE_WRITE_MISSES] =
74 QUADD_ARMV8_HW_EVENT_L2_CACHE_REFILL,
75 [QUADD_EVENT_TYPE_L2_ICACHE_MISSES] =
76 QUADD_ARMV8_UNSUPPORTED_EVENT,
/*
 * Cortex-A57 event map.  Differs from the generic ARM map in that A57
 * implements separate load/store L1D and L2D refill events, so READ and
 * WRITE cache-miss counts can be distinguished.
 * NOTE(review): declarator line missing from this truncated listing.
 */
80 quadd_armv8_pmuv3_a57_events_map[QUADD_EVENT_TYPE_MAX] = {
81 [QUADD_EVENT_TYPE_INSTRUCTIONS] =
82 QUADD_ARMV8_HW_EVENT_INSTR_EXECUTED,
83 [QUADD_EVENT_TYPE_BRANCH_INSTRUCTIONS] =
84 QUADD_ARMV8_UNSUPPORTED_EVENT,
85 [QUADD_EVENT_TYPE_BRANCH_MISSES] =
86 QUADD_ARMV8_HW_EVENT_PC_BRANCH_MIS_PRED,
87 [QUADD_EVENT_TYPE_BUS_CYCLES] =
88 QUADD_ARMV8_UNSUPPORTED_EVENT,
90 [QUADD_EVENT_TYPE_L1_DCACHE_READ_MISSES] =
91 QUADD_ARMV8_A57_HW_EVENT_L1D_CACHE_REFILL_LD,
92 [QUADD_EVENT_TYPE_L1_DCACHE_WRITE_MISSES] =
93 QUADD_ARMV8_A57_HW_EVENT_L1D_CACHE_REFILL_ST,
94 [QUADD_EVENT_TYPE_L1_ICACHE_MISSES] =
95 QUADD_ARMV8_HW_EVENT_L1_ICACHE_REFILL,
97 [QUADD_EVENT_TYPE_L2_DCACHE_READ_MISSES] =
98 QUADD_ARMV8_A57_HW_EVENT_L2D_CACHE_REFILL_LD,
99 [QUADD_EVENT_TYPE_L2_DCACHE_WRITE_MISSES] =
100 QUADD_ARMV8_A57_HW_EVENT_L2D_CACHE_REFILL_ST,
101 [QUADD_EVENT_TYPE_L2_ICACHE_MISSES] =
102 QUADD_ARMV8_UNSUPPORTED_EVENT,
/*
 * NVIDIA Denver event map.  Same architectural events as the generic ARM
 * map for L1, but no L2 cache-miss events are exposed at all.
 * NOTE(review): declarator line missing from this truncated listing.
 */
106 quadd_armv8_pmuv3_denver_events_map[QUADD_EVENT_TYPE_MAX] = {
107 [QUADD_EVENT_TYPE_INSTRUCTIONS] =
108 QUADD_ARMV8_HW_EVENT_INSTR_EXECUTED,
109 [QUADD_EVENT_TYPE_BRANCH_INSTRUCTIONS] =
110 QUADD_ARMV8_UNSUPPORTED_EVENT,
111 [QUADD_EVENT_TYPE_BRANCH_MISSES] =
112 QUADD_ARMV8_HW_EVENT_PC_BRANCH_MIS_PRED,
113 [QUADD_EVENT_TYPE_BUS_CYCLES] =
114 QUADD_ARMV8_UNSUPPORTED_EVENT,
116 [QUADD_EVENT_TYPE_L1_DCACHE_READ_MISSES] =
117 QUADD_ARMV8_HW_EVENT_L1_DCACHE_REFILL,
118 [QUADD_EVENT_TYPE_L1_DCACHE_WRITE_MISSES] =
119 QUADD_ARMV8_HW_EVENT_L1_DCACHE_REFILL,
120 [QUADD_EVENT_TYPE_L1_ICACHE_MISSES] =
121 QUADD_ARMV8_HW_EVENT_L1_ICACHE_REFILL,
123 [QUADD_EVENT_TYPE_L2_DCACHE_READ_MISSES] =
124 QUADD_ARMV8_UNSUPPORTED_EVENT,
125 [QUADD_EVENT_TYPE_L2_DCACHE_WRITE_MISSES] =
126 QUADD_ARMV8_UNSUPPORTED_EVENT,
127 [QUADD_EVENT_TYPE_L2_ICACHE_MISSES] =
128 QUADD_ARMV8_UNSUPPORTED_EVENT,
131 /*********************************************************************/
/*
 * Low-level PMUv3 system-register accessors.  Each function wraps a
 * single mrs/msr on the EL0/EL1 performance-monitor registers; write
 * helpers mask the value with the register's writable-bits mask where
 * one is defined.  These must run on the CPU whose PMU is targeted.
 * NOTE(review): this listing is truncated — return statements, local
 * declarations and braces are missing from view.
 */
134 armv8_pmu_pmcr_read(void)
138 /* Read Performance Monitors Control Register */
139 asm volatile("mrs %0, pmcr_el0" : "=r" (val));
144 armv8_pmu_pmcr_write(u32 val)
146 asm volatile("msr pmcr_el0, %0" : :
147 "r" (val & QUADD_ARMV8_PMCR_WR_MASK));
151 armv8_pmu_pmceid_read(void)
155 /* Read Performance Monitors Common Event Identification Register */
156 asm volatile("mrs %0, pmceid0_el0" : "=r" (val));
161 armv8_pmu_pmcntenset_read(void)
165 /* Read Performance Monitors Count Enable Set Register */
166 asm volatile("mrs %0, pmcntenset_el0" : "=r" (val));
171 armv8_pmu_pmcntenset_write(u32 val)
173 /* Write Performance Monitors Count Enable Set Register */
174 asm volatile("msr pmcntenset_el0, %0" : : "r" (val));
178 armv8_pmu_pmcntenclr_write(u32 val)
180 /* Write Performance Monitors Count Enable Clear Register */
181 asm volatile("msr pmcntenclr_el0, %0" : : "r" (val));
185 armv8_pmu_pmselr_write(u32 val)
187 /* Write Performance Monitors Event Counter Selection Register */
188 asm volatile("msr pmselr_el0, %0" : :
189 "r" (val & QUADD_ARMV8_SELECT_MASK));
193 armv8_pmu_pmccntr_read(void)
197 /* Read Performance Monitors Cycle Count Register */
198 asm volatile("mrs %0, pmccntr_el0" : "=r" (val));
203 armv8_pmu_pmccntr_write(u64 val)
205 /* Write Performance Monitors Selected Event Count Register */
206 asm volatile("msr pmccntr_el0, %0" : : "r" (val));
/* pmxevcntr/pmxevtyper operate on the counter last selected via pmselr_el0 */
210 armv8_pmu_pmxevcntr_read(void)
214 /* Read Performance Monitors Selected Event Count Register */
215 asm volatile("mrs %0, pmxevcntr_el0" : "=r" (val));
220 armv8_pmu_pmxevcntr_write(u32 val)
222 /* Write Performance Monitors Selected Event Count Register */
223 asm volatile("msr pmxevcntr_el0, %0" : : "r" (val));
227 armv8_pmu_pmxevtyper_write(u32 event)
229 /* Write Performance Monitors Selected Event Type Register */
230 asm volatile("msr pmxevtyper_el0, %0" : :
231 "r" (event & QUADD_ARMV8_EVTSEL_MASK));
235 armv8_pmu_pmintenset_read(void)
239 /* Read Performance Monitors Interrupt Enable Set Register */
240 asm volatile("mrs %0, pmintenset_el1" : "=r" (val));
245 armv8_pmu_pmintenset_write(u32 val)
247 /* Write Performance Monitors Interrupt Enable Set Register */
248 asm volatile("msr pmintenset_el1, %0" : : "r" (val));
252 armv8_pmu_pmintenclr_write(u32 val)
254 /* Write Performance Monitors Interrupt Enable Clear Register */
255 asm volatile("msr pmintenclr_el1, %0" : : "r" (val));
259 armv8_pmu_pmovsclr_read(void)
263 /* Read Performance Monitors Overflow Flag Status Register */
264 asm volatile("mrs %0, pmovsclr_el0" : "=r" (val));
/*
 * NOTE(review): this takes an *index* and writes BIT(idx), yet
 * reset_overflow_flags() below passes a pre-built bit MASK, which would
 * clear only bit (1 << mask) % 64 — likely a bug; confirm against the
 * full source whether the parameter should be a raw mask instead.
 */
269 armv8_pmu_pmovsclr_write(int idx)
271 /* Write Performance Monitors Overflow Flag Status Register */
272 asm volatile("msr pmovsclr_el0, %0" : : "r" (BIT(idx)));
276 armv8_id_afr0_el1_read(void)
280 /* Read Auxiliary Feature Register 0 */
281 asm volatile("mrs %0, id_afr0_el1" : "=r" (val));
/* Enable/disable a single counter by index via the set/clear registers. */
285 static void enable_counter(int idx)
287 armv8_pmu_pmcntenset_write(BIT(idx));
290 static void disable_counter(int idx)
292 armv8_pmu_pmcntenclr_write(BIT(idx));
/* Select which event counter subsequent pmxev* accesses address. */
295 static void select_counter(unsigned int counter)
297 armv8_pmu_pmselr_write(counter);
/*
 * Return nonzero if the PMU is globally enabled (PMCR.E) AND at least
 * one counter we care about (cycle counter or a programmable counter in
 * our mask) is currently enabled.  Used to detect a PMU already owned
 * by someone else (e.g. perf).
 */
300 static int is_pmu_enabled(void)
302 u32 pmcr = armv8_pmu_pmcr_read();
304 if (pmcr & QUADD_ARMV8_PMCR_E) {
305 u32 pmcnten = armv8_pmu_pmcntenset_read();
306 pmcnten &= pmu_ctx.counters_mask | QUADD_ARMV8_CCNT;
307 return pmcnten ? 1 : 0;
/*
 * Read/write one counter: the dedicated cycle counter for
 * QUADD_ARMV8_CCNT_BIT, otherwise the programmable counter selected via
 * pmselr (the select_counter() call is in the lines elided from this
 * truncated listing — verify in the full source).
 * Note pmccntr is 64-bit but is truncated to/zero-extended from u32 here.
 */
313 static u32 read_counter(int idx)
317 if (idx == QUADD_ARMV8_CCNT_BIT) {
318 val = armv8_pmu_pmccntr_read();
321 val = armv8_pmu_pmxevcntr_read();
327 static void write_counter(int idx, u32 value)
329 if (idx == QUADD_ARMV8_CCNT_BIT) {
330 armv8_pmu_pmccntr_write(value);
333 armv8_pmu_pmxevcntr_write(value);
/*
 * Fill 'bitmap' with the counters NOT currently enabled (i.e. free for
 * our use), report via *ccntr whether the cycle counter is free, and
 * return the number of free programmable counters (total free minus the
 * cycle counter, if it was counted).
 * NOTE(review): copying a u32 through an (unsigned long *) cast is only
 * correct on little-endian / when bit 0 lines up — fine for arm64, but
 * not portable; confirm intent in the full source.
 */
338 get_free_counters(unsigned long *bitmap, int nbits, int *ccntr)
343 cntens = armv8_pmu_pmcntenset_read();
344 cntens = ~cntens & (pmu_ctx.counters_mask | QUADD_ARMV8_CCNT);
346 bitmap_zero(bitmap, nbits);
347 bitmap_copy(bitmap, (unsigned long *)&cntens,
348 BITS_PER_BYTE * sizeof(u32));
350 cc = (cntens & QUADD_ARMV8_CCNT) ? 1 : 0;
355 return bitmap_weight(bitmap, BITS_PER_BYTE * sizeof(u32)) - cc;
/* Mask the overflow interrupt for one counter. */
358 static void __maybe_unused
359 disable_interrupt(int idx)
361 armv8_pmu_pmintenclr_write(BIT(idx));
/* Mask overflow interrupts for the cycle counter and all programmable counters. */
365 disable_all_interrupts(void)
367 u32 val = QUADD_ARMV8_CCNT | pmu_ctx.counters_mask;
368 armv8_pmu_pmintenclr_write(val);
/*
 * Clear pending overflow flags.
 * NOTE(review): 'val' is a bit MASK but armv8_pmu_pmovsclr_write() takes
 * an index and applies BIT() to it — this looks like a bug; see the
 * accessor above and confirm against the full source.
 */
372 reset_overflow_flags(void)
374 u32 val = QUADD_ARMV8_CCNT | pmu_ctx.counters_mask;
375 armv8_pmu_pmovsclr_write(val);
/* Program 'event' into counter 'idx' (selection line elided in this listing). */
379 select_event(unsigned int idx, unsigned int event)
382 armv8_pmu_pmxevtyper_write(event);
/*
 * Globally stop the PMU: clear PMCR.E if set, then disable the cycle
 * counter and every programmable counter in our mask.
 */
385 static void disable_all_counters(void)
389 /* Disable all counters */
390 val = armv8_pmu_pmcr_read();
391 if (val & QUADD_ARMV8_PMCR_E)
392 armv8_pmu_pmcr_write(val & ~QUADD_ARMV8_PMCR_E);
394 armv8_pmu_pmcntenclr_write(QUADD_ARMV8_CCNT | pmu_ctx.counters_mask);
/*
 * Globally start the PMU (PMCR.E) and allow counting to be exported
 * (PMCR.X).  Individual counters must be enabled separately.
 */
397 static void enable_all_counters(void)
401 /* Enable all counters */
402 val = armv8_pmu_pmcr_read();
403 val |= QUADD_ARMV8_PMCR_E | QUADD_ARMV8_PMCR_X;
404 armv8_pmu_pmcr_write(val);
/* Reset event counters (PMCR.P) and the cycle counter (PMCR.C) to zero. */
407 static void reset_all_counters(void)
411 val = armv8_pmu_pmcr_read();
412 val |= QUADD_ARMV8_PMCR_P | QUADD_ARMV8_PMCR_C;
413 armv8_pmu_pmcr_write(val);
/* Put the PMU in a known state: no pending overflows, no interrupts. */
416 static void quadd_init_pmu(void)
418 reset_overflow_flags();
419 disable_all_interrupts();
/*
 * Event-source "enable" hook.  Body is elided in this truncated listing;
 * from what is visible it only logs the reservation — presumably the
 * return value is 0 (success); confirm in the full source.
 */
422 static int pmu_enable(void)
424 pr_info("pmu was reserved\n");
/*
 * Per-CPU teardown (runs via on_each_cpu in IPI context): if we owned
 * the PMU on this CPU, stop and reset everything; if someone else was
 * already using it, only disable and zero the counters *we* claimed.
 */
428 static void __pmu_disable(void *arg)
430 struct quadd_pmu_info *pi = &__get_cpu_var(cpu_pmu_info);
432 if (!pi->is_already_active) {
433 pr_info("[%d] reset all counters\n",
436 disable_all_counters();
437 reset_all_counters();
441 for_each_set_bit(idx, pi->used_cntrs, QUADD_MAX_PMU_COUNTERS) {
442 pr_info("[%d] reset counter: %d\n",
443 smp_processor_id(), idx);
445 disable_counter(idx);
446 write_counter(idx, 0);
/* Event-source "disable" hook: tear down on every online CPU. */
451 static void pmu_disable(void)
453 on_each_cpu(__pmu_disable, NULL, 1);
454 pr_info("pmu was released\n");
/*
 * Start counting on the current CPU: note whether the PMU was already
 * active (so we can restore it later), claim a hardware counter for each
 * requested event (the dedicated cycle counter for CPU_CYCLES, otherwise
 * the next free programmable counter), program the event, zero and
 * enable the counter, then start the PMU globally if it wasn't running.
 * NOTE(review): interior lines (locals, braces, prevp updates) are
 * elided from this truncated listing.
 */
457 static void pmu_start(void)
459 int idx = 0, pcntrs, ccntr;
461 DECLARE_BITMAP(free_bitmap, QUADD_MAX_PMU_COUNTERS);
462 struct quadd_pmu_info *pi = &__get_cpu_var(cpu_pmu_info);
463 u32 *prevp = pi->prev_vals;
464 struct quadd_pmu_event_info *ei;
466 bitmap_zero(pi->used_cntrs, QUADD_MAX_PMU_COUNTERS);
468 if (is_pmu_enabled()) {
469 pi->is_already_active = 1;
471 disable_all_counters();
474 pi->is_already_active = 0;
477 pcntrs = get_free_counters(free_bitmap, QUADD_MAX_PMU_COUNTERS, &ccntr);
479 list_for_each_entry(ei, &pmu_ctx.used_events, list) {
484 event = ei->hw_value;
486 if (ei->quadd_event_id == QUADD_EVENT_TYPE_CPU_CYCLES) {
488 pr_err_once("Error: cpu cycles counter is already occupied\n");
491 index = QUADD_ARMV8_CCNT_BIT;
494 pr_err_once("Error: too many performance events\n");
498 index = find_next_bit(free_bitmap,
499 QUADD_MAX_PMU_COUNTERS, idx);
500 if (index >= QUADD_MAX_PMU_COUNTERS) {
501 pr_err_once("Error: too many events\n");
505 select_event(index, event);
507 set_bit(index, pi->used_cntrs);
509 write_counter(index, 0);
510 enable_counter(index);
513 if (!pi->is_already_active) {
514 reset_all_counters();
515 enable_all_counters();
518 qm_debug_start_source(QUADD_EVENT_SOURCE_PMU);
/*
 * Stop counting on the current CPU.  Mirrors __pmu_disable(): full
 * stop+reset if we owned the PMU, otherwise only release the counters
 * we claimed.
 */
521 static void pmu_stop(void)
524 struct quadd_pmu_info *pi = &__get_cpu_var(cpu_pmu_info);
526 if (!pi->is_already_active) {
527 disable_all_counters();
528 reset_all_counters();
530 for_each_set_bit(idx, pi->used_cntrs, QUADD_MAX_PMU_COUNTERS) {
531 disable_counter(idx);
532 write_counter(idx, 0);
536 qm_debug_stop_source(QUADD_EVENT_SOURCE_PMU);
/*
 * Sample all active events into 'events' (up to max_events entries):
 * for each configured event find its claimed counter (cycle counter for
 * CPU_CYCLES, else the next set bit in used_cntrs), read the raw value
 * and report it together with the previous reading so callers can
 * compute deltas.  Presumably returns the number of entries filled —
 * the return lines are elided from this truncated listing; confirm.
 */
539 static int __maybe_unused
540 pmu_read(struct event_data *events, int max_events)
544 struct quadd_pmu_info *pi = &__get_cpu_var(cpu_pmu_info);
545 u32 *prevp = pi->prev_vals;
546 struct quadd_pmu_event_info *ei;
548 if (bitmap_empty(pi->used_cntrs, QUADD_MAX_PMU_COUNTERS)) {
549 pr_err_once("Error: counters were not initialized\n");
553 list_for_each_entry(ei, &pmu_ctx.used_events, list) {
556 if (ei->quadd_event_id == QUADD_EVENT_TYPE_CPU_CYCLES) {
557 if (!test_bit(QUADD_ARMV8_CCNT_BIT, pi->used_cntrs)) {
558 pr_err_once("Error: ccntr is not used\n");
561 index = QUADD_ARMV8_CCNT_BIT;
563 index = find_next_bit(pi->used_cntrs,
564 QUADD_MAX_PMU_COUNTERS, idx);
567 if (index >= QUADD_MAX_PMU_COUNTERS) {
568 pr_err_once("Error: perf counter is not used\n");
573 val = read_counter(index);
575 events->event_source = QUADD_EVENT_SOURCE_PMU;
576 events->event_id = ei->quadd_event_id;
579 events->prev_val = *prevp;
583 qm_debug_read_counter(events->event_id, events->prev_val,
586 if (++i >= max_events)
/*
 * Fake-read path used when QUADD_USE_EMULATE_COUNTERS is defined:
 * produces synthetic monotonically-evolving values (static 'val')
 * instead of touching hardware.  Interior lines elided in this listing.
 */
596 static int __maybe_unused
597 pmu_read_emulate(struct event_data *events, int max_events)
600 static u32 val = 100;
601 struct quadd_pmu_info *pi = &__get_cpu_var(cpu_pmu_info);
602 u32 *prevp = pi->prev_vals;
603 struct quadd_pmu_event_info *ei;
605 list_for_each_entry(ei, &pmu_ctx.used_events, list) {
609 events->event_id = *prevp;
615 if (++i >= max_events)
/*
 * Per-CPU helper (runs via on_each_cpu): compute this CPU's free
 * counters and fold them into the shared quadd_cntrs_info under its
 * spinlock, keeping the MINIMUM across CPUs so only counters free
 * everywhere are handed out.
 */
625 static void __get_free_counters(void *arg)
628 DECLARE_BITMAP(free_bitmap, QUADD_MAX_PMU_COUNTERS);
629 struct quadd_cntrs_info *ci = arg;
631 pcntrs = get_free_counters(free_bitmap, QUADD_MAX_PMU_COUNTERS, &ccntr);
633 spin_lock(&ci->lock);
635 ci->pcntrs = min_t(int, pcntrs, ci->pcntrs);
640 pr_info("[%d] pcntrs/ccntr: %d/%d, free_bitmap: %#lx\n",
641 smp_processor_id(), pcntrs, ccntr, free_bitmap[0]);
643 spin_unlock(&ci->lock);
/*
 * Unlink and release every quadd_pmu_event_info on 'head'.  The kfree
 * call is in the lines elided from this listing — confirm each entry is
 * actually freed after list_del in the full source.
 */
646 static void free_events(struct list_head *head)
648 struct quadd_pmu_event_info *entry, *next;
650 list_for_each_entry_safe(entry, next, head, list) {
651 list_del(&entry->list);
/*
 * Configure the set of events to profile: discover how many counters
 * are free on EVERY CPU, then for each requested logical event allocate
 * a quadd_pmu_event_info, resolve it to a hardware event value through
 * pmu_ctx.current_map (or the dedicated cycle counter), and append it
 * to pmu_ctx.used_events.  Sets l1_cache_rw when both L1D read- and
 * write-miss events were requested.  On failure the partially built
 * list is torn down with free_events().
 */
656 static int set_events(int *events, int size)
658 int free_pcntrs, err;
659 int i, nr_l1_r = 0, nr_l1_w = 0;
660 struct quadd_cntrs_info free_ci;
662 pmu_ctx.l1_cache_rw = 0;
664 free_events(&pmu_ctx.used_events);
666 if (!events || !size)
669 if (!pmu_ctx.current_map) {
670 pr_err("Invalid current_map\n");
674 spin_lock_init(&free_ci.lock);
675 free_ci.pcntrs = QUADD_MAX_PMU_COUNTERS;
678 on_each_cpu(__get_free_counters, &free_ci, 1);
680 free_pcntrs = free_ci.pcntrs;
681 pr_info("free counters: pcntrs/ccntr: %d/%d\n",
682 free_pcntrs, free_ci.ccntr);
684 pr_info("event identification register: %#x\n",
685 armv8_pmu_pmceid_read());
687 for (i = 0; i < size; i++) {
688 struct quadd_pmu_event_info *ei;
/*
 * NOTE(review): bounds check uses '>' but the maps are sized
 * [QUADD_EVENT_TYPE_MAX], so events[i] == QUADD_EVENT_TYPE_MAX reads
 * one past the end of current_map below — should likely be '>='.
 */
690 if (events[i] > QUADD_EVENT_TYPE_MAX) {
691 pr_err("error event: %d\n", events[i]);
696 ei = kzalloc(sizeof(*ei), GFP_KERNEL);
702 INIT_LIST_HEAD(&ei->list);
703 list_add_tail(&ei->list, &pmu_ctx.used_events);
705 if (events[i] == QUADD_EVENT_TYPE_CPU_CYCLES) {
706 ei->hw_value = QUADD_ARMV8_CPU_CYCLE_EVENT;
707 if (!free_ci.ccntr) {
708 pr_err("error: cpu cycles counter is already occupied\n");
713 if (!free_pcntrs--) {
714 pr_err("error: too many performance events\n");
719 ei->hw_value = pmu_ctx.current_map[events[i]];
722 ei->quadd_event_id = events[i];
724 if (events[i] == QUADD_EVENT_TYPE_L1_DCACHE_READ_MISSES)
726 else if (events[i] == QUADD_EVENT_TYPE_L1_DCACHE_WRITE_MISSES)
729 pr_info("Event has been added: id/pmu value: %s/%#x\n",
730 quadd_get_event_str(events[i]),
734 if (nr_l1_r > 0 && nr_l1_w > 0)
735 pmu_ctx.l1_cache_rw = 1;
740 free_events(&pmu_ctx.used_events);
/*
 * Write into 'events' the logical event ids the current CPU's map
 * supports (everything not marked UNSUPPORTED), capped at max_events.
 * Return value lines are elided in this listing — presumably nr_events.
 */
744 static int get_supported_events(int *events, int max_events)
746 int i, nr_events = 0;
748 max_events = min_t(int, QUADD_EVENT_TYPE_MAX, max_events);
750 for (i = 0; i < max_events; i++) {
751 if (pmu_ctx.current_map[i] != QUADD_ARMV8_UNSUPPORTED_EVENT)
752 events[nr_events++] = i;
/*
 * Copy the ids of the currently configured events into 'events'.
 * The max_events bound check is in lines elided from this listing —
 * confirm the loop cannot overrun the caller's buffer.
 */
757 static int get_current_events(int *events, int max_events)
760 struct quadd_pmu_event_info *ei;
762 list_for_each_entry(ei, &pmu_ctx.used_events, list) {
763 events[i++] = ei->quadd_event_id;
/* Expose the detected CPU/PMU architecture description. */
772 static struct quadd_arch_info *get_arch(void)
774 return &pmu_ctx.arch;
/*
 * Event-source vtable handed back by quadd_armv8_pmu_init().  The .read
 * member is selected at compile time: the real pmu_read() normally,
 * pmu_read_emulate() when QUADD_USE_EMULATE_COUNTERS is defined (the
 * #else branch is elided from this truncated listing).
 */
777 static struct quadd_event_source_interface pmu_armv8_int = {
778 .enable = pmu_enable,
779 .disable = pmu_disable,
784 #ifndef QUADD_USE_EMULATE_COUNTERS
787 .read = pmu_read_emulate,
789 .set_events = set_events,
790 .get_supported_events = get_supported_events,
791 .get_current_events = get_current_events,
792 .get_arch = get_arch,
/*
 * Probe the CPU's PMU: read the PMU version from ID_AA64DFR0_EL1
 * (bits [11:8]), and for PMUv3 identify the implementor/idcode from
 * PMCR_EL0 to select the per-core event map (Cortex-A53/A57, NVIDIA
 * Denver, or the generic ARM map) and a human-readable arch name.
 * Returns the event-source interface on success, NULL for an
 * unsupported PMU version.
 */
795 struct quadd_event_source_interface *quadd_armv8_pmu_init(void)
797 u32 pmcr, imp, idcode;
798 struct quadd_event_source_interface *pmu = NULL;
800 u64 aa64_dfr = read_cpuid(ID_AA64DFR0_EL1);
801 aa64_dfr = (aa64_dfr >> 8) & 0x0f;
803 strncpy(pmu_ctx.arch.name, "Unknown", sizeof(pmu_ctx.arch.name));
804 pmu_ctx.arch.type = QUADD_AA64_CPU_TYPE_UNKNOWN;
805 pmu_ctx.arch.ver = 0;
806 pmu_ctx.current_map = NULL;
809 case QUADD_AA64_PMUVER_PMUV3:
810 strncpy(pmu_ctx.arch.name, "AA64 PmuV3",
811 sizeof(pmu_ctx.arch.name));
812 pmu_ctx.arch.name[sizeof(pmu_ctx.arch.name) - 1] = '\0';
814 pmu_ctx.counters_mask =
815 QUADD_ARMV8_COUNTERS_MASK_PMUV3;
816 pmu_ctx.current_map =
817 quadd_armv8_pmuv3_arm_events_map;
819 pmcr = armv8_pmu_pmcr_read();
821 idcode = (pmcr >> QUADD_ARMV8_PMCR_IDCODE_SHIFT) &
822 QUADD_ARMV8_PMCR_IDCODE_MASK;
823 imp = pmcr >> QUADD_ARMV8_PMCR_IMP_SHIFT;
825 pr_info("imp: %#x, idcode: %#x\n", imp, idcode);
/*
 * NOTE(review): every strncat() below passes
 * sizeof(name) - strlen(name) as the bound, but strncat writes up to
 * n chars PLUS a terminating NUL — this can overflow by one byte when
 * the buffer is nearly full.  The bound should be
 * sizeof(name) - strlen(name) - 1 (or use strlcat/snprintf).
 */
827 if (imp == ARM_CPU_IMP_ARM) {
828 strncat(pmu_ctx.arch.name, " ARM",
829 sizeof(pmu_ctx.arch.name) -
830 strlen(pmu_ctx.arch.name));
831 pmu_ctx.arch.name[sizeof(pmu_ctx.arch.name) - 1] = '\0';
833 if (idcode == QUADD_AA64_CPU_IDCODE_CORTEX_A53) {
835 QUADD_AA64_CPU_TYPE_CORTEX_A53;
837 strncat(pmu_ctx.arch.name, " CORTEX-A53",
838 sizeof(pmu_ctx.arch.name) -
839 strlen(pmu_ctx.arch.name));
840 } else if (idcode == QUADD_AA64_CPU_IDCODE_CORTEX_A57) {
842 QUADD_AA64_CPU_TYPE_CORTEX_A57;
843 pmu_ctx.current_map =
844 quadd_armv8_pmuv3_a57_events_map;
846 strncat(pmu_ctx.arch.name, " CORTEX-A57",
847 sizeof(pmu_ctx.arch.name) -
848 strlen(pmu_ctx.arch.name));
850 pmu_ctx.arch.type = QUADD_AA64_CPU_TYPE_ARM;
852 } else if (imp == QUADD_AA64_CPU_IMP_NVIDIA) {
/* Denver exposes an NV PMU extension version in ID_AFR0_EL1 */
853 u32 ext_ver = armv8_id_afr0_el1_read();
854 ext_ver = (ext_ver >> QUADD_ARMV8_PMU_NVEXT_SHIFT) &
855 QUADD_ARMV8_PMU_NVEXT_MASK;
857 strncat(pmu_ctx.arch.name, " NVIDIA (Denver)",
858 sizeof(pmu_ctx.arch.name) -
859 strlen(pmu_ctx.arch.name));
861 pmu_ctx.arch.type = QUADD_AA64_CPU_TYPE_DENVER;
862 pmu_ctx.arch.ver = ext_ver;
863 pmu_ctx.current_map =
864 quadd_armv8_pmuv3_denver_events_map;
866 strncat(pmu_ctx.arch.name, " Unknown implementor code",
867 sizeof(pmu_ctx.arch.name) -
868 strlen(pmu_ctx.arch.name));
869 pmu_ctx.arch.type = QUADD_AA64_CPU_TYPE_UNKNOWN_IMP;
872 pmu = &pmu_armv8_int;
876 pr_err("error: incorrect PMUVer\n");
880 INIT_LIST_HEAD(&pmu_ctx.used_events);
882 pmu_ctx.arch.name[sizeof(pmu_ctx.arch.name) - 1] = '\0';
883 pr_info("arch: %s, type: %d, ver: %d\n",
884 pmu_ctx.arch.name, pmu_ctx.arch.type, pmu_ctx.arch.ver);
/* Module teardown: release all configured event descriptors. */
889 void quadd_armv8_pmu_deinit(void)
891 free_events(&pmu_ctx.used_events);