/*
 * drivers/misc/tegra-profiler/hrt.c
 *
 * Copyright (c) 2015, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/nsproxy.h>
#include <clocksource/arm_arch_timer.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/arch_timer.h>

#include <linux/tegra_profiler.h>

#include "quadd.h"
#include "hrt.h"
#include "comm.h"
#include "mmap.h"
#include "ma.h"
#include "power_clk.h"
#include "tegra.h"
#include "debug.h"

static struct quadd_hrt_ctx hrt;

static void
read_all_sources(struct pt_regs *regs, struct task_struct *task);

struct hrt_event_value {
        int event_id;
        u32 value;
};

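/*
 * Per-CPU sampling timer callback.  Runs in hard-IRQ context: it reads
 * the interrupted registers, samples all active event sources, and
 * re-arms the timer for the next sampling period.
 */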
static enum hrtimer_restart hrtimer_handler(struct hrtimer *hrtimer)
{
        struct pt_regs *regs;

        regs = get_irq_regs();

        if (!hrt.active)
                return HRTIMER_NORESTART;

        qm_debug_handler_sample(regs);

        if (regs)
                read_all_sources(regs, NULL);

        hrtimer_forward_now(hrtimer, ns_to_ktime(hrt.sample_period));
        qm_debug_timer_forward(regs, hrt.sample_period);

        return HRTIMER_RESTART;
}

static void start_hrtimer(struct quadd_cpu_context *cpu_ctx)
{
        u64 period = hrt.sample_period;

        __hrtimer_start_range_ns(&cpu_ctx->hrtimer,
                                 ns_to_ktime(period), 0,
                                 HRTIMER_MODE_REL_PINNED, 0);
        qm_debug_timer_start(NULL, period);
}

static void cancel_hrtimer(struct quadd_cpu_context *cpu_ctx)
{
        hrtimer_cancel(&cpu_ctx->hrtimer);
        qm_debug_timer_cancel();
}

static void init_hrtimer(struct quadd_cpu_context *cpu_ctx)
{
        hrtimer_init(&cpu_ctx->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        cpu_ctx->hrtimer.function = hrtimer_handler;
}

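/*
 * Timestamp helpers: samples are stamped either with the ARM
 * architected timer (when available and requested) or with the POSIX
 * monotonic clock; quadd_get_time() picks between the two.
 */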
static inline u64 get_posix_clock_monotonic_time(void)
{
        struct timespec ts;

        do_posix_clock_monotonic_gettime(&ts);
        return timespec_to_ns(&ts);
}

static inline u64 get_arch_time(struct timecounter *tc)
{
        cycle_t value;
        const struct cyclecounter *cc = tc->cc;

        value = cc->read(cc);
        return cyclecounter_cyc2ns(cc, value);
}

u64 quadd_get_time(void)
{
        struct timecounter *tc = hrt.tc;

        return (tc && hrt.use_arch_timer) ?
                get_arch_time(tc) :
                get_posix_clock_monotonic_time();
}

static void
put_sample_cpu(struct quadd_record_data *data,
               struct quadd_iovec *vec,
               int vec_count, int cpu_id)
{
        ssize_t err;
        struct quadd_comm_data_interface *comm = hrt.quadd_ctx->comm;

        err = comm->put_sample(data, vec, vec_count, cpu_id);
        if (err < 0)
                atomic64_inc(&hrt.skipped_samples);

        atomic64_inc(&hrt.counter_samples);
}

void
quadd_put_sample(struct quadd_record_data *data,
                 struct quadd_iovec *vec, int vec_count)
{
        put_sample_cpu(data, vec, vec_count, -1);
}

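/*
 * Emit one header record per possible CPU.  The header carries the
 * session parameters (frequencies, backtracing mode, unwind method)
 * plus the list of configured counter event IDs, so the trace consumer
 * can decode the sample records that follow.
 */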
static void put_header(void)
{
        int cpu_id;
        int nr_events = 0, max_events = QUADD_MAX_COUNTERS;
        int events[QUADD_MAX_COUNTERS];
        struct quadd_record_data record;
        struct quadd_header_data *hdr = &record.hdr;
        struct quadd_parameters *param = &hrt.quadd_ctx->param;
        unsigned int extra = param->reserved[QUADD_PARAM_IDX_EXTRA];
        struct quadd_iovec vec;
        struct quadd_ctx *ctx = hrt.quadd_ctx;
        struct quadd_event_source_interface *pmu = ctx->pmu;
        struct quadd_event_source_interface *pl310 = ctx->pl310;

        record.record_type = QUADD_RECORD_TYPE_HEADER;

        hdr->magic = QUADD_HEADER_MAGIC;
        hdr->version = QUADD_SAMPLES_VERSION;

        hdr->backtrace = param->backtrace;
        hdr->use_freq = param->use_freq;
        hdr->system_wide = param->system_wide;

        /* TODO: dynamically */
#ifdef QM_DEBUG_SAMPLES_ENABLE
        hdr->debug_samples = 1;
#else
        hdr->debug_samples = 0;
#endif

        hdr->freq = param->freq;
        hdr->ma_freq = param->ma_freq;
        hdr->power_rate_freq = param->power_rate_freq;

        hdr->power_rate = hdr->power_rate_freq > 0 ? 1 : 0;
        hdr->get_mmap = (extra & QUADD_PARAM_EXTRA_GET_MMAP) ? 1 : 0;

        hdr->reserved = 0;
        hdr->extra_length = 0;

        hdr->reserved |= hrt.unw_method << QUADD_HDR_UNW_METHOD_SHIFT;

        if (hrt.use_arch_timer)
                hdr->reserved |= QUADD_HDR_USE_ARCH_TIMER;

        if (pmu)
                nr_events += pmu->get_current_events(events, max_events);

        if (pl310)
                nr_events += pl310->get_current_events(events + nr_events,
                                                       max_events - nr_events);

        hdr->nr_events = nr_events;

        vec.base = events;
        vec.len = nr_events * sizeof(events[0]);

        for_each_possible_cpu(cpu_id)
                put_sample_cpu(&record, &vec, 1, cpu_id);
}

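/*
 * Record a context-switch event for a profiled task: which CPU it ran
 * on, whether the Tegra low-power (LP) cluster was active, and whether
 * the task is being scheduled in or out.
 */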
static void
put_sched_sample(struct task_struct *task, int is_sched_in)
{
        unsigned int cpu, flags;
        struct quadd_record_data record;
        struct quadd_sched_data *s = &record.sched;

        record.record_type = QUADD_RECORD_TYPE_SCHED;

        cpu = quadd_get_processor_id(NULL, &flags);
        s->cpu = cpu;
        s->lp_mode = (flags & QUADD_CPUMODE_TEGRA_POWER_CLUSTER_LP) ? 1 : 0;

        s->sched_in = is_sched_in ? 1 : 0;
        s->time = quadd_get_time();
        s->pid = task->pid;

        s->reserved = 0;

        s->data[0] = 0;
        s->data[1] = 0;

        quadd_put_sample(&record, NULL, 0);
}

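/*
 * Fill in the fixed part of a sample record: CPU, execution mode
 * (Thumb/user/LP cluster), timestamp, pid and interrupt context.  The
 * instruction pointer is suppressed for kernel-mode samples unless the
 * session is explicitly allowed to collect kernel IPs.
 */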
static int get_sample_data(struct quadd_sample_data *sample,
                           struct pt_regs *regs,
                           struct task_struct *task)
{
        unsigned int cpu, flags;
        struct quadd_ctx *quadd_ctx = hrt.quadd_ctx;

        cpu = quadd_get_processor_id(regs, &flags);
        sample->cpu = cpu;

        sample->lp_mode =
                (flags & QUADD_CPUMODE_TEGRA_POWER_CLUSTER_LP) ? 1 : 0;
        sample->thumb_mode = (flags & QUADD_CPUMODE_THUMB) ? 1 : 0;
        sample->user_mode = user_mode(regs) ? 1 : 0;

        /* For security reasons, hide IPs from the kernel space. */
        if (!sample->user_mode && !quadd_ctx->collect_kernel_ips)
                sample->ip = 0;
        else
                sample->ip = instruction_pointer(regs);

        sample->time = quadd_get_time();
        sample->reserved = 0;
        sample->pid = task->pid;
        sample->in_interrupt = in_interrupt() ? 1 : 0;

        return 0;
}

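/*
 * Read one event source and convert raw counter values to per-period
 * deltas, compensating for 32-bit counter wrap-around.  PL310 (L2
 * cache controller) counters are shared by all cores, so their deltas
 * are divided by the number of actively profiled cores.
 */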
static int read_source(struct quadd_event_source_interface *source,
                       struct pt_regs *regs,
                       struct hrt_event_value *events_vals,
                       int max_events)
{
        int nr_events, i;
        u32 prev_val, val, res_val;
        struct event_data events[QUADD_MAX_COUNTERS];

        if (!source)
                return 0;

        max_events = min_t(int, max_events, QUADD_MAX_COUNTERS);
        nr_events = source->read(events, max_events);

        for (i = 0; i < nr_events; i++) {
                struct event_data *s = &events[i];

                prev_val = s->prev_val;
                val = s->val;

                if (prev_val <= val)
                        res_val = val - prev_val;
                else
                        res_val = QUADD_U32_MAX - prev_val + val;

                if (s->event_source == QUADD_EVENT_SOURCE_PL310) {
                        int nr_active = atomic_read(&hrt.nr_active_all_core);
                        if (nr_active > 1)
                                res_val /= nr_active;
                }

                events_vals[i].event_id = s->event_id;
                events_vals[i].value = res_val;
        }

        return nr_events;
}

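/*
 * Build and submit one sample record.  The variable-length parts
 * (extra data word, call-chain IPs, unwind-type bytes, counter values,
 * task state) are passed as an iovec so that only the fields that are
 * actually present end up in the output stream.
 */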
static void
read_all_sources(struct pt_regs *regs, struct task_struct *task)
{
        u32 state, extra_data = 0;
        int i, vec_idx = 0, bt_size = 0;
        int nr_events = 0, nr_positive_events = 0;
        struct pt_regs *user_regs;
        struct quadd_iovec vec[5];
        struct hrt_event_value events[QUADD_MAX_COUNTERS];
        u32 events_extra[QUADD_MAX_COUNTERS];

        struct quadd_record_data record_data;
        struct quadd_sample_data *s = &record_data.sample;

        struct quadd_ctx *ctx = hrt.quadd_ctx;
        struct quadd_cpu_context *cpu_ctx = this_cpu_ptr(hrt.cpu_ctx);
        struct quadd_callchain *cc = &cpu_ctx->cc;

        if (!regs)
                return;

        if (atomic_read(&cpu_ctx->nr_active) == 0)
                return;

        if (!task)
                task = current;

        rcu_read_lock();
        if (!task_nsproxy(task)) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        if (ctx->pmu && ctx->pmu_info.active)
                nr_events += read_source(ctx->pmu, regs,
                                         events, QUADD_MAX_COUNTERS);

        if (ctx->pl310 && ctx->pl310_info.active)
                nr_events += read_source(ctx->pl310, regs,
                                         events + nr_events,
                                         QUADD_MAX_COUNTERS - nr_events);

        if (!nr_events)
                return;

        if (user_mode(regs))
                user_regs = regs;
        else
                user_regs = current_pt_regs();

        if (get_sample_data(s, regs, task))
                return;

        vec[vec_idx].base = &extra_data;
        vec[vec_idx].len = sizeof(extra_data);
        vec_idx++;

        s->reserved = 0;

        if (ctx->param.backtrace) {
                cc->unw_method = hrt.unw_method;
                bt_size = quadd_get_user_callchain(user_regs, cc, ctx, task);

                if (!bt_size && !user_mode(regs)) {
                        unsigned long pc = instruction_pointer(user_regs);

                        cc->nr = 0;
#ifdef CONFIG_ARM64
                        cc->cs_64 = compat_user_mode(user_regs) ? 0 : 1;
#else
                        cc->cs_64 = 0;
#endif
                        bt_size += quadd_callchain_store(cc, pc,
                                                         QUADD_UNW_TYPE_KCTX);
                }

                if (bt_size > 0) {
                        int ip_size = cc->cs_64 ? sizeof(u64) : sizeof(u32);
                        int nr_types = DIV_ROUND_UP(bt_size, 8);

                        vec[vec_idx].base = cc->cs_64 ?
                                (void *)cc->ip_64 : (void *)cc->ip_32;
                        vec[vec_idx].len = bt_size * ip_size;
                        vec_idx++;

                        vec[vec_idx].base = cc->types;
                        vec[vec_idx].len = nr_types * sizeof(cc->types[0]);
                        vec_idx++;

                        if (cc->cs_64)
                                extra_data |= QUADD_SED_IP64;
                }

                extra_data |= cc->unw_method << QUADD_SED_UNW_METHOD_SHIFT;
                s->reserved |= cc->unw_rc << QUADD_SAMPLE_URC_SHIFT;
        }
        s->callchain_nr = bt_size;

        record_data.record_type = QUADD_RECORD_TYPE_SAMPLE;

        s->events_flags = 0;
        for (i = 0; i < nr_events; i++) {
                u32 value = events[i].value;
                if (value > 0) {
                        s->events_flags |= 1 << i;
                        events_extra[nr_positive_events++] = value;
                }
        }

        if (nr_positive_events == 0)
                return;

        vec[vec_idx].base = events_extra;
        vec[vec_idx].len = nr_positive_events * sizeof(events_extra[0]);
        vec_idx++;

        state = task->state;
        if (state) {
                s->state = 1;
                vec[vec_idx].base = &state;
                vec[vec_idx].len = sizeof(state);
                vec_idx++;
        } else {
                s->state = 0;
        }

        quadd_put_sample(&record_data, vec, vec_idx);
}

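/*
 * A task is profiled when its thread-group id matches one of the pids
 * requested for the session, i.e. all threads of a profiled process
 * are sampled.
 */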
static inline int
is_profile_process(struct task_struct *task)
{
        int i;
        pid_t pid, profile_pid;
        struct quadd_ctx *ctx = hrt.quadd_ctx;

        if (!task)
                return 0;

        pid = task->tgid;

        for (i = 0; i < ctx->param.nr_pids; i++) {
                profile_pid = ctx->param.pids[i];
                if (profile_pid == pid)
                        return 1;
        }
        return 0;
}

static int
add_active_thread(struct quadd_cpu_context *cpu_ctx, pid_t pid, pid_t tgid)
{
        struct quadd_thread_data *t_data = &cpu_ctx->active_thread;

        if (t_data->pid > 0 ||
                atomic_read(&cpu_ctx->nr_active) > 0) {
                pr_warn_once("Warning for thread: %d\n", (int)pid);
                return 0;
        }

        t_data->pid = pid;
        t_data->tgid = tgid;
        return 1;
}

static int remove_active_thread(struct quadd_cpu_context *cpu_ctx, pid_t pid)
{
        struct quadd_thread_data *t_data = &cpu_ctx->active_thread;

        if (t_data->pid < 0)
                return 0;

        if (t_data->pid == pid) {
                t_data->pid = -1;
                t_data->tgid = -1;
                return 1;
        }

        pr_warn_once("Warning for thread: %d\n", (int)pid);
        return 0;
}

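/*
 * Scheduler hook, called on every context switch (see the counterpart
 * __quadd_task_sched_out below).  When a profiled task is switched in,
 * start the PMU and the per-CPU sampling timer for the first active
 * thread on this CPU.
 */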
void __quadd_task_sched_in(struct task_struct *prev,
                           struct task_struct *task)
{
        struct quadd_cpu_context *cpu_ctx = this_cpu_ptr(hrt.cpu_ctx);
        struct quadd_ctx *ctx = hrt.quadd_ctx;
        struct event_data events[QUADD_MAX_COUNTERS];
        /* static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 2); */

        if (likely(!hrt.active))
                return;
/*
        if (__ratelimit(&ratelimit_state))
                pr_info("sch_in, cpu: %d, prev: %u (%u) \t--> curr: %u (%u)\n",
                        smp_processor_id(), (unsigned int)prev->pid,
                        (unsigned int)prev->tgid, (unsigned int)task->pid,
                        (unsigned int)task->tgid);
*/

        if (is_profile_process(task)) {
                put_sched_sample(task, 1);

                add_active_thread(cpu_ctx, task->pid, task->tgid);
                atomic_inc(&cpu_ctx->nr_active);

                if (atomic_read(&cpu_ctx->nr_active) == 1) {
                        if (ctx->pmu)
                                ctx->pmu->start();

                        if (ctx->pl310)
                                ctx->pl310->read(events, 1);

                        start_hrtimer(cpu_ctx);
                        atomic_inc(&hrt.nr_active_all_core);
                }
        }
}

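/*
 * Counterpart of __quadd_task_sched_in: take a final sample of the
 * outgoing profiled task, then stop the timer and PMU once no profiled
 * thread remains active on this CPU.
 */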
void __quadd_task_sched_out(struct task_struct *prev,
                            struct task_struct *next)
{
        int n;
        struct pt_regs *user_regs;
        struct quadd_cpu_context *cpu_ctx = this_cpu_ptr(hrt.cpu_ctx);
        struct quadd_ctx *ctx = hrt.quadd_ctx;
        /* static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 2); */

        if (likely(!hrt.active))
                return;
/*
        if (__ratelimit(&ratelimit_state))
                pr_info("sch_out: cpu: %d, prev: %u (%u) \t--> next: %u (%u)\n",
                        smp_processor_id(), (unsigned int)prev->pid,
                        (unsigned int)prev->tgid, (unsigned int)next->pid,
                        (unsigned int)next->tgid);
*/

        if (is_profile_process(prev)) {
                user_regs = task_pt_regs(prev);
                if (user_regs)
                        read_all_sources(user_regs, prev);

                n = remove_active_thread(cpu_ctx, prev->pid);
                atomic_sub(n, &cpu_ctx->nr_active);

                if (n && atomic_read(&cpu_ctx->nr_active) == 0) {
                        cancel_hrtimer(cpu_ctx);
                        atomic_dec(&hrt.nr_active_all_core);

                        if (ctx->pmu)
                                ctx->pmu->stop();
                }

                put_sched_sample(prev, 0);
        }
}

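/*
 * mmap hook: forward new mappings of a profiled process to the mmap
 * tracker so that sampled addresses can later be resolved to binaries.
 */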
void __quadd_event_mmap(struct vm_area_struct *vma)
{
        struct quadd_parameters *param;

        if (likely(!hrt.active))
                return;

        if (!is_profile_process(current))
                return;

        param = &hrt.quadd_ctx->param;
        quadd_process_mmap(vma, param->pids[0]);
}

static void reset_cpu_ctx(void)
{
        int cpu_id;
        struct quadd_cpu_context *cpu_ctx;
        struct quadd_thread_data *t_data;

        for (cpu_id = 0; cpu_id < nr_cpu_ids; cpu_id++) {
                cpu_ctx = per_cpu_ptr(hrt.cpu_ctx, cpu_id);
                t_data = &cpu_ctx->active_thread;

                atomic_set(&cpu_ctx->nr_active, 0);

                t_data->pid = -1;
                t_data->tgid = -1;
        }
}

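/*
 * Start a profiling session: derive the sampling period from the
 * requested frequency (clamped to QUADD_HRT_MIN_FREQ), select the
 * unwind method and time source from the extra parameter bits, emit
 * the header records, and optionally snapshot the existing mappings
 * of the profiled process.
 */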
int quadd_hrt_start(void)
{
        int err;
        u64 period;
        long freq;
        unsigned int extra;
        struct quadd_ctx *ctx = hrt.quadd_ctx;
        struct quadd_parameters *param = &ctx->param;

        freq = ctx->param.freq;
        freq = max_t(long, QUADD_HRT_MIN_FREQ, freq);
        period = NSEC_PER_SEC / freq;
        hrt.sample_period = period;

        if (ctx->param.ma_freq > 0)
                hrt.ma_period = MSEC_PER_SEC / ctx->param.ma_freq;
        else
                hrt.ma_period = 0;

        atomic64_set(&hrt.counter_samples, 0);
        atomic64_set(&hrt.skipped_samples, 0);

        reset_cpu_ctx();

        extra = param->reserved[QUADD_PARAM_IDX_EXTRA];

        if (extra & QUADD_PARAM_EXTRA_BT_MIXED)
                hrt.unw_method = QUADD_UNW_METHOD_MIXED;
        else if (extra & QUADD_PARAM_EXTRA_BT_UNWIND_TABLES)
                hrt.unw_method = QUADD_UNW_METHOD_EHT;
        else if (extra & QUADD_PARAM_EXTRA_BT_FP)
                hrt.unw_method = QUADD_UNW_METHOD_FP;
        else
                hrt.unw_method = QUADD_UNW_METHOD_NONE;

        if (hrt.tc && (extra & QUADD_PARAM_EXTRA_USE_ARCH_TIMER))
                hrt.use_arch_timer = 1;
        else
                hrt.use_arch_timer = 0;

        pr_info("timer: %s\n", hrt.use_arch_timer ? "arch" : "monotonic clock");

        put_header();

        if (extra & QUADD_PARAM_EXTRA_GET_MMAP) {
                err = quadd_get_current_mmap(param->pids[0]);
                if (err) {
                        pr_err("error: quadd_get_current_mmap\n");
                        return err;
                }
        }

        if (ctx->pl310)
                ctx->pl310->start();

        quadd_ma_start(&hrt);

        hrt.active = 1;

        pr_info("Start hrt: freq/period: %ld/%llu\n", freq, period);
        return 0;
}

void quadd_hrt_stop(void)
{
        struct quadd_ctx *ctx = hrt.quadd_ctx;

        pr_info("Stop hrt, samples all/skipped: %llu/%llu\n",
                atomic64_read(&hrt.counter_samples),
                atomic64_read(&hrt.skipped_samples));

        if (ctx->pl310)
                ctx->pl310->stop();

        quadd_ma_stop(&hrt);

        hrt.active = 0;

        atomic64_set(&hrt.counter_samples, 0);
        atomic64_set(&hrt.skipped_samples, 0);

        /* reset_cpu_ctx(); */
}

void quadd_hrt_deinit(void)
{
        if (hrt.active)
                quadd_hrt_stop();

        free_percpu(hrt.cpu_ctx);
}

void quadd_hrt_get_state(struct quadd_module_state *state)
{
        state->nr_all_samples = atomic64_read(&hrt.counter_samples);
        state->nr_skipped_samples = atomic64_read(&hrt.skipped_samples);
}

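/*
 * Use the architected timer only if the CNTKCTL user virtual-counter
 * access bit is set, so that kernel and user-space timestamps come
 * from the same counter.
 */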
static void init_arch_timer(void)
{
        u32 cntkctl = arch_timer_get_cntkctl();

        if (cntkctl & ARCH_TIMER_USR_VCT_ACCESS_EN)
                hrt.tc = arch_timer_get_timecounter();
        else
                hrt.tc = NULL;
}

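/*
 * One-time module initialization: compute the initial sampling period,
 * probe the arch timer, and allocate and initialize the per-CPU
 * contexts (sampling timer, active-thread bookkeeping, call-chain
 * state).
 */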
struct quadd_hrt_ctx *quadd_hrt_init(struct quadd_ctx *ctx)
{
        int cpu_id;
        u64 period;
        long freq;
        struct quadd_cpu_context *cpu_ctx;

        hrt.quadd_ctx = ctx;
        hrt.active = 0;

        freq = ctx->param.freq;
        freq = max_t(long, QUADD_HRT_MIN_FREQ, freq);
        period = NSEC_PER_SEC / freq;
        hrt.sample_period = period;

        if (ctx->param.ma_freq > 0)
                hrt.ma_period = MSEC_PER_SEC / ctx->param.ma_freq;
        else
                hrt.ma_period = 0;

        atomic64_set(&hrt.counter_samples, 0);
        init_arch_timer();

        hrt.cpu_ctx = alloc_percpu(struct quadd_cpu_context);
        if (!hrt.cpu_ctx)
                return ERR_PTR(-ENOMEM);

        for_each_possible_cpu(cpu_id) {
                cpu_ctx = per_cpu_ptr(hrt.cpu_ctx, cpu_id);

                atomic_set(&cpu_ctx->nr_active, 0);

                cpu_ctx->active_thread.pid = -1;
                cpu_ctx->active_thread.tgid = -1;

                cpu_ctx->cc.hrt = &hrt;

                init_hrtimer(cpu_ctx);
        }

        return &hrt;
}