/*
 * drivers/misc/tegra-profiler/hrt.c
 *
 * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/nsproxy.h>
#include <clocksource/arm_arch_timer.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/arch_timer.h>

#include <linux/tegra_profiler.h>

#include "quadd.h"
#include "hrt.h"
#include "comm.h"
#include "mmap.h"
#include "ma.h"
#include "power_clk.h"
#include "tegra.h"
#include "debug.h"

static struct quadd_hrt_ctx hrt;

static void
read_all_sources(struct pt_regs *regs, struct task_struct *task);

struct hrt_event_value {
        int event_id;
        u32 value;
};

#ifndef ARCH_TIMER_USR_VCT_ACCESS_EN
#define ARCH_TIMER_USR_VCT_ACCESS_EN    (1 << 1) /* virtual counter */
#endif
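
/*
 * ARCH_TIMER_USR_VCT_ACCESS_EN is bit 1 of the ARM generic timer's CNTKCTL
 * register (PL0VCTEN/EL0VCTEN): when set, user space may read the virtual
 * counter directly.  init_arch_timer() below uses it to decide whether the
 * arch-timer timecounter can serve as the profiler's time source, presumably
 * so that kernel-side timestamps match what profiled user code can read.
 */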

static enum hrtimer_restart hrtimer_handler(struct hrtimer *hrtimer)
{
        struct pt_regs *regs;

        regs = get_irq_regs();

        if (!hrt.active)
                return HRTIMER_NORESTART;

        qm_debug_handler_sample(regs);

        if (regs)
                read_all_sources(regs, NULL);

        hrtimer_forward_now(hrtimer, ns_to_ktime(hrt.sample_period));
        qm_debug_timer_forward(regs, hrt.sample_period);

        return HRTIMER_RESTART;
}
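
/*
 * The handler runs in hard-IRQ context on the CPU that owns the timer.
 * get_irq_regs() yields the register state of the interrupted context, or
 * NULL when no interrupted register state is available, in which case the
 * tick is skipped but the timer is still re-armed.  Forwarding by
 * hrt.sample_period keeps the sampling strictly periodic at the requested
 * frequency.
 */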

static void start_hrtimer(struct quadd_cpu_context *cpu_ctx)
{
        u64 period = hrt.sample_period;

        __hrtimer_start_range_ns(&cpu_ctx->hrtimer,
                                 ns_to_ktime(period), 0,
                                 HRTIMER_MODE_REL_PINNED, 0);
        qm_debug_timer_start(NULL, period);
}

static void cancel_hrtimer(struct quadd_cpu_context *cpu_ctx)
{
        hrtimer_cancel(&cpu_ctx->hrtimer);
        qm_debug_timer_cancel();
}

static void init_hrtimer(struct quadd_cpu_context *cpu_ctx)
{
        hrtimer_init(&cpu_ctx->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        cpu_ctx->hrtimer.function = hrtimer_handler;
}

static inline u64 get_posix_clock_monotonic_time(void)
{
        struct timespec ts;

        do_posix_clock_monotonic_gettime(&ts);
        return timespec_to_ns(&ts);
}

static inline u64 get_arch_time(struct timecounter *tc)
{
        cycle_t value;
        const struct cyclecounter *cc = tc->cc;

        value = cc->read(cc);
        return cyclecounter_cyc2ns(cc, value);
}

u64 quadd_get_time(void)
{
        struct timecounter *tc = hrt.tc;

        return (tc && hrt.use_arch_timer) ?
                get_arch_time(tc) :
                get_posix_clock_monotonic_time();
}
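
/*
 * Both paths return nanoseconds, so samples are comparable whichever source
 * is active.  Illustrative numbers (not taken from this file): if the arch
 * timer's cyclecounter runs at 24 MHz, a raw reading of 24000 cycles
 * converts to roughly 1000000 ns, i.e. 1 ms.  When the arch timer is not
 * usable, quadd_get_time() falls back to CLOCK_MONOTONIC.
 */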

static void put_header(void)
{
        int nr_events = 0, max_events = QUADD_MAX_COUNTERS;
        int events[QUADD_MAX_COUNTERS];
        struct quadd_record_data record;
        struct quadd_header_data *hdr = &record.hdr;
        struct quadd_parameters *param = &hrt.quadd_ctx->param;
        unsigned int extra = param->reserved[QUADD_PARAM_IDX_EXTRA];
        struct quadd_iovec vec;
        struct quadd_ctx *ctx = hrt.quadd_ctx;
        struct quadd_event_source_interface *pmu = ctx->pmu;
        struct quadd_event_source_interface *pl310 = ctx->pl310;

        record.record_type = QUADD_RECORD_TYPE_HEADER;

        hdr->magic = QUADD_HEADER_MAGIC;
        hdr->version = QUADD_SAMPLES_VERSION;

        hdr->backtrace = param->backtrace;
        hdr->use_freq = param->use_freq;
        hdr->system_wide = param->system_wide;

        /* TODO: dynamically */
#ifdef QM_DEBUG_SAMPLES_ENABLE
        hdr->debug_samples = 1;
#else
        hdr->debug_samples = 0;
#endif

        hdr->freq = param->freq;
        hdr->ma_freq = param->ma_freq;
        hdr->power_rate_freq = param->power_rate_freq;

        hdr->power_rate = hdr->power_rate_freq > 0 ? 1 : 0;
        hdr->get_mmap = (extra & QUADD_PARAM_EXTRA_GET_MMAP) ? 1 : 0;

        hdr->reserved = 0;
        hdr->extra_length = 0;

        hdr->reserved |= hrt.unw_method << QUADD_HDR_UNW_METHOD_SHIFT;

        if (pmu)
                nr_events += pmu->get_current_events(events, max_events);

        if (pl310)
                nr_events += pl310->get_current_events(events + nr_events,
                                                       max_events - nr_events);

        hdr->nr_events = nr_events;

        vec.base = events;
        vec.len = nr_events * sizeof(events[0]);

        quadd_put_sample(&record, &vec, 1);
}
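
/*
 * A header record is the fixed quadd_header_data struct followed by a single
 * trailing iovec carrying the array of currently configured event ids; the
 * reader sizes that array from hdr->nr_events.  Nothing else in this file
 * touches the header again, so it is emitted exactly once per profiling
 * session, from quadd_hrt_start().
 */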

void quadd_put_sample(struct quadd_record_data *data,
                      struct quadd_iovec *vec, int vec_count)
{
        struct quadd_comm_data_interface *comm = hrt.quadd_ctx->comm;

        comm->put_sample(data, vec, vec_count);
        atomic64_inc(&hrt.counter_samples);
}

static void
put_sched_sample(struct task_struct *task, int is_sched_in)
{
        unsigned int cpu, flags;
        struct quadd_record_data record;
        struct quadd_sched_data *s = &record.sched;

        record.record_type = QUADD_RECORD_TYPE_SCHED;

        cpu = quadd_get_processor_id(NULL, &flags);
        s->cpu = cpu;
        s->lp_mode = (flags & QUADD_CPUMODE_TEGRA_POWER_CLUSTER_LP) ? 1 : 0;

        s->sched_in = is_sched_in ? 1 : 0;
        s->time = quadd_get_time();
        s->pid = task->pid;

        s->reserved = 0;

        s->data[0] = 0;
        s->data[1] = 0;

        quadd_put_sample(&record, NULL, 0);
}

static int get_sample_data(struct quadd_sample_data *sample,
                           struct pt_regs *regs,
                           struct task_struct *task)
{
        unsigned int cpu, flags;
        struct quadd_ctx *quadd_ctx = hrt.quadd_ctx;

        cpu = quadd_get_processor_id(regs, &flags);
        sample->cpu = cpu;

        sample->lp_mode =
                (flags & QUADD_CPUMODE_TEGRA_POWER_CLUSTER_LP) ? 1 : 0;
        sample->thumb_mode = (flags & QUADD_CPUMODE_THUMB) ? 1 : 0;
        sample->user_mode = user_mode(regs) ? 1 : 0;

        /* For security reasons, hide kernel-space IPs unless their
         * collection is explicitly enabled. */
        if (!sample->user_mode && !quadd_ctx->collect_kernel_ips)
                sample->ip = 0;
        else
                sample->ip = instruction_pointer(regs);

        sample->time = quadd_get_time();
        sample->reserved = 0;
        sample->pid = task->pid;
        sample->in_interrupt = in_interrupt() ? 1 : 0;

        return 0;
}

static int read_source(struct quadd_event_source_interface *source,
                       struct pt_regs *regs,
                       struct hrt_event_value *events_vals,
                       int max_events)
{
        int nr_events, i;
        u32 prev_val, val, res_val;
        struct event_data events[QUADD_MAX_COUNTERS];

        if (!source)
                return 0;

        max_events = min_t(int, max_events, QUADD_MAX_COUNTERS);
        nr_events = source->read(events, max_events);

        for (i = 0; i < nr_events; i++) {
                struct event_data *s = &events[i];

                prev_val = s->prev_val;
                val = s->val;

                if (prev_val <= val)
                        res_val = val - prev_val;
                else
                        res_val = QUADD_U32_MAX - prev_val + val;

                if (s->event_source == QUADD_EVENT_SOURCE_PL310) {
                        int nr_active = atomic_read(&hrt.nr_active_all_core);

                        if (nr_active > 1)
                                res_val /= nr_active;
                }

                events_vals[i].event_id = s->event_id;
                events_vals[i].value = res_val;
        }

        return nr_events;
}
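
/*
 * Counter deltas are computed against the previous read with 32-bit
 * wraparound handling.  Worked example (values hypothetical, assuming
 * QUADD_U32_MAX is 0xffffffff): prev_val = 0xfffffff0, val = 0x10 means the
 * counter wrapped, and the code yields 0xffffffff - 0xfffffff0 + 0x10 = 0x1f.
 * Note that plain modular u32 arithmetic (val - prev_val == 0x20) would also
 * count the wrapping tick itself; the delta here is one tick smaller.  PL310
 * events come from a cache controller shared by all cores, so their delta is
 * divided evenly among the CPUs that are actively profiling.
 */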

static void
read_all_sources(struct pt_regs *regs, struct task_struct *task)
{
        u32 state, extra_data = 0;
        int i, vec_idx = 0, bt_size = 0;
        int nr_events = 0, nr_positive_events = 0;
        struct pt_regs *user_regs;
        struct quadd_iovec vec[5];
        struct hrt_event_value events[QUADD_MAX_COUNTERS];
        u32 events_extra[QUADD_MAX_COUNTERS];

        struct quadd_record_data record_data;
        struct quadd_sample_data *s = &record_data.sample;

        struct quadd_ctx *ctx = hrt.quadd_ctx;
        struct quadd_cpu_context *cpu_ctx = this_cpu_ptr(hrt.cpu_ctx);
        struct quadd_callchain *cc = &cpu_ctx->cc;

        if (!regs)
                return;

        if (atomic_read(&cpu_ctx->nr_active) == 0)
                return;

        if (!task)
                task = current;

        rcu_read_lock();
        if (!task_nsproxy(task)) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        if (ctx->pmu && ctx->pmu_info.active)
                nr_events += read_source(ctx->pmu, regs,
                                         events, QUADD_MAX_COUNTERS);

        if (ctx->pl310 && ctx->pl310_info.active)
                nr_events += read_source(ctx->pl310, regs,
                                         events + nr_events,
                                         QUADD_MAX_COUNTERS - nr_events);

        if (!nr_events)
                return;

        if (user_mode(regs))
                user_regs = regs;
        else
                user_regs = current_pt_regs();

        if (get_sample_data(s, regs, task))
                return;

        if (cc->cs_64)
                extra_data |= QUADD_SED_IP64;

        vec[vec_idx].base = &extra_data;
        vec[vec_idx].len = sizeof(extra_data);
        vec_idx++;

        s->reserved = 0;

        if (ctx->param.backtrace) {
                cc->unw_method = hrt.unw_method;
                bt_size = quadd_get_user_callchain(user_regs, cc, ctx, task);

                if (!bt_size && !user_mode(regs)) {
                        unsigned long pc = instruction_pointer(user_regs);

                        cc->nr = 0;
#ifdef CONFIG_ARM64
                        cc->cs_64 = compat_user_mode(user_regs) ? 0 : 1;
#else
                        cc->cs_64 = 0;
#endif
                        bt_size += quadd_callchain_store(cc, pc,
                                                         QUADD_UNW_TYPE_KCTX);
                }

                if (bt_size > 0) {
                        int ip_size = cc->cs_64 ? sizeof(u64) : sizeof(u32);
                        int nr_types = DIV_ROUND_UP(bt_size, 8);

                        vec[vec_idx].base = cc->cs_64 ?
                                (void *)cc->ip_64 : (void *)cc->ip_32;
                        vec[vec_idx].len = bt_size * ip_size;
                        vec_idx++;

                        vec[vec_idx].base = cc->types;
                        vec[vec_idx].len = nr_types * sizeof(cc->types[0]);
                        vec_idx++;
                }

                extra_data |= cc->unw_method << QUADD_SED_UNW_METHOD_SHIFT;
                s->reserved |= cc->unw_rc << QUADD_SAMPLE_URC_SHIFT;
        }
        s->callchain_nr = bt_size;

        record_data.record_type = QUADD_RECORD_TYPE_SAMPLE;

        s->events_flags = 0;
        for (i = 0; i < nr_events; i++) {
                u32 value = events[i].value;

                if (value > 0) {
                        s->events_flags |= 1 << i;
                        events_extra[nr_positive_events++] = value;
                }
        }

        if (nr_positive_events == 0)
                return;

        vec[vec_idx].base = events_extra;
        vec[vec_idx].len = nr_positive_events * sizeof(events_extra[0]);
        vec_idx++;

        state = task->state;
        if (state) {
                s->state = 1;
                vec[vec_idx].base = &state;
                vec[vec_idx].len = sizeof(state);
                vec_idx++;
        } else {
                s->state = 0;
        }

        quadd_put_sample(&record_data, vec, vec_idx);
}
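
/*
 * Payload layout of one sample record, in iovec order (at most the five
 * entries vec[] provides for):
 *
 *   1. extra_data word  - the IP width flag and the unwind method used;
 *   2. callchain IPs    - u64 or u32 array, present only when backtracing
 *                         produced at least one frame;
 *   3. unwind types     - one cc->types[] entry per 8 frames
 *                         (DIV_ROUND_UP(bt_size, 8) entries);
 *   4. counter values   - only the events that read non-zero, with
 *                         s->events_flags marking which slots they map to;
 *   5. task state word  - present only when the task is not TASK_RUNNING.
 *
 * A sample with no positive counter values is dropped entirely.
 */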

static inline int
is_profile_process(struct task_struct *task)
{
        int i;
        pid_t pid, profile_pid;
        struct quadd_ctx *ctx = hrt.quadd_ctx;

        if (!task)
                return 0;

        pid = task->tgid;

        for (i = 0; i < ctx->param.nr_pids; i++) {
                profile_pid = ctx->param.pids[i];
                if (profile_pid == pid)
                        return 1;
        }
        return 0;
}

static int
add_active_thread(struct quadd_cpu_context *cpu_ctx, pid_t pid, pid_t tgid)
{
        struct quadd_thread_data *t_data = &cpu_ctx->active_thread;

        if (t_data->pid > 0 ||
            atomic_read(&cpu_ctx->nr_active) > 0) {
                pr_warn_once("thread %d: a thread is already active on this cpu\n",
                             (int)pid);
                return 0;
        }

        t_data->pid = pid;
        t_data->tgid = tgid;
        return 1;
}

static int remove_active_thread(struct quadd_cpu_context *cpu_ctx, pid_t pid)
{
        struct quadd_thread_data *t_data = &cpu_ctx->active_thread;

        if (t_data->pid < 0)
                return 0;

        if (t_data->pid == pid) {
                t_data->pid = -1;
                t_data->tgid = -1;
                return 1;
        }

        pr_warn_once("thread %d is not the active thread\n", (int)pid);
        return 0;
}
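
/*
 * The profiler tracks at most one profiled thread per CPU: active_thread
 * records which thread armed the per-CPU timer, and nr_active stays 0 or 1.
 * Both warnings above flag bookkeeping mismatches (a double add, or a remove
 * for a thread that was never recorded) rather than fatal errors.
 */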

void __quadd_task_sched_in(struct task_struct *prev,
                           struct task_struct *task)
{
        struct quadd_cpu_context *cpu_ctx = this_cpu_ptr(hrt.cpu_ctx);
        struct quadd_ctx *ctx = hrt.quadd_ctx;
        struct event_data events[QUADD_MAX_COUNTERS];
        /* static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 2); */

        if (likely(!hrt.active))
                return;
/*
        if (__ratelimit(&ratelimit_state))
                pr_info("sch_in, cpu: %d, prev: %u (%u) \t--> curr: %u (%u)\n",
                        smp_processor_id(), (unsigned int)prev->pid,
                        (unsigned int)prev->tgid, (unsigned int)task->pid,
                        (unsigned int)task->tgid);
*/

        if (is_profile_process(task)) {
                put_sched_sample(task, 1);

                add_active_thread(cpu_ctx, task->pid, task->tgid);
                atomic_inc(&cpu_ctx->nr_active);

                if (atomic_read(&cpu_ctx->nr_active) == 1) {
                        if (ctx->pmu)
                                ctx->pmu->start();

                        if (ctx->pl310)
                                ctx->pl310->read(events, 1);

                        start_hrtimer(cpu_ctx);
                        atomic_inc(&hrt.nr_active_all_core);
                }
        }
}
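
/*
 * Called from the scheduler's context-switch path, hence the fast bail-out
 * while profiling is inactive.  The first profiled thread to land on a CPU
 * starts the PMU, reads the PL310 counters once (apparently to prime
 * prev_val for the next delta in read_source()), and arms the pinned
 * per-CPU hrtimer.
 */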

void __quadd_task_sched_out(struct task_struct *prev,
                            struct task_struct *next)
{
        int n;
        struct pt_regs *user_regs;
        struct quadd_cpu_context *cpu_ctx = this_cpu_ptr(hrt.cpu_ctx);
        struct quadd_ctx *ctx = hrt.quadd_ctx;
        /* static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 2); */

        if (likely(!hrt.active))
                return;
/*
        if (__ratelimit(&ratelimit_state))
                pr_info("sch_out: cpu: %d, prev: %u (%u) \t--> next: %u (%u)\n",
                        smp_processor_id(), (unsigned int)prev->pid,
                        (unsigned int)prev->tgid, (unsigned int)next->pid,
                        (unsigned int)next->tgid);
*/

        if (is_profile_process(prev)) {
                user_regs = task_pt_regs(prev);
                if (user_regs)
                        read_all_sources(user_regs, prev);

                n = remove_active_thread(cpu_ctx, prev->pid);
                atomic_sub(n, &cpu_ctx->nr_active);

                if (n && atomic_read(&cpu_ctx->nr_active) == 0) {
                        cancel_hrtimer(cpu_ctx);
                        atomic_dec(&hrt.nr_active_all_core);

                        if (ctx->pmu)
                                ctx->pmu->stop();
                }

                put_sched_sample(prev, 0);
        }
}
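
/*
 * The mirror of sched-in: a final sample is taken from the outgoing thread's
 * saved user registers before the timer is cancelled and the PMU stopped, so
 * counter activity between the last tick and the context switch is not lost.
 */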

void __quadd_event_mmap(struct vm_area_struct *vma)
{
        struct quadd_parameters *param;

        if (likely(!hrt.active))
                return;

        if (!is_profile_process(current))
                return;

        param = &hrt.quadd_ctx->param;
        quadd_process_mmap(vma, param->pids[0]);
}

static void reset_cpu_ctx(void)
{
        int cpu_id;
        struct quadd_cpu_context *cpu_ctx;
        struct quadd_thread_data *t_data;

        for (cpu_id = 0; cpu_id < nr_cpu_ids; cpu_id++) {
                cpu_ctx = per_cpu_ptr(hrt.cpu_ctx, cpu_id);
                t_data = &cpu_ctx->active_thread;

                atomic_set(&cpu_ctx->nr_active, 0);

                t_data->pid = -1;
                t_data->tgid = -1;
        }
}

int quadd_hrt_start(void)
{
        int err;
        u64 period;
        long freq;
        unsigned int extra;
        struct quadd_ctx *ctx = hrt.quadd_ctx;
        struct quadd_parameters *param = &ctx->param;

        freq = ctx->param.freq;
        freq = max_t(long, QUADD_HRT_MIN_FREQ, freq);
        period = NSEC_PER_SEC / freq;
        hrt.sample_period = period;

        if (ctx->param.ma_freq > 0)
                hrt.ma_period = MSEC_PER_SEC / ctx->param.ma_freq;
        else
                hrt.ma_period = 0;

        atomic64_set(&hrt.counter_samples, 0);

        reset_cpu_ctx();

        extra = param->reserved[QUADD_PARAM_IDX_EXTRA];

        if (extra & QUADD_PARAM_EXTRA_BT_MIXED)
                hrt.unw_method = QUADD_UNW_METHOD_MIXED;
        else if (extra & QUADD_PARAM_EXTRA_BT_UNWIND_TABLES)
                hrt.unw_method = QUADD_UNW_METHOD_EHT;
        else if (extra & QUADD_PARAM_EXTRA_BT_FP)
                hrt.unw_method = QUADD_UNW_METHOD_FP;
        else
                hrt.unw_method = QUADD_UNW_METHOD_NONE;

        if (hrt.tc && (extra & QUADD_PARAM_EXTRA_USE_ARCH_TIMER))
                hrt.use_arch_timer = 1;
        else
                hrt.use_arch_timer = 0;

        pr_info("timer: %s\n", hrt.use_arch_timer ? "arch" : "monotonic clock");

        put_header();

        if (extra & QUADD_PARAM_EXTRA_GET_MMAP) {
                err = quadd_get_current_mmap(param->pids[0]);
                if (err) {
                        pr_err("error: quadd_get_current_mmap\n");
                        return err;
                }
        }

        if (ctx->pl310)
                ctx->pl310->start();

        quadd_ma_start(&hrt);

        hrt.active = 1;

        pr_info("Start hrt: freq/period: %ld/%llu\n", freq, period);
        return 0;
}
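
/*
 * The sample period is simply NSEC_PER_SEC / freq, with freq clamped from
 * below by QUADD_HRT_MIN_FREQ.  For example (an illustrative value, not a
 * default taken from this file), freq = 10000 Hz gives period =
 * 1000000000 / 10000 = 100000 ns, i.e. one timer tick per 100 us on every
 * actively profiled CPU.
 */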

void quadd_hrt_stop(void)
{
        struct quadd_ctx *ctx = hrt.quadd_ctx;

        pr_info("Stop hrt, number of samples: %llu\n",
                atomic64_read(&hrt.counter_samples));

        if (ctx->pl310)
                ctx->pl310->stop();

        quadd_ma_stop(&hrt);

        hrt.active = 0;

        atomic64_set(&hrt.counter_samples, 0);

        /* reset_cpu_ctx(); */
}

void quadd_hrt_deinit(void)
{
        if (hrt.active)
                quadd_hrt_stop();

        free_percpu(hrt.cpu_ctx);
}

void quadd_hrt_get_state(struct quadd_module_state *state)
{
        state->nr_all_samples = atomic64_read(&hrt.counter_samples);
        state->nr_skipped_samples = 0;
}

static void init_arch_timer(void)
{
        u32 cntkctl = arch_timer_get_cntkctl();

        if (cntkctl & ARCH_TIMER_USR_VCT_ACCESS_EN)
                hrt.tc = arch_timer_get_timecounter();
        else
                hrt.tc = NULL;
}
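
/*
 * hrt.tc is left NULL when user space cannot read the virtual counter; in
 * that case quadd_get_time() silently falls back to CLOCK_MONOTONIC even if
 * QUADD_PARAM_EXTRA_USE_ARCH_TIMER was requested (see quadd_hrt_start()).
 */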

struct quadd_hrt_ctx *quadd_hrt_init(struct quadd_ctx *ctx)
{
        int cpu_id;
        u64 period;
        long freq;
        struct quadd_cpu_context *cpu_ctx;

        hrt.quadd_ctx = ctx;
        hrt.active = 0;

        freq = ctx->param.freq;
        freq = max_t(long, QUADD_HRT_MIN_FREQ, freq);
        period = NSEC_PER_SEC / freq;
        hrt.sample_period = period;

        if (ctx->param.ma_freq > 0)
                hrt.ma_period = MSEC_PER_SEC / ctx->param.ma_freq;
        else
                hrt.ma_period = 0;

        atomic64_set(&hrt.counter_samples, 0);
        init_arch_timer();

        hrt.cpu_ctx = alloc_percpu(struct quadd_cpu_context);
        if (!hrt.cpu_ctx)
                return ERR_PTR(-ENOMEM);

        for (cpu_id = 0; cpu_id < nr_cpu_ids; cpu_id++) {
                cpu_ctx = per_cpu_ptr(hrt.cpu_ctx, cpu_id);

                atomic_set(&cpu_ctx->nr_active, 0);

                cpu_ctx->active_thread.pid = -1;
                cpu_ctx->active_thread.tgid = -1;

                init_hrtimer(cpu_ctx);
        }

        return &hrt;
}
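
/*
 * Expected lifecycle, as far as this file shows it (the callers live
 * elsewhere in the profiler):
 *
 *      hrt_ctx = quadd_hrt_init(ctx);   - once, at module init
 *      quadd_hrt_start();               - per profiling session
 *      ...scheduler/mmap hooks and timer ticks emit records...
 *      quadd_hrt_stop();
 *      quadd_hrt_deinit();              - once, at module exit
 *
 * quadd_hrt_init() returns ERR_PTR(-ENOMEM) on per-CPU allocation failure,
 * so callers are presumably expected to check the result with IS_ERR().
 */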