]> rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/blob - drivers/clocksource/arm_arch_timer.c
044892e1ca746b00dd65ea85be70870c90655202
[sojka/nv-tegra/linux-3.10.git] / drivers / clocksource / arm_arch_timer.c
1 /*
2  *  linux/drivers/clocksource/arm_arch_timer.c
3  *
4  *  Copyright (C) 2011 ARM Ltd.
5  *  All Rights Reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/device.h>
14 #include <linux/smp.h>
15 #include <linux/cpu.h>
16 #include <linux/clockchips.h>
17 #include <linux/interrupt.h>
18 #include <linux/of_irq.h>
19 #include <linux/io.h>
20 #include <linux/slab.h>
21 #include <linux/sched_clock.h>
22
23 #include <asm/arch_timer.h>
24 #include <asm/virt.h>
25
26 #include <clocksource/arm_arch_timer.h>
27
/* Timer tick rate in Hz; 0 until probed from DT or CNTFRQ. */
static u32 arch_timer_rate;

/* Indices into arch_timer_ppi[] for the per-cpu timer interrupts. */
enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

/* Linux IRQ number for each PPI; 0 when the DT did not provide one. */
static int arch_timer_ppi[MAX_TIMER_PPI];

/* Per-cpu clockevent devices, allocated in arch_timer_register(). */
static struct clock_event_device __percpu *arch_timer_evt;

/* Prefer the virtual timer; cleared in arch_timer_init() when running
 * with HYP mode available or when no VIRT PPI was provided. */
static bool arch_timer_use_virtual = true;
44 /*
45  * Architected system timer support.
46  */
47
48 static inline irqreturn_t timer_handler(const int access,
49                                         struct clock_event_device *evt)
50 {
51         unsigned long ctrl;
52         ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
53         if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
54                 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
55                 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
56                 evt->event_handler(evt);
57                 return IRQ_HANDLED;
58         }
59
60         return IRQ_NONE;
61 }
62
63 static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
64 {
65         struct clock_event_device *evt = dev_id;
66
67         return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
68 }
69
70 static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
71 {
72         struct clock_event_device *evt = dev_id;
73
74         return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
75 }
76
77 static inline void timer_set_mode(const int access, int mode)
78 {
79         unsigned long ctrl;
80         switch (mode) {
81         case CLOCK_EVT_MODE_UNUSED:
82         case CLOCK_EVT_MODE_SHUTDOWN:
83                 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
84                 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
85                 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
86                 break;
87         default:
88                 break;
89         }
90 }
91
/* clockevent set_mode callback for the virtual timer (clk unused). */
static void arch_timer_set_mode_virt(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
}

/* clockevent set_mode callback for the physical timer (clk unused). */
static void arch_timer_set_mode_phys(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
}
103
104 static inline void set_next_event(const int access, unsigned long evt)
105 {
106         unsigned long ctrl;
107         ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
108         ctrl |= ARCH_TIMER_CTRL_ENABLE;
109         ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
110         arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
111         arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
112 }
113
/* clockevent set_next_event callback for the virtual timer. */
static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
	return 0;
}

/* clockevent set_next_event callback for the physical timer. */
static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
	return 0;
}
127
/*
 * Set up this cpu's clockevent device.  Runs on the CPU being brought
 * up (from the CPU_STARTING notifier, and once directly for the boot
 * CPU in arch_timer_register()).  Always returns 0.
 */
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 450;
	/* Pick the virtual or physical callbacks/IRQ chosen at init time. */
	if (arch_timer_use_virtual) {
		clk->irq = arch_timer_ppi[VIRT_PPI];
		clk->set_mode = arch_timer_set_mode_virt;
		clk->set_next_event = arch_timer_set_next_event_virt;
	} else {
		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
		clk->set_mode = arch_timer_set_mode_phys;
		clk->set_next_event = arch_timer_set_next_event_phys;
	}

	/* This device only ever fires on the CPU it is set up on. */
	clk->cpumask = cpumask_of(smp_processor_id());

	/* Start from a known state: timer disabled (clk arg is ignored). */
	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

	/* min_delta 0xf ticks, max_delta 0x7fffffff ticks. */
	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	/* Only unmask the per-cpu PPI(s) once the clockevent is registered. */
	if (arch_timer_use_virtual)
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	arch_counter_set_user_access();

	return 0;
}
162
163 static int arch_timer_available(void)
164 {
165         u32 freq;
166
167         if (arch_timer_rate == 0) {
168                 freq = arch_timer_get_cntfrq();
169
170                 /* Check the timer frequency. */
171                 if (freq == 0) {
172                         pr_warn("Architected timer frequency not available\n");
173                         return -EINVAL;
174                 }
175
176                 arch_timer_rate = freq;
177         }
178
179         pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
180                      (unsigned long)arch_timer_rate / 1000000,
181                      (unsigned long)(arch_timer_rate / 10000) % 100,
182                      arch_timer_use_virtual ? "virt" : "phys");
183         return 0;
184 }
185
/* Public accessor: current timer rate in Hz (0 if not yet probed). */
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

/* Public accessor: raw virtual counter value (also the sched_clock source). */
u64 arch_timer_read_counter(void)
{
	return arch_counter_get_cntvct();
}

/* clocksource read callback; cs is unused. */
static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_counter_get_cntvct();
}

/* cyclecounter read callback; cc is unused. */
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_counter_get_cntvct();
}
205
/* The architected counter as a clocksource (56-bit usable width). */
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Same counter exposed as a cyclecounter; mult/shift are copied from
 * clocksource_counter in arch_timer_register(). */
static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

/* Timecounter on top of 'cyclecounter'; initialised in arch_timer_register(). */
static struct timecounter timecounter;

/* Public accessor for the timecounter (used e.g. by other subsystems). */
struct timecounter *arch_timer_get_timecounter(void)
{
	return &timecounter;
}
225
/*
 * Per-cpu teardown (CPU_DYING): mask this cpu's timer PPI(s), then
 * shut the timer down.  Mirrors the enable order in arch_timer_setup().
 */
static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	if (arch_timer_use_virtual)
		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
	else {
		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
	}

	/* Disable the timer hardware itself (dispatches per virt/phys). */
	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}
241
/*
 * CPU hotplug callback: set up the timer on an incoming CPU and tear it
 * down on a dying one.  Both notifications run on the affected CPU.
 */
static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
					   unsigned long action, void *hcpu)
{
	/*
	 * Grab cpu pointer in each case to avoid spurious
	 * preemptible warnings
	 */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		arch_timer_setup(this_cpu_ptr(arch_timer_evt));
		break;
	case CPU_DYING:
		arch_timer_stop(this_cpu_ptr(arch_timer_evt));
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
	.notifier_call = arch_timer_cpu_notify,
};
264
/*
 * One-time driver registration: verify the frequency, allocate the
 * per-cpu clockevents, register the clocksource/timecounter/sched_clock,
 * request the per-cpu IRQ(s), hook CPU hotplug, and configure the boot
 * CPU.  Returns 0 on success or a negative errno; on failure all
 * resources acquired so far are released via the goto-cleanup chain.
 */
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	err = arch_timer_available();
	if (err)
		goto out;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	/* Reuse the clocksource's conversion factors for the cyclecounter. */
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter,
			 arch_counter_get_cntvct());

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);

	/* Request the virtual PPI, or the secure (and optionally the
	 * non-secure) physical PPI. */
	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			/* If the second request fails, release the first. */
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	return 0;

out_free_irq:
	/* Release whichever IRQ(s) were requested above. */
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
338
/*
 * Device-tree probe entry point (via CLOCKSOURCE_OF_DECLARE below).
 * Parses the optional clock-frequency property and the up-to-four PPIs,
 * decides between the virtual and physical timer, and registers the
 * driver.  Only the first matching DT node is honoured.
 */
static void __init arch_timer_init(struct device_node *np)
{
	u32 freq;
	int i;

	/* A non-zero rate means a previous node already registered us. */
	if (arch_timer_get_rate()) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return;
	}

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (!of_property_read_u32(np, "clock-frequency", &freq))
		arch_timer_rate = freq;

	/* Map all four PPIs; missing entries come back as 0. */
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	/* NOTE(review): drops the node reference here even though the DT
	 * core passed it in — presumably intentional in this tree; confirm
	 * against the CLOCKSOURCE_OF_DECLARE calling convention. */
	of_node_put(np);

	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		arch_timer_use_virtual = false;

		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return;
		}
	}

	arch_timer_register();
	arch_timer_arch_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);