1 From 8072afcff6ac98ce8a2068dd406d9ad1332c47ce Mon Sep 17 00:00:00 2001
2 From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
3 Date: Wed, 25 May 2016 14:03:50 +0200
4 Subject: [PATCH 349/366] trace: correct off by one while recording the
5  preempt count
7 Trace events like raw_syscalls always show a preempt count of one. The
8 reason is that on PREEMPT kernels rcu_read_lock_sched_notrace()
9 increases the preemption counter and the function recording the counter
10 is called within the RCU section.
12 Cc: stable-rt@vger.kernel.org
13 Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
14 [ Changed this to upstream version. See commit e947841c0dce ]
15 Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
16 ---
17 kernel/trace/trace_events.c | 8 ++++++++
18 1 file changed, 8 insertions(+)
20 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
21 index 4a48f97..5bd79b3 100644
22 --- a/kernel/trace/trace_events.c
23 +++ b/kernel/trace/trace_events.c
24 @@ -246,6 +246,14 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
26 local_save_flags(fbuffer->flags);
27  	fbuffer->pc = preempt_count();
28 +	/*
29 +	 * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
30 +	 * preemption (adding one to the preempt_count). Since we are
31 +	 * interested in the preempt_count at the time the tracepoint was
32 +	 * hit, we need to subtract one to offset the increment.
33 +	 */
34 +	if (IS_ENABLED(CONFIG_PREEMPT))
35 +		fbuffer->pc--;
36  	fbuffer->trace_file = trace_file;