From 8bedc8143487eeaaa68ec330562695cb0c9af5f5 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker <fweisbec@gmail.com>
Date: Thu, 19 Nov 2015 16:47:34 +0100
Subject: [PATCH 341/366] sched/cputime: Convert vtime_seqlock to seqcount

The cputime can only be updated by the current task itself, even in
vtime case. So we can safely use seqcount instead of seqlock as there
is no writer concurrency involved.

[ bigeasy: safe since 6a61671bb2f3 ("cputime: Safely read cputime of
  full dynticks CPUs") ]

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E . McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1447948054-28668-8-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/init_task.h |  2 +-
 include/linux/sched.h     |  2 +-
 kernel/fork.c             |  2 +-
 kernel/sched/cputime.c    | 46 ++++++++++++++++++++++++----------------------
 4 files changed, 27 insertions(+), 25 deletions(-)

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index c52f61d..60fadde 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -156,7 +156,7 @@ extern struct task_group root_task_group;
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 # define INIT_VTIME(tsk)                                               \
-       .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
+       .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount),      \
        .vtime_snap = 0,                                \
        .vtime_snap_whence = VTIME_SYS,
 #else
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9891e23..bef5058 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1551,7 +1551,7 @@ struct task_struct {
        cputime_t gtime;
        struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-       seqlock_t vtime_seqlock;
+       seqcount_t vtime_seqcount;
        unsigned long long vtime_snap;
        enum {
                /* Task is sleeping or running in a CPU with VTIME inactive */
diff --git a/kernel/fork.c b/kernel/fork.c
index 5bc0004..3b88031 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1400,7 +1400,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        prev_cputime_init(&p->prev_cputime);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-       seqlock_init(&p->vtime_seqlock);
+       seqcount_init(&p->vtime_seqcount);
        p->vtime_snap = 0;
        p->vtime_snap_whence = VTIME_INACTIVE;
 #endif
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index ac7559b..558b98a 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -701,37 +701,37 @@ static void __vtime_account_system(struct task_struct *tsk)
 
 void vtime_account_system(struct task_struct *tsk)
 {
-       write_seqlock(&tsk->vtime_seqlock);
+       write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
-       write_sequnlock(&tsk->vtime_seqlock);
+       write_seqcount_end(&tsk->vtime_seqcount);
 }
 
 void vtime_gen_account_irq_exit(struct task_struct *tsk)
 {
-       write_seqlock(&tsk->vtime_seqlock);
+       write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        if (context_tracking_in_user())
                tsk->vtime_snap_whence = VTIME_USER;
-       write_sequnlock(&tsk->vtime_seqlock);
+       write_seqcount_end(&tsk->vtime_seqcount);
 }
 
 void vtime_account_user(struct task_struct *tsk)
 {
        cputime_t delta_cpu;
 
-       write_seqlock(&tsk->vtime_seqlock);
+       write_seqcount_begin(&tsk->vtime_seqcount);
        delta_cpu = get_vtime_delta(tsk);
        tsk->vtime_snap_whence = VTIME_SYS;
        account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
-       write_sequnlock(&tsk->vtime_seqlock);
+       write_seqcount_end(&tsk->vtime_seqcount);
 }
 
 void vtime_user_enter(struct task_struct *tsk)
 {
-       write_seqlock(&tsk->vtime_seqlock);
+       write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        tsk->vtime_snap_whence = VTIME_USER;
-       write_sequnlock(&tsk->vtime_seqlock);
+       write_seqcount_end(&tsk->vtime_seqcount);
 }
 
 void vtime_guest_enter(struct task_struct *tsk)
@@ -743,19 +743,19 @@ void vtime_guest_enter(struct task_struct *tsk)
         * synchronization against the reader (task_gtime())
         * that can thus safely catch up with a tickless delta.
         */
-       write_seqlock(&tsk->vtime_seqlock);
+       write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        current->flags |= PF_VCPU;
-       write_sequnlock(&tsk->vtime_seqlock);
+       write_seqcount_end(&tsk->vtime_seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);
 
 void vtime_guest_exit(struct task_struct *tsk)
 {
-       write_seqlock(&tsk->vtime_seqlock);
+       write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        current->flags &= ~PF_VCPU;
-       write_sequnlock(&tsk->vtime_seqlock);
+       write_seqcount_end(&tsk->vtime_seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);
 
@@ -768,24 +768,26 @@ void vtime_account_idle(struct task_struct *tsk)
 
 void arch_vtime_task_switch(struct task_struct *prev)
 {
-       write_seqlock(&prev->vtime_seqlock);
+       write_seqcount_begin(&prev->vtime_seqcount);
        prev->vtime_snap_whence = VTIME_INACTIVE;
-       write_sequnlock(&prev->vtime_seqlock);
+       write_seqcount_end(&prev->vtime_seqcount);
 
-       write_seqlock(&current->vtime_seqlock);
+       write_seqcount_begin(&current->vtime_seqcount);
        current->vtime_snap_whence = VTIME_SYS;
        current->vtime_snap = sched_clock_cpu(smp_processor_id());
-       write_sequnlock(&current->vtime_seqlock);
+       write_seqcount_end(&current->vtime_seqcount);
 }
 
 void vtime_init_idle(struct task_struct *t, int cpu)
 {
        unsigned long flags;
 
-       write_seqlock_irqsave(&t->vtime_seqlock, flags);
+       local_irq_save(flags);
+       write_seqcount_begin(&t->vtime_seqcount);
        t->vtime_snap_whence = VTIME_SYS;
        t->vtime_snap = sched_clock_cpu(cpu);
-       write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
+       write_seqcount_end(&t->vtime_seqcount);
+       local_irq_restore(flags);
 }
 
 cputime_t task_gtime(struct task_struct *t)
@@ -797,13 +799,13 @@ cputime_t task_gtime(struct task_struct *t)
                return t->gtime;
 
        do {
-               seq = read_seqbegin(&t->vtime_seqlock);
+               seq = read_seqcount_begin(&t->vtime_seqcount);
 
                gtime = t->gtime;
                if (t->flags & PF_VCPU)
                        gtime += vtime_delta(t);
 
-       } while (read_seqretry(&t->vtime_seqlock, seq));
+       } while (read_seqcount_retry(&t->vtime_seqcount, seq));
 
        return gtime;
 }
@@ -826,7 +828,7 @@ fetch_task_cputime(struct task_struct *t,
                *udelta = 0;
                *sdelta = 0;
 
-               seq = read_seqbegin(&t->vtime_seqlock);
+               seq = read_seqcount_begin(&t->vtime_seqcount);
 
                if (u_dst)
                        *u_dst = *u_src;
@@ -850,7 +852,7 @@ fetch_task_cputime(struct task_struct *t,
                        if (t->vtime_snap_whence == VTIME_SYS)
                                *sdelta = delta;
                }
-       } while (read_seqretry(&t->vtime_seqlock, seq));
+       } while (read_seqcount_retry(&t->vtime_seqcount, seq));
 }
 
 
-- 
1.9.1

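For context on the locking pattern the patch converts to: because a task's vtime fields are only ever written by that task itself, the writer needs no mutual exclusion, and a remote reader only needs the sequence-counter retry loop to obtain a consistent snapshot. Below is a minimal, self-contained userspace sketch of that pattern built on C11 atomics. The names vtime_sample, vtime_update, and vtime_read are purely illustrative (not kernel code), and the memory-ordering details are simplified relative to the kernel's real seqcount_t primitives.

/*
 * Illustrative userspace analogue of the seqcount pattern used above.
 * Writer: only the owning "task" updates the data, so it just bumps the
 * sequence to odd, updates, and bumps it back to even -- no lock taken.
 * Reader: retries until it observes an even, unchanged sequence number.
 * Barrier details are simplified; the kernel's seqcount_t handles them.
 */
#include <stdatomic.h>
#include <stdio.h>

struct vtime_sample {
	atomic_uint seq;           /* even: stable, odd: update in progress */
	unsigned long long gtime;  /* data guarded by the sequence counter  */
};

/* Writer side: analogous to write_seqcount_begin()/write_seqcount_end(). */
static void vtime_update(struct vtime_sample *v)
{
	atomic_fetch_add_explicit(&v->seq, 1, memory_order_relaxed); /* -> odd  */
	atomic_thread_fence(memory_order_release);
	v->gtime += 1;                        /* update the protected data */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&v->seq, 1, memory_order_relaxed); /* -> even */
}

/* Reader side: analogous to the read_seqcount_begin()/retry loop in task_gtime(). */
static unsigned long long vtime_read(struct vtime_sample *v)
{
	unsigned int seq;
	unsigned long long gtime;

	do {
		seq = atomic_load_explicit(&v->seq, memory_order_acquire);
		gtime = v->gtime;
		atomic_thread_fence(memory_order_acquire);
	} while ((seq & 1) ||
		 atomic_load_explicit(&v->seq, memory_order_relaxed) != seq);

	return gtime;
}

int main(void)
{
	struct vtime_sample v = { 0 };

	vtime_update(&v);
	printf("gtime = %llu\n", vtime_read(&v));
	return 0;
}

A full seqlock_t pairs the counter with a spinlock so that multiple writers can serialize; the commit's argument is that this writer exclusion is unnecessary here. That is also why vtime_init_idle() now wraps its write section in local_irq_save()/local_irq_restore() explicitly instead of relying on write_seqlock_irqsave().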