]> rtime.felk.cvut.cz Git - hercules2020/nv-tegra/linux-4.4.git/blob - rt-patches/0008-vtime-Split-lock-and-seqcount.patch
WAR:media:i2c:ov5693: add flip and mirror setting
[hercules2020/nv-tegra/linux-4.4.git] / rt-patches / 0008-vtime-Split-lock-and-seqcount.patch
1 From e1a956b9249701fad7242c07a44a9d98c5ab0ec2 Mon Sep 17 00:00:00 2001
2 From: Thomas Gleixner <tglx@linutronix.de>
3 Date: Tue, 23 Jul 2013 15:45:51 +0200
4 Subject: [PATCH 008/365] vtime: Split lock and seqcount
5
6 Replace vtime_seqlock seqlock with a simple seqcounter and a rawlock
7 so it can be taken in atomic context on RT.
8
9 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
10 ---
11  include/linux/init_task.h |  3 ++-
12  include/linux/sched.h     |  3 ++-
13  kernel/fork.c             |  3 ++-
14  kernel/sched/cputime.c    | 62 ++++++++++++++++++++++++++++++-----------------
15  4 files changed, 46 insertions(+), 25 deletions(-)
16
17 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
18 index 1c1ff7e..6b42f297 100644
19 --- a/include/linux/init_task.h
20 +++ b/include/linux/init_task.h
21 @@ -150,7 +150,8 @@ extern struct task_group root_task_group;
22  
23  #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
24  # define INIT_VTIME(tsk)                                               \
25 -       .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
26 +       .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \
27 +       .vtime_seq = SEQCNT_ZERO(tsk.vtime_seq),                        \
28         .vtime_snap = 0,                                \
29         .vtime_snap_whence = VTIME_SYS,
30  #else
31 diff --git a/include/linux/sched.h b/include/linux/sched.h
32 index 4d05a8d..539dbad 100644
33 --- a/include/linux/sched.h
34 +++ b/include/linux/sched.h
35 @@ -1533,7 +1533,8 @@ struct task_struct {
36         cputime_t gtime;
37         struct prev_cputime prev_cputime;
38  #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
39 -       seqlock_t vtime_seqlock;
40 +       raw_spinlock_t vtime_lock;
41 +       seqcount_t vtime_seq;
42         unsigned long long vtime_snap;
43         enum {
44                 VTIME_SLEEPING = 0,
45 diff --git a/kernel/fork.c b/kernel/fork.c
46 index 18a5cb1..e283178 100644
47 --- a/kernel/fork.c
48 +++ b/kernel/fork.c
49 @@ -1371,7 +1371,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
50         prev_cputime_init(&p->prev_cputime);
51  
52  #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
53 -       seqlock_init(&p->vtime_seqlock);
54 +       raw_spin_lock_init(&p->vtime_lock);
55 +       seqcount_init(&p->vtime_seq);
56         p->vtime_snap = 0;
57         p->vtime_snap_whence = VTIME_SLEEPING;
58  #endif
59 diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
60 index a1aecbe..09c2e7a 100644
61 --- a/kernel/sched/cputime.c
62 +++ b/kernel/sched/cputime.c
63 @@ -701,37 +701,45 @@ static void __vtime_account_system(struct task_struct *tsk)
64  
65  void vtime_account_system(struct task_struct *tsk)
66  {
67 -       write_seqlock(&tsk->vtime_seqlock);
68 +       raw_spin_lock(&tsk->vtime_lock);
69 +       write_seqcount_begin(&tsk->vtime_seq);
70         __vtime_account_system(tsk);
71 -       write_sequnlock(&tsk->vtime_seqlock);
72 +       write_seqcount_end(&tsk->vtime_seq);
73 +       raw_spin_unlock(&tsk->vtime_lock);
74  }
75  
76  void vtime_gen_account_irq_exit(struct task_struct *tsk)
77  {
78 -       write_seqlock(&tsk->vtime_seqlock);
79 +       raw_spin_lock(&tsk->vtime_lock);
80 +       write_seqcount_begin(&tsk->vtime_seq);
81         __vtime_account_system(tsk);
82         if (context_tracking_in_user())
83                 tsk->vtime_snap_whence = VTIME_USER;
84 -       write_sequnlock(&tsk->vtime_seqlock);
85 +       write_seqcount_end(&tsk->vtime_seq);
86 +       raw_spin_unlock(&tsk->vtime_lock);
87  }
88  
89  void vtime_account_user(struct task_struct *tsk)
90  {
91         cputime_t delta_cpu;
92  
93 -       write_seqlock(&tsk->vtime_seqlock);
94 +       raw_spin_lock(&tsk->vtime_lock);
95 +       write_seqcount_begin(&tsk->vtime_seq);
96         delta_cpu = get_vtime_delta(tsk);
97         tsk->vtime_snap_whence = VTIME_SYS;
98         account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
99 -       write_sequnlock(&tsk->vtime_seqlock);
100 +       write_seqcount_end(&tsk->vtime_seq);
101 +       raw_spin_unlock(&tsk->vtime_lock);
102  }
103  
104  void vtime_user_enter(struct task_struct *tsk)
105  {
106 -       write_seqlock(&tsk->vtime_seqlock);
107 +       raw_spin_lock(&tsk->vtime_lock);
108 +       write_seqcount_begin(&tsk->vtime_seq);
109         __vtime_account_system(tsk);
110         tsk->vtime_snap_whence = VTIME_USER;
111 -       write_sequnlock(&tsk->vtime_seqlock);
112 +       write_seqcount_end(&tsk->vtime_seq);
113 +       raw_spin_unlock(&tsk->vtime_lock);
114  }
115  
116  void vtime_guest_enter(struct task_struct *tsk)
117 @@ -743,19 +751,23 @@ void vtime_guest_enter(struct task_struct *tsk)
118          * synchronization against the reader (task_gtime())
119          * that can thus safely catch up with a tickless delta.
120          */
121 -       write_seqlock(&tsk->vtime_seqlock);
122 +       raw_spin_lock(&tsk->vtime_lock);
123 +       write_seqcount_begin(&tsk->vtime_seq);
124         __vtime_account_system(tsk);
125         current->flags |= PF_VCPU;
126 -       write_sequnlock(&tsk->vtime_seqlock);
127 +       write_seqcount_end(&tsk->vtime_seq);
128 +       raw_spin_unlock(&tsk->vtime_lock);
129  }
130  EXPORT_SYMBOL_GPL(vtime_guest_enter);
131  
132  void vtime_guest_exit(struct task_struct *tsk)
133  {
134 -       write_seqlock(&tsk->vtime_seqlock);
135 +       raw_spin_lock(&tsk->vtime_lock);
136 +       write_seqcount_begin(&tsk->vtime_seq);
137         __vtime_account_system(tsk);
138         current->flags &= ~PF_VCPU;
139 -       write_sequnlock(&tsk->vtime_seqlock);
140 +       write_seqcount_end(&tsk->vtime_seq);
141 +       raw_spin_unlock(&tsk->vtime_lock);
142  }
143  EXPORT_SYMBOL_GPL(vtime_guest_exit);
144  
145 @@ -768,24 +780,30 @@ void vtime_account_idle(struct task_struct *tsk)
146  
147  void arch_vtime_task_switch(struct task_struct *prev)
148  {
149 -       write_seqlock(&prev->vtime_seqlock);
150 +       raw_spin_lock(&prev->vtime_lock);
151 +       write_seqcount_begin(&prev->vtime_seq);
152         prev->vtime_snap_whence = VTIME_SLEEPING;
153 -       write_sequnlock(&prev->vtime_seqlock);
154 +       write_seqcount_end(&prev->vtime_seq);
155 +       raw_spin_unlock(&prev->vtime_lock);
156  
157 -       write_seqlock(&current->vtime_seqlock);
158 +       raw_spin_lock(&current->vtime_lock);
159 +       write_seqcount_begin(&current->vtime_seq);
160         current->vtime_snap_whence = VTIME_SYS;
161         current->vtime_snap = sched_clock_cpu(smp_processor_id());
162 -       write_sequnlock(&current->vtime_seqlock);
163 +       write_seqcount_end(&current->vtime_seq);
164 +       raw_spin_unlock(&current->vtime_lock);
165  }
166  
167  void vtime_init_idle(struct task_struct *t, int cpu)
168  {
169         unsigned long flags;
170  
171 -       write_seqlock_irqsave(&t->vtime_seqlock, flags);
172 +       raw_spin_lock_irqsave(&t->vtime_lock, flags);
173 +       write_seqcount_begin(&t->vtime_seq);
174         t->vtime_snap_whence = VTIME_SYS;
175         t->vtime_snap = sched_clock_cpu(cpu);
176 -       write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
177 +       write_seqcount_end(&t->vtime_seq);
178 +       raw_spin_unlock_irqrestore(&t->vtime_lock, flags);
179  }
180  
181  cputime_t task_gtime(struct task_struct *t)
182 @@ -797,13 +815,13 @@ cputime_t task_gtime(struct task_struct *t)
183                 return t->gtime;
184  
185         do {
186 -               seq = read_seqbegin(&t->vtime_seqlock);
187 +               seq = read_seqcount_begin(&t->vtime_seq);
188  
189                 gtime = t->gtime;
190                 if (t->flags & PF_VCPU)
191                         gtime += vtime_delta(t);
192  
193 -       } while (read_seqretry(&t->vtime_seqlock, seq));
194 +       } while (read_seqcount_retry(&t->vtime_seq, seq));
195  
196         return gtime;
197  }
198 @@ -826,7 +844,7 @@ fetch_task_cputime(struct task_struct *t,
199                 *udelta = 0;
200                 *sdelta = 0;
201  
202 -               seq = read_seqbegin(&t->vtime_seqlock);
203 +               seq = read_seqcount_begin(&t->vtime_seq);
204  
205                 if (u_dst)
206                         *u_dst = *u_src;
207 @@ -850,7 +868,7 @@ fetch_task_cputime(struct task_struct *t,
208                         if (t->vtime_snap_whence == VTIME_SYS)
209                                 *sdelta = delta;
210                 }
211 -       } while (read_seqretry(&t->vtime_seqlock, seq));
212 +       } while (read_seqcount_retry(&t->vtime_seq, seq));
213  }
214  
215  
216 -- 
217 2.7.4
218