1 From e1a956b9249701fad7242c07a44a9d98c5ab0ec2 Mon Sep 17 00:00:00 2001
2 From: Thomas Gleixner <tglx@linutronix.de>
3 Date: Tue, 23 Jul 2013 15:45:51 +0200
4 Subject: [PATCH 008/365] vtime: Split lock and seqcount
6 Replace vtime_seqlock seqlock with a simple seqcounter and a rawlock
7 so it can be taken in atomic context on RT.
9 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
11 include/linux/init_task.h | 3 ++-
12 include/linux/sched.h | 3 ++-
13 kernel/fork.c | 3 ++-
14 kernel/sched/cputime.c | 62 ++++++++++++++++++++++++++++++-----------------
15 4 files changed, 46 insertions(+), 25 deletions(-)
17 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
18 index 1c1ff7e..6b42f297 100644
19 --- a/include/linux/init_task.h
20 +++ b/include/linux/init_task.h
21 @@ -150,7 +150,8 @@ extern struct task_group root_task_group;
23 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
24 # define INIT_VTIME(tsk) \
25 - .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
26 + .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \
27 + .vtime_seq = SEQCNT_ZERO(tsk.vtime_seq), \
29 .vtime_snap_whence = VTIME_SYS,
31 diff --git a/include/linux/sched.h b/include/linux/sched.h
32 index 4d05a8d..539dbad 100644
33 --- a/include/linux/sched.h
34 +++ b/include/linux/sched.h
35 @@ -1533,7 +1533,8 @@ struct task_struct {
37 struct prev_cputime prev_cputime;
38 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
39 - seqlock_t vtime_seqlock;
40 + raw_spinlock_t vtime_lock;
41 + seqcount_t vtime_seq;
42 unsigned long long vtime_snap;
45 diff --git a/kernel/fork.c b/kernel/fork.c
46 index 18a5cb1..e283178 100644
49 @@ -1371,7 +1371,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
50 prev_cputime_init(&p->prev_cputime);
52 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
53 - seqlock_init(&p->vtime_seqlock);
54 + raw_spin_lock_init(&p->vtime_lock);
55 + seqcount_init(&p->vtime_seq);
57 p->vtime_snap_whence = VTIME_SLEEPING;
59 diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
60 index a1aecbe..09c2e7a 100644
61 --- a/kernel/sched/cputime.c
62 +++ b/kernel/sched/cputime.c
63 @@ -701,37 +701,45 @@ static void __vtime_account_system(struct task_struct *tsk)
65 void vtime_account_system(struct task_struct *tsk)
67 - write_seqlock(&tsk->vtime_seqlock);
68 + raw_spin_lock(&tsk->vtime_lock);
69 + write_seqcount_begin(&tsk->vtime_seq);
70 __vtime_account_system(tsk);
71 - write_sequnlock(&tsk->vtime_seqlock);
72 + write_seqcount_end(&tsk->vtime_seq);
73 + raw_spin_unlock(&tsk->vtime_lock);
76 void vtime_gen_account_irq_exit(struct task_struct *tsk)
78 - write_seqlock(&tsk->vtime_seqlock);
79 + raw_spin_lock(&tsk->vtime_lock);
80 + write_seqcount_begin(&tsk->vtime_seq);
81 __vtime_account_system(tsk);
82 if (context_tracking_in_user())
83 tsk->vtime_snap_whence = VTIME_USER;
84 - write_sequnlock(&tsk->vtime_seqlock);
85 + write_seqcount_end(&tsk->vtime_seq);
86 + raw_spin_unlock(&tsk->vtime_lock);
89 void vtime_account_user(struct task_struct *tsk)
93 - write_seqlock(&tsk->vtime_seqlock);
94 + raw_spin_lock(&tsk->vtime_lock);
95 + write_seqcount_begin(&tsk->vtime_seq);
96 delta_cpu = get_vtime_delta(tsk);
97 tsk->vtime_snap_whence = VTIME_SYS;
98 account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
99 - write_sequnlock(&tsk->vtime_seqlock);
100 + write_seqcount_end(&tsk->vtime_seq);
101 + raw_spin_unlock(&tsk->vtime_lock);
104 void vtime_user_enter(struct task_struct *tsk)
106 - write_seqlock(&tsk->vtime_seqlock);
107 + raw_spin_lock(&tsk->vtime_lock);
108 + write_seqcount_begin(&tsk->vtime_seq);
109 __vtime_account_system(tsk);
110 tsk->vtime_snap_whence = VTIME_USER;
111 - write_sequnlock(&tsk->vtime_seqlock);
112 + write_seqcount_end(&tsk->vtime_seq);
113 + raw_spin_unlock(&tsk->vtime_lock);
116 void vtime_guest_enter(struct task_struct *tsk)
117 @@ -743,19 +751,23 @@ void vtime_guest_enter(struct task_struct *tsk)
118 * synchronization against the reader (task_gtime())
119 * that can thus safely catch up with a tickless delta.
121 - write_seqlock(&tsk->vtime_seqlock);
122 + raw_spin_lock(&tsk->vtime_lock);
123 + write_seqcount_begin(&tsk->vtime_seq);
124 __vtime_account_system(tsk);
125 current->flags |= PF_VCPU;
126 - write_sequnlock(&tsk->vtime_seqlock);
127 + write_seqcount_end(&tsk->vtime_seq);
128 + raw_spin_unlock(&tsk->vtime_lock);
130 EXPORT_SYMBOL_GPL(vtime_guest_enter);
132 void vtime_guest_exit(struct task_struct *tsk)
134 - write_seqlock(&tsk->vtime_seqlock);
135 + raw_spin_lock(&tsk->vtime_lock);
136 + write_seqcount_begin(&tsk->vtime_seq);
137 __vtime_account_system(tsk);
138 current->flags &= ~PF_VCPU;
139 - write_sequnlock(&tsk->vtime_seqlock);
140 + write_seqcount_end(&tsk->vtime_seq);
141 + raw_spin_unlock(&tsk->vtime_lock);
143 EXPORT_SYMBOL_GPL(vtime_guest_exit);
145 @@ -768,24 +780,30 @@ void vtime_account_idle(struct task_struct *tsk)
147 void arch_vtime_task_switch(struct task_struct *prev)
149 - write_seqlock(&prev->vtime_seqlock);
150 + raw_spin_lock(&prev->vtime_lock);
151 + write_seqcount_begin(&prev->vtime_seq);
152 prev->vtime_snap_whence = VTIME_SLEEPING;
153 - write_sequnlock(&prev->vtime_seqlock);
154 + write_seqcount_end(&prev->vtime_seq);
155 + raw_spin_unlock(&prev->vtime_lock);
157 - write_seqlock(&current->vtime_seqlock);
158 + raw_spin_lock(&current->vtime_lock);
159 + write_seqcount_begin(&current->vtime_seq);
160 current->vtime_snap_whence = VTIME_SYS;
161 current->vtime_snap = sched_clock_cpu(smp_processor_id());
162 - write_sequnlock(&current->vtime_seqlock);
163 + write_seqcount_end(&current->vtime_seq);
164 + raw_spin_unlock(&current->vtime_lock);
167 void vtime_init_idle(struct task_struct *t, int cpu)
171 - write_seqlock_irqsave(&t->vtime_seqlock, flags);
172 + raw_spin_lock_irqsave(&t->vtime_lock, flags);
173 + write_seqcount_begin(&t->vtime_seq);
174 t->vtime_snap_whence = VTIME_SYS;
175 t->vtime_snap = sched_clock_cpu(cpu);
176 - write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
177 + write_seqcount_end(&t->vtime_seq);
178 + raw_spin_unlock_irqrestore(&t->vtime_lock, flags);
181 cputime_t task_gtime(struct task_struct *t)
182 @@ -797,13 +815,13 @@ cputime_t task_gtime(struct task_struct *t)
186 - seq = read_seqbegin(&t->vtime_seqlock);
187 + seq = read_seqcount_begin(&t->vtime_seq);
190 if (t->flags & PF_VCPU)
191 gtime += vtime_delta(t);
193 - } while (read_seqretry(&t->vtime_seqlock, seq));
194 + } while (read_seqcount_retry(&t->vtime_seq, seq));
198 @@ -826,7 +844,7 @@ fetch_task_cputime(struct task_struct *t,
202 - seq = read_seqbegin(&t->vtime_seqlock);
203 + seq = read_seqcount_begin(&t->vtime_seq);
207 @@ -850,7 +868,7 @@ fetch_task_cputime(struct task_struct *t,
208 if (t->vtime_snap_whence == VTIME_SYS)
211 - } while (read_seqretry(&t->vtime_seqlock, seq));
212 + } while (read_seqcount_retry(&t->vtime_seq, seq));