Subject: vtime-split-lock-and-seqcount.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 23 Jul 2013 15:45:51 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.6-rt9.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/init_task.h |    3 +-
 include/linux/sched.h     |    3 +-
 kernel/fork.c             |    3 +-
 kernel/sched/cputime.c    |   62 +++++++++++++++++++++++++++++-----------------
 4 files changed, 46 insertions(+), 25 deletions(-)
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -145,7 +145,8 @@ extern struct task_group root_task_group

 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 # define INIT_VTIME(tsk)						\
-	.vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock),	\
+	.vtime_lock	= __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock),	\
+	.vtime_seq	= SEQCNT_ZERO,					\
	.vtime_snap = 0,				\
	.vtime_snap_whence = VTIME_SYS,
 #else
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1159,7 +1159,8 @@ struct task_struct {
	struct cputime prev_cputime;
 #endif
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-	seqlock_t vtime_seqlock;
+	raw_spinlock_t vtime_lock;
+	seqcount_t vtime_seq;
	unsigned long long vtime_snap;
	enum {
		VTIME_SLEEPING = 0,
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1241,7 +1241,8 @@ static struct task_struct *copy_process(
	p->prev_cputime.utime = p->prev_cputime.stime = 0;
 #endif
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-	seqlock_init(&p->vtime_seqlock);
+	raw_spin_lock_init(&p->vtime_lock);
+	seqcount_init(&p->vtime_seq);
	p->vtime_snap = 0;
	p->vtime_snap_whence = VTIME_SLEEPING;
 #endif
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -655,37 +655,45 @@ static void __vtime_account_system(struc

 void vtime_account_system(struct task_struct *tsk)
 {
-	write_seqlock(&tsk->vtime_seqlock);
+	raw_spin_lock(&tsk->vtime_lock);
+	write_seqcount_begin(&tsk->vtime_seq);
	__vtime_account_system(tsk);
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seq);
+	raw_spin_unlock(&tsk->vtime_lock);
 }

 void vtime_gen_account_irq_exit(struct task_struct *tsk)
 {
-	write_seqlock(&tsk->vtime_seqlock);
+	raw_spin_lock(&tsk->vtime_lock);
+	write_seqcount_begin(&tsk->vtime_seq);
	__vtime_account_system(tsk);
	if (context_tracking_in_user())
		tsk->vtime_snap_whence = VTIME_USER;
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seq);
+	raw_spin_unlock(&tsk->vtime_lock);
 }

 void vtime_account_user(struct task_struct *tsk)
 {
	cputime_t delta_cpu;

-	write_seqlock(&tsk->vtime_seqlock);
+	raw_spin_lock(&tsk->vtime_lock);
+	write_seqcount_begin(&tsk->vtime_seq);
	delta_cpu = get_vtime_delta(tsk);
	tsk->vtime_snap_whence = VTIME_SYS;
	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seq);
+	raw_spin_unlock(&tsk->vtime_lock);
 }

 void vtime_user_enter(struct task_struct *tsk)
 {
-	write_seqlock(&tsk->vtime_seqlock);
+	raw_spin_lock(&tsk->vtime_lock);
+	write_seqcount_begin(&tsk->vtime_seq);
	__vtime_account_system(tsk);
	tsk->vtime_snap_whence = VTIME_USER;
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seq);
+	raw_spin_unlock(&tsk->vtime_lock);
 }

 void vtime_guest_enter(struct task_struct *tsk)
@@ -697,19 +705,23 @@ void vtime_guest_enter(struct task_struc
	 * synchronization against the reader (task_gtime())
	 * that can thus safely catch up with a tickless delta.
	 */
-	write_seqlock(&tsk->vtime_seqlock);
+	raw_spin_lock(&tsk->vtime_lock);
+	write_seqcount_begin(&tsk->vtime_seq);
	__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seq);
+	raw_spin_unlock(&tsk->vtime_lock);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);

 void vtime_guest_exit(struct task_struct *tsk)
 {
-	write_seqlock(&tsk->vtime_seqlock);
+	raw_spin_lock(&tsk->vtime_lock);
+	write_seqcount_begin(&tsk->vtime_seq);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seq);
+	raw_spin_unlock(&tsk->vtime_lock);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);

@@ -722,24 +734,30 @@ void vtime_account_idle(struct task_stru

 void arch_vtime_task_switch(struct task_struct *prev)
 {
-	write_seqlock(&prev->vtime_seqlock);
+	raw_spin_lock(&prev->vtime_lock);
+	write_seqcount_begin(&prev->vtime_seq);
	prev->vtime_snap_whence = VTIME_SLEEPING;
-	write_sequnlock(&prev->vtime_seqlock);
+	write_seqcount_end(&prev->vtime_seq);
+	raw_spin_unlock(&prev->vtime_lock);

-	write_seqlock(&current->vtime_seqlock);
+	raw_spin_lock(&current->vtime_lock);
+	write_seqcount_begin(&current->vtime_seq);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = sched_clock_cpu(smp_processor_id());
-	write_sequnlock(&current->vtime_seqlock);
+	write_seqcount_end(&current->vtime_seq);
+	raw_spin_unlock(&current->vtime_lock);
 }

 void vtime_init_idle(struct task_struct *t, int cpu)
 {
	unsigned long flags;

-	write_seqlock_irqsave(&t->vtime_seqlock, flags);
+	raw_spin_lock_irqsave(&t->vtime_lock, flags);
+	write_seqcount_begin(&t->vtime_seq);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = sched_clock_cpu(cpu);
-	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
+	write_seqcount_end(&t->vtime_seq);
+	raw_spin_unlock_irqrestore(&t->vtime_lock, flags);
 }

 cputime_t task_gtime(struct task_struct *t)
@@ -748,13 +766,13 @@ cputime_t task_gtime(struct task_struct
	cputime_t gtime;

	do {
-		seq = read_seqbegin(&t->vtime_seqlock);
+		seq = read_seqcount_begin(&t->vtime_seq);

		gtime = t->gtime;
		if (t->flags & PF_VCPU)
			gtime += vtime_delta(t);

-	} while (read_seqretry(&t->vtime_seqlock, seq));
+	} while (read_seqcount_retry(&t->vtime_seq, seq));

	return gtime;
 }
@@ -777,7 +795,7 @@ fetch_task_cputime(struct task_struct *t
	*udelta = 0;
	*sdelta = 0;

-	seq = read_seqbegin(&t->vtime_seqlock);
+	seq = read_seqcount_begin(&t->vtime_seq);

	if (u_dst)
		*u_dst = *u_src;
@@ -801,7 +819,7 @@ fetch_task_cputime(struct task_struct *t
		if (t->vtime_snap_whence == VTIME_SYS)
			*sdelta = delta;
		}
-	} while (read_seqretry(&t->vtime_seqlock, seq));
+	} while (read_seqcount_retry(&t->vtime_seq, seq));
 }