linux/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch

From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 13 Jul 2016 18:22:23 +0200
Subject: [PATCH] timer: make the base lock raw
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.9-rt7.tar.xz
The part where the base lock is held got more predictable / shorter after the
timer rework. One reason is the lack of re-cascading.
That means the lock can be made raw and held in IRQ context.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/time/timer.c | 48 ++++++++++++++++++++++++------------------------
1 file changed, 24 insertions(+), 24 deletions(-)
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -195,7 +195,7 @@ EXPORT_SYMBOL(jiffies_64);
#endif
struct timer_base {
- spinlock_t lock;
+ raw_spinlock_t lock;
struct timer_list *running_timer;
unsigned long clk;
unsigned long next_expiry;
@@ -913,10 +913,10 @@ static struct timer_base *lock_timer_bas
if (!(tf & TIMER_MIGRATING)) {
base = get_timer_base(tf);
- spin_lock_irqsave(&base->lock, *flags);
+ raw_spin_lock_irqsave(&base->lock, *flags);
if (timer->flags == tf)
return base;
- spin_unlock_irqrestore(&base->lock, *flags);
+ raw_spin_unlock_irqrestore(&base->lock, *flags);
}
cpu_relax();
}
@@ -986,9 +986,9 @@ static inline int
/* See the comment in lock_timer_base() */
timer->flags |= TIMER_MIGRATING;
- spin_unlock(&base->lock);
+ raw_spin_unlock(&base->lock);
base = new_base;
- spin_lock(&base->lock);
+ raw_spin_lock(&base->lock);
WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | base->cpu);
}
@@ -1013,7 +1013,7 @@ static inline int
}
out_unlock:
- spin_unlock_irqrestore(&base->lock, flags);
+ raw_spin_unlock_irqrestore(&base->lock, flags);
return ret;
}
@@ -1106,16 +1106,16 @@ void add_timer_on(struct timer_list *tim
if (base != new_base) {
timer->flags |= TIMER_MIGRATING;
- spin_unlock(&base->lock);
+ raw_spin_unlock(&base->lock);
base = new_base;
- spin_lock(&base->lock);
+ raw_spin_lock(&base->lock);
WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | cpu);
}
debug_activate(timer, timer->expires);
internal_add_timer(base, timer);
- spin_unlock_irqrestore(&base->lock, flags);
+ raw_spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
@@ -1141,7 +1141,7 @@ int del_timer(struct timer_list *timer)
if (timer_pending(timer)) {
base = lock_timer_base(timer, &flags);
ret = detach_if_pending(timer, base, true);
- spin_unlock_irqrestore(&base->lock, flags);
+ raw_spin_unlock_irqrestore(&base->lock, flags);
}
return ret;
@@ -1168,7 +1168,7 @@ int try_to_del_timer_sync(struct timer_l
if (base->running_timer != timer)
ret = detach_if_pending(timer, base, true);
- spin_unlock_irqrestore(&base->lock, flags);
+ raw_spin_unlock_irqrestore(&base->lock, flags);
return ret;
}
@@ -1299,13 +1299,13 @@ static void expire_timers(struct timer_b
data = timer->data;
if (timer->flags & TIMER_IRQSAFE) {
- spin_unlock(&base->lock);
+ raw_spin_unlock(&base->lock);
call_timer_fn(timer, fn, data);
- spin_lock(&base->lock);
+ raw_spin_lock(&base->lock);
} else {
- spin_unlock_irq(&base->lock);
+ raw_spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn, data);
- spin_lock_irq(&base->lock);
+ raw_spin_lock_irq(&base->lock);
}
}
}
@@ -1474,7 +1474,7 @@ u64 get_next_timer_interrupt(unsigned lo
if (cpu_is_offline(smp_processor_id()))
return expires;
- spin_lock(&base->lock);
+ raw_spin_lock(&base->lock);
nextevt = __next_timer_interrupt(base);
is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
base->next_expiry = nextevt;
@@ -1502,7 +1502,7 @@ u64 get_next_timer_interrupt(unsigned lo
if ((expires - basem) > TICK_NSEC)
base->is_idle = true;
}
- spin_unlock(&base->lock);
+ raw_spin_unlock(&base->lock);
return cmp_next_hrtimer_event(basem, expires);
}
@@ -1590,7 +1590,7 @@ static inline void __run_timers(struct t
if (!time_after_eq(jiffies, base->clk))
return;
- spin_lock_irq(&base->lock);
+ raw_spin_lock_irq(&base->lock);
while (time_after_eq(jiffies, base->clk)) {
@@ -1601,7 +1601,7 @@ static inline void __run_timers(struct t
expire_timers(base, heads + levels);
}
base->running_timer = NULL;
- spin_unlock_irq(&base->lock);
+ raw_spin_unlock_irq(&base->lock);
}
/*
@@ -1786,16 +1786,16 @@ int timers_dead_cpu(unsigned int cpu)
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
- spin_lock_irq(&new_base->lock);
- spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock_irq(&new_base->lock);
+ raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
BUG_ON(old_base->running_timer);
for (i = 0; i < WHEEL_SIZE; i++)
migrate_timer_list(new_base, old_base->vectors + i);
- spin_unlock(&old_base->lock);
- spin_unlock_irq(&new_base->lock);
+ raw_spin_unlock(&old_base->lock);
+ raw_spin_unlock_irq(&new_base->lock);
put_cpu_ptr(&timer_bases);
}
return 0;
@@ -1811,7 +1811,7 @@ static void __init init_timer_cpu(int cp
for (i = 0; i < NR_BASES; i++) {
base = per_cpu_ptr(&timer_bases[i], cpu);
base->cpu = cpu;
- spin_lock_init(&base->lock);
+ raw_spin_lock_init(&base->lock);
base->clk = jiffies;
}
}