[rt] Refresh "timers: Redo the notification of canceling timers on -RT" for context changes in 4.19.138

Salvatore Bonaccorso 2020-08-28 10:23:30 +02:00
parent f5e1f03364
commit dca4fea8c5
2 changed files with 31 additions and 225 deletions

debian/changelog

@@ -313,6 +313,8 @@ linux (4.19.138-1) UNRELEASED; urgency=medium
4.19.136
* [rt] Refresh "timers: Prepare for full preemption" for context changes in
4.19.138
* [rt] Refresh "timers: Redo the notification of canceling timers on -RT"
for context changes in 4.19.138
 -- Salvatore Bonaccorso <carnil@debian.org>  Tue, 04 Aug 2020 16:33:40 +0200


@@ -42,11 +42,9 @@ Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
kernel/time/timer.c | 96 ++++++++++++++++------------------
10 files changed, 118 insertions(+), 135 deletions(-)
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 82d0f52414a6..f845093466be 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -471,10 +471,11 @@ static int do_timerfd_settime(int ufd, int flags,
@@ -471,10 +471,11 @@
break;
}
spin_unlock_irq(&ctx->wqh.lock);
@@ -60,8 +58,6 @@ index 82d0f52414a6..f845093466be 100644
}
/*
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 082147c07831..aee31b1f0cc3 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -22,7 +22,6 @@
@@ -72,7 +68,7 @@ index 082147c07831..aee31b1f0cc3 100644
struct hrtimer_clock_base;
struct hrtimer_cpu_base;
@@ -193,6 +192,8 @@ enum hrtimer_base_type {
@@ -193,6 +192,8 @@
* @nr_retries: Total number of hrtimer interrupt retries
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
@@ -81,7 +77,7 @@ index 082147c07831..aee31b1f0cc3 100644
* @expires_next: absolute time of the next event, is required for remote
* hrtimer enqueue; it is the total first expiry time (hard
* and soft hrtimer are taken into account)
@@ -220,12 +221,10 @@ struct hrtimer_cpu_base {
@@ -220,12 +221,10 @@
unsigned short nr_hangs;
unsigned int max_hang_time;
#endif
@@ -95,7 +91,7 @@ index 082147c07831..aee31b1f0cc3 100644
struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
@@ -426,6 +425,7 @@ static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
@@ -426,6 +425,7 @@
extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
@@ -103,7 +99,7 @@ index 082147c07831..aee31b1f0cc3 100644
static inline void hrtimer_start_expires(struct hrtimer *timer,
enum hrtimer_mode mode)
@@ -443,13 +443,6 @@ static inline void hrtimer_restart(struct hrtimer *timer)
@@ -443,13 +443,6 @@
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
@@ -117,7 +113,7 @@ index 082147c07831..aee31b1f0cc3 100644
/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
@@ -481,7 +474,7 @@ static inline bool hrtimer_is_queued(struct hrtimer *timer)
@@ -481,7 +474,7 @@
* Helper function to check, whether the timer is running the callback
* function
*/
@@ -126,11 +122,9 @@ index 082147c07831..aee31b1f0cc3 100644
{
return timer->base->running == timer;
}
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 0571b498db73..3e6c91bdf2ef 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -15,6 +15,7 @@ struct cpu_timer_list {
@@ -15,6 +15,7 @@
u64 expires, incr;
struct task_struct *task;
int firing;
@@ -138,11 +132,9 @@ index 0571b498db73..3e6c91bdf2ef 100644
};
/*
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index a465564367ec..dcf0204264f1 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -438,7 +438,7 @@ int alarm_cancel(struct alarm *alarm)
@@ -438,7 +438,7 @@
int ret = alarm_try_to_cancel(alarm);
if (ret >= 0)
return ret;
@@ -151,11 +143,9 @@ index a465564367ec..dcf0204264f1 100644
}
}
EXPORT_SYMBOL_GPL(alarm_cancel);
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index f16cbc98c47a..ed5d8d51ca91 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -963,33 +963,16 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
@@ -963,33 +963,16 @@
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
@@ -194,7 +184,7 @@ index f16cbc98c47a..ed5d8d51ca91 100644
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
@@ -1227,7 +1210,7 @@ int hrtimer_cancel(struct hrtimer *timer)
@@ -1227,7 +1210,7 @@
if (ret >= 0)
return ret;
@@ -203,7 +193,7 @@ index f16cbc98c47a..ed5d8d51ca91 100644
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
@@ -1531,6 +1514,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
@@ -1531,6 +1514,7 @@
unsigned long flags;
ktime_t now;
@@ -211,7 +201,7 @@ index f16cbc98c47a..ed5d8d51ca91 100644
raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
@@ -1540,7 +1524,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
@@ -1540,7 +1524,7 @@
hrtimer_update_softirq_timer(cpu_base, true);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
@@ -220,7 +210,7 @@ index f16cbc98c47a..ed5d8d51ca91 100644
}
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -1950,9 +1934,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
@@ -1950,9 +1934,7 @@
cpu_base->softirq_next_timer = NULL;
cpu_base->expires_next = KTIME_MAX;
cpu_base->softirq_expires_next = KTIME_MAX;
@@ -231,11 +221,9 @@ index f16cbc98c47a..ed5d8d51ca91 100644
return 0;
}
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 55b0e58368bf..a5ff222df4c7 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -215,7 +215,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
@@ -215,7 +215,7 @@
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
spin_unlock_irq(&tsk->sighand->siglock);
@@ -244,11 +232,9 @@ index 55b0e58368bf..a5ff222df4c7 100644
goto again;
}
expires = timeval_to_ktime(value->it_value);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 8d95e8de98b2..765e700962ab 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -792,6 +792,7 @@ check_timers_list(struct list_head *timers,
@@ -792,6 +792,7 @@
return t->expires;
t->firing = 1;
@@ -256,7 +242,7 @@ index 8d95e8de98b2..765e700962ab 100644
list_move_tail(&t->entry, firing);
}
@@ -1138,6 +1139,20 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
@@ -1138,6 +1139,20 @@
return 0;
}
@@ -277,7 +263,7 @@ index 8d95e8de98b2..765e700962ab 100644
/*
* This is called from the timer interrupt handler. The irq handler has
* already updated our counts. We need to check if any timers fire now.
@@ -1148,6 +1163,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk)
@@ -1148,6 +1163,7 @@
LIST_HEAD(firing);
struct k_itimer *timer, *next;
unsigned long flags;
@@ -285,7 +271,7 @@ index 8d95e8de98b2..765e700962ab 100644
/*
* The fast path checks that there are no expired thread or thread
@@ -1156,6 +1172,9 @@ static void __run_posix_cpu_timers(struct task_struct *tsk)
@@ -1156,6 +1172,9 @@
if (!fastpath_timer_check(tsk))
return;
@@ -295,7 +281,7 @@ index 8d95e8de98b2..765e700962ab 100644
if (!lock_task_sighand(tsk, &flags))
return;
/*
@@ -1190,6 +1209,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk)
@@ -1190,6 +1209,7 @@
list_del_init(&timer->it.cpu.entry);
cpu_firing = timer->it.cpu.firing;
timer->it.cpu.firing = 0;
@@ -303,7 +289,7 @@ index 8d95e8de98b2..765e700962ab 100644
/*
* The firing flag is -1 if we collided with a reset
* of the timer, which already reported this
@@ -1199,6 +1219,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk)
@@ -1199,6 +1219,7 @@
cpu_timer_fire(timer);
spin_unlock(&timer->it_lock);
}
@@ -311,7 +297,7 @@ index 8d95e8de98b2..765e700962ab 100644
}
#ifdef CONFIG_PREEMPT_RT_BASE
@@ -1466,6 +1487,8 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
@@ -1466,6 +1487,8 @@
spin_unlock_irq(&timer.it_lock);
while (error == TIMER_RETRY) {
@@ -320,11 +306,9 @@ index 8d95e8de98b2..765e700962ab 100644
/*
* We need to handle case when timer was or is in the
* middle of firing. In other cases we already freed
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index a5ec421e3437..c7e97d421590 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -821,25 +821,20 @@ static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
@@ -821,25 +821,20 @@
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
@@ -360,7 +344,7 @@ index a5ec421e3437..c7e97d421590 100644
}
/* Set a POSIX.1b interval timer. */
@@ -901,21 +896,21 @@ static int do_timer_settime(timer_t timer_id, int flags,
@@ -901,21 +896,21 @@
if (!timr)
return -EINVAL;
@@ -386,7 +370,7 @@ index a5ec421e3437..c7e97d421590 100644
return error;
}
@@ -977,13 +972,21 @@ int common_timer_del(struct k_itimer *timer)
@@ -977,13 +972,21 @@
return 0;
}
@@ -410,7 +394,7 @@ index a5ec421e3437..c7e97d421590 100644
}
/* Delete a POSIX.1b interval timer. */
@@ -997,15 +1000,8 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
@@ -997,15 +1000,8 @@
if (!timer)
return -EINVAL;
@@ -427,7 +411,7 @@ index a5ec421e3437..c7e97d421590 100644
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
@@ -1031,20 +1027,9 @@ static void itimer_delete(struct k_itimer *timer)
@@ -1031,20 +1027,9 @@
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
@@ -450,11 +434,9 @@ index a5ec421e3437..c7e97d421590 100644
list_del(&timer->list);
/*
* This keeps any tasks waiting on the spin lock from thinking
diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h
index ddb21145211a..725bd230a8db 100644
--- a/kernel/time/posix-timers.h
+++ b/kernel/time/posix-timers.h
@@ -32,6 +32,8 @@ extern const struct k_clock clock_process;
@@ -32,6 +32,8 @@
extern const struct k_clock clock_thread;
extern const struct k_clock alarm_clock;
@@ -463,191 +445,13 @@ index ddb21145211a..725bd230a8db 100644
int posix_timer_event(struct k_itimer *timr, int si_private);
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index a4279d24b438..0e33ced0404b 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -44,7 +44,6 @@
#include <linux/sched/debug.h>
@@ -45,7 +45,6 @@
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/random.h>
-#include <linux/swait.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
@@ -198,9 +197,7 @@ EXPORT_SYMBOL(jiffies_64);
struct timer_base {
raw_spinlock_t lock;
struct timer_list *running_timer;
-#ifdef CONFIG_PREEMPT_RT_FULL
- struct swait_queue_head wait_for_running_timer;
-#endif
+ spinlock_t expiry_lock;
unsigned long clk;
unsigned long next_expiry;
unsigned int cpu;
@@ -1200,33 +1197,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
}
EXPORT_SYMBOL_GPL(add_timer_on);
-#ifdef CONFIG_PREEMPT_RT_FULL
-/*
- * Wait for a running timer
- */
-static void wait_for_running_timer(struct timer_list *timer)
-{
- struct timer_base *base;
- u32 tf = timer->flags;
-
- if (tf & TIMER_MIGRATING)
- return;
-
- base = get_timer_base(tf);
- swait_event_exclusive(base->wait_for_running_timer,
- base->running_timer != timer);
-}
-
-# define wakeup_timer_waiters(b) swake_up_all(&(b)->wait_for_running_timer)
-#else
-static inline void wait_for_running_timer(struct timer_list *timer)
-{
- cpu_relax();
-}
-
-# define wakeup_timer_waiters(b) do { } while (0)
-#endif
-
/**
* del_timer - deactivate a timer.
* @timer: the timer to be deactivated
@@ -1256,14 +1226,8 @@ int del_timer(struct timer_list *timer)
}
EXPORT_SYMBOL(del_timer);
-/**
- * try_to_del_timer_sync - Try to deactivate a timer
- * @timer: timer to delete
- *
- * This function tries to deactivate a timer. Upon successful (ret >= 0)
- * exit the timer is not queued and the handler is not running on any CPU.
- */
-int try_to_del_timer_sync(struct timer_list *timer)
+static int __try_to_del_timer_sync(struct timer_list *timer,
+ struct timer_base **basep)
{
struct timer_base *base;
unsigned long flags;
@@ -1271,7 +1235,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
debug_assert_init(timer);
- base = lock_timer_base(timer, &flags);
+ *basep = base = lock_timer_base(timer, &flags);
if (base->running_timer != timer)
ret = detach_if_pending(timer, base, true);
@@ -1280,9 +1244,42 @@ int try_to_del_timer_sync(struct timer_list *timer)
return ret;
}
+
+/**
+ * try_to_del_timer_sync - Try to deactivate a timer
+ * @timer: timer to delete
+ *
+ * This function tries to deactivate a timer. Upon successful (ret >= 0)
+ * exit the timer is not queued and the handler is not running on any CPU.
+ */
+int try_to_del_timer_sync(struct timer_list *timer)
+{
+ struct timer_base *base;
+
+ return __try_to_del_timer_sync(timer, &base);
+}
EXPORT_SYMBOL(try_to_del_timer_sync);
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+static int __del_timer_sync(struct timer_list *timer)
+{
+ struct timer_base *base;
+ int ret;
+
+ for (;;) {
+ ret = __try_to_del_timer_sync(timer, &base);
+ if (ret >= 0)
+ return ret;
+
+ /*
+ * When accessing the lock, timers of base are no longer expired
+ * and so timer is no longer running.
+ */
+ spin_lock(&base->expiry_lock);
+ spin_unlock(&base->expiry_lock);
+ }
+}
+
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
@@ -1338,12 +1335,8 @@ int del_timer_sync(struct timer_list *timer)
* could lead to deadlock.
*/
WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
- for (;;) {
- int ret = try_to_del_timer_sync(timer);
- if (ret >= 0)
- return ret;
- wait_for_running_timer(timer);
- }
+
+ return __del_timer_sync(timer);
}
EXPORT_SYMBOL(del_timer_sync);
#endif
@@ -1408,11 +1401,15 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
raw_spin_unlock(&base->lock);
call_timer_fn(timer, fn);
base->running_timer = NULL;
+ spin_unlock(&base->expiry_lock);
+ spin_lock(&base->expiry_lock);
raw_spin_lock(&base->lock);
} else {
raw_spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn);
base->running_timer = NULL;
+ spin_unlock(&base->expiry_lock);
+ spin_lock(&base->expiry_lock);
raw_spin_lock_irq(&base->lock);
}
}
@@ -1709,6 +1706,7 @@ static inline void __run_timers(struct timer_base *base)
if (!time_after_eq(jiffies, base->clk))
return;
+ spin_lock(&base->expiry_lock);
raw_spin_lock_irq(&base->lock);
/*
@@ -1736,7 +1734,7 @@ static inline void __run_timers(struct timer_base *base)
expire_timers(base, heads + levels);
}
raw_spin_unlock_irq(&base->lock);
- wakeup_timer_waiters(base);
+ spin_unlock(&base->expiry_lock);
}
/*
@@ -1983,9 +1981,7 @@ static void __init init_timer_cpu(int cpu)
base->cpu = cpu;
raw_spin_lock_init(&base->lock);
base->clk = jiffies;
-#ifdef CONFIG_PREEMPT_RT_FULL
- init_swait_queue_head(&base->wait_for_running_timer);
-#endif
+ spin_lock_init(&base->expiry_lock);
}
}
--
2.17.1