From 55c6ef74987f9b95f4551dfad13bc1bae0c74ea8 Mon Sep 17 00:00:00 2001
Message-Id: <55c6ef74987f9b95f4551dfad13bc1bae0c74ea8.1601675152.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 29 Aug 2013 18:21:04 +0200
Subject: [PATCH 153/333] ptrace: fix ptrace vs tasklist_lock race
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

As explained by Alexander Fyodorov <halcy@yandex.ru>:

|read_lock(&tasklist_lock) in ptrace_stop() is converted to mutex on RT kernel,
|and it can remove __TASK_TRACED from task->state (by moving it to
|task->saved_state). If parent does wait() on child followed by a sys_ptrace
|call, the following race can happen:
|
|- child sets __TASK_TRACED in ptrace_stop()
|- parent does wait() which eventually calls wait_task_stopped() and returns
|  child's pid
|- child blocks on read_lock(&tasklist_lock) in ptrace_stop() and moves
|  __TASK_TRACED flag to saved_state
|- parent calls sys_ptrace, which calls ptrace_check_attach() and wait_task_inactive()
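
For illustration only (not part of the change): a minimal user-space tracer
that performs exactly this wait()-then-ptrace() sequence is sketched below.
It merely exercises the call sequence; actually hitting the window also
requires an RT kernel and contention on tasklist_lock. PTRACE_CONT is used
as an arbitrary example request; most requests, PTRACE_CONT included, reach
wait_task_inactive() via ptrace_check_attach().

  /* illustration only -- not part of the kernel change */
  #include <signal.h>
  #include <stdio.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/wait.h>
  #include <unistd.h>

  int main(void)
  {
          pid_t child = fork();

          if (child == 0) {
                  /* child: request tracing and stop in ptrace_stop() */
                  ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                  raise(SIGSTOP);
                  _exit(0);
          }

          /* parent: wait() reports the stopped child ... */
          if (waitpid(child, NULL, 0) != child) {
                  perror("waitpid");
                  return 1;
          }

          /* ... and the very next ptrace request races with ptrace_stop() */
          if (ptrace(PTRACE_CONT, child, NULL, NULL) == -1)
                  perror("PTRACE_CONT");

          waitpid(child, NULL, 0);        /* reap the child */
          return 0;
  }
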
The patch is based on his initial patch, with an additional check added for
the case where __TASK_TRACED has moved to ->saved_state. The ->pi_lock is
taken so that the flag cannot migrate between ->state and ->saved_state
while the caller is looking at the two fields.
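
To make the ->pi_lock part concrete, below is a user-space model (again
illustration only; a pthread mutex stands in for ->pi_lock and a plain
struct for task_struct). One thread moves a TRACED bit between ->state and
->saved_state under the lock, the way the RT sleeping-lock code does on
block/wakeup, while another thread checks for the bit. The unlocked checker
can read ->state right after the bit has left it and ->saved_state right
after the bit has returned to ->state, and so report "not traced" although
the bit was set in one of the two fields at every instant; the checker that
takes the lock cannot miss.

  /* illustration only -- user-space model, not kernel code */
  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  #define TRACED 0x8      /* stand-in for __TASK_TRACED */

  struct mock_task {
          pthread_mutex_t pi_lock;
          unsigned long state;
          unsigned long saved_state;
  };

  /* deliberately racy: checks ->state/->saved_state without the lock */
  static bool traced_unlocked(struct mock_task *t)
  {
          return (t->state & TRACED) || (t->saved_state & TRACED);
  }

  /* models the patched check: both fields examined under pi_lock */
  static bool traced_locked(struct mock_task *t)
  {
          pthread_mutex_lock(&t->pi_lock);
          bool ret = (t->state & TRACED) || (t->saved_state & TRACED);
          pthread_mutex_unlock(&t->pi_lock);
          return ret;
  }

  /* models blocking on / waking from an RT sleeping lock */
  static void *migrate(void *arg)
  {
          struct mock_task *t = arg;

          for (int i = 0; i < 10000000; i++) {
                  pthread_mutex_lock(&t->pi_lock);
                  t->saved_state = t->state;      /* block: state -> saved_state */
                  t->state = 0;
                  pthread_mutex_unlock(&t->pi_lock);

                  pthread_mutex_lock(&t->pi_lock);
                  t->state = t->saved_state;      /* wake: saved_state -> state */
                  t->saved_state = 0;
                  pthread_mutex_unlock(&t->pi_lock);
          }
          return NULL;
  }

  int main(void)
  {
          struct mock_task t = {
                  .pi_lock = PTHREAD_MUTEX_INITIALIZER,
                  .state = TRACED,
          };
          pthread_t th;
          long miss_unlocked = 0, miss_locked = 0;

          pthread_create(&th, NULL, migrate, &t);
          for (int i = 0; i < 10000000; i++) {
                  if (!traced_unlocked(&t))
                          miss_unlocked++;
                  if (!traced_locked(&t))
                          miss_locked++;
          }
          pthread_join(th, NULL);

          printf("unlocked check missed: %ld, locked check missed: %ld\n",
                 miss_unlocked, miss_locked);
          return 0;
  }
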
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/sched.h | 49 +++++++++++++++++++++++++++++++++++++++----
 kernel/ptrace.c       |  9 +++++++-
 kernel/sched/core.c   | 17 +++++++++++++--
 3 files changed, 68 insertions(+), 7 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index a0d1a009aa56..d207f79c2ecb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -101,12 +101,8 @@ struct task_group;
 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
 					 TASK_PARKED)
 
-#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
-
 #define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
 
-#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
-
 #define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 					 (task->flags & PF_FROZEN) == 0 && \
 					 (task->state & TASK_NOLOAD) == 0)
@@ -1717,6 +1713,51 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
 }
 
+static inline bool __task_is_stopped_or_traced(struct task_struct *task)
+{
+	if (task->state & (__TASK_STOPPED | __TASK_TRACED))
+		return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
+		return true;
+#endif
+	return false;
+}
+
+static inline bool task_is_stopped_or_traced(struct task_struct *task)
+{
+	bool traced_stopped;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	traced_stopped = __task_is_stopped_or_traced(task);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+#else
+	traced_stopped = __task_is_stopped_or_traced(task);
+#endif
+	return traced_stopped;
+}
+
+static inline bool task_is_traced(struct task_struct *task)
+{
+	bool traced = false;
+
+	if (task->state & __TASK_TRACED)
+		return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/* in case the task is sleeping on tasklist_lock */
+	raw_spin_lock_irq(&task->pi_lock);
+	if (task->state & __TASK_TRACED)
+		traced = true;
+	else if (task->saved_state & __TASK_TRACED)
+		traced = true;
+	raw_spin_unlock_irq(&task->pi_lock);
+#endif
+	return traced;
+}
+
 /*
  * cond_resched() and cond_resched_lock(): latency reduction via
  * explicit rescheduling in places that are safe. The return
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b93eb4eaf7ac..a38b304fb9fd 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -174,7 +174,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
 
 	spin_lock_irq(&task->sighand->siglock);
 	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
-		task->state = __TASK_TRACED;
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&task->pi_lock, flags);
+		if (task->state & __TASK_TRACED)
+			task->state = __TASK_TRACED;
+		else
+			task->saved_state = __TASK_TRACED;
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		ret = true;
 	}
 	spin_unlock_irq(&task->sighand->siglock);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c94224ddfb73..3128a5b95fd7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1349,6 +1349,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
+static bool check_task_state(struct task_struct *p, long match_state)
+{
+	bool match = false;
+
+	raw_spin_lock_irq(&p->pi_lock);
+	if (p->state == match_state || p->saved_state == match_state)
+		match = true;
+	raw_spin_unlock_irq(&p->pi_lock);
+
+	return match;
+}
+
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
@@ -1393,7 +1405,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		 * is actually now running somewhere else!
 		 */
 		while (task_running(rq, p)) {
-			if (match_state && unlikely(p->state != match_state))
+			if (match_state && !check_task_state(p, match_state))
 				return 0;
 			cpu_relax();
 		}
@@ -1408,7 +1420,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		running = task_running(rq, p);
 		queued = task_on_rq_queued(p);
 		ncsw = 0;
-		if (!match_state || p->state == match_state)
+		if (!match_state || p->state == match_state ||
+		    p->saved_state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
 		task_rq_unlock(rq, p, &rf);
 
--
2.17.1