Subject: arm: Add support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 31 Oct 2012 12:04:11 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.9-rt7.tar.xz

Implement the arm pieces for lazy preemption: add preempt_lazy_count to
thread_info, introduce TIF_NEED_RESCHED_LAZY and add it to _TIF_WORK_MASK,
and check the lazy reschedule request alongside the immediate one in the
svc exception return path, the syscall return fast path and
do_work_pending(). TIF_SECCOMP moves from bit 7 to bit 8 to make room,
so the seccomp bit is now tested separately in the return-to-user fast
paths. A short illustrative sketch of the decision made on interrupt
return follows.
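As an aside for the changelog (not part of the patch): the decision the
modified __irq_svc return path makes can be summarised in C roughly as
below. The helper name is made up; the real code reads TI_FLAGS,
TI_PREEMPT and TI_PREEMPT_LAZY directly in assembly.

  /*
   * Sketch only: C equivalent of the preemption check done on return
   * from interrupt. Field names match struct thread_info.
   */
  static int should_preempt_on_irq_exit(struct thread_info *ti)
  {
          if (ti->preempt_count)                  /* preemption disabled */
                  return 0;
          if (ti->flags & _TIF_NEED_RESCHED)      /* immediate request */
                  return 1;
          if (ti->preempt_lazy_count)             /* lazy preemption disabled */
                  return 0;
          return !!(ti->flags & _TIF_NEED_RESCHED_LAZY);  /* lazy request only */
  }
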
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/Kconfig | 1 +
arch/arm/include/asm/thread_info.h | 8 ++++++--
arch/arm/kernel/asm-offsets.c | 1 +
arch/arm/kernel/entry-armv.S | 19 ++++++++++++++++---
arch/arm/kernel/entry-common.S | 9 +++++++--
arch/arm/kernel/signal.c | 3 ++-
 6 files changed, 33 insertions(+), 8 deletions(-)

--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -81,6 +81,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -49,6 +49,7 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
__u32 cpu; /* cpu */
@@ -142,7 +143,8 @@ extern int vfp_restore_user_hwstate(stru
#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
+#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
+#define TIF_NEED_RESCHED_LAZY	7

 #define TIF_NOHZ		12	/* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
@@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(stru
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
@@ -167,7 +170,8 @@ extern int vfp_restore_user_hwstate(stru
* Change these and you break ASM code in entry-common.S
*/
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+				 _TIF_NEED_RESCHED_LAZY)

 #endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -65,6 +65,7 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -220,11 +220,18 @@ ENDPROC(__dabt_svc)

 #ifdef CONFIG_PREEMPT
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
- ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0
+	bne	1f				@ return from exception
+ ldr r0, [tsk, #TI_FLAGS] @ get flags
+ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
+ blne svc_preempt @ preempt!
+
+ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
+ teq r8, #0 @ if preempt lazy count != 0
movne r0, #0 @ force flags to 0
- tst r0, #_TIF_NEED_RESCHED
+ tst r0, #_TIF_NEED_RESCHED_LAZY
blne svc_preempt
+1:
 #endif

 	svc_exit r5, irq = 1			@ return from exception
@@ -239,8 +246,14 @@ ENDPROC(__irq_svc)
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
+ bne 1b
+ tst r0, #_TIF_NEED_RESCHED_LAZY
reteq r8 @ go again
- b 1b
+ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
+ teq r0, #0 @ if preempt lazy count != 0
+ beq 1b
+ ret r8 @ go again
+
 #endif

 __und_fault:
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -41,7 +41,9 @@
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
+ bne fast_work_pending
+ tst r1, #_TIF_SECCOMP
 	bne	fast_work_pending

 	/* perform architecture specific actions before user return */
@@ -67,8 +69,11 @@ ENDPROC(ret_fast_syscall)
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
+ bne do_slower_path
+ tst r1, #_TIF_SECCOMP
beq no_work_pending
+do_slower_path:
UNWIND(.fnend )
 ENDPROC(ret_fast_syscall)

--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, un
*/
trace_hardirqs_off();
do {
- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+ if (likely(thread_flags & (_TIF_NEED_RESCHED |
+ _TIF_NEED_RESCHED_LAZY))) {
schedule();
} else {
if (unlikely(!user_mode(regs)))
|