Subject: x86-preempt-lazy.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 01 Nov 2012 11:03:47 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/Kconfig                   |    1 +
 arch/x86/include/asm/thread_info.h |    6 ++++++
 arch/x86/kernel/asm-offsets.c      |    1 +
 arch/x86/kernel/entry_32.S         |   17 +++++++++++++++--
 arch/x86/kernel/entry_64.S         |   28 ++++++++++++++++++++--------
 5 files changed, 43 insertions(+), 10 deletions(-)

--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,6 +21,7 @@ config X86_64
 ### Arch settings
 config X86
 	def_bool y
+	select HAVE_PREEMPT_LAZY
 	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -29,6 +29,8 @@ struct thread_info {
 	__u32			status;		/* thread synchronous flags */
 	__u32			cpu;		/* current CPU */
 	int			saved_preempt_count;
+	int			preempt_lazy_count;	/* 0 => lazy preemptable
+							   <0 => BUG */
 	mm_segment_t		addr_limit;
 	struct restart_block	restart_block;
 	void __user		*sysenter_return;
@@ -80,6 +82,7 @@ struct thread_info {
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
+#define TIF_NEED_RESCHED_LAZY	9	/* lazy rescheduling necessary */
 #define TIF_MCE_NOTIFY		10	/* notify userspace of an MCE */
 #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
 #define TIF_UPROBE		12	/* breakpointed or singlestepping */
@@ -104,6 +107,7 @@ struct thread_info {
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_MCE_NOTIFY		(1 << TIF_MCE_NOTIFY)
 #define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
@@ -153,6 +157,8 @@ struct thread_info {
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
 
+#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
 #ifdef CONFIG_X86_32
 
 #define STACK_WARN	(THREAD_SIZE/8)
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -32,6 +32,7 @@ void common(void) {
 	OFFSET(TI_flags, thread_info, flags);
 	OFFSET(TI_status, thread_info, status);
 	OFFSET(TI_addr_limit, thread_info, addr_limit);
+	OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
 
 	BLANK();
 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -363,8 +363,21 @@ END(ret_from_exception)
 ENTRY(resume_kernel)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 need_resched:
+	# preempt count == 0 + NEED_RS set?
 	cmpl $0,PER_CPU_VAR(__preempt_count)
+	jz test_int_off
+
+	# atleast preempt count == 0 ?
+	cmpl $_TIF_NEED_RESCHED,PER_CPU_VAR(__preempt_count)
+	jne restore_all
+
+	cmpl $0,TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
 	jnz restore_all
+
+	testl $_TIF_NEED_RESCHED_LAZY, %ecx
+	jz restore_all
+
+test_int_off:
 	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz restore_all
 	call preempt_schedule_irq
@@ -604,7 +617,7 @@ ENDPROC(system_call)
 	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
 work_pending:
-	testb $_TIF_NEED_RESCHED, %cl
+	testl $_TIF_NEED_RESCHED_MASK, %ecx
 	jz work_notifysig
 work_resched:
 	call schedule
@@ -617,7 +630,7 @@ ENDPROC(system_call)
 	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
 					# than syscall tracing?
 	jz restore_all
-	testb $_TIF_NEED_RESCHED, %cl
+	testl $_TIF_NEED_RESCHED_MASK, %ecx
 	jnz work_resched
 
 work_notifysig:				# deal with pending signals and
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -658,8 +658,8 @@ GLOBAL(system_call_after_swapgs)
 	/* Handle reschedules */
 	/* edx:	work, edi: workmask */
 sysret_careful:
-	bt $TIF_NEED_RESCHED,%edx
-	jnc sysret_signal
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz sysret_signal
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@@ -771,8 +771,8 @@ GLOBAL(int_with_check)
 	/* First do a reschedule test. */
 	/* edx:	work, edi: workmask */
 int_careful:
-	bt $TIF_NEED_RESCHED,%edx
-	jnc int_very_careful
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz int_very_careful
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@@ -1071,8 +1071,8 @@ ENTRY(native_iret)
 	/* edi: workmask, edx: work */
 retint_careful:
 	CFI_RESTORE_STATE
-	bt $TIF_NEED_RESCHED,%edx
-	jnc retint_signal
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz retint_signal
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@@ -1104,7 +1104,19 @@ ENTRY(native_iret)
 	/* rcx:	 threadinfo. interrupts off. */
 ENTRY(retint_kernel)
 	cmpl $0,PER_CPU_VAR(__preempt_count)
-	jnz retint_restore_args
+	jz check_int_off
+
+	# atleast preempt count == 0 ?
+	cmpl $_TIF_NEED_RESCHED,PER_CPU_VAR(__preempt_count)
+	jnz retint_restore_args
+
+	cmpl $0, TI_preempt_lazy_count(%rcx)
+	jnz retint_restore_args
+
+	bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
+	jnc retint_restore_args
+
+check_int_off:
 	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
 	jnc retint_restore_args
 	call preempt_schedule_irq
@@ -1540,7 +1552,7 @@ ENTRY(paranoid_exit)
 	movq %rsp,%rdi			/* &pt_regs */
 	call sync_regs
 	movq %rax,%rsp			/* switch stack for scheduling */
-	testl $_TIF_NEED_RESCHED,%ebx
+	testl $_TIF_NEED_RESCHED_MASK,%ebx
 	jnz paranoid_schedule
 	movl %ebx,%edx			/* arg3: thread flags */
 	TRACE_IRQS_ON
|