[rt] Update to 3.12.0-rt2 and reenable
svn path=/dists/trunk/linux/; revision=20817
parent 33e439ff0f
commit 94bed1121b
@@ -1,3 +1,10 @@
+linux (3.12-1~exp2) UNRELEASED; urgency=low
+
+  [ Ben Hutchings ]
+  * [rt] Update to 3.12.0-rt2 and reenable
+
+ -- Ben Hutchings <ben@decadent.org.uk>  Fri, 22 Nov 2013 00:41:09 +0000
+
 linux (3.12-1~exp1) experimental; urgency=low
 
   * New upstream release: http://kernelnewbies.org/Linux_3.12
@@ -29,7 +29,7 @@ featuresets:
  rt
 
 [featureset-rt_base]
-enabled: false
+enabled: true
 
 [description]
 part-long-up: This kernel is not suitable for SMP (multi-processor,
debian/patches/features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch (new file, 130 lines)

@@ -0,0 +1,130 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 19 Mar 2013 14:44:30 +0100
Subject: [PATCH] kernel/SRCU: provide a static initializer
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

There are static initializer macros for three out of the four
possible notifier types:
  ATOMIC_NOTIFIER_HEAD()
  BLOCKING_NOTIFIER_HEAD()
  RAW_NOTIFIER_HEAD()

This patch provides a static initializer for the fourth type to make it
complete.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/notifier.h | 34 +++++++++++++++++++++++++---------
 include/linux/srcu.h     |  8 ++++----
 2 files changed, 29 insertions(+), 13 deletions(-)

--- a/include/linux/notifier.h
|
||||
+++ b/include/linux/notifier.h
|
||||
@@ -6,7 +6,7 @@
|
||||
*
|
||||
* Alan Cox <Alan.Cox@linux.org>
|
||||
*/
|
||||
-
|
||||
+
|
||||
#ifndef _LINUX_NOTIFIER_H
|
||||
#define _LINUX_NOTIFIER_H
|
||||
#include <linux/errno.h>
|
||||
@@ -42,9 +42,7 @@
|
||||
* in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
|
||||
* As compensation, srcu_notifier_chain_unregister() is rather expensive.
|
||||
* SRCU notifier chains should be used when the chain will be called very
|
||||
- * often but notifier_blocks will seldom be removed. Also, SRCU notifier
|
||||
- * chains are slightly more difficult to use because they require special
|
||||
- * runtime initialization.
|
||||
+ * often but notifier_blocks will seldom be removed.
|
||||
*/
|
||||
|
||||
typedef int (*notifier_fn_t)(struct notifier_block *nb,
|
||||
@@ -88,7 +86,7 @@ struct srcu_notifier_head {
|
||||
(name)->head = NULL; \
|
||||
} while (0)
|
||||
|
||||
-/* srcu_notifier_heads must be initialized and cleaned up dynamically */
|
||||
+/* srcu_notifier_heads must be cleaned up dynamically */
|
||||
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
|
||||
#define srcu_cleanup_notifier_head(name) \
|
||||
cleanup_srcu_struct(&(name)->srcu);
|
||||
@@ -101,7 +99,13 @@ extern void srcu_init_notifier_head(stru
|
||||
.head = NULL }
|
||||
#define RAW_NOTIFIER_INIT(name) { \
|
||||
.head = NULL }
|
||||
-/* srcu_notifier_heads cannot be initialized statically */
|
||||
+
|
||||
+#define SRCU_NOTIFIER_INIT(name, pcpu) \
|
||||
+ { \
|
||||
+ .mutex = __MUTEX_INITIALIZER(name.mutex), \
|
||||
+ .head = NULL, \
|
||||
+ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
|
||||
+ }
|
||||
|
||||
#define ATOMIC_NOTIFIER_HEAD(name) \
|
||||
struct atomic_notifier_head name = \
|
||||
@@ -113,6 +117,18 @@ extern void srcu_init_notifier_head(stru
|
||||
struct raw_notifier_head name = \
|
||||
RAW_NOTIFIER_INIT(name)
|
||||
|
||||
+#define _SRCU_NOTIFIER_HEAD(name, mod) \
|
||||
+ static DEFINE_PER_CPU(struct srcu_struct_array, \
|
||||
+ name##_head_srcu_array); \
|
||||
+ mod struct srcu_notifier_head name = \
|
||||
+ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
|
||||
+
|
||||
+#define SRCU_NOTIFIER_HEAD(name) \
|
||||
+ _SRCU_NOTIFIER_HEAD(name, )
|
||||
+
|
||||
+#define SRCU_NOTIFIER_HEAD_STATIC(name) \
|
||||
+ _SRCU_NOTIFIER_HEAD(name, static)
|
||||
+
|
||||
#ifdef __KERNEL__
|
||||
|
||||
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
|
||||
@@ -182,12 +198,12 @@ static inline int notifier_to_errno(int
|
||||
|
||||
/*
|
||||
* Declared notifiers so far. I can imagine quite a few more chains
|
||||
- * over time (eg laptop power reset chains, reboot chain (to clean
|
||||
+ * over time (eg laptop power reset chains, reboot chain (to clean
|
||||
* device units up), device [un]mount chain, module load/unload chain,
|
||||
- * low memory chain, screenblank chain (for plug in modular screenblankers)
|
||||
+ * low memory chain, screenblank chain (for plug in modular screenblankers)
|
||||
* VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
|
||||
*/
|
||||
-
|
||||
+
|
||||
/* CPU notfiers are defined in include/linux/cpu.h. */
|
||||
|
||||
/* netdevice notifiers are defined in include/linux/netdevice.h */
|
||||
--- a/include/linux/srcu.h
|
||||
+++ b/include/linux/srcu.h
|
||||
@@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct
|
||||
|
||||
void process_srcu(struct work_struct *work);
|
||||
|
||||
-#define __SRCU_STRUCT_INIT(name) \
|
||||
+#define __SRCU_STRUCT_INIT(name, pcpu_name) \
|
||||
{ \
|
||||
.completed = -300, \
|
||||
- .per_cpu_ref = &name##_srcu_array, \
|
||||
+ .per_cpu_ref = &pcpu_name, \
|
||||
.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
|
||||
.running = false, \
|
||||
.batch_queue = RCU_BATCH_INIT(name.batch_queue), \
|
||||
@@ -104,11 +104,11 @@ void process_srcu(struct work_struct *wo
|
||||
*/
|
||||
#define DEFINE_SRCU(name) \
|
||||
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
|
||||
- struct srcu_struct name = __SRCU_STRUCT_INIT(name);
|
||||
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array);
|
||||
|
||||
#define DEFINE_STATIC_SRCU(name) \
|
||||
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
|
||||
- static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
|
||||
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array);
|
||||
|
||||
/**
|
||||
* call_srcu() - Queue a callback for invocation after an SRCU grace period
|
debian/patches/features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch (new file, 24 lines)

@@ -0,0 +1,24 @@
From 65513f34449eedb6b84c24a3583266534c1627e4 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Mar 2013 17:09:55 +0100
Subject: [PATCH 2/6] x86/highmem: add a "already used pte" check
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

This is a copy from kmap_atomic_prot().

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/x86/mm/iomap_32.c | 2 ++
 1 file changed, 2 insertions(+)

--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -65,6 +65,8 @@ void *kmap_atomic_prot_pfn(unsigned long
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	WARN_ON(!pte_none(*(kmap_pte - idx)));
+
 #ifdef CONFIG_PREEMPT_RT_FULL
 	current->kmap_pte[type] = pte;
 #endif

@@ -0,0 +1,29 @@
From e2ca4d092d9c6e6b07b465b4d81da207bbcc7437 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Mar 2013 21:37:27 +0100
Subject: [PATCH 3/6] arm/highmem: flush tlb on unmap
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

The tlb should be flushed on unmap and thus make the mapping entry
invalid. This is only done in the non-debug case which does not look
right.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/arm/mm/highmem.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -95,10 +95,10 @@ void __kunmap_atomic(void *kvaddr)
 		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-		set_top_pte(vaddr, __pte(0));
 #else
 		(void) idx;  /* to kill a warning */
 #endif
+		set_top_pte(vaddr, __pte(0));
 		kmap_atomic_idx_pop();
 	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
 		/* this address was obtained through kmap_high_get() */
debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch (new file, 45 lines)

@@ -0,0 +1,45 @@
From eef09918aff670a6162d2ae5fe87b393698ef57d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 1 Mar 2013 11:17:42 +0100
Subject: [PATCH 5/6] futex: Ensure lock/unlock symetry versus pi_lock and
 hash bucket lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

In exit_pi_state_list() we have the following locking construct:

   spin_lock(&hb->lock);
   raw_spin_lock_irq(&curr->pi_lock);

   ...
   spin_unlock(&hb->lock);

In !RT this works, but on RT the migrate_enable() function which is
called from spin_unlock() sees atomic context due to the held pi_lock
and just decrements the migrate_disable_atomic counter of the
task. Now the next call to migrate_disable() sees the counter being
negative and issues a warning. That check should be in
migrate_enable() already.

Fix this by dropping pi_lock before unlocking hb->lock and reacquiring
pi_lock after that again. This is safe as the loop code reevaluates
head again under the pi_lock.

Reported-by: Yong Zhang <yong.zhang@windriver.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/futex.c | 2 ++
 1 file changed, 2 insertions(+)

--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -572,7 +572,9 @@ void exit_pi_state_list(struct task_stru
 		 * task still owns the PI-state:
 		 */
 		if (head->next != next) {
+			raw_spin_unlock_irq(&curr->pi_lock);
 			spin_unlock(&hb->lock);
+			raw_spin_lock_irq(&curr->pi_lock);
 			continue;
 		}

debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch (new file, 78 lines)

@@ -0,0 +1,78 @@
From b72b514282ffad0d665ea94932b968f388304079 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 21 Mar 2013 19:01:05 +0100
Subject: [PATCH] HACK: printk: drop the logbuf_lock more often
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

The lock is held with irqs off. The latency drops 500us+ on my arm box
with a "full" buffer after executing "dmesg" on the shell.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/printk/printk.c | 27 ++++++++++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)

--- a/kernel/printk/printk.c
|
||||
+++ b/kernel/printk/printk.c
|
||||
@@ -1029,6 +1029,7 @@ static int syslog_print_all(char __user
|
||||
{
|
||||
char *text;
|
||||
int len = 0;
|
||||
+ int attempts = 0;
|
||||
|
||||
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
|
||||
if (!text)
|
||||
@@ -1040,7 +1041,14 @@ static int syslog_print_all(char __user
|
||||
u64 seq;
|
||||
u32 idx;
|
||||
enum log_flags prev;
|
||||
-
|
||||
+ int num_msg;
|
||||
+try_again:
|
||||
+ attempts++;
|
||||
+ if (attempts > 10) {
|
||||
+ len = -EBUSY;
|
||||
+ goto out;
|
||||
+ }
|
||||
+ num_msg = 0;
|
||||
if (clear_seq < log_first_seq) {
|
||||
/* messages are gone, move to first available one */
|
||||
clear_seq = log_first_seq;
|
||||
@@ -1061,6 +1069,14 @@ static int syslog_print_all(char __user
|
||||
prev = msg->flags;
|
||||
idx = log_next(idx);
|
||||
seq++;
|
||||
+ num_msg++;
|
||||
+ if (num_msg > 5) {
|
||||
+ num_msg = 0;
|
||||
+ raw_spin_unlock_irq(&logbuf_lock);
|
||||
+ raw_spin_lock_irq(&logbuf_lock);
|
||||
+ if (clear_seq < log_first_seq)
|
||||
+ goto try_again;
|
||||
+ }
|
||||
}
|
||||
|
||||
/* move first record forward until length fits into the buffer */
|
||||
@@ -1074,6 +1090,14 @@ static int syslog_print_all(char __user
|
||||
prev = msg->flags;
|
||||
idx = log_next(idx);
|
||||
seq++;
|
||||
+ num_msg++;
|
||||
+ if (num_msg > 5) {
|
||||
+ num_msg = 0;
|
||||
+ raw_spin_unlock_irq(&logbuf_lock);
|
||||
+ raw_spin_lock_irq(&logbuf_lock);
|
||||
+ if (clear_seq < log_first_seq)
|
||||
+ goto try_again;
|
||||
+ }
|
||||
}
|
||||
|
||||
/* last message fitting into this dump */
|
||||
@@ -1115,6 +1139,7 @@ static int syslog_print_all(char __user
|
||||
clear_seq = log_next_seq;
|
||||
clear_idx = log_next_idx;
|
||||
}
|
||||
+out:
|
||||
raw_spin_unlock_irq(&logbuf_lock);
|
||||
|
||||
kfree(text);
|
debian/patches/features/all/rt/Kind-of-revert-powerpc-52xx-provide-a-default-in-mpc.patch (new file, 43 lines)

@@ -0,0 +1,43 @@
From 0939f949f9c72833e5b1a718bd5b4546873b0b38 Mon Sep 17 00:00:00 2001
From: Wolfram Sang <wsa@the-dreams.de>
Date: Fri, 4 Oct 2013 17:37:09 +0200
Subject: [PATCH] Kind of revert "powerpc: 52xx: provide a default in
 mpc52xx_irqhost_map()"
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

This more or less reverts commit 6391f697d4892a6f233501beea553e13f7745a23.
Instead of adding an unneeded 'default', mark the variable to prevent
the false positive 'uninitialized var'. The other change (fixing the
printout) needs revert, too. We want to know WHICH critical irq failed,
not which level it had.

Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Anatolij Gustschin <agust@denx.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/powerpc/platforms/52xx/mpc52xx_pic.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -340,7 +340,7 @@ static int mpc52xx_irqhost_map(struct ir
 {
 	int l1irq;
 	int l2irq;
-	struct irq_chip *irqchip;
+	struct irq_chip *uninitialized_var(irqchip);
 	void *hndlr;
 	int type;
 	u32 reg;
@@ -373,9 +373,8 @@ static int mpc52xx_irqhost_map(struct ir
 	case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
 	case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
 	case MPC52xx_IRQ_L1_CRIT:
-	default:
 		pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
-			__func__, l1irq);
+			__func__, l2irq);
 		irq_set_chip(virq, &no_irq_chip);
 		return 0;
 	}
debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch (new file, 173 lines)

@@ -0,0 +1,173 @@
|
|||
From: Steven Rostedt <rostedt@goodmis.org>
|
||||
Date: Wed, 13 Feb 2013 09:26:05 -0500
|
||||
Subject: [PATCH] acpi/rt: Convert acpi_gbl_hardware lock back to a raw_spinlock_t
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
We hit the following bug with 3.6-rt:
|
||||
|
||||
[ 5.898990] BUG: scheduling while atomic: swapper/3/0/0x00000002
|
||||
[ 5.898991] no locks held by swapper/3/0.
|
||||
[ 5.898993] Modules linked in:
|
||||
[ 5.898996] Pid: 0, comm: swapper/3 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1
|
||||
[ 5.898997] Call Trace:
|
||||
[ 5.899011] [<ffffffff810804e7>] __schedule_bug+0x67/0x90
|
||||
[ 5.899028] [<ffffffff81577923>] __schedule+0x793/0x7a0
|
||||
[ 5.899032] [<ffffffff810b4e40>] ? debug_rt_mutex_print_deadlock+0x50/0x200
|
||||
[ 5.899034] [<ffffffff81577b89>] schedule+0x29/0x70
|
||||
[ 5.899036] BUG: scheduling while atomic: swapper/7/0/0x00000002
|
||||
[ 5.899037] no locks held by swapper/7/0.
|
||||
[ 5.899039] [<ffffffff81578525>] rt_spin_lock_slowlock+0xe5/0x2f0
|
||||
[ 5.899040] Modules linked in:
|
||||
[ 5.899041]
|
||||
[ 5.899045] [<ffffffff81579a58>] ? _raw_spin_unlock_irqrestore+0x38/0x90
|
||||
[ 5.899046] Pid: 0, comm: swapper/7 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1
|
||||
[ 5.899047] Call Trace:
|
||||
[ 5.899049] [<ffffffff81578bc6>] rt_spin_lock+0x16/0x40
|
||||
[ 5.899052] [<ffffffff810804e7>] __schedule_bug+0x67/0x90
|
||||
[ 5.899054] [<ffffffff8157d3f0>] ? notifier_call_chain+0x80/0x80
|
||||
[ 5.899056] [<ffffffff81577923>] __schedule+0x793/0x7a0
|
||||
[ 5.899059] [<ffffffff812f2034>] acpi_os_acquire_lock+0x1f/0x23
|
||||
[ 5.899062] [<ffffffff810b4e40>] ? debug_rt_mutex_print_deadlock+0x50/0x200
|
||||
[ 5.899068] [<ffffffff8130be64>] acpi_write_bit_register+0x33/0xb0
|
||||
[ 5.899071] [<ffffffff81577b89>] schedule+0x29/0x70
|
||||
[ 5.899072] [<ffffffff8130be13>] ? acpi_read_bit_register+0x33/0x51
|
||||
[ 5.899074] [<ffffffff81578525>] rt_spin_lock_slowlock+0xe5/0x2f0
|
||||
[ 5.899077] [<ffffffff8131d1fc>] acpi_idle_enter_bm+0x8a/0x28e
|
||||
[ 5.899079] [<ffffffff81579a58>] ? _raw_spin_unlock_irqrestore+0x38/0x90
|
||||
[ 5.899081] [<ffffffff8107e5da>] ? this_cpu_load+0x1a/0x30
|
||||
[ 5.899083] [<ffffffff81578bc6>] rt_spin_lock+0x16/0x40
|
||||
[ 5.899087] [<ffffffff8144c759>] cpuidle_enter+0x19/0x20
|
||||
[ 5.899088] [<ffffffff8157d3f0>] ? notifier_call_chain+0x80/0x80
|
||||
[ 5.899090] [<ffffffff8144c777>] cpuidle_enter_state+0x17/0x50
|
||||
[ 5.899092] [<ffffffff812f2034>] acpi_os_acquire_lock+0x1f/0x23
|
||||
[ 5.899094] [<ffffffff8144d1a1>] cpuidle899101] [<ffffffff8130be13>] ?
|
||||
|
||||
As the acpi code disables interrupts in acpi_idle_enter_bm, and calls
code that grabs the acpi lock, it causes issues because on RT the lock
is currently a sleeping lock.
|
||||
|
||||
The lock was converted from a raw to a sleeping lock due to some
|
||||
previous issues, and tests that showed it didn't seem to matter.
|
||||
Unfortunately, it did matter for one of our boxes.
|
||||
|
||||
This patch converts the lock back to a raw lock. I've run this code on a
|
||||
few of my own machines, one being my laptop that uses the acpi quite
|
||||
extensively. I've been able to suspend and resume without issues.
|
||||
|
||||
[ tglx: Made the change exclusive for acpi_gbl_hardware_lock ]
|
||||
|
||||
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
|
||||
Cc: John Kacur <jkacur@gmail.com>
|
||||
Cc: Clark Williams <clark@redhat.com>
|
||||
Link: http://lkml.kernel.org/r/1360765565.23152.5.camel@gandalf.local.home
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/acpi/acpica/acglobal.h | 2 +-
|
||||
drivers/acpi/acpica/hwregs.c | 4 ++--
|
||||
drivers/acpi/acpica/hwxface.c | 4 ++--
|
||||
drivers/acpi/acpica/utmutex.c | 4 ++--
|
||||
include/acpi/platform/aclinux.h | 14 ++++++++++++++
|
||||
5 files changed, 21 insertions(+), 7 deletions(-)
|
||||
|
||||
--- a/drivers/acpi/acpica/acglobal.h
|
||||
+++ b/drivers/acpi/acpica/acglobal.h
|
||||
@@ -235,7 +235,7 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pend
|
||||
* interrupt level
|
||||
*/
|
||||
ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
|
||||
-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
|
||||
+ACPI_EXTERN acpi_raw_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
|
||||
ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock;
|
||||
|
||||
/* Mutex for _OSI support */
|
||||
--- a/drivers/acpi/acpica/hwregs.c
|
||||
+++ b/drivers/acpi/acpica/hwregs.c
|
||||
@@ -269,14 +269,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
|
||||
ACPI_BITMASK_ALL_FIXED_STATUS,
|
||||
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
|
||||
|
||||
- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
|
||||
+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
|
||||
|
||||
/* Clear the fixed events in PM1 A/B */
|
||||
|
||||
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
|
||||
ACPI_BITMASK_ALL_FIXED_STATUS);
|
||||
|
||||
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
|
||||
+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
|
||||
|
||||
if (ACPI_FAILURE(status))
|
||||
goto exit;
|
||||
--- a/drivers/acpi/acpica/hwxface.c
|
||||
+++ b/drivers/acpi/acpica/hwxface.c
|
||||
@@ -365,7 +365,7 @@ acpi_status acpi_write_bit_register(u32
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
|
||||
+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
|
||||
|
||||
/*
|
||||
* At this point, we know that the parent register is one of the
|
||||
@@ -426,7 +426,7 @@ acpi_status acpi_write_bit_register(u32
|
||||
|
||||
unlock_and_exit:
|
||||
|
||||
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
|
||||
+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
--- a/drivers/acpi/acpica/utmutex.c
|
||||
+++ b/drivers/acpi/acpica/utmutex.c
|
||||
@@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(voi
|
||||
return_ACPI_STATUS (status);
|
||||
}
|
||||
|
||||
- status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
|
||||
+ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
|
||||
if (ACPI_FAILURE (status)) {
|
||||
return_ACPI_STATUS (status);
|
||||
}
|
||||
@@ -141,7 +141,7 @@ void acpi_ut_mutex_terminate(void)
|
||||
/* Delete the spinlocks */
|
||||
|
||||
acpi_os_delete_lock(acpi_gbl_gpe_lock);
|
||||
- acpi_os_delete_lock(acpi_gbl_hardware_lock);
|
||||
+ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
|
||||
acpi_os_delete_lock(acpi_gbl_reference_count_lock);
|
||||
|
||||
/* Delete the reader/writer lock */
|
||||
--- a/include/acpi/platform/aclinux.h
|
||||
+++ b/include/acpi/platform/aclinux.h
|
||||
@@ -72,6 +72,7 @@
|
||||
|
||||
#define acpi_cache_t struct kmem_cache
|
||||
#define acpi_spinlock spinlock_t *
|
||||
+#define acpi_raw_spinlock raw_spinlock_t *
|
||||
#define acpi_cpu_flags unsigned long
|
||||
|
||||
#else /* !__KERNEL__ */
|
||||
@@ -174,6 +175,19 @@ static inline void *acpi_os_acquire_obje
|
||||
lock ? AE_OK : AE_NO_MEMORY; \
|
||||
})
|
||||
|
||||
+#define acpi_os_create_raw_lock(__handle) \
|
||||
+({ \
|
||||
+ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
|
||||
+ \
|
||||
+ if (lock) { \
|
||||
+ *(__handle) = lock; \
|
||||
+ raw_spin_lock_init(*(__handle)); \
|
||||
+ } \
|
||||
+ lock ? AE_OK : AE_NO_MEMORY; \
|
||||
+})
|
||||
+
|
||||
+#define acpi_os_delete_raw_lock(__handle) kfree(__handle)
|
||||
+
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* __ACLINUX_H__ */
|

@@ -0,0 +1,287 @@
Subject: mm: Fixup all fault handlers to check current->pagefault_disable
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 17 Mar 2011 11:32:28 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

Necessary for decoupling pagefault disable from preempt count.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/alpha/mm/fault.c      | 2 +-
 arch/arm/mm/fault.c        | 2 +-
 arch/avr32/mm/fault.c      | 3 ++-
 arch/cris/mm/fault.c       | 2 +-
 arch/frv/mm/fault.c        | 2 +-
 arch/ia64/mm/fault.c       | 2 +-
 arch/m32r/mm/fault.c       | 2 +-
 arch/m68k/mm/fault.c       | 2 +-
 arch/microblaze/mm/fault.c | 2 +-
 arch/mips/mm/fault.c       | 2 +-
 arch/mn10300/mm/fault.c    | 2 +-
 arch/parisc/mm/fault.c     | 2 +-
 arch/powerpc/mm/fault.c    | 2 +-
 arch/s390/mm/fault.c       | 6 ++++--
 arch/score/mm/fault.c      | 2 +-
 arch/sh/mm/fault.c         | 2 +-
 arch/sparc/mm/fault_32.c   | 2 +-
 arch/sparc/mm/fault_64.c   | 2 +-
 arch/tile/mm/fault.c       | 2 +-
 arch/um/kernel/trap.c      | 2 +-
 arch/x86/mm/fault.c        | 2 +-
 arch/xtensa/mm/fault.c     | 2 +-
 22 files changed, 26 insertions(+), 23 deletions(-)

--- a/arch/alpha/mm/fault.c
|
||||
+++ b/arch/alpha/mm/fault.c
|
||||
@@ -107,7 +107,7 @@ do_page_fault(unsigned long address, uns
|
||||
|
||||
/* If we're in an interrupt context, or have no user context,
|
||||
we must not take the fault. */
|
||||
- if (!mm || in_atomic())
|
||||
+ if (!mm || in_atomic() || current->pagefault_disabled)
|
||||
goto no_context;
|
||||
|
||||
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
|
||||
--- a/arch/arm/mm/fault.c
|
||||
+++ b/arch/arm/mm/fault.c
|
||||
@@ -277,7 +277,7 @@ do_page_fault(unsigned long addr, unsign
|
||||
* If we're in an interrupt or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
- if (in_atomic() || !mm)
|
||||
+ if (in_atomic() || !mm || current->pagefault_disabled)
|
||||
goto no_context;
|
||||
|
||||
if (user_mode(regs))
|
||||
--- a/arch/avr32/mm/fault.c
|
||||
+++ b/arch/avr32/mm/fault.c
|
||||
@@ -81,7 +81,8 @@ asmlinkage void do_page_fault(unsigned l
|
||||
* If we're in an interrupt or have no user context, we must
|
||||
* not take the fault...
|
||||
*/
|
||||
- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
|
||||
+ if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM) ||
|
||||
+ current->pagefault_disabled)
|
||||
goto no_context;
|
||||
|
||||
local_irq_enable();
|
||||
--- a/arch/cris/mm/fault.c
|
||||
+++ b/arch/cris/mm/fault.c
|
||||
@@ -113,7 +113,7 @@ do_page_fault(unsigned long address, str
|
||||
* user context, we must not take the fault.
|
||||
*/
|
||||
|
||||
- if (in_atomic() || !mm)
|
||||
+ if (in_atomic() || !mm || current->pagefault_disabled)
|
||||
goto no_context;
|
||||
|
||||
if (user_mode(regs))
|
||||
--- a/arch/frv/mm/fault.c
|
||||
+++ b/arch/frv/mm/fault.c
|
||||
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datamm
|
||||
* If we're in an interrupt or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
- if (in_atomic() || !mm)
|
||||
+ if (in_atomic() || !mm || current->pagefault_disabled)
|
||||
goto no_context;
|
||||
|
||||
if (user_mode(__frame))
|
||||
--- a/arch/ia64/mm/fault.c
|
||||
+++ b/arch/ia64/mm/fault.c
|
||||
@@ -96,7 +96,7 @@ ia64_do_page_fault (unsigned long addres
|
||||
/*
|
||||
* If we're in an interrupt or have no user context, we must not take the fault..
|
||||
*/
|
||||
- if (in_atomic() || !mm)
|
||||
+ if (in_atomic() || !mm || current->pagefault_disabled)
|
||||
goto no_context;
|
||||
|
||||
#ifdef CONFIG_VIRTUAL_MEM_MAP
|
||||
--- a/arch/m32r/mm/fault.c
|
||||
+++ b/arch/m32r/mm/fault.c
|
||||
@@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_
|
||||
* If we're in an interrupt or have no user context or are running in an
|
||||
* atomic region then we must not take the fault..
|
||||
*/
|
||||
- if (in_atomic() || !mm)
|
||||
+ if (in_atomic() || !mm || current->pagefault_disabled
|
||||
goto bad_area_nosemaphore;
|
||||
|
||||
if (error_code & ACE_USERMODE)
|
||||
--- a/arch/m68k/mm/fault.c
|
||||
+++ b/arch/m68k/mm/fault.c
|
||||
@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs,
|
||||
* If we're in an interrupt or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
- if (in_atomic() || !mm)
|
||||
+ if (in_atomic() || !mm || current->pagefault_disabled)
|
||||
goto no_context;
|
||||
|
||||
if (user_mode(regs))
|
||||
--- a/arch/microblaze/mm/fault.c
|
||||
+++ b/arch/microblaze/mm/fault.c
|
||||
@@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs,
|
||||
if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
|
||||
is_write = 0;
|
||||
|
||||
- if (unlikely(in_atomic() || !mm)) {
|
||||
+ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
|
||||
if (kernel_mode(regs))
|
||||
goto bad_area_nosemaphore;
|
||||
|
||||
--- a/arch/mips/mm/fault.c
|
||||
+++ b/arch/mips/mm/fault.c
|
||||
@@ -89,7 +89,7 @@ static void __kprobes __do_page_fault(st
|
||||
* If we're in an interrupt or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
- if (in_atomic() || !mm)
|
||||
+ if (in_atomic() || !mm || current->pagefault_disabled)
|
||||
goto bad_area_nosemaphore;
|
||||
|
||||
if (user_mode(regs))
|
||||
--- a/arch/mn10300/mm/fault.c
|
||||
+++ b/arch/mn10300/mm/fault.c
|
||||
@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_
|
||||
* If we're in an interrupt or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
- if (in_atomic() || !mm)
|
||||
+ if (in_atomic() || !mm || current->pagefault_disabled)
|
||||
goto no_context;
|
||||
|
||||
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
|
||||
--- a/arch/parisc/mm/fault.c
|
||||
+++ b/arch/parisc/mm/fault.c
|
||||
@@ -177,7 +177,7 @@ void do_page_fault(struct pt_regs *regs,
|
||||
int fault;
|
||||
unsigned int flags;
|
||||
|
||||
- if (in_atomic())
|
||||
+ if (in_atomic() || current->pagefault_disabled)
|
||||
goto no_context;
|
||||
|
||||
tsk = current;
|
||||
--- a/arch/powerpc/mm/fault.c
|
||||
+++ b/arch/powerpc/mm/fault.c
|
||||
@@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_re
|
||||
if (!arch_irq_disabled_regs(regs))
|
||||
local_irq_enable();
|
||||
|
||||
- if (in_atomic() || mm == NULL) {
|
||||
+ if (in_atomic() || mm == NULL || current->pagefault_disabled) {
|
||||
if (!user_mode(regs)) {
|
||||
rc = SIGSEGV;
|
||||
goto bail;
|
||||
--- a/arch/s390/mm/fault.c
|
||||
+++ b/arch/s390/mm/fault.c
|
||||
@@ -296,7 +296,8 @@ static inline int do_exception(struct pt
|
||||
* user context.
|
||||
*/
|
||||
fault = VM_FAULT_BADCONTEXT;
|
||||
- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
|
||||
+ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm ||
|
||||
+ tsk->pagefault_disabled))
|
||||
goto out;
|
||||
|
||||
address = trans_exc_code & __FAIL_ADDR_MASK;
|
||||
@@ -442,7 +443,8 @@ void __kprobes do_asce_exception(struct
|
||||
clear_tsk_thread_flag(current, TIF_PER_TRAP);
|
||||
|
||||
trans_exc_code = regs->int_parm_long;
|
||||
- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
|
||||
+ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm ||
|
||||
+ current->pagefault_disabled()));
|
||||
goto no_context;
|
||||
|
||||
down_read(&mm->mmap_sem);
|
||||
--- a/arch/score/mm/fault.c
|
||||
+++ b/arch/score/mm/fault.c
|
||||
@@ -73,7 +73,7 @@ asmlinkage void do_page_fault(struct pt_
|
||||
* If we're in an interrupt or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
- if (in_atomic() || !mm)
|
||||
+ if (in_atomic() || !mm || current->pagefault_disabled)
|
||||
goto bad_area_nosemaphore;
|
||||
|
||||
if (user_mode(regs))
|
||||
--- a/arch/sh/mm/fault.c
|
||||
+++ b/arch/sh/mm/fault.c
|
||||
@@ -438,7 +438,7 @@ asmlinkage void __kprobes do_page_fault(
|
||||
* If we're in an interrupt, have no user context or are running
|
||||
* in an atomic region then we must not take the fault:
|
||||
*/
|
||||
- if (unlikely(in_atomic() || !mm)) {
|
||||
+ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
|
||||
bad_area_nosemaphore(regs, error_code, address);
|
||||
return;
|
||||
}
|
||||
--- a/arch/sparc/mm/fault_32.c
|
||||
+++ b/arch/sparc/mm/fault_32.c
|
||||
@@ -199,7 +199,7 @@ asmlinkage void do_sparc_fault(struct pt
|
||||
* If we're in an interrupt or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
- if (in_atomic() || !mm)
|
||||
+ if (in_atomic() || !mm || current->pagefault_disabled)
|
||||
goto no_context;
|
||||
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
|
||||
--- a/arch/sparc/mm/fault_64.c
|
||||
+++ b/arch/sparc/mm/fault_64.c
|
||||
@@ -322,7 +322,7 @@ asmlinkage void __kprobes do_sparc64_fau
|
||||
* If we're in an interrupt or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
- if (in_atomic() || !mm)
|
||||
+ if (in_atomic() || !mm || current->pagefault_enabled)
|
||||
goto intr_or_no_mm;
|
||||
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
|
||||
--- a/arch/tile/mm/fault.c
|
||||
+++ b/arch/tile/mm/fault.c
|
||||
@@ -357,7 +357,7 @@ static int handle_page_fault(struct pt_r
|
||||
* If we're in an interrupt, have no user context or are running in an
|
||||
* atomic region then we must not take the fault.
|
||||
*/
|
||||
- if (in_atomic() || !mm) {
|
||||
+ if (in_atomic() || !mm || current->pagefault_disabled) {
|
||||
vma = NULL; /* happy compiler */
|
||||
goto bad_area_nosemaphore;
|
||||
}
|
||||
--- a/arch/um/kernel/trap.c
|
||||
+++ b/arch/um/kernel/trap.c
|
||||
@@ -38,7 +38,7 @@ int handle_page_fault(unsigned long addr
|
||||
* If the fault was during atomic operation, don't take the fault, just
|
||||
* fail.
|
||||
*/
|
||||
- if (in_atomic())
|
||||
+ if (in_atomic() || current->pagefault_disabled)
|
||||
goto out_nosemaphore;
|
||||
|
||||
if (is_user)
|
||||
--- a/arch/x86/mm/fault.c
|
||||
+++ b/arch/x86/mm/fault.c
|
||||
@@ -1094,7 +1094,7 @@ static void __kprobes
|
||||
* If we're in an interrupt, have no user context or are running
|
||||
* in an atomic region then we must not take the fault:
|
||||
*/
|
||||
- if (unlikely(in_atomic() || !mm)) {
|
||||
+ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
|
||||
bad_area_nosemaphore(regs, error_code, address);
|
||||
return;
|
||||
}
|
||||
--- a/arch/xtensa/mm/fault.c
|
||||
+++ b/arch/xtensa/mm/fault.c
|
||||
@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
|
||||
/* If we're in an interrupt or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
- if (in_atomic() || !mm) {
|
||||
+ if (in_atomic() || !mm || current->pagefault_disabled) {
|
||||
bad_page_fault(regs, address, SIGSEGV);
|
||||
return;
|
||||
}
|
debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch (new file, 58 lines)

@@ -0,0 +1,58 @@
From: Benedikt Spranger <b.spranger@linutronix.de>
Date: Sat, 6 Mar 2010 17:47:10 +0100
Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

Setup and remove the interrupt handler in clock event mode selection.
This avoids calling the (shared) interrupt handler when the device is
not used.

Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[bigeasy: redo the patch with NR_IRQS_LEGACY which is probably required since
commit 8fe82a55 ("ARM: at91: sparse irq support") which is included since v3.6.
Patch based on what Sami Pietikäinen <Sami.Pietikainen@wapice.com> suggested].
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/arm/mach-at91/at91rm9200_time.c  | 1 +
 arch/arm/mach-at91/at91sam926x_time.c | 5 ++++-
 2 files changed, 5 insertions(+), 1 deletion(-)

--- a/arch/arm/mach-at91/at91rm9200_time.c
|
||||
+++ b/arch/arm/mach-at91/at91rm9200_time.c
|
||||
@@ -134,6 +134,7 @@ clkevt32k_mode(enum clock_event_mode mod
|
||||
break;
|
||||
case CLOCK_EVT_MODE_SHUTDOWN:
|
||||
case CLOCK_EVT_MODE_UNUSED:
|
||||
+ remove_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
|
||||
case CLOCK_EVT_MODE_RESUME:
|
||||
irqmask = 0;
|
||||
break;
|
||||
--- a/arch/arm/mach-at91/at91sam926x_time.c
|
||||
+++ b/arch/arm/mach-at91/at91sam926x_time.c
|
||||
@@ -77,7 +77,7 @@ static struct clocksource pit_clk = {
|
||||
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
||||
};
|
||||
|
||||
-
|
||||
+static struct irqaction at91sam926x_pit_irq;
|
||||
/*
|
||||
* Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
|
||||
*/
|
||||
@@ -86,6 +86,8 @@ pit_clkevt_mode(enum clock_event_mode mo
|
||||
{
|
||||
switch (mode) {
|
||||
case CLOCK_EVT_MODE_PERIODIC:
|
||||
+ /* Set up irq handler */
|
||||
+ setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
|
||||
/* update clocksource counter */
|
||||
pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR));
|
||||
pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
|
||||
@@ -98,6 +100,7 @@ pit_clkevt_mode(enum clock_event_mode mo
|
||||
case CLOCK_EVT_MODE_UNUSED:
|
||||
/* disable irq, leaving the clocksource active */
|
||||
pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
|
||||
+ remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
|
||||
break;
|
||||
case CLOCK_EVT_MODE_RESUME:
|
||||
break;
|
debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch (new file, 33 lines)

@@ -0,0 +1,33 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 1 May 2010 18:29:35 +0200
Subject: ARM: at91: tclib: Default to tclib timer for RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

RT is not too happy about the shared timer interrupt in AT91
devices. Default to tclib timer for RT.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

---
 drivers/misc/Kconfig | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -63,6 +63,7 @@ config ATMEL_PWM
 config ATMEL_TCLIB
 	bool "Atmel AT32/AT91 Timer/Counter Library"
 	depends on (AVR32 || ARCH_AT91)
+	default y if PREEMPT_RT_FULL
 	help
 	  Select this if you want a library to allocate the Timer/Counter
 	  blocks found on many Atmel processors. This facilitates using
@@ -95,7 +96,7 @@ config ATMEL_TCB_CLKSRC_BLOCK
 config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
 	bool "TC Block use 32 KiHz clock"
 	depends on ATMEL_TCB_CLKSRC
-	default y
+	default y if !PREEMPT_RT_FULL
 	help
 	  Select this to use 32 KiHz base clock rate as TC block clock
 	  source for clock events.

@@ -0,0 +1,320 @@
From: Frank Rowand <frank.rowand@am.sony.com>
Date: Mon, 19 Sep 2011 14:51:14 -0700
Subject: [PATCH] preempt-rt: Convert arm boot_lock to raw
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

The arm boot_lock is used by the secondary processor startup code. The locking
task is the idle thread, which has idle->sched_class == &idle_sched_class.
idle_sched_class->enqueue_task == NULL, so if the idle task blocks on the
lock, the attempt to wake it when the lock becomes available will fail:

try_to_wake_up()
   ...
      activate_task()
         enqueue_task()
            p->sched_class->enqueue_task(rq, p, flags)

Fix by converting boot_lock to a raw spin lock.

Signed-off-by: Frank Rowand <frank.rowand@am.sony.com>
Link: http://lkml.kernel.org/r/4E77B952.3010606@am.sony.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/arm/mach-exynos/platsmp.c    | 12 ++++++------
 arch/arm/mach-msm/platsmp.c       | 10 +++++-----
 arch/arm/mach-omap2/omap-smp.c    | 10 +++++-----
 arch/arm/mach-prima2/platsmp.c    | 10 +++++-----
 arch/arm/mach-spear/platsmp.c     | 10 +++++-----
 arch/arm/mach-ux500/platsmp.c     | 10 +++++-----
 arch/arm/plat-versatile/platsmp.c | 10 +++++-----
 7 files changed, 36 insertions(+), 36 deletions(-)

--- a/arch/arm/mach-exynos/platsmp.c
|
||||
+++ b/arch/arm/mach-exynos/platsmp.c
|
||||
@@ -73,7 +73,7 @@ static void __iomem *scu_base_addr(void)
|
||||
return (void __iomem *)(S5P_VA_SCU);
|
||||
}
|
||||
|
||||
-static DEFINE_SPINLOCK(boot_lock);
|
||||
+static DEFINE_RAW_SPINLOCK(boot_lock);
|
||||
|
||||
static void exynos_secondary_init(unsigned int cpu)
|
||||
{
|
||||
@@ -86,8 +86,8 @@ static void exynos_secondary_init(unsign
|
||||
/*
|
||||
* Synchronise with the boot thread.
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
}
|
||||
|
||||
static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
@@ -99,7 +99,7 @@ static int exynos_boot_secondary(unsigne
|
||||
* Set synchronisation state between this boot processor
|
||||
* and the secondary one
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
|
||||
/*
|
||||
* The secondary processor is waiting to be released from
|
||||
@@ -128,7 +128,7 @@ static int exynos_boot_secondary(unsigne
|
||||
|
||||
if (timeout == 0) {
|
||||
printk(KERN_ERR "cpu1 power enable failed");
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
}
|
||||
@@ -167,7 +167,7 @@ static int exynos_boot_secondary(unsigne
|
||||
* now the secondary core is starting up let it run its
|
||||
* calibrations, then wait for it to finish
|
||||
*/
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
|
||||
return pen_release != -1 ? -ENOSYS : 0;
|
||||
}
|
||||
--- a/arch/arm/mach-msm/platsmp.c
|
||||
+++ b/arch/arm/mach-msm/platsmp.c
|
||||
@@ -30,7 +30,7 @@
|
||||
|
||||
extern void msm_secondary_startup(void);
|
||||
|
||||
-static DEFINE_SPINLOCK(boot_lock);
|
||||
+static DEFINE_RAW_SPINLOCK(boot_lock);
|
||||
|
||||
static inline int get_core_count(void)
|
||||
{
|
||||
@@ -50,8 +50,8 @@ static void msm_secondary_init(unsigned
|
||||
/*
|
||||
* Synchronise with the boot thread.
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
}
|
||||
|
||||
static void prepare_cold_cpu(unsigned int cpu)
|
||||
@@ -88,7 +88,7 @@ static int msm_boot_secondary(unsigned i
|
||||
* set synchronisation state between this boot processor
|
||||
* and the secondary one
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
|
||||
/*
|
||||
* The secondary processor is waiting to be released from
|
||||
@@ -122,7 +122,7 @@ static int msm_boot_secondary(unsigned i
|
||||
* now the secondary core is starting up let it run its
|
||||
* calibrations, then wait for it to finish
|
||||
*/
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
|
||||
return pen_release != -1 ? -ENOSYS : 0;
|
||||
}
|
||||
--- a/arch/arm/mach-omap2/omap-smp.c
|
||||
+++ b/arch/arm/mach-omap2/omap-smp.c
|
||||
@@ -44,7 +44,7 @@ u16 pm44xx_errata;
|
||||
/* SCU base address */
|
||||
static void __iomem *scu_base;
|
||||
|
||||
-static DEFINE_SPINLOCK(boot_lock);
|
||||
+static DEFINE_RAW_SPINLOCK(boot_lock);
|
||||
|
||||
void __iomem *omap4_get_scu_base(void)
|
||||
{
|
||||
@@ -68,8 +68,8 @@ static void omap4_secondary_init(unsigne
|
||||
/*
|
||||
* Synchronise with the boot thread.
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
}
|
||||
|
||||
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
@@ -83,7 +83,7 @@ static int omap4_boot_secondary(unsigned
|
||||
* Set synchronisation state between this boot processor
|
||||
* and the secondary one
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
|
||||
/*
|
||||
* Update the AuxCoreBoot0 with boot state for secondary core.
|
||||
@@ -160,7 +160,7 @@ static int omap4_boot_secondary(unsigned
|
||||
* Now the secondary core is starting up let it run its
|
||||
* calibrations, then wait for it to finish
|
||||
*/
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
--- a/arch/arm/mach-prima2/platsmp.c
|
||||
+++ b/arch/arm/mach-prima2/platsmp.c
|
||||
@@ -23,7 +23,7 @@
|
||||
static void __iomem *scu_base;
|
||||
static void __iomem *rsc_base;
|
||||
|
||||
-static DEFINE_SPINLOCK(boot_lock);
|
||||
+static DEFINE_RAW_SPINLOCK(boot_lock);
|
||||
|
||||
static struct map_desc scu_io_desc __initdata = {
|
||||
.length = SZ_4K,
|
||||
@@ -56,8 +56,8 @@ static void sirfsoc_secondary_init(unsig
|
||||
/*
|
||||
* Synchronise with the boot thread.
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
}
|
||||
|
||||
static struct of_device_id rsc_ids[] = {
|
||||
@@ -95,7 +95,7 @@ static int sirfsoc_boot_secondary(unsign
|
||||
/* make sure write buffer is drained */
|
||||
mb();
|
||||
|
||||
- spin_lock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
|
||||
/*
|
||||
* The secondary processor is waiting to be released from
|
||||
@@ -128,7 +128,7 @@ static int sirfsoc_boot_secondary(unsign
|
||||
* now the secondary core is starting up let it run its
|
||||
* calibrations, then wait for it to finish
|
||||
*/
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
|
||||
return pen_release != -1 ? -ENOSYS : 0;
|
||||
}
|
||||
--- a/arch/arm/mach-spear/platsmp.c
|
||||
+++ b/arch/arm/mach-spear/platsmp.c
|
||||
@@ -20,7 +20,7 @@
|
||||
#include <mach/spear.h>
|
||||
#include "generic.h"
|
||||
|
||||
-static DEFINE_SPINLOCK(boot_lock);
|
||||
+static DEFINE_RAW_SPINLOCK(boot_lock);
|
||||
|
||||
static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
|
||||
|
||||
@@ -36,8 +36,8 @@ static void spear13xx_secondary_init(uns
|
||||
/*
|
||||
* Synchronise with the boot thread.
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
}
|
||||
|
||||
static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
@@ -48,7 +48,7 @@ static int spear13xx_boot_secondary(unsi
|
||||
* set synchronisation state between this boot processor
|
||||
* and the secondary one
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
|
||||
/*
|
||||
* The secondary processor is waiting to be released from
|
||||
@@ -75,7 +75,7 @@ static int spear13xx_boot_secondary(unsi
|
||||
* now the secondary core is starting up let it run its
|
||||
* calibrations, then wait for it to finish
|
||||
*/
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
|
||||
return pen_release != -1 ? -ENOSYS : 0;
|
||||
}
|
||||
--- a/arch/arm/mach-ux500/platsmp.c
|
||||
+++ b/arch/arm/mach-ux500/platsmp.c
|
||||
@@ -52,7 +52,7 @@ static void __iomem *scu_base_addr(void)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
-static DEFINE_SPINLOCK(boot_lock);
|
||||
+static DEFINE_RAW_SPINLOCK(boot_lock);
|
||||
|
||||
static void ux500_secondary_init(unsigned int cpu)
|
||||
{
|
||||
@@ -65,8 +65,8 @@ static void ux500_secondary_init(unsigne
|
||||
/*
|
||||
* Synchronise with the boot thread.
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
}
|
||||
|
||||
static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
@@ -77,7 +77,7 @@ static int ux500_boot_secondary(unsigned
|
||||
* set synchronisation state between this boot processor
|
||||
* and the secondary one
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
|
||||
/*
|
||||
* The secondary processor is waiting to be released from
|
||||
@@ -98,7 +98,7 @@ static int ux500_boot_secondary(unsigned
|
||||
* now the secondary core is starting up let it run its
|
||||
* calibrations, then wait for it to finish
|
||||
*/
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
|
||||
return pen_release != -1 ? -ENOSYS : 0;
|
||||
}
|
||||
--- a/arch/arm/plat-versatile/platsmp.c
|
||||
+++ b/arch/arm/plat-versatile/platsmp.c
|
||||
@@ -31,7 +31,7 @@ static void write_pen_release(int val)
|
||||
outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
|
||||
}
|
||||
|
||||
-static DEFINE_SPINLOCK(boot_lock);
|
||||
+static DEFINE_RAW_SPINLOCK(boot_lock);
|
||||
|
||||
void versatile_secondary_init(unsigned int cpu)
|
||||
{
|
||||
@@ -44,8 +44,8 @@ void versatile_secondary_init(unsigned i
|
||||
/*
|
||||
* Synchronise with the boot thread.
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
}
|
||||
|
||||
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
@@ -56,7 +56,7 @@ int versatile_boot_secondary(unsigned in
|
||||
* Set synchronisation state between this boot processor
|
||||
* and the secondary one
|
||||
*/
|
||||
- spin_lock(&boot_lock);
|
||||
+ raw_spin_lock(&boot_lock);
|
||||
|
||||
/*
|
||||
* This is really belt and braces; we hold unintended secondary
|
||||
@@ -86,7 +86,7 @@ int versatile_boot_secondary(unsigned in
|
||||
* now the secondary core is starting up let it run its
|
||||
* calibrations, then wait for it to finish
|
||||
*/
|
||||
- spin_unlock(&boot_lock);
|
||||
+ raw_spin_unlock(&boot_lock);
|
||||
|
||||
return pen_release != -1 ? -ENOSYS : 0;
|
||||
}
|

@@ -0,0 +1,21 @@
Subject: arm-disable-highmem-on-rt.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 17:09:28 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/arm/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1759,7 +1759,7 @@ config HAVE_ARCH_PFN_VALID
 
 config HIGHMEM
 	bool "High Memory Support"
-	depends on MMU
+	depends on MMU && !PREEMPT_RT_FULL
 	help
 	  The address space of ARM processors is only 4 Gigabytes large
 	  and it has to accommodate user address space, kernel address

@@ -0,0 +1,140 @@
Subject: arm-enable-highmem-for-rt.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 13 Feb 2013 11:03:11 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/arm/Kconfig                 |  2 -
 arch/arm/include/asm/switch_to.h |  8 +++++++
 arch/arm/mm/highmem.c            | 41 +++++++++++++++++++++++++++++++++++++--
 include/linux/highmem.h          |  1
 4 files changed, 49 insertions(+), 3 deletions(-)

--- a/arch/arm/Kconfig
|
||||
+++ b/arch/arm/Kconfig
|
||||
@@ -1759,7 +1759,7 @@ config HAVE_ARCH_PFN_VALID
|
||||
|
||||
config HIGHMEM
|
||||
bool "High Memory Support"
|
||||
- depends on MMU && !PREEMPT_RT_FULL
|
||||
+ depends on MMU
|
||||
help
|
||||
The address space of ARM processors is only 4 Gigabytes large
|
||||
and it has to accommodate user address space, kernel address
|
||||
--- a/arch/arm/include/asm/switch_to.h
|
||||
+++ b/arch/arm/include/asm/switch_to.h
|
||||
@@ -3,6 +3,13 @@
|
||||
|
||||
#include <linux/thread_info.h>
|
||||
|
||||
+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
|
||||
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
|
||||
+#else
|
||||
+static inline void
|
||||
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
|
||||
+#endif
|
||||
+
|
||||
/*
|
||||
* For v7 SMP cores running a preemptible kernel we may be pre-empted
|
||||
* during a TLB maintenance operation, so execute an inner-shareable dsb
|
||||
@@ -22,6 +29,7 @@ extern struct task_struct *__switch_to(s
|
||||
|
||||
#define switch_to(prev,next,last) \
|
||||
do { \
|
||||
+ switch_kmaps(prev, next); \
|
||||
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
|
||||
} while (0)
|
||||
|
||||
--- a/arch/arm/mm/highmem.c
|
||||
+++ b/arch/arm/mm/highmem.c
|
||||
@@ -38,6 +38,7 @@ EXPORT_SYMBOL(kunmap);
|
||||
|
||||
void *kmap_atomic(struct page *page)
|
||||
{
|
||||
+ pte_t pte = mk_pte(page, kmap_prot);
|
||||
unsigned int idx;
|
||||
unsigned long vaddr;
|
||||
void *kmap;
|
||||
@@ -76,7 +77,10 @@ void *kmap_atomic(struct page *page)
|
||||
* in place, so the contained TLB flush ensures the TLB is updated
|
||||
* with the new mapping.
|
||||
*/
|
||||
- set_top_pte(vaddr, mk_pte(page, kmap_prot));
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+ current->kmap_pte[type] = pte;
|
||||
+#endif
|
||||
+ set_top_pte(vaddr, pte);
|
||||
|
||||
return (void *)vaddr;
|
||||
}
|
||||
@@ -93,6 +97,9 @@ void __kunmap_atomic(void *kvaddr)
|
||||
|
||||
if (cache_is_vivt())
|
||||
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+ current->kmap_pte[type] = __pte(0);
|
||||
+#endif
|
||||
#ifdef CONFIG_DEBUG_HIGHMEM
|
||||
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
|
||||
#else
|
||||
@@ -110,6 +117,7 @@ EXPORT_SYMBOL(__kunmap_atomic);
|
||||
|
||||
void *kmap_atomic_pfn(unsigned long pfn)
|
||||
{
|
||||
+ pte_t pte = pfn_pte(pfn, kmap_prot);
|
||||
unsigned long vaddr;
|
||||
int idx, type;
|
||||
|
||||
@@ -121,7 +129,10 @@ void *kmap_atomic_pfn(unsigned long pfn)
|
||||
#ifdef CONFIG_DEBUG_HIGHMEM
|
||||
BUG_ON(!pte_none(get_top_pte(vaddr)));
|
||||
#endif
|
||||
- set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+ current->kmap_pte[type] = pte;
|
||||
+#endif
|
||||
+ set_top_pte(vaddr, pte);
|
||||
|
||||
return (void *)vaddr;
|
||||
}
|
||||
@@ -135,3 +146,29 @@ struct page *kmap_atomic_to_page(const v
|
||||
|
||||
return pte_page(get_top_pte(vaddr));
|
||||
}
|
||||
+
|
||||
+#if defined CONFIG_PREEMPT_RT_FULL
|
||||
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
|
||||
+{
|
||||
+ int i;
|
||||
+
|
||||
+ /*
|
||||
+ * Clear @prev's kmap_atomic mappings
|
||||
+ */
|
||||
+ for (i = 0; i < prev_p->kmap_idx; i++) {
|
||||
+ int idx = i + KM_TYPE_NR * smp_processor_id();
|
||||
+
|
||||
+ set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0));
|
||||
+ }
|
||||
+ /*
|
||||
+ * Restore @next_p's kmap_atomic mappings
|
||||
+ */
|
||||
+ for (i = 0; i < next_p->kmap_idx; i++) {
|
||||
+ int idx = i + KM_TYPE_NR * smp_processor_id();
|
||||
+
|
||||
+ if (!pte_none(next_p->kmap_pte[i]))
|
||||
+ set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx),
|
||||
+ next_p->kmap_pte[i]);
|
||||
+ }
|
||||
+}
|
||||
+#endif
|
||||
--- a/include/linux/highmem.h
|
||||
+++ b/include/linux/highmem.h
|
||||
@@ -7,6 +7,7 @@
|
||||
#include <linux/mm.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/hardirq.h>
|
||||
+#include <linux/sched.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
|

@@ -0,0 +1,104 @@
Subject: arm-preempt-lazy-support.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 31 Oct 2012 12:04:11 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/arm/Kconfig                   |  1 +
 arch/arm/include/asm/thread_info.h |  3 +++
 arch/arm/kernel/asm-offsets.c      |  1 +
 arch/arm/kernel/entry-armv.S       | 13 +++++++++++--
 arch/arm/kernel/signal.c           |  3 ++-
 5 files changed, 18 insertions(+), 3 deletions(-)

--- a/arch/arm/Kconfig
|
||||
+++ b/arch/arm/Kconfig
|
||||
@@ -51,6 +51,7 @@ config ARM
|
||||
select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
|
||||
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
|
||||
select HAVE_PERF_EVENTS
|
||||
+ select HAVE_PREEMPT_LAZY
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
select HAVE_SYSCALL_TRACEPOINTS
|
||||
select HAVE_UID16
|
||||
--- a/arch/arm/include/asm/thread_info.h
|
||||
+++ b/arch/arm/include/asm/thread_info.h
|
||||
@@ -60,6 +60,7 @@ struct arm_restart_block {
|
||||
struct thread_info {
|
||||
unsigned long flags; /* low level flags */
|
||||
int preempt_count; /* 0 => preemptable, <0 => bug */
|
||||
+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
|
||||
mm_segment_t addr_limit; /* address limit */
|
||||
struct task_struct *task; /* main task structure */
|
||||
struct exec_domain *exec_domain; /* execution domain */
|
||||
@@ -159,6 +160,7 @@ extern int vfp_restore_user_hwstate(stru
|
||||
#define TIF_SIGPENDING 0
|
||||
#define TIF_NEED_RESCHED 1
|
||||
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
|
||||
+#define TIF_NEED_RESCHED_LAZY 3
|
||||
#define TIF_SYSCALL_TRACE 8
|
||||
#define TIF_SYSCALL_AUDIT 9
|
||||
#define TIF_SYSCALL_TRACEPOINT 10
|
||||
@@ -171,6 +173,7 @@ extern int vfp_restore_user_hwstate(stru
|
||||
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
|
||||
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
|
||||
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
|
||||
--- a/arch/arm/kernel/asm-offsets.c
|
||||
+++ b/arch/arm/kernel/asm-offsets.c
|
||||
@@ -54,6 +54,7 @@ int main(void)
|
||||
BLANK();
|
||||
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
|
||||
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
|
||||
+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
|
||||
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
|
||||
DEFINE(TI_TASK, offsetof(struct thread_info, task));
|
||||
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
|
||||
--- a/arch/arm/kernel/entry-armv.S
|
||||
+++ b/arch/arm/kernel/entry-armv.S
|
||||
@@ -204,11 +204,18 @@ ENDPROC(__dabt_svc)
|
||||
#ifdef CONFIG_PREEMPT
|
||||
get_thread_info tsk
|
||||
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
|
||||
- ldr r0, [tsk, #TI_FLAGS] @ get flags
|
||||
teq r8, #0 @ if preempt count != 0
|
||||
+ bne 1f @ return from exeption
|
||||
+ ldr r0, [tsk, #TI_FLAGS] @ get flags
|
||||
+ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
|
||||
+ blne svc_preempt @ preempt!
|
||||
+
|
||||
+ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
|
||||
+ teq r8, #0 @ if preempt lazy count != 0
|
||||
movne r0, #0 @ force flags to 0
|
||||
- tst r0, #_TIF_NEED_RESCHED
|
||||
+ tst r0, #_TIF_NEED_RESCHED_LAZY
|
||||
blne svc_preempt
|
||||
+1:
|
||||
#endif
|
||||
|
||||
svc_exit r5, irq = 1 @ return from exception
|
||||
@@ -223,6 +230,8 @@ ENDPROC(__irq_svc)
|
||||
1: bl preempt_schedule_irq @ irq en/disable is done inside
|
||||
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
|
||||
tst r0, #_TIF_NEED_RESCHED
|
||||
+ bne 1b
|
||||
+ tst r0, #_TIF_NEED_RESCHED_LAZY
|
||||
moveq pc, r8 @ go again
|
||||
b 1b
|
||||
#endif
|
||||
--- a/arch/arm/kernel/signal.c
|
||||
+++ b/arch/arm/kernel/signal.c
|
||||
@@ -589,7 +589,8 @@ asmlinkage int
|
||||
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
|
||||
{
|
||||
do {
|
||||
- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
|
||||
+ if (likely(thread_flags & (_TIF_NEED_RESCHED |
|
||||
+ _TIF_NEED_RESCHED_LAZY))) {
|
||||
schedule();
|
||||
} else {
|
||||
if (unlikely(!user_mode(regs)))
|
|
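The signal.c hunk above simply widens the existing reschedule test to cover the new lazy bit. A hedged sketch of that test in isolation (RESCHED_MASK and need_resched_any() are illustrative names, not taken from the patch); the intent of HAVE_PREEMPT_LAZY is that ordinary tasks can have their preemption deferred via _TIF_NEED_RESCHED_LAZY while urgent preemption keeps using _TIF_NEED_RESCHED, matching the order of checks added to entry-armv.S:

/* Sketch: treat either reschedule request as a reason to call schedule(). */
#define RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

static inline int need_resched_any(unsigned int thread_flags)
{
	return (thread_flags & RESCHED_MASK) != 0;
}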
@@ -0,0 +1,65 @@
|
|||
From: Steven Rostedt <srostedt@redhat.com>
|
||||
Date: Fri, 3 Jul 2009 08:44:29 -0500
|
||||
Subject: ata: Do not disable interrupts in ide code for preempt-rt
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Use the local_irq_*_nort variants.
|
||||
|
||||
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
drivers/ata/libata-sff.c | 12 ++++++------
|
||||
1 file changed, 6 insertions(+), 6 deletions(-)
|
||||
|
||||
--- a/drivers/ata/libata-sff.c
|
||||
+++ b/drivers/ata/libata-sff.c
|
||||
@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str
|
||||
unsigned long flags;
|
||||
unsigned int consumed;
|
||||
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
|
||||
return consumed;
|
||||
}
|
||||
@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_qu
|
||||
unsigned long flags;
|
||||
|
||||
/* FIXME: use a bounce buffer */
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
buf = kmap_atomic(page);
|
||||
|
||||
/* do the actual data transfer */
|
||||
@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_qu
|
||||
do_write);
|
||||
|
||||
kunmap_atomic(buf);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
} else {
|
||||
buf = page_address(page);
|
||||
ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
|
||||
@@ -864,7 +864,7 @@ static int __atapi_pio_bytes(struct ata_
|
||||
unsigned long flags;
|
||||
|
||||
/* FIXME: use bounce buffer */
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
buf = kmap_atomic(page);
|
||||
|
||||
/* do the actual data transfer */
|
||||
@@ -872,7 +872,7 @@ static int __atapi_pio_bytes(struct ata_
|
||||
count, rw);
|
||||
|
||||
kunmap_atomic(buf);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
} else {
|
||||
buf = page_address(page);
|
||||
consumed = ap->ops->sff_data_xfer(dev, buf + offset,
|
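The local_irq_*_nort() helpers used above are defined elsewhere in the RT series; the idea is that they behave like the classic primitives on mainline kernels and stop disabling interrupts on PREEMPT_RT_FULL, where threaded IRQs and sleeping locks provide the exclusion. A sketch of plausible definitions (the exact expansions are an assumption, not copied from this series):

#ifdef CONFIG_PREEMPT_RT_FULL
/* RT: keep interrupts enabled; only preserve the flags word so the
 * save/restore pairing in callers still compiles and balances. */
# define local_irq_save_nort(flags)	local_save_flags(flags)
# define local_irq_restore_nort(flags)	(void)(flags)
#else
/* !RT: identical to the classic primitives. */
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif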
debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch (new file, 98 lines)
|
@@ -0,0 +1,98 @@
|
|||
Subject: block: Shorten interrupt disabled regions
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Wed, 22 Jun 2011 19:47:02 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Moving the blk_sched_flush_plug() call out of the interrupt/preempt
|
||||
disabled region in the scheduler allows us to replace
|
||||
local_irq_save/restore(flags) by local_irq_disable/enable() in
|
||||
blk_flush_plug().
|
||||
|
||||
Now instead of doing this we disable interrupts explicitly when we
|
||||
lock the request_queue and reenable them when we drop the lock. That
|
||||
allows interrupts to be handled when the plug list contains requests
|
||||
for more than one queue.
|
||||
|
||||
Aside of that this change makes the scope of the irq disabled region
|
||||
more obvious. The current code confused the hell out of me when
|
||||
looking at:
|
||||
|
||||
local_irq_save(flags);
|
||||
spin_lock(q->queue_lock);
|
||||
...
|
||||
queue_unplugged(q...);
|
||||
scsi_request_fn();
|
||||
spin_unlock(q->queue_lock);
|
||||
spin_lock(shost->host_lock);
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
|
||||
-------------------^^^ ????
|
||||
|
||||
spin_lock_irq(q->queue_lock);
|
||||
spin_unlock(q->lock);
|
||||
local_irq_restore(flags);
|
||||
|
||||
Also add a comment to __blk_run_queue() documenting that
|
||||
q->request_fn() can drop q->queue_lock and reenable interrupts, but
|
||||
must return with q->queue_lock held and interrupts disabled.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Cc: Peter Zijlstra <peterz@infradead.org>
|
||||
Cc: Tejun Heo <tj@kernel.org>
|
||||
Cc: Jens Axboe <axboe@kernel.dk>
|
||||
Cc: Linus Torvalds <torvalds@linux-foundation.org>
|
||||
Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
block/blk-core.c | 12 ++----------
|
||||
1 file changed, 2 insertions(+), 10 deletions(-)
|
||||
|
||||
--- a/block/blk-core.c
|
||||
+++ b/block/blk-core.c
|
||||
@@ -2914,7 +2914,7 @@ static void queue_unplugged(struct reque
|
||||
blk_run_queue_async(q);
|
||||
else
|
||||
__blk_run_queue(q);
|
||||
- spin_unlock(q->queue_lock);
|
||||
+ spin_unlock_irq(q->queue_lock);
|
||||
}
|
||||
|
||||
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
|
||||
@@ -2962,7 +2962,6 @@ EXPORT_SYMBOL(blk_check_plugged);
|
||||
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
|
||||
{
|
||||
struct request_queue *q;
|
||||
- unsigned long flags;
|
||||
struct request *rq;
|
||||
LIST_HEAD(list);
|
||||
unsigned int depth;
|
||||
@@ -2980,11 +2979,6 @@ void blk_flush_plug_list(struct blk_plug
|
||||
q = NULL;
|
||||
depth = 0;
|
||||
|
||||
- /*
|
||||
- * Save and disable interrupts here, to avoid doing it for every
|
||||
- * queue lock we have to take.
|
||||
- */
|
||||
- local_irq_save(flags);
|
||||
while (!list_empty(&list)) {
|
||||
rq = list_entry_rq(list.next);
|
||||
list_del_init(&rq->queuelist);
|
||||
@@ -2997,7 +2991,7 @@ void blk_flush_plug_list(struct blk_plug
|
||||
queue_unplugged(q, depth, from_schedule);
|
||||
q = rq->q;
|
||||
depth = 0;
|
||||
- spin_lock(q->queue_lock);
|
||||
+ spin_lock_irq(q->queue_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -3024,8 +3018,6 @@ void blk_flush_plug_list(struct blk_plug
|
||||
*/
|
||||
if (q)
|
||||
queue_unplugged(q, depth, from_schedule);
|
||||
-
|
||||
- local_irq_restore(flags);
|
||||
}
|
||||
|
||||
void blk_finish_plug(struct blk_plug *plug)
|
|
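After this change interrupts are disabled only while a particular queue's lock is held rather than across the whole plug flush. A condensed sketch of the per-queue shape the patch moves to (the wrapper function is illustrative):

/* Sketch: bound the IRQ-off window by the queue lock so pending
 * interrupts get serviced between queues on the plug list. */
static void dispatch_one_queue_sketch(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);	/* IRQs off for this queue only */
	__blk_run_queue(q);		/* may drop and retake the lock */
	spin_unlock_irq(q->queue_lock);	/* IRQs on again before the next queue */
}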
@@ -0,0 +1,46 @@
|
|||
Subject: block: Use cpu_chill() for retry loops
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 20 Dec 2012 18:28:26 +0100
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Retry loops on RT might loop forever when the modifying side was
|
||||
preempted. Steven also observed a live lock when there was a
|
||||
concurrent priority boosting going on.
|
||||
|
||||
Use cpu_chill() instead of cpu_relax() to let the system
|
||||
make progress.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
---
|
||||
block/blk-ioc.c | 5 +++--
|
||||
1 file changed, 3 insertions(+), 2 deletions(-)
|
||||
|
||||
--- a/block/blk-ioc.c
|
||||
+++ b/block/blk-ioc.c
|
||||
@@ -8,6 +8,7 @@
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
|
||||
#include <linux/slab.h>
|
||||
+#include <linux/delay.h>
|
||||
|
||||
#include "blk.h"
|
||||
|
||||
@@ -110,7 +111,7 @@ static void ioc_release_fn(struct work_s
|
||||
spin_unlock(q->queue_lock);
|
||||
} else {
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
- cpu_relax();
|
||||
+ cpu_chill();
|
||||
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
|
||||
}
|
||||
}
|
||||
@@ -188,7 +189,7 @@ void put_io_context_active(struct io_con
|
||||
spin_unlock(icq->q->queue_lock);
|
||||
} else {
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
- cpu_relax();
|
||||
+ cpu_chill();
|
||||
goto retry;
|
||||
}
|
||||
}
|
|
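The conversion keeps the retry structure and only swaps the busy-wait primitive; in the RT tree cpu_chill() comes from <linux/delay.h>, which is why the hunk adds that include. A hedged fragment showing the shape of such a loop (the trylock on the queue lock stands in for the real icq teardown ordering):

	/* Sketch: take the locks in the required order, or back off.
	 * cpu_relax() would spin the CPU; cpu_chill() sleeps briefly so a
	 * preempted or priority-boosted lock holder can make progress. */
retry:
	spin_lock_irqsave(&ioc->lock, flags);
	if (!spin_trylock(icq->q->queue_lock)) {
		spin_unlock_irqrestore(&ioc->lock, flags);
		cpu_chill();
		goto retry;
	}
	/* ... drop the icq while holding both locks ... */
	spin_unlock(icq->q->queue_lock);
	spin_unlock_irqrestore(&ioc->lock, flags);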
@@ -0,0 +1,35 @@
|
|||
From: Ingo Molnar <mingo@elte.hu>
|
||||
Date: Fri, 3 Jul 2009 08:29:58 -0500
|
||||
Subject: bug: BUG_ON/WARN_ON variants dependent on RT/!RT
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
include/asm-generic/bug.h | 14 ++++++++++++++
|
||||
1 file changed, 14 insertions(+)
|
||||
|
||||
--- a/include/asm-generic/bug.h
|
||||
+++ b/include/asm-generic/bug.h
|
||||
@@ -202,6 +202,20 @@ extern void warn_slowpath_null(const cha
|
||||
# define WARN_ON_SMP(x) ({0;})
|
||||
#endif
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
+# define BUG_ON_RT(c) BUG_ON(c)
|
||||
+# define BUG_ON_NONRT(c) do { } while (0)
|
||||
+# define WARN_ON_RT(condition) WARN_ON(condition)
|
||||
+# define WARN_ON_NONRT(condition) do { } while (0)
|
||||
+# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
|
||||
+#else
|
||||
+# define BUG_ON_RT(c) do { } while (0)
|
||||
+# define BUG_ON_NONRT(c) BUG_ON(c)
|
||||
+# define WARN_ON_RT(condition) do { } while (0)
|
||||
+# define WARN_ON_NONRT(condition) WARN_ON(condition)
|
||||
+# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
|
||||
+#endif
|
||||
+
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif
|
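Typical use of the new NONRT variants looks like the sketch below (the function is illustrative; the dm patch later in this series applies the same idea to real code). The assertion only holds where the path genuinely runs with interrupts off, which is the non-RT case; on PREEMPT_RT_BASE it compiles away instead of tripping:

/* Sketch: assert "IRQs are off here" only on kernels where that is
 * actually the contract, i.e. everything except PREEMPT_RT_BASE. */
static void requeue_locked_sketch(struct request_queue *q)
{
	BUG_ON_NONRT(!irqs_disabled());
	spin_lock(q->queue_lock);
	/* ... put the request back on the queue ... */
	spin_unlock(q->queue_lock);
}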
debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch (new file, 153 lines)
|
@@ -0,0 +1,153 @@
|
|||
From: Benedikt Spranger <b.spranger@linutronix.de>
|
||||
Date: Mon, 8 Mar 2010 18:57:04 +0100
|
||||
Subject: clocksource: TCLIB: Allow higher clock rates for clock events
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
As default the TCLIB uses the 32KiHz base clock rate for clock events.
|
||||
Add a compile-time selection to allow a higher clock resolution.
|
||||
|
||||
Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
drivers/clocksource/tcb_clksrc.c | 35 +++++++++++++++++++++--------------
|
||||
drivers/misc/Kconfig | 12 ++++++++++--
|
||||
2 files changed, 31 insertions(+), 16 deletions(-)
|
||||
|
||||
--- a/drivers/clocksource/tcb_clksrc.c
|
||||
+++ b/drivers/clocksource/tcb_clksrc.c
|
||||
@@ -23,8 +23,7 @@
|
||||
* this 32 bit free-running counter. the second channel is not used.
|
||||
*
|
||||
* - The third channel may be used to provide a 16-bit clockevent
|
||||
- * source, used in either periodic or oneshot mode. This runs
|
||||
- * at 32 KiHZ, and can handle delays of up to two seconds.
|
||||
+ * source, used in either periodic or oneshot mode.
|
||||
*
|
||||
* A boot clocksource and clockevent source are also currently needed,
|
||||
* unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
|
||||
@@ -74,6 +73,7 @@ static struct clocksource clksrc = {
|
||||
struct tc_clkevt_device {
|
||||
struct clock_event_device clkevt;
|
||||
struct clk *clk;
|
||||
+ u32 freq;
|
||||
void __iomem *regs;
|
||||
};
|
||||
|
||||
@@ -82,13 +82,6 @@ static struct tc_clkevt_device *to_tc_cl
|
||||
return container_of(clkevt, struct tc_clkevt_device, clkevt);
|
||||
}
|
||||
|
||||
-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
|
||||
- * because using one of the divided clocks would usually mean the
|
||||
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
|
||||
- *
|
||||
- * A divided clock could be good for high resolution timers, since
|
||||
- * 30.5 usec resolution can seem "low".
|
||||
- */
|
||||
static u32 timer_clock;
|
||||
|
||||
static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
|
||||
@@ -111,11 +104,12 @@ static void tc_mode(enum clock_event_mod
|
||||
case CLOCK_EVT_MODE_PERIODIC:
|
||||
clk_enable(tcd->clk);
|
||||
|
||||
- /* slow clock, count up to RC, then irq and restart */
|
||||
+ /* count up to RC, then irq and restart */
|
||||
__raw_writel(timer_clock
|
||||
| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
|
||||
regs + ATMEL_TC_REG(2, CMR));
|
||||
- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
|
||||
+ __raw_writel((tcd->freq + HZ/2)/HZ,
|
||||
+ tcaddr + ATMEL_TC_REG(2, RC));
|
||||
|
||||
/* Enable clock and interrupts on RC compare */
|
||||
__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
|
||||
@@ -128,7 +122,7 @@ static void tc_mode(enum clock_event_mod
|
||||
case CLOCK_EVT_MODE_ONESHOT:
|
||||
clk_enable(tcd->clk);
|
||||
|
||||
- /* slow clock, count up to RC, then irq and stop */
|
||||
+ /* count up to RC, then irq and stop */
|
||||
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
|
||||
| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
|
||||
regs + ATMEL_TC_REG(2, CMR));
|
||||
@@ -157,8 +151,12 @@ static struct tc_clkevt_device clkevt =
|
||||
.name = "tc_clkevt",
|
||||
.features = CLOCK_EVT_FEAT_PERIODIC
|
||||
| CLOCK_EVT_FEAT_ONESHOT,
|
||||
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
|
||||
/* Should be lower than at91rm9200's system timer */
|
||||
.rating = 125,
|
||||
+#else
|
||||
+ .rating = 200,
|
||||
+#endif
|
||||
.set_next_event = tc_next_event,
|
||||
.set_mode = tc_mode,
|
||||
},
|
||||
@@ -186,6 +184,8 @@ static struct irqaction tc_irqaction = {
|
||||
|
||||
static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
|
||||
{
|
||||
+ unsigned divisor = atmel_tc_divisors[clk32k_divisor_idx];
|
||||
+ u32 freq;
|
||||
struct clk *t2_clk = tc->clk[2];
|
||||
int irq = tc->irq[2];
|
||||
|
||||
@@ -194,10 +194,14 @@ static void __init setup_clkevents(struc
|
||||
tc_irqaction.dev_id = &clkevt;
|
||||
|
||||
timer_clock = clk32k_divisor_idx;
|
||||
+ if (!divisor)
|
||||
+ freq = 32768;
|
||||
+ else
|
||||
+ freq = clk_get_rate(t2_clk) / divisor;
|
||||
|
||||
clkevt.clkevt.cpumask = cpumask_of(0);
|
||||
|
||||
- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
|
||||
+ clockevents_config_and_register(&clkevt.clkevt, freq, 1, 0xffff);
|
||||
|
||||
setup_irq(irq, &tc_irqaction);
|
||||
}
|
||||
@@ -322,8 +326,11 @@ static int __init tcb_clksrc_init(void)
|
||||
clocksource_register_hz(&clksrc, divided_rate);
|
||||
|
||||
/* channel 2: periodic and oneshot timer support */
|
||||
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
|
||||
setup_clkevents(tc, clk32k_divisor_idx);
|
||||
-
|
||||
+#else
|
||||
+ setup_clkevents(tc, best_divisor_idx);
|
||||
+#endif
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(tcb_clksrc_init);
|
||||
--- a/drivers/misc/Kconfig
|
||||
+++ b/drivers/misc/Kconfig
|
||||
@@ -78,8 +78,7 @@ config ATMEL_TCB_CLKSRC
|
||||
are combined to make a single 32-bit timer.
|
||||
|
||||
When GENERIC_CLOCKEVENTS is defined, the third timer channel
|
||||
- may be used as a clock event device supporting oneshot mode
|
||||
- (delays of up to two seconds) based on the 32 KiHz clock.
|
||||
+ may be used as a clock event device supporting oneshot mode.
|
||||
|
||||
config ATMEL_TCB_CLKSRC_BLOCK
|
||||
int
|
||||
@@ -93,6 +92,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
|
||||
TC can be used for other purposes, such as PWM generation and
|
||||
interval timing.
|
||||
|
||||
+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
|
||||
+ bool "TC Block use 32 KiHz clock"
|
||||
+ depends on ATMEL_TCB_CLKSRC
|
||||
+ default y
|
||||
+ help
|
||||
+ Select this to use 32 KiHz base clock rate as TC block clock
|
||||
+ source for clock events.
|
||||
+
|
||||
+
|
||||
config DUMMY_IRQ
|
||||
tristate "Dummy IRQ handler"
|
||||
default n
|
|
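A worked example of the new rate selection (the input clock is an assumption for illustration, not taken from the patch): with a 133 MHz timer input and the divide-by-32 prescaler, freq = 133000000 / 32 = 4156250 Hz, so the periodic reload written to RC is (freq + HZ/2) / HZ = 41563 for HZ = 100, which still fits the 16-bit counter, and one timer tick is about 0.24 us instead of the 30.5 us granularity of the 32 KiHz slow clock.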
@@ -0,0 +1,180 @@
|
|||
Subject: completion: Use simple wait queues
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Fri, 11 Jan 2013 11:23:51 +0100
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Completions have no long-lasting callbacks and therefore do not need
|
||||
the complex waitqueue variant. Use simple waitqueues which reduces the
|
||||
contention on the waitqueue lock.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
include/linux/completion.h | 8 ++++----
|
||||
include/linux/uprobes.h | 1 +
|
||||
kernel/sched/core.c | 44 +++++++++++++++++++++++++-------------------
|
||||
3 files changed, 30 insertions(+), 23 deletions(-)
|
||||
|
||||
--- a/include/linux/completion.h
|
||||
+++ b/include/linux/completion.h
|
||||
@@ -8,7 +8,7 @@
|
||||
* See kernel/sched/core.c for details.
|
||||
*/
|
||||
|
||||
-#include <linux/wait.h>
|
||||
+#include <linux/wait-simple.h>
|
||||
|
||||
/*
|
||||
* struct completion - structure used to maintain state for a "completion"
|
||||
@@ -24,11 +24,11 @@
|
||||
*/
|
||||
struct completion {
|
||||
unsigned int done;
|
||||
- wait_queue_head_t wait;
|
||||
+ struct swait_head wait;
|
||||
};
|
||||
|
||||
#define COMPLETION_INITIALIZER(work) \
|
||||
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
|
||||
+ { 0, SWAIT_HEAD_INITIALIZER((work).wait) }
|
||||
|
||||
#define COMPLETION_INITIALIZER_ONSTACK(work) \
|
||||
({ init_completion(&work); work; })
|
||||
@@ -73,7 +73,7 @@ struct completion {
|
||||
static inline void init_completion(struct completion *x)
|
||||
{
|
||||
x->done = 0;
|
||||
- init_waitqueue_head(&x->wait);
|
||||
+ init_swait_head(&x->wait);
|
||||
}
|
||||
|
||||
extern void wait_for_completion(struct completion *);
|
||||
--- a/include/linux/uprobes.h
|
||||
+++ b/include/linux/uprobes.h
|
||||
@@ -26,6 +26,7 @@
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/rbtree.h>
|
||||
+#include <linux/wait.h>
|
||||
|
||||
struct vm_area_struct;
|
||||
struct mm_struct;
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -2412,7 +2412,10 @@ void migrate_disable(void)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SCHED_DEBUG
|
||||
- WARN_ON_ONCE(p->migrate_disable_atomic);
|
||||
+ if (unlikely(p->migrate_disable_atomic)) {
|
||||
+ tracing_off();
|
||||
+ WARN_ON_ONCE(1);
|
||||
+ }
|
||||
#endif
|
||||
|
||||
preempt_disable();
|
||||
@@ -2444,7 +2447,10 @@ void migrate_enable(void)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SCHED_DEBUG
|
||||
- WARN_ON_ONCE(p->migrate_disable_atomic);
|
||||
+ if (unlikely(p->migrate_disable_atomic)) {
|
||||
+ tracing_off();
|
||||
+ WARN_ON_ONCE(1);
|
||||
+ }
|
||||
#endif
|
||||
WARN_ON_ONCE(p->migrate_disable <= 0);
|
||||
|
||||
@@ -2903,10 +2909,10 @@ void complete(struct completion *x)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&x->wait.lock, flags);
|
||||
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
|
||||
x->done++;
|
||||
- __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
|
||||
- spin_unlock_irqrestore(&x->wait.lock, flags);
|
||||
+ __swait_wake_locked(&x->wait, TASK_NORMAL, 1);
|
||||
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(complete);
|
||||
|
||||
@@ -2923,10 +2929,10 @@ void complete_all(struct completion *x)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&x->wait.lock, flags);
|
||||
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
|
||||
x->done += UINT_MAX/2;
|
||||
- __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
|
||||
- spin_unlock_irqrestore(&x->wait.lock, flags);
|
||||
+ __swait_wake_locked(&x->wait, TASK_NORMAL, 0);
|
||||
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(complete_all);
|
||||
|
||||
@@ -2935,20 +2941,20 @@ do_wait_for_common(struct completion *x,
|
||||
long (*action)(long), long timeout, int state)
|
||||
{
|
||||
if (!x->done) {
|
||||
- DECLARE_WAITQUEUE(wait, current);
|
||||
+ DEFINE_SWAITER(wait);
|
||||
|
||||
- __add_wait_queue_tail_exclusive(&x->wait, &wait);
|
||||
+ swait_prepare_locked(&x->wait, &wait);
|
||||
do {
|
||||
if (signal_pending_state(state, current)) {
|
||||
timeout = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
__set_current_state(state);
|
||||
- spin_unlock_irq(&x->wait.lock);
|
||||
+ raw_spin_unlock_irq(&x->wait.lock);
|
||||
timeout = action(timeout);
|
||||
- spin_lock_irq(&x->wait.lock);
|
||||
+ raw_spin_lock_irq(&x->wait.lock);
|
||||
} while (!x->done && timeout);
|
||||
- __remove_wait_queue(&x->wait, &wait);
|
||||
+ swait_finish_locked(&x->wait, &wait);
|
||||
if (!x->done)
|
||||
return timeout;
|
||||
}
|
||||
@@ -2962,9 +2968,9 @@ static inline long __sched
|
||||
{
|
||||
might_sleep();
|
||||
|
||||
- spin_lock_irq(&x->wait.lock);
|
||||
+ raw_spin_lock_irq(&x->wait.lock);
|
||||
timeout = do_wait_for_common(x, action, timeout, state);
|
||||
- spin_unlock_irq(&x->wait.lock);
|
||||
+ raw_spin_unlock_irq(&x->wait.lock);
|
||||
return timeout;
|
||||
}
|
||||
|
||||
@@ -3140,12 +3146,12 @@ bool try_wait_for_completion(struct comp
|
||||
unsigned long flags;
|
||||
int ret = 1;
|
||||
|
||||
- spin_lock_irqsave(&x->wait.lock, flags);
|
||||
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
|
||||
if (!x->done)
|
||||
ret = 0;
|
||||
else
|
||||
x->done--;
|
||||
- spin_unlock_irqrestore(&x->wait.lock, flags);
|
||||
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(try_wait_for_completion);
|
||||
@@ -3163,10 +3169,10 @@ bool completion_done(struct completion *
|
||||
unsigned long flags;
|
||||
int ret = 1;
|
||||
|
||||
- spin_lock_irqsave(&x->wait.lock, flags);
|
||||
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
|
||||
if (!x->done)
|
||||
ret = 0;
|
||||
- spin_unlock_irqrestore(&x->wait.lock, flags);
|
||||
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(completion_done);
|
|
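Callers of the completion API are unchanged by this conversion; only the embedded wait queue and its lock switch to the simple/raw variants. A caller sketch under that assumption (my_dev and start_transfer() are hypothetical):

#include <linux/completion.h>

struct my_dev {				/* hypothetical device state */
	struct completion done;
	int status;
};

extern void start_transfer(struct my_dev *dev);	/* hypothetical: completes dev->done from IRQ context */

static int wait_for_hw_sketch(struct my_dev *dev)
{
	init_completion(&dev->done);	/* now sets up a struct swait_head */
	start_transfer(dev);
	wait_for_completion(&dev->done);/* sleeps on the simple wait queue */
	return dev->status;
}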
@@ -0,0 +1,21 @@
|
|||
Subject: cond-resched-lock-rt-tweak.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Sun, 17 Jul 2011 22:51:33 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
include/linux/sched.h | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
--- a/include/linux/sched.h
|
||||
+++ b/include/linux/sched.h
|
||||
@@ -2482,7 +2482,7 @@ extern int _cond_resched(void);
|
||||
|
||||
extern int __cond_resched_lock(spinlock_t *lock);
|
||||
|
||||
-#ifdef CONFIG_PREEMPT_COUNT
|
||||
+#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
|
||||
#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
|
||||
#else
|
||||
#define PREEMPT_LOCK_OFFSET 0
|
|
@@ -0,0 +1,48 @@
|
|||
Subject: cond-resched-softirq-fix.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Jul 2011 09:56:44 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
include/linux/sched.h | 4 ++++
|
||||
kernel/sched/core.c | 2 ++
|
||||
2 files changed, 6 insertions(+)
|
||||
|
||||
--- a/include/linux/sched.h
|
||||
+++ b/include/linux/sched.h
|
||||
@@ -2493,12 +2493,16 @@ extern int __cond_resched_lock(spinlock_
|
||||
__cond_resched_lock(lock); \
|
||||
})
|
||||
|
||||
+#ifndef CONFIG_PREEMPT_RT_FULL
|
||||
extern int __cond_resched_softirq(void);
|
||||
|
||||
#define cond_resched_softirq() ({ \
|
||||
__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
|
||||
__cond_resched_softirq(); \
|
||||
})
|
||||
+#else
|
||||
+# define cond_resched_softirq() cond_resched()
|
||||
+#endif
|
||||
|
||||
static inline void cond_resched_rcu(void)
|
||||
{
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -3941,6 +3941,7 @@ int __cond_resched_lock(spinlock_t *lock
|
||||
}
|
||||
EXPORT_SYMBOL(__cond_resched_lock);
|
||||
|
||||
+#ifndef CONFIG_PREEMPT_RT_FULL
|
||||
int __sched __cond_resched_softirq(void)
|
||||
{
|
||||
BUG_ON(!in_softirq());
|
||||
@@ -3954,6 +3955,7 @@ int __sched __cond_resched_softirq(void)
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(__cond_resched_softirq);
|
||||
+#endif
|
||||
|
||||
/**
|
||||
* yield - yield the current processor to other threads.
|
debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch (new file, 126 lines)
|
@@ -0,0 +1,126 @@
|
|||
Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
|
||||
From: Steven Rostedt <rostedt@goodmis.org>
|
||||
Date: Fri, 02 Mar 2012 10:36:57 -0500
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Tasks can block on hotplug.lock in pin_current_cpu(), but their state
|
||||
might be != RUNNING. So the mutex wakeup will set the state
|
||||
unconditionally to RUNNING. That might cause spurious unexpected
|
||||
wakeups. We could provide a state preserving mutex_lock() function,
|
||||
but this is semantically backwards. So instead we convert the
|
||||
hotplug.lock() to a spinlock for RT, which has the state preserving
|
||||
semantics already.
|
||||
|
||||
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
|
||||
Cc: Carsten Emde <C.Emde@osadl.org>
|
||||
Cc: John Kacur <jkacur@redhat.com>
|
||||
Cc: Peter Zijlstra <peterz@infradead.org>
|
||||
Cc: Clark Williams <clark.williams@gmail.com>
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
kernel/cpu.c | 35 ++++++++++++++++++++++++++---------
|
||||
1 file changed, 26 insertions(+), 9 deletions(-)
|
||||
|
||||
--- a/kernel/cpu.c
|
||||
+++ b/kernel/cpu.c
|
||||
@@ -51,7 +51,12 @@ static int cpu_hotplug_disabled;
|
||||
|
||||
static struct {
|
||||
struct task_struct *active_writer;
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+ /* Makes the lock keep the task's state */
|
||||
+ spinlock_t lock;
|
||||
+#else
|
||||
struct mutex lock; /* Synchronizes accesses to refcount, */
|
||||
+#endif
|
||||
/*
|
||||
* Also blocks the new readers during
|
||||
* an ongoing cpu hotplug operation.
|
||||
@@ -59,10 +64,22 @@ static struct {
|
||||
int refcount;
|
||||
} cpu_hotplug = {
|
||||
.active_writer = NULL,
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+ .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
|
||||
+#else
|
||||
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
|
||||
+#endif
|
||||
.refcount = 0,
|
||||
};
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
|
||||
+# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
|
||||
+#else
|
||||
+# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
|
||||
+# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
|
||||
+#endif
|
||||
+
|
||||
struct hotplug_pcp {
|
||||
struct task_struct *unplug;
|
||||
int refcount;
|
||||
@@ -92,8 +109,8 @@ void pin_current_cpu(void)
|
||||
return;
|
||||
}
|
||||
preempt_enable();
|
||||
- mutex_lock(&cpu_hotplug.lock);
|
||||
- mutex_unlock(&cpu_hotplug.lock);
|
||||
+ hotplug_lock();
|
||||
+ hotplug_unlock();
|
||||
preempt_disable();
|
||||
goto retry;
|
||||
}
|
||||
@@ -165,9 +182,9 @@ void get_online_cpus(void)
|
||||
might_sleep();
|
||||
if (cpu_hotplug.active_writer == current)
|
||||
return;
|
||||
- mutex_lock(&cpu_hotplug.lock);
|
||||
+ hotplug_lock();
|
||||
cpu_hotplug.refcount++;
|
||||
- mutex_unlock(&cpu_hotplug.lock);
|
||||
+ hotplug_unlock();
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(get_online_cpus);
|
||||
@@ -176,14 +193,14 @@ void put_online_cpus(void)
|
||||
{
|
||||
if (cpu_hotplug.active_writer == current)
|
||||
return;
|
||||
- mutex_lock(&cpu_hotplug.lock);
|
||||
|
||||
+ hotplug_lock();
|
||||
if (WARN_ON(!cpu_hotplug.refcount))
|
||||
cpu_hotplug.refcount++; /* try to fix things up */
|
||||
|
||||
if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
|
||||
wake_up_process(cpu_hotplug.active_writer);
|
||||
- mutex_unlock(&cpu_hotplug.lock);
|
||||
+ hotplug_unlock();
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(put_online_cpus);
|
||||
@@ -215,11 +232,11 @@ void cpu_hotplug_begin(void)
|
||||
cpu_hotplug.active_writer = current;
|
||||
|
||||
for (;;) {
|
||||
- mutex_lock(&cpu_hotplug.lock);
|
||||
+ hotplug_lock();
|
||||
if (likely(!cpu_hotplug.refcount))
|
||||
break;
|
||||
__set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
- mutex_unlock(&cpu_hotplug.lock);
|
||||
+ hotplug_unlock();
|
||||
schedule();
|
||||
}
|
||||
}
|
||||
@@ -227,7 +244,7 @@ void cpu_hotplug_begin(void)
|
||||
void cpu_hotplug_done(void)
|
||||
{
|
||||
cpu_hotplug.active_writer = NULL;
|
||||
- mutex_unlock(&cpu_hotplug.lock);
|
||||
+ hotplug_unlock();
|
||||
}
|
||||
|
||||
/*
|
|
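The reason a "sleeping" spinlock is the right tool here is that a task blocked on it keeps whatever ->state it had already set, whereas a mutex wake-up forces TASK_RUNNING. A simplified fragment of the pattern this protects (not lifted from the patch; the real caller may have set its state further up the call chain):

	/* Sketch: state set before blocking must survive the lock/unlock
	 * round trip; with hotplug_lock() backed by rt_spin_lock() on RT,
	 * being woken as the writer drops the lock does not clobber it. */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	hotplug_lock();			/* may block; task state is preserved */
	hotplug_unlock();
	schedule();			/* still sees TASK_UNINTERRUPTIBLE */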
@@ -0,0 +1,548 @@
|
|||
From: Steven Rostedt <srostedt@redhat.com>
|
||||
Date: Mon, 16 Jul 2012 08:07:43 +0000
|
||||
Subject: cpu/rt: Rework cpu down for PREEMPT_RT
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Bringing a CPU down is a pain with the PREEMPT_RT kernel because
|
||||
tasks can be preempted in many more places than in non-RT. In
|
||||
order to handle per_cpu variables, tasks may be pinned to a CPU
|
||||
for a while, and even sleep. But these tasks need to be off the CPU
|
||||
if that CPU is going down.
|
||||
|
||||
Several synchronization methods have been tried, but when stressed
|
||||
they failed. This is a new approach.
|
||||
|
||||
A sync_tsk thread is still created and tasks may still block on a
|
||||
lock when the CPU is going down, but how that works is a bit different.
|
||||
When cpu_down() starts, it will create the sync_tsk and wait on it
|
||||
to inform that current tasks that are pinned on the CPU are no longer
|
||||
pinned. But new tasks that are about to be pinned will still be allowed
|
||||
to do so at this time.
|
||||
|
||||
Then the notifiers are called. Several notifiers will bring down tasks
|
||||
that will enter these locations. Some of these tasks will take locks
|
||||
of other tasks that are on the CPU. If we don't let those other tasks
|
||||
continue, but make them block until CPU down is done, the tasks that
|
||||
the notifiers are waiting on will never complete as they are waiting
|
||||
for the locks held by the tasks that are blocked.
|
||||
|
||||
Thus we still let the task pin the CPU until the notifiers are done.
|
||||
After the notifiers run, we then make new tasks entering the pinned
|
||||
CPU sections grab a mutex and wait. This mutex is now a per CPU mutex
|
||||
in the hotplug_pcp descriptor.
|
||||
|
||||
To help things along, a new function in the scheduler code is created
|
||||
called migrate_me(). This function will try to migrate the current task
|
||||
off the CPU this is going down if possible. When the sync_tsk is created,
|
||||
all tasks will then try to migrate off the CPU going down. There are
|
||||
several cases where this won't work, but it helps in most cases.
|
||||
|
||||
After the notifiers are called and if a task can't migrate off but enters
|
||||
the pin CPU sections, it will be forced to wait on the hotplug_pcp mutex
|
||||
until the CPU down is complete. Then the scheduler will force the migration
|
||||
anyway.
|
||||
|
||||
Also, I found that THREAD_BOUND need to also be accounted for in the
|
||||
pinned CPU, and the migrate_disable no longer treats them special.
|
||||
This helps fix issues with ksoftirqd and workqueue that unbind on CPU down.
|
||||
|
||||
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
include/linux/sched.h | 7 +
|
||||
kernel/cpu.c | 240 +++++++++++++++++++++++++++++++++++++++++---------
|
||||
kernel/sched/core.c | 82 ++++++++++++++++-
|
||||
3 files changed, 284 insertions(+), 45 deletions(-)
|
||||
|
||||
--- a/include/linux/sched.h
|
||||
+++ b/include/linux/sched.h
|
||||
@@ -1807,6 +1807,10 @@ extern void do_set_cpus_allowed(struct t
|
||||
|
||||
extern int set_cpus_allowed_ptr(struct task_struct *p,
|
||||
const struct cpumask *new_mask);
|
||||
+int migrate_me(void);
|
||||
+void tell_sched_cpu_down_begin(int cpu);
|
||||
+void tell_sched_cpu_down_done(int cpu);
|
||||
+
|
||||
#else
|
||||
static inline void do_set_cpus_allowed(struct task_struct *p,
|
||||
const struct cpumask *new_mask)
|
||||
@@ -1819,6 +1823,9 @@ static inline int set_cpus_allowed_ptr(s
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
+static inline int migrate_me(void) { return 0; }
|
||||
+static inline void tell_sched_cpu_down_begin(int cpu) { }
|
||||
+static inline void tell_sched_cpu_down_done(int cpu) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_NO_HZ_COMMON
|
||||
--- a/kernel/cpu.c
|
||||
+++ b/kernel/cpu.c
|
||||
@@ -51,12 +51,7 @@ static int cpu_hotplug_disabled;
|
||||
|
||||
static struct {
|
||||
struct task_struct *active_writer;
|
||||
-#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
- /* Makes the lock keep the task's state */
|
||||
- spinlock_t lock;
|
||||
-#else
|
||||
struct mutex lock; /* Synchronizes accesses to refcount, */
|
||||
-#endif
|
||||
/*
|
||||
* Also blocks the new readers during
|
||||
* an ongoing cpu hotplug operation.
|
||||
@@ -64,28 +59,46 @@ static struct {
|
||||
int refcount;
|
||||
} cpu_hotplug = {
|
||||
.active_writer = NULL,
|
||||
-#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
- .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
|
||||
-#else
|
||||
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
|
||||
-#endif
|
||||
.refcount = 0,
|
||||
};
|
||||
|
||||
-#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
-# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
|
||||
-# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
|
||||
-#else
|
||||
-# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
|
||||
-# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
|
||||
-#endif
|
||||
-
|
||||
+/**
|
||||
+ * hotplug_pcp - per cpu hotplug descriptor
|
||||
+ * @unplug: set when pin_current_cpu() needs to sync tasks
|
||||
+ * @sync_tsk: the task that waits for tasks to finish pinned sections
|
||||
+ * @refcount: counter of tasks in pinned sections
|
||||
+ * @grab_lock: set when the tasks entering pinned sections should wait
|
||||
+ * @synced: notifier for @sync_tsk to tell cpu_down it's finished
|
||||
+ * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
|
||||
+ * @mutex_init: zero if the mutex hasn't been initialized yet.
|
||||
+ *
|
||||
+ * Although @unplug and @sync_tsk may point to the same task, the @unplug
|
||||
+ * is used as a flag and still exists after @sync_tsk has exited and
|
||||
+ * @sync_tsk set to NULL.
|
||||
+ */
|
||||
struct hotplug_pcp {
|
||||
struct task_struct *unplug;
|
||||
+ struct task_struct *sync_tsk;
|
||||
int refcount;
|
||||
+ int grab_lock;
|
||||
struct completion synced;
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+ spinlock_t lock;
|
||||
+#else
|
||||
+ struct mutex mutex;
|
||||
+#endif
|
||||
+ int mutex_init;
|
||||
};
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
|
||||
+# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
|
||||
+#else
|
||||
+# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
|
||||
+# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
|
||||
+#endif
|
||||
+
|
||||
static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
|
||||
|
||||
/**
|
||||
@@ -99,18 +112,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
|
||||
void pin_current_cpu(void)
|
||||
{
|
||||
struct hotplug_pcp *hp;
|
||||
+ int force = 0;
|
||||
|
||||
retry:
|
||||
hp = &__get_cpu_var(hotplug_pcp);
|
||||
|
||||
- if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
|
||||
+ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
|
||||
hp->unplug == current) {
|
||||
hp->refcount++;
|
||||
return;
|
||||
}
|
||||
- preempt_enable();
|
||||
- hotplug_lock();
|
||||
- hotplug_unlock();
|
||||
+ if (hp->grab_lock) {
|
||||
+ preempt_enable();
|
||||
+ hotplug_lock(hp);
|
||||
+ hotplug_unlock(hp);
|
||||
+ } else {
|
||||
+ preempt_enable();
|
||||
+ /*
|
||||
+ * Try to push this task off of this CPU.
|
||||
+ */
|
||||
+ if (!migrate_me()) {
|
||||
+ preempt_disable();
|
||||
+ hp = &__get_cpu_var(hotplug_pcp);
|
||||
+ if (!hp->grab_lock) {
|
||||
+ /*
|
||||
+ * Just let it continue it's already pinned
|
||||
+ * or about to sleep.
|
||||
+ */
|
||||
+ force = 1;
|
||||
+ goto retry;
|
||||
+ }
|
||||
+ preempt_enable();
|
||||
+ }
|
||||
+ }
|
||||
preempt_disable();
|
||||
goto retry;
|
||||
}
|
||||
@@ -131,26 +165,84 @@ void unpin_current_cpu(void)
|
||||
wake_up_process(hp->unplug);
|
||||
}
|
||||
|
||||
-/*
|
||||
- * FIXME: Is this really correct under all circumstances ?
|
||||
- */
|
||||
+static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
|
||||
+{
|
||||
+ set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
+ while (hp->refcount) {
|
||||
+ schedule_preempt_disabled();
|
||||
+ set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
static int sync_unplug_thread(void *data)
|
||||
{
|
||||
struct hotplug_pcp *hp = data;
|
||||
|
||||
preempt_disable();
|
||||
hp->unplug = current;
|
||||
+ wait_for_pinned_cpus(hp);
|
||||
+
|
||||
+ /*
|
||||
+ * This thread will synchronize the cpu_down() with threads
|
||||
+ * that have pinned the CPU. When the pinned CPU count reaches
|
||||
+ * zero, we inform the cpu_down code to continue to the next step.
|
||||
+ */
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
- while (hp->refcount) {
|
||||
- schedule_preempt_disabled();
|
||||
+ preempt_enable();
|
||||
+ complete(&hp->synced);
|
||||
+
|
||||
+ /*
|
||||
+ * If all succeeds, the next step will need tasks to wait till
|
||||
+ * the CPU is offline before continuing. To do this, the grab_lock
|
||||
+ * is set and tasks going into pin_current_cpu() will block on the
|
||||
+ * mutex. But we still need to wait for those that are already in
|
||||
+ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
|
||||
+ * will kick this thread out.
|
||||
+ */
|
||||
+ while (!hp->grab_lock && !kthread_should_stop()) {
|
||||
+ schedule();
|
||||
+ set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
+ }
|
||||
+
|
||||
+ /* Make sure grab_lock is seen before we see a stale completion */
|
||||
+ smp_mb();
|
||||
+
|
||||
+ /*
|
||||
+ * Now just before cpu_down() enters stop machine, we need to make
|
||||
+ * sure all tasks that are in pinned CPU sections are out, and new
|
||||
+ * tasks will now grab the lock, keeping them from entering pinned
|
||||
+ * CPU sections.
|
||||
+ */
|
||||
+ if (!kthread_should_stop()) {
|
||||
+ preempt_disable();
|
||||
+ wait_for_pinned_cpus(hp);
|
||||
+ preempt_enable();
|
||||
+ complete(&hp->synced);
|
||||
+ }
|
||||
+
|
||||
+ set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
+ while (!kthread_should_stop()) {
|
||||
+ schedule();
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
}
|
||||
set_current_state(TASK_RUNNING);
|
||||
- preempt_enable();
|
||||
- complete(&hp->synced);
|
||||
+
|
||||
+ /*
|
||||
+ * Force this thread off this CPU as it's going down and
|
||||
+ * we don't want any more work on this CPU.
|
||||
+ */
|
||||
+ current->flags &= ~PF_NO_SETAFFINITY;
|
||||
+ do_set_cpus_allowed(current, cpu_present_mask);
|
||||
+ migrate_me();
|
||||
return 0;
|
||||
}
|
||||
|
||||
+static void __cpu_unplug_sync(struct hotplug_pcp *hp)
|
||||
+{
|
||||
+ wake_up_process(hp->sync_tsk);
|
||||
+ wait_for_completion(&hp->synced);
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Start the sync_unplug_thread on the target cpu and wait for it to
|
||||
* complete.
|
||||
@@ -158,23 +250,83 @@ static int sync_unplug_thread(void *data
|
||||
static int cpu_unplug_begin(unsigned int cpu)
|
||||
{
|
||||
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
|
||||
- struct task_struct *tsk;
|
||||
+ int err;
|
||||
+
|
||||
+ /* Protected by cpu_hotplug.lock */
|
||||
+ if (!hp->mutex_init) {
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+ spin_lock_init(&hp->lock);
|
||||
+#else
|
||||
+ mutex_init(&hp->mutex);
|
||||
+#endif
|
||||
+ hp->mutex_init = 1;
|
||||
+ }
|
||||
+
|
||||
+ /* Inform the scheduler to migrate tasks off this CPU */
|
||||
+ tell_sched_cpu_down_begin(cpu);
|
||||
|
||||
init_completion(&hp->synced);
|
||||
- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
|
||||
- if (IS_ERR(tsk))
|
||||
- return (PTR_ERR(tsk));
|
||||
- kthread_bind(tsk, cpu);
|
||||
- wake_up_process(tsk);
|
||||
- wait_for_completion(&hp->synced);
|
||||
+
|
||||
+ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
|
||||
+ if (IS_ERR(hp->sync_tsk)) {
|
||||
+ err = PTR_ERR(hp->sync_tsk);
|
||||
+ hp->sync_tsk = NULL;
|
||||
+ return err;
|
||||
+ }
|
||||
+ kthread_bind(hp->sync_tsk, cpu);
|
||||
+
|
||||
+ /*
|
||||
+ * Wait for tasks to get out of the pinned sections,
|
||||
+ * it's still OK if new tasks enter. Some CPU notifiers will
|
||||
+ * wait for tasks that are going to enter these sections and
|
||||
+ * we must not have them block.
|
||||
+ */
|
||||
+ __cpu_unplug_sync(hp);
|
||||
+
|
||||
return 0;
|
||||
}
|
||||
|
||||
+static void cpu_unplug_sync(unsigned int cpu)
|
||||
+{
|
||||
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
|
||||
+
|
||||
+ init_completion(&hp->synced);
|
||||
+ /* The completion needs to be initialzied before setting grab_lock */
|
||||
+ smp_wmb();
|
||||
+
|
||||
+ /* Grab the mutex before setting grab_lock */
|
||||
+ hotplug_lock(hp);
|
||||
+ hp->grab_lock = 1;
|
||||
+
|
||||
+ /*
|
||||
+ * The CPU notifiers have been completed.
|
||||
+ * Wait for tasks to get out of pinned CPU sections and have new
|
||||
+ * tasks block until the CPU is completely down.
|
||||
+ */
|
||||
+ __cpu_unplug_sync(hp);
|
||||
+
|
||||
+ /* All done with the sync thread */
|
||||
+ kthread_stop(hp->sync_tsk);
|
||||
+ hp->sync_tsk = NULL;
|
||||
+}
|
||||
+
|
||||
static void cpu_unplug_done(unsigned int cpu)
|
||||
{
|
||||
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
|
||||
|
||||
hp->unplug = NULL;
|
||||
+ /* Let all tasks know cpu unplug is finished before cleaning up */
|
||||
+ smp_wmb();
|
||||
+
|
||||
+ if (hp->sync_tsk)
|
||||
+ kthread_stop(hp->sync_tsk);
|
||||
+
|
||||
+ if (hp->grab_lock) {
|
||||
+ hotplug_unlock(hp);
|
||||
+ /* protected by cpu_hotplug.lock */
|
||||
+ hp->grab_lock = 0;
|
||||
+ }
|
||||
+ tell_sched_cpu_down_done(cpu);
|
||||
}
|
||||
|
||||
void get_online_cpus(void)
|
||||
@@ -182,9 +334,9 @@ void get_online_cpus(void)
|
||||
might_sleep();
|
||||
if (cpu_hotplug.active_writer == current)
|
||||
return;
|
||||
- hotplug_lock();
|
||||
+ mutex_lock(&cpu_hotplug.lock);
|
||||
cpu_hotplug.refcount++;
|
||||
- hotplug_unlock();
|
||||
+ mutex_unlock(&cpu_hotplug.lock);
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(get_online_cpus);
|
||||
@@ -194,14 +346,13 @@ void put_online_cpus(void)
|
||||
if (cpu_hotplug.active_writer == current)
|
||||
return;
|
||||
|
||||
- hotplug_lock();
|
||||
+ mutex_lock(&cpu_hotplug.lock);
|
||||
if (WARN_ON(!cpu_hotplug.refcount))
|
||||
cpu_hotplug.refcount++; /* try to fix things up */
|
||||
|
||||
if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
|
||||
wake_up_process(cpu_hotplug.active_writer);
|
||||
- hotplug_unlock();
|
||||
-
|
||||
+ mutex_unlock(&cpu_hotplug.lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(put_online_cpus);
|
||||
|
||||
@@ -232,11 +383,11 @@ void cpu_hotplug_begin(void)
|
||||
cpu_hotplug.active_writer = current;
|
||||
|
||||
for (;;) {
|
||||
- hotplug_lock();
|
||||
+ mutex_lock(&cpu_hotplug.lock);
|
||||
if (likely(!cpu_hotplug.refcount))
|
||||
break;
|
||||
__set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
- hotplug_unlock();
|
||||
+ mutex_unlock(&cpu_hotplug.lock);
|
||||
schedule();
|
||||
}
|
||||
}
|
||||
@@ -244,7 +395,7 @@ void cpu_hotplug_begin(void)
|
||||
void cpu_hotplug_done(void)
|
||||
{
|
||||
cpu_hotplug.active_writer = NULL;
|
||||
- hotplug_unlock();
|
||||
+ mutex_unlock(&cpu_hotplug.lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -442,6 +593,9 @@ static int __ref _cpu_down(unsigned int
|
||||
}
|
||||
smpboot_park_threads(cpu);
|
||||
|
||||
+ /* Notifiers are done. Don't let any more tasks pin this CPU. */
|
||||
+ cpu_unplug_sync(cpu);
|
||||
+
|
||||
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
|
||||
if (err) {
|
||||
/* CPU didn't die: tell everyone. Can't complain. */
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -2359,7 +2359,7 @@ void migrate_disable(void)
|
||||
{
|
||||
struct task_struct *p = current;
|
||||
|
||||
- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
|
||||
+ if (in_atomic()) {
|
||||
#ifdef CONFIG_SCHED_DEBUG
|
||||
p->migrate_disable_atomic++;
|
||||
#endif
|
||||
@@ -2390,7 +2390,7 @@ void migrate_enable(void)
|
||||
unsigned long flags;
|
||||
struct rq *rq;
|
||||
|
||||
- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
|
||||
+ if (in_atomic()) {
|
||||
#ifdef CONFIG_SCHED_DEBUG
|
||||
p->migrate_disable_atomic--;
|
||||
#endif
|
||||
@@ -4430,6 +4430,84 @@ void do_set_cpus_allowed(struct task_str
|
||||
cpumask_copy(&p->cpus_allowed, new_mask);
|
||||
}
|
||||
|
||||
+static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
|
||||
+static DEFINE_MUTEX(sched_down_mutex);
|
||||
+static cpumask_t sched_down_cpumask;
|
||||
+
|
||||
+void tell_sched_cpu_down_begin(int cpu)
|
||||
+{
|
||||
+ mutex_lock(&sched_down_mutex);
|
||||
+ cpumask_set_cpu(cpu, &sched_down_cpumask);
|
||||
+ mutex_unlock(&sched_down_mutex);
|
||||
+}
|
||||
+
|
||||
+void tell_sched_cpu_down_done(int cpu)
|
||||
+{
|
||||
+ mutex_lock(&sched_down_mutex);
|
||||
+ cpumask_clear_cpu(cpu, &sched_down_cpumask);
|
||||
+ mutex_unlock(&sched_down_mutex);
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * migrate_me - try to move the current task off this cpu
|
||||
+ *
|
||||
+ * Used by the pin_current_cpu() code to try to get tasks
|
||||
+ * to move off the current CPU as it is going down.
|
||||
+ * It will only move the task if the task isn't pinned to
|
||||
+ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY)
|
||||
+ * and the task has to be in a RUNNING state. Otherwise the
|
||||
+ * movement of the task will wake it up (change its state
|
||||
+ * to running) when the task did not expect it.
|
||||
+ *
|
||||
+ * Returns 1 if it succeeded in moving the current task
|
||||
+ * 0 otherwise.
|
||||
+ */
|
||||
+int migrate_me(void)
|
||||
+{
|
||||
+ struct task_struct *p = current;
|
||||
+ struct migration_arg arg;
|
||||
+ struct cpumask *cpumask;
|
||||
+ struct cpumask *mask;
|
||||
+ unsigned long flags;
|
||||
+ unsigned int dest_cpu;
|
||||
+ struct rq *rq;
|
||||
+
|
||||
+ /*
|
||||
+ * We can not migrate tasks bounded to a CPU or tasks not
|
||||
+ * running. The movement of the task will wake it up.
|
||||
+ */
|
||||
+ if (p->flags & PF_NO_SETAFFINITY || p->state)
|
||||
+ return 0;
|
||||
+
|
||||
+ mutex_lock(&sched_down_mutex);
|
||||
+ rq = task_rq_lock(p, &flags);
|
||||
+
|
||||
+ cpumask = &__get_cpu_var(sched_cpumasks);
|
||||
+ mask = &p->cpus_allowed;
|
||||
+
|
||||
+ cpumask_andnot(cpumask, mask, &sched_down_cpumask);
|
||||
+
|
||||
+ if (!cpumask_weight(cpumask)) {
|
||||
+ /* It's only on this CPU? */
|
||||
+ task_rq_unlock(rq, p, &flags);
|
||||
+ mutex_unlock(&sched_down_mutex);
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
+ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
|
||||
+
|
||||
+ arg.task = p;
|
||||
+ arg.dest_cpu = dest_cpu;
|
||||
+
|
||||
+ task_rq_unlock(rq, p, &flags);
|
||||
+
|
||||
+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
|
||||
+ tlb_migrate_finish(p->mm);
|
||||
+ mutex_unlock(&sched_down_mutex);
|
||||
+
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* This is how migration works:
|
||||
*
|
|
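For context, the pinned sections that cpu_down() now has to drain typically enter through migrate_disable(), and on RT such a section may legitimately sleep, which is exactly why the sync_tsk / grab_lock handshake above is needed. A sketch of such a caller (the helper and the lock are hypothetical):

#include <linux/mutex.h>
#include <linux/smp.h>

static u32 read_one_reg_sketch(int cpu)	/* hypothetical stand-in */
{
	return (u32)cpu;
}

static u32 read_local_sensor_sketch(struct mutex *io_lock)
{
	u32 val;

	migrate_disable();		/* pinned to this CPU, still preemptible */
	mutex_lock(io_lock);		/* may sleep while pinned */
	val = read_one_reg_sketch(smp_processor_id());
	mutex_unlock(io_lock);
	migrate_enable();
	return val;
}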
@@ -0,0 +1,27 @@
|
|||
Subject: cpu-rt-variants.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Fri, 17 Jun 2011 15:42:38 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
include/linux/smp.h | 8 ++++++++
|
||||
1 file changed, 8 insertions(+)
|
||||
|
||||
--- a/include/linux/smp.h
|
||||
+++ b/include/linux/smp.h
|
||||
@@ -188,6 +188,14 @@ static inline void __smp_call_function_s
|
||||
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
|
||||
#define put_cpu() preempt_enable()
|
||||
|
||||
+#ifndef CONFIG_PREEMPT_RT_FULL
|
||||
+# define get_cpu_light() get_cpu()
|
||||
+# define put_cpu_light() put_cpu()
|
||||
+#else
|
||||
+# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
|
||||
+# define put_cpu_light() migrate_enable()
|
||||
+#endif
|
||||
+
|
||||
/*
|
||||
* Callback to arch code if there's nosmp or maxcpus=0 on the
|
||||
* boot command line:
|
|
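get_cpu_light()/put_cpu_light() keep the old get_cpu() contract of "give me my CPU number and keep me on it" without turning the section into an atomic one on RT. A sketch (the per-CPU counter is hypothetical):

#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/atomic.h>

static DEFINE_PER_CPU(atomic_t, sketch_inflight);	/* hypothetical */

static void begin_io_sketch(void)
{
	int cpu = get_cpu_light();	/* migrate_disable() on RT, get_cpu() otherwise */

	/* atomic_t keeps the update safe even though the section stays
	 * preemptible on RT; cpu remains valid until put_cpu_light(). */
	atomic_inc(&per_cpu(sketch_inflight, cpu));
	put_cpu_light();
}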
@@ -0,0 +1,54 @@
|
|||
From linux-rt-users-owner@vger.kernel.org Thu Nov 7 03:07:12 2013
|
||||
From: Tiejun Chen <tiejun.chen@windriver.com>
|
||||
Subject: [v1][PATCH] cpu_down: move migrate_enable() back
|
||||
Date: Thu, 7 Nov 2013 10:06:07 +0800
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Commit 08c1ab68, "hotplug-use-migrate-disable.patch", intends to
|
||||
use migrate_enable()/migrate_disable() to replace that combination
|
||||
of preempt_enable() and preempt_disable(), but actually in
|
||||
!CONFIG_PREEMPT_RT_FULL case, migrate_enable()/migrate_disable()
|
||||
are still equal to preempt_enable()/preempt_disable(). So that
|
||||
followed cpu_hotplug_begin()/cpu_unplug_begin(cpu) would go schedule()
|
||||
to trigger schedule_debug() like this:
|
||||
|
||||
_cpu_down()
|
||||
|
|
||||
+ migrate_disable() = preempt_disable()
|
||||
|
|
||||
+ cpu_hotplug_begin() or cpu_unplug_begin()
|
||||
|
|
||||
+ schedule()
|
||||
|
|
||||
+ __schedule()
|
||||
|
|
||||
+ preempt_disable();
|
||||
|
|
||||
+ __schedule_bug() is true!
|
||||
|
||||
So we should move migrate_enable() as the original scheme.
|
||||
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
|
||||
---
|
||||
kernel/cpu.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
--- a/kernel/cpu.c
|
||||
+++ b/kernel/cpu.c
|
||||
@@ -593,6 +593,7 @@ static int __ref _cpu_down(unsigned int
|
||||
err = -EBUSY;
|
||||
goto restore_cpus;
|
||||
}
|
||||
+ migrate_enable();
|
||||
|
||||
cpu_hotplug_begin();
|
||||
err = cpu_unplug_begin(cpu);
|
||||
@@ -646,7 +647,6 @@ static int __ref _cpu_down(unsigned int
|
||||
out_release:
|
||||
cpu_unplug_done(cpu);
|
||||
out_cancel:
|
||||
- migrate_enable();
|
||||
cpu_hotplug_done();
|
||||
if (!err)
|
||||
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
|
|
@@ -0,0 +1,35 @@
|
|||
Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Wed, 14 Dec 2011 01:03:49 +0100
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
We can't deal with the cpumask allocations which happen in atomic
|
||||
context (see arch/x86/kernel/apic/io_apic.c) on RT right now.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
arch/x86/Kconfig | 2 +-
|
||||
lib/Kconfig | 1 +
|
||||
2 files changed, 2 insertions(+), 1 deletion(-)
|
||||
|
||||
--- a/arch/x86/Kconfig
|
||||
+++ b/arch/x86/Kconfig
|
||||
@@ -820,7 +820,7 @@ config IOMMU_HELPER
|
||||
config MAXSMP
|
||||
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
|
||||
depends on X86_64 && SMP && DEBUG_KERNEL
|
||||
- select CPUMASK_OFFSTACK
|
||||
+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
|
||||
---help---
|
||||
Enable maximum number of CPUS and NUMA Nodes for this architecture.
|
||||
If unsure, say N.
|
||||
--- a/lib/Kconfig
|
||||
+++ b/lib/Kconfig
|
||||
@@ -344,6 +344,7 @@ config CHECK_SIGNATURE
|
||||
|
||||
config CPUMASK_OFFSTACK
|
||||
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
|
||||
+ depends on !PREEMPT_RT_FULL
|
||||
help
|
||||
Use dynamic allocation for cpumask_var_t, instead of putting
|
||||
them on the stack. This is a bit more expensive, but avoids
|
|
@@ -0,0 +1,24 @@
|
|||
Subject: debugobjects-rt.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Sun, 17 Jul 2011 21:41:35 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
lib/debugobjects.c | 5 ++++-
|
||||
1 file changed, 4 insertions(+), 1 deletion(-)
|
||||
|
||||
--- a/lib/debugobjects.c
|
||||
+++ b/lib/debugobjects.c
|
||||
@@ -308,7 +308,10 @@ static void
|
||||
struct debug_obj *obj;
|
||||
unsigned long flags;
|
||||
|
||||
- fill_pool();
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+ if (preempt_count() == 0 && !irqs_disabled())
|
||||
+#endif
|
||||
+ fill_pool();
|
||||
|
||||
db = get_bucket((unsigned long) addr);
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
Subject: dm: Make rt aware
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Mon, 14 Nov 2011 23:06:09 +0100
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Use the BUG_ON_NORT variant for the irq_disabled() checks. RT has
|
||||
interrupts legitimately enabled here as we cant deadlock against the
|
||||
irq thread due to the "sleeping spinlocks" conversion.
|
||||
|
||||
Reported-by: Luis Claudio R. Goncalves <lclaudio@uudg.org>
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
drivers/md/dm.c | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
--- a/drivers/md/dm.c
|
||||
+++ b/drivers/md/dm.c
|
||||
@@ -1813,14 +1813,14 @@ static void dm_request_fn(struct request
|
||||
if (map_request(ti, clone, md))
|
||||
goto requeued;
|
||||
|
||||
- BUG_ON(!irqs_disabled());
|
||||
+ BUG_ON_NONRT(!irqs_disabled());
|
||||
spin_lock(q->queue_lock);
|
||||
}
|
||||
|
||||
goto out;
|
||||
|
||||
requeued:
|
||||
- BUG_ON(!irqs_disabled());
|
||||
+ BUG_ON_NONRT(!irqs_disabled());
|
||||
spin_lock(q->queue_lock);
|
||||
|
||||
delay_and_out:
|
|
@ -0,0 +1,26 @@
|
|||
From: Ingo Molnar <mingo@elte.hu>
|
||||
Date: Fri, 3 Jul 2009 08:29:24 -0500
|
||||
Subject: drivers/net: Use disable_irq_nosync() in 8139too
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Use disable_irq_nosync() instead of disable_irq() as this might be
|
||||
called in atomic context with netpoll.
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
drivers/net/ethernet/realtek/8139too.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
--- a/drivers/net/ethernet/realtek/8139too.c
|
||||
+++ b/drivers/net/ethernet/realtek/8139too.c
|
||||
@@ -2213,7 +2213,7 @@ static void rtl8139_poll_controller(stru
|
||||
struct rtl8139_private *tp = netdev_priv(dev);
|
||||
const int irq = tp->pci_dev->irq;
|
||||
|
||||
- disable_irq(irq);
|
||||
+ disable_irq_nosync(irq);
|
||||
rtl8139_interrupt(irq, dev);
|
||||
enable_irq(irq);
|
||||
}
|
|
@ -0,0 +1,127 @@
|
|||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Sat, 20 Jun 2009 11:36:54 +0200
|
||||
Subject: drivers/net: fix livelock issues
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Preempt-RT runs into a live lock issue with the NETDEV_TX_LOCKED micro
|
||||
optimization. The reason is that the softirq thread is rescheduling
|
||||
itself on that return value. Depending on priorities it starts to
|
||||
monoplize the CPU and livelock on UP systems.
|
||||
|
||||
Remove it.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 6 +-----
|
||||
drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3 +--
|
||||
drivers/net/ethernet/chelsio/cxgb/sge.c | 3 +--
|
||||
drivers/net/ethernet/neterion/s2io.c | 7 +------
|
||||
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6 ++----
|
||||
drivers/net/ethernet/tehuti/tehuti.c | 9 ++-------
|
||||
drivers/net/rionet.c | 6 +-----
|
||||
7 files changed, 9 insertions(+), 31 deletions(-)
|
||||
|
||||
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
|
||||
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
|
||||
@@ -2206,11 +2206,7 @@ static netdev_tx_t atl1c_xmit_frame(stru
|
||||
}
|
||||
|
||||
tpd_req = atl1c_cal_tpd_req(skb);
|
||||
- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
|
||||
- if (netif_msg_pktdata(adapter))
|
||||
- dev_info(&adapter->pdev->dev, "tx locked\n");
|
||||
- return NETDEV_TX_LOCKED;
|
||||
- }
|
||||
+ spin_lock_irqsave(&adapter->tx_lock, flags);
|
||||
|
||||
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
|
||||
/* no enough descriptor, just stop queue */
|
||||
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
|
||||
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
|
||||
@@ -1838,8 +1838,7 @@ static netdev_tx_t atl1e_xmit_frame(stru
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
tpd_req = atl1e_cal_tdp_req(skb);
|
||||
- if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
|
||||
- return NETDEV_TX_LOCKED;
|
||||
+ spin_lock_irqsave(&adapter->tx_lock, flags);
|
||||
|
||||
if (atl1e_tpd_avail(adapter) < tpd_req) {
|
||||
/* no enough descriptor, just stop queue */
|
||||
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
|
||||
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
|
||||
@@ -1665,8 +1665,7 @@ static int t1_sge_tx(struct sk_buff *skb
|
||||
struct cmdQ *q = &sge->cmdQ[qid];
|
||||
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
|
||||
|
||||
- if (!spin_trylock(&q->lock))
|
||||
- return NETDEV_TX_LOCKED;
|
||||
+ spin_lock(&q->lock);
|
||||
|
||||
reclaim_completed_tx(sge, q);
|
||||
|
||||
--- a/drivers/net/ethernet/neterion/s2io.c
|
||||
+++ b/drivers/net/ethernet/neterion/s2io.c
|
||||
@@ -4089,12 +4089,7 @@ static netdev_tx_t s2io_xmit(struct sk_b
|
||||
[skb->priority & (MAX_TX_FIFOS - 1)];
|
||||
fifo = &mac_control->fifos[queue];
|
||||
|
||||
- if (do_spin_lock)
|
||||
- spin_lock_irqsave(&fifo->tx_lock, flags);
|
||||
- else {
|
||||
- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
|
||||
- return NETDEV_TX_LOCKED;
|
||||
- }
|
||||
+ spin_lock_irqsave(&fifo->tx_lock, flags);
|
||||
|
||||
if (sp->config.multiq) {
|
||||
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
|
||||
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
|
||||
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
|
||||
@@ -2148,10 +2148,8 @@ static int pch_gbe_xmit_frame(struct sk_
|
||||
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
|
||||
unsigned long flags;
|
||||
|
||||
- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
|
||||
- /* Collision - tell upper layer to requeue */
|
||||
- return NETDEV_TX_LOCKED;
|
||||
- }
|
||||
+ spin_lock_irqsave(&tx_ring->tx_lock, flags);
|
||||
+
|
||||
if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
|
||||
netif_stop_queue(netdev);
|
||||
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
|
||||
--- a/drivers/net/ethernet/tehuti/tehuti.c
|
||||
+++ b/drivers/net/ethernet/tehuti/tehuti.c
|
||||
@@ -1629,13 +1629,8 @@ static netdev_tx_t bdx_tx_transmit(struc
|
||||
unsigned long flags;
|
||||
|
||||
ENTER;
|
||||
- local_irq_save(flags);
|
||||
- if (!spin_trylock(&priv->tx_lock)) {
|
||||
- local_irq_restore(flags);
|
||||
- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
|
||||
- BDX_DRV_NAME, ndev->name);
|
||||
- return NETDEV_TX_LOCKED;
|
||||
- }
|
||||
+
|
||||
+ spin_lock_irqsave(&priv->tx_lock, flags);
|
||||
|
||||
/* build tx descriptor */
|
||||
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
|
||||
--- a/drivers/net/rionet.c
|
||||
+++ b/drivers/net/rionet.c
|
||||
@@ -174,11 +174,7 @@ static int rionet_start_xmit(struct sk_b
|
||||
unsigned long flags;
|
||||
int add_num = 1;
|
||||
|
||||
- local_irq_save(flags);
|
||||
- if (!spin_trylock(&rnet->tx_lock)) {
|
||||
- local_irq_restore(flags);
|
||||
- return NETDEV_TX_LOCKED;
|
||||
- }
|
||||
+ spin_lock_irqsave(&rnet->tx_lock, flags);
|
||||
|
||||
if (is_multicast_ether_addr(eth->h_dest))
|
||||
add_num = nets[rnet->mport->id].nact;
|
|
@ -0,0 +1,56 @@
|
|||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 1 Apr 2010 20:20:57 +0200
|
||||
Subject: drivers: net: gianfar: Make RT aware
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
The adjust_link() disables interrupts before taking the queue
|
||||
locks. On RT those locks are converted to "sleeping" locks and
|
||||
therefor the local_irq_save/restore must be converted to
|
||||
local_irq_save/restore_nort.
|
||||
|
||||
Reported-by: Xianghua Xiao <xiaoxianghua@gmail.com>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Tested-by: Xianghua Xiao <xiaoxianghua@gmail.com>
|
||||
|
||||
---
|
||||
drivers/net/ethernet/freescale/gianfar.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
--- a/drivers/net/ethernet/freescale/gianfar.c
|
||||
+++ b/drivers/net/ethernet/freescale/gianfar.c
|
||||
@@ -1701,7 +1701,7 @@ void stop_gfar(struct net_device *dev)
|
||||
|
||||
|
||||
/* Lock it down */
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
lock_tx_qs(priv);
|
||||
lock_rx_qs(priv);
|
||||
|
||||
@@ -1709,7 +1709,7 @@ void stop_gfar(struct net_device *dev)
|
||||
|
||||
unlock_rx_qs(priv);
|
||||
unlock_tx_qs(priv);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
|
||||
/* Free the IRQs */
|
||||
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
|
||||
@@ -3101,7 +3101,7 @@ static void adjust_link(struct net_devic
|
||||
struct phy_device *phydev = priv->phydev;
|
||||
int new_state = 0;
|
||||
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
lock_tx_qs(priv);
|
||||
|
||||
if (phydev->link) {
|
||||
@@ -3175,7 +3175,7 @@ static void adjust_link(struct net_devic
|
||||
if (new_state && netif_msg_link(priv))
|
||||
phy_print_status(phydev);
|
||||
unlock_tx_qs(priv);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
}
|
||||
|
||||
/* Update the hash table based on the current list of multicast
|
24
debian/patches/features/all/rt/drivers-net-tulip-add-missing-pci-disable.patch
vendored
Normal file
24
debian/patches/features/all/rt/drivers-net-tulip-add-missing-pci-disable.patch
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
From: Ingo Molnar <mingo@elte.hu>
|
||||
Date: Fri, 3 Jul 2009 08:30:18 -0500
|
||||
Subject: drivers/net: tulip_remove_one needs to call pci_disable_device()
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Otherwise the device is not completely shut down.
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
drivers/net/ethernet/dec/tulip/tulip_core.c | 1 +
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
|
||||
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
|
||||
@@ -1939,6 +1939,7 @@ static void tulip_remove_one(struct pci_
|
||||
pci_iounmap(pdev, tp->base_addr);
|
||||
free_netdev (dev);
|
||||
pci_release_regions (pdev);
|
||||
+ pci_disable_device (pdev);
|
||||
pci_set_drvdata (pdev, NULL);
|
||||
|
||||
/* pci_power_off (pdev, -1); */
|
|
@ -0,0 +1,49 @@
|
|||
From: Steven Rostedt <rostedt@goodmis.org>
|
||||
Date: Fri, 3 Jul 2009 08:30:00 -0500
|
||||
Subject: drivers/net: vortex fix locking issues
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Argh, cut and paste wasn't enough...
|
||||
|
||||
Use this patch instead. It needs an irq disable. But, believe it or not,
|
||||
on SMP this is actually better. If the irq is shared (as it is in Mark's
|
||||
case), we don't stop the irq of other devices from being handled on
|
||||
another CPU (unfortunately for Mark, he pinned all interrupts to one CPU).
|
||||
|
||||
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
drivers/net/ethernet/3com/3c59x.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
|
||||
--- a/drivers/net/ethernet/3com/3c59x.c
|
||||
+++ b/drivers/net/ethernet/3com/3c59x.c
|
||||
@@ -842,9 +842,9 @@ static void poll_vortex(struct net_devic
|
||||
{
|
||||
struct vortex_private *vp = netdev_priv(dev);
|
||||
unsigned long flags;
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -1917,12 +1917,12 @@ static void vortex_tx_timeout(struct net
|
||||
* Block interrupts because vortex_interrupt does a bare spin_lock()
|
||||
*/
|
||||
unsigned long flags;
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
if (vp->full_bus_master_tx)
|
||||
boomerang_interrupt(dev->irq, dev);
|
||||
else
|
||||
vortex_interrupt(dev->irq, dev);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
}
|
||||
}
|
||||
|
40
debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
vendored
Normal file
40
debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
vendored
Normal file
|
@ -0,0 +1,40 @@
|
|||
From: Ingo Molnar <mingo@elte.hu>
|
||||
Date: Fri, 3 Jul 2009 08:29:30 -0500
|
||||
Subject: drivers: random: Reduce preempt disabled region
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
No need to keep preemption disabled across the whole function.
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
drivers/char/random.c | 9 +++++----
|
||||
1 file changed, 5 insertions(+), 4 deletions(-)
|
||||
|
||||
--- a/drivers/char/random.c
|
||||
+++ b/drivers/char/random.c
|
||||
@@ -673,9 +673,12 @@ static void add_timer_randomness(struct
|
||||
preempt_disable();
|
||||
/* if over the trickle threshold, use only 1 in 4096 samples */
|
||||
if (input_pool.entropy_count > trickle_thresh &&
|
||||
- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
|
||||
- goto out;
|
||||
+ ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) {
|
||||
+ preempt_enable();
|
||||
+ return;
|
||||
+ }
|
||||
|
||||
+ preempt_enable();
|
||||
sample.jiffies = jiffies;
|
||||
sample.cycles = random_get_entropy();
|
||||
sample.num = num;
|
||||
@@ -716,8 +719,6 @@ static void add_timer_randomness(struct
|
||||
credit_entropy_bits(&input_pool,
|
||||
min_t(int, fls(delta>>1), 11));
|
||||
}
|
||||
-out:
|
||||
- preempt_enable();
|
||||
}
|
||||
|
||||
void add_input_randomness(unsigned int type, unsigned int code,
|
28
debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch
vendored
Normal file
28
debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch
vendored
Normal file
|
@ -0,0 +1,28 @@
|
|||
From: Ingo Molnar <mingo@elte.hu>
|
||||
Date: Fri, 3 Jul 2009 08:30:01 -0500
|
||||
Subject: serial: 8250: Call flush_to_ldisc when the irq is threaded
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
|
||||
---
|
||||
drivers/tty/tty_buffer.c | 4 ++++
|
||||
1 file changed, 4 insertions(+)
|
||||
|
||||
--- a/drivers/tty/tty_buffer.c
|
||||
+++ b/drivers/tty/tty_buffer.c
|
||||
@@ -509,10 +509,14 @@ void tty_flip_buffer_push(struct tty_por
|
||||
|
||||
buf->tail->commit = buf->tail->used;
|
||||
|
||||
+#ifndef CONFIG_PREEMPT_RT_FULL
|
||||
if (port->low_latency)
|
||||
flush_to_ldisc(&buf->work);
|
||||
else
|
||||
schedule_work(&buf->work);
|
||||
+#else
|
||||
+ schedule_work(&buf->work);
|
||||
+#endif
|
||||
}
|
||||
EXPORT_SYMBOL(tty_flip_buffer_push);
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
From: Ingo Molnar <mingo@elte.hu>
|
||||
Date: Fri, 3 Jul 2009 08:30:01 -0500
|
||||
Subject: serial: 8250: Clean up the locking for -rt
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
drivers/tty/serial/8250/8250_core.c | 15 +++++----------
|
||||
1 file changed, 5 insertions(+), 10 deletions(-)
|
||||
|
||||
--- a/drivers/tty/serial/8250/8250_core.c
|
||||
+++ b/drivers/tty/serial/8250/8250_core.c
|
||||
@@ -2860,14 +2860,10 @@ serial8250_console_write(struct console
|
||||
|
||||
touch_nmi_watchdog();
|
||||
|
||||
- local_irq_save(flags);
|
||||
- if (port->sysrq) {
|
||||
- /* serial8250_handle_irq() already took the lock */
|
||||
- locked = 0;
|
||||
- } else if (oops_in_progress) {
|
||||
- locked = spin_trylock(&port->lock);
|
||||
- } else
|
||||
- spin_lock(&port->lock);
|
||||
+ if (port->sysrq || oops_in_progress)
|
||||
+ locked = spin_trylock_irqsave(&port->lock, flags);
|
||||
+ else
|
||||
+ spin_lock_irqsave(&port->lock, flags);
|
||||
|
||||
/*
|
||||
* First save the IER then disable the interrupts
|
||||
@@ -2899,8 +2895,7 @@ serial8250_console_write(struct console
|
||||
serial8250_modem_status(up);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock(&port->lock);
|
||||
- local_irq_restore(flags);
|
||||
+ spin_unlock_irqrestore(&port->lock, flags);
|
||||
}
|
||||
|
||||
static int __init serial8250_console_setup(struct console *co, char *options)
|
|
@ -0,0 +1,39 @@
|
|||
Subject: drivers-tty-fix-omap-lock-crap.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 28 Jul 2011 13:32:57 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/omap-serial.c | 12 ++++--------
|
||||
1 file changed, 4 insertions(+), 8 deletions(-)
|
||||
|
||||
--- a/drivers/tty/serial/omap-serial.c
|
||||
+++ b/drivers/tty/serial/omap-serial.c
|
||||
@@ -1232,13 +1232,10 @@ serial_omap_console_write(struct console
|
||||
|
||||
pm_runtime_get_sync(up->dev);
|
||||
|
||||
- local_irq_save(flags);
|
||||
- if (up->port.sysrq)
|
||||
- locked = 0;
|
||||
- else if (oops_in_progress)
|
||||
- locked = spin_trylock(&up->port.lock);
|
||||
+ if (up->port.sysrq || oops_in_progress)
|
||||
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
|
||||
else
|
||||
- spin_lock(&up->port.lock);
|
||||
+ spin_lock_irqsave(&up->port.lock, flags);
|
||||
|
||||
/*
|
||||
* First save the IER then disable the interrupts
|
||||
@@ -1267,8 +1264,7 @@ serial_omap_console_write(struct console
|
||||
pm_runtime_mark_last_busy(up->dev);
|
||||
pm_runtime_put_autosuspend(up->dev);
|
||||
if (locked)
|
||||
- spin_unlock(&up->port.lock);
|
||||
- local_irq_restore(flags);
|
||||
+ spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
}
|
||||
|
||||
static int __init
|
|
@ -0,0 +1,45 @@
|
|||
Subject: drivers-tty-pl011-irq-disable-madness.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Tue, 08 Jan 2013 21:36:51 +0100
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/amba-pl011.c | 15 ++++++++++-----
|
||||
1 file changed, 10 insertions(+), 5 deletions(-)
|
||||
|
||||
--- a/drivers/tty/serial/amba-pl011.c
|
||||
+++ b/drivers/tty/serial/amba-pl011.c
|
||||
@@ -1910,13 +1910,19 @@ pl011_console_write(struct console *co,
|
||||
|
||||
clk_enable(uap->clk);
|
||||
|
||||
- local_irq_save(flags);
|
||||
+ /*
|
||||
+ * local_irq_save(flags);
|
||||
+ *
|
||||
+ * This local_irq_save() is nonsense. If we come in via sysrq
|
||||
+ * handling then interrupts are already disabled. Aside of
|
||||
+ * that the port.sysrq check is racy on SMP regardless.
|
||||
+ */
|
||||
if (uap->port.sysrq)
|
||||
locked = 0;
|
||||
else if (oops_in_progress)
|
||||
- locked = spin_trylock(&uap->port.lock);
|
||||
+ locked = spin_trylock_irqsave(&uap->port.lock, flags);
|
||||
else
|
||||
- spin_lock(&uap->port.lock);
|
||||
+ spin_lock_irqsave(&uap->port.lock, flags);
|
||||
|
||||
/*
|
||||
* First save the CR then disable the interrupts
|
||||
@@ -1938,8 +1944,7 @@ pl011_console_write(struct console *co,
|
||||
writew(old_cr, uap->port.membase + UART011_CR);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock(&uap->port.lock);
|
||||
- local_irq_restore(flags);
|
||||
+ spin_unlock_irqrestore(&uap->port.lock, flags);
|
||||
|
||||
clk_disable(uap->clk);
|
||||
}
|
60
debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
vendored
Normal file
60
debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
vendored
Normal file
|
@ -0,0 +1,60 @@
|
|||
From d841118ac80c5bfb18f47984bc40687eed08b714 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Thu, 25 Apr 2013 18:12:52 +0200
|
||||
Subject: [PATCH] drm/i915: drop trace_i915_gem_ring_dispatch on rt
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
This tracepoint is responsible for:
|
||||
|
||||
|[<814cc358>] __schedule_bug+0x4d/0x59
|
||||
|[<814d24cc>] __schedule+0x88c/0x930
|
||||
|[<814d3b90>] ? _raw_spin_unlock_irqrestore+0x40/0x50
|
||||
|[<814d3b95>] ? _raw_spin_unlock_irqrestore+0x45/0x50
|
||||
|[<810b57b5>] ? task_blocks_on_rt_mutex+0x1f5/0x250
|
||||
|[<814d27d9>] schedule+0x29/0x70
|
||||
|[<814d3423>] rt_spin_lock_slowlock+0x15b/0x278
|
||||
|[<814d3786>] rt_spin_lock+0x26/0x30
|
||||
|[<a00dced9>] gen6_gt_force_wake_get+0x29/0x60 [i915]
|
||||
|[<a00e183f>] gen6_ring_get_irq+0x5f/0x100 [i915]
|
||||
|[<a00b2a33>] ftrace_raw_event_i915_gem_ring_dispatch+0xe3/0x100 [i915]
|
||||
|[<a00ac1b3>] i915_gem_do_execbuffer.isra.13+0xbd3/0x1430 [i915]
|
||||
|[<810f8943>] ? trace_buffer_unlock_commit+0x43/0x60
|
||||
|[<8113e8d2>] ? ftrace_raw_event_kmem_alloc+0xd2/0x180
|
||||
|[<8101d063>] ? native_sched_clock+0x13/0x80
|
||||
|[<a00acf29>] i915_gem_execbuffer2+0x99/0x280 [i915]
|
||||
|[<a00114a3>] drm_ioctl+0x4c3/0x570 [drm]
|
||||
|[<8101d0d9>] ? sched_clock+0x9/0x10
|
||||
|[<a00ace90>] ? i915_gem_execbuffer+0x480/0x480 [i915]
|
||||
|[<810f1c18>] ? rb_commit+0x68/0xa0
|
||||
|[<810f1c6c>] ? ring_buffer_unlock_commit+0x1c/0xa0
|
||||
|[<81197467>] do_vfs_ioctl+0x97/0x540
|
||||
|[<81021318>] ? ftrace_raw_event_sys_enter+0xd8/0x130
|
||||
|[<811979a1>] sys_ioctl+0x91/0xb0
|
||||
|[<814db931>] tracesys+0xe1/0xe6
|
||||
|
||||
Chris Wilson does not like to move i915_trace_irq_get() out of the macro
|
||||
|
||||
|No. This enables the IRQ, as well as making a number of
|
||||
|very expensively serialised read, unconditionally.
|
||||
|
||||
so it is gone now on RT.
|
||||
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
Reported-by: Joakim Hernberg <jbh@alchemy.lu>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 ++
|
||||
1 file changed, 2 insertions(+)
|
||||
|
||||
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
|
||||
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
|
||||
@@ -1129,7 +1129,9 @@ i915_gem_do_execbuffer(struct drm_device
|
||||
goto err;
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_PREEMPT_RT_BASE
|
||||
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
|
||||
+#endif
|
||||
|
||||
i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
|
||||
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
|
78
debian/patches/features/all/rt/drm-remove-preempt_disable-from-drm_calc_vbltimestam.patch
vendored
Normal file
78
debian/patches/features/all/rt/drm-remove-preempt_disable-from-drm_calc_vbltimestam.patch
vendored
Normal file
|
@ -0,0 +1,78 @@
|
|||
From c17c11831778d5ed948858ef4bb32058f5013094 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Fri, 11 Oct 2013 17:14:31 +0200
|
||||
Subject: [PATCH] drm: remove preempt_disable() from
|
||||
drm_calc_vbltimestamp_from_scanoutpos()
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Luis captured the following:
|
||||
|
||||
| BUG: sleeping function called from invalid context at kernel/rtmutex.c:659
|
||||
| in_atomic(): 1, irqs_disabled(): 0, pid: 517, name: Xorg
|
||||
| 2 locks held by Xorg/517:
|
||||
| #0:
|
||||
| (
|
||||
| &dev->vbl_lock
|
||||
| ){......}
|
||||
| , at:
|
||||
| [<ffffffffa0024c60>] drm_vblank_get+0x30/0x2b0 [drm]
|
||||
| #1:
|
||||
| (
|
||||
| &dev->vblank_time_lock
|
||||
| ){......}
|
||||
| , at:
|
||||
| [<ffffffffa0024ce1>] drm_vblank_get+0xb1/0x2b0 [drm]
|
||||
| Preemption disabled at:
|
||||
| [<ffffffffa008bc95>] i915_get_vblank_timestamp+0x45/0xa0 [i915]
|
||||
| CPU: 3 PID: 517 Comm: Xorg Not tainted 3.10.10-rt7+ #5
|
||||
| Call Trace:
|
||||
| [<ffffffff8164b790>] dump_stack+0x19/0x1b
|
||||
| [<ffffffff8107e62f>] __might_sleep+0xff/0x170
|
||||
| [<ffffffff81651ac4>] rt_spin_lock+0x24/0x60
|
||||
| [<ffffffffa0084e67>] i915_read32+0x27/0x170 [i915]
|
||||
| [<ffffffffa008a591>] i915_pipe_enabled+0x31/0x40 [i915]
|
||||
| [<ffffffffa008a6be>] i915_get_crtc_scanoutpos+0x3e/0x1b0 [i915]
|
||||
| [<ffffffffa00245d4>] drm_calc_vbltimestamp_from_scanoutpos+0xf4/0x430 [drm]
|
||||
| [<ffffffffa008bc95>] i915_get_vblank_timestamp+0x45/0xa0 [i915]
|
||||
| [<ffffffffa0024998>] drm_get_last_vbltimestamp+0x48/0x70 [drm]
|
||||
| [<ffffffffa0024db5>] drm_vblank_get+0x185/0x2b0 [drm]
|
||||
| [<ffffffffa0025d03>] drm_wait_vblank+0x83/0x5d0 [drm]
|
||||
| [<ffffffffa00212a2>] drm_ioctl+0x552/0x6a0 [drm]
|
||||
| [<ffffffff811a0095>] do_vfs_ioctl+0x325/0x5b0
|
||||
| [<ffffffff811a03a1>] SyS_ioctl+0x81/0xa0
|
||||
| [<ffffffff8165a342>] tracesys+0xdd/0xe2
|
||||
|
||||
After a longer thread it was decided to drop the preempt_disable()/
|
||||
enable() invocations which were meant for -RT and Mario Kleiner looks
|
||||
for a replacement.
|
||||
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
Reported-By: Luis Claudio R. Goncalves <lclaudio@uudg.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/gpu/drm/drm_irq.c | 7 -------
|
||||
1 file changed, 7 deletions(-)
|
||||
|
||||
--- a/drivers/gpu/drm/drm_irq.c
|
||||
+++ b/drivers/gpu/drm/drm_irq.c
|
||||
@@ -628,11 +628,6 @@ int drm_calc_vbltimestamp_from_scanoutpo
|
||||
* code gets preempted or delayed for some reason.
|
||||
*/
|
||||
for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
|
||||
- /* Disable preemption to make it very likely to
|
||||
- * succeed in the first iteration even on PREEMPT_RT kernel.
|
||||
- */
|
||||
- preempt_disable();
|
||||
-
|
||||
/* Get system timestamp before query. */
|
||||
stime = ktime_get();
|
||||
|
||||
@@ -644,8 +639,6 @@ int drm_calc_vbltimestamp_from_scanoutpo
|
||||
if (!drm_timestamp_monotonic)
|
||||
mono_time_offset = ktime_get_monotonic_offset();
|
||||
|
||||
- preempt_enable();
|
||||
-
|
||||
/* Return as no-op if scanout query unsupported or failed. */
|
||||
if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
|
||||
DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
|
|
@ -0,0 +1,45 @@
|
|||
Subject: early-printk-consolidate.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Sat, 23 Jul 2011 11:04:08 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
arch/sparc/kernel/setup_32.c | 1 +
|
||||
arch/sparc/kernel/setup_64.c | 8 +++++++-
|
||||
2 files changed, 8 insertions(+), 1 deletion(-)
|
||||
|
||||
--- a/arch/sparc/kernel/setup_32.c
|
||||
+++ b/arch/sparc/kernel/setup_32.c
|
||||
@@ -309,6 +309,7 @@ void __init setup_arch(char **cmdline_p)
|
||||
|
||||
boot_flags_init(*cmdline_p);
|
||||
|
||||
+ early_console = &prom_early_console;
|
||||
register_console(&prom_early_console);
|
||||
|
||||
printk("ARCH: ");
|
||||
--- a/arch/sparc/kernel/setup_64.c
|
||||
+++ b/arch/sparc/kernel/setup_64.c
|
||||
@@ -555,6 +555,12 @@ static void __init init_sparc64_elf_hwca
|
||||
pause_patch();
|
||||
}
|
||||
|
||||
+static inline void register_prom_console(void)
|
||||
+{
|
||||
+ early_console = &prom_early_console;
|
||||
+ register_console(&prom_early_console);
|
||||
+}
|
||||
+
|
||||
void __init setup_arch(char **cmdline_p)
|
||||
{
|
||||
/* Initialize PROM console and command line. */
|
||||
@@ -566,7 +572,7 @@ void __init setup_arch(char **cmdline_p)
|
||||
#ifdef CONFIG_EARLYFB
|
||||
if (btext_find_display())
|
||||
#endif
|
||||
- register_console(&prom_early_console);
|
||||
+ register_prom_console();
|
||||
|
||||
if (tlb_type == hypervisor)
|
||||
printk("ARCH: SUN4V\n");
|
|
@ -0,0 +1,27 @@
|
|||
Subject: epoll.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Fri, 08 Jul 2011 16:35:35 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
fs/eventpoll.c | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
--- a/fs/eventpoll.c
|
||||
+++ b/fs/eventpoll.c
|
||||
@@ -500,12 +500,12 @@ static int ep_poll_wakeup_proc(void *pri
|
||||
*/
|
||||
static void ep_poll_safewake(wait_queue_head_t *wq)
|
||||
{
|
||||
- int this_cpu = get_cpu();
|
||||
+ int this_cpu = get_cpu_light();
|
||||
|
||||
ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
|
||||
ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
|
||||
|
||||
- put_cpu();
|
||||
+ put_cpu_light();
|
||||
}
|
||||
|
||||
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
|
|
@ -0,0 +1,23 @@
|
|||
Subject: filemap-fix-up.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Fri, 17 Jun 2011 18:56:24 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Wrecked-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
|
||||
Link: http://lkml.kernel.org/n/tip-m6yuzd6ul717hlnl2gj6p3ou@git.kernel.org
|
||||
---
|
||||
mm/filemap.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
--- a/mm/filemap.c
|
||||
+++ b/mm/filemap.c
|
||||
@@ -1976,7 +1976,7 @@ size_t iov_iter_copy_from_user_atomic(st
|
||||
char *kaddr;
|
||||
size_t copied;
|
||||
|
||||
- BUG_ON(!in_atomic());
|
||||
+ BUG_ON(!pagefault_disabled());
|
||||
kaddr = kmap_atomic(page);
|
||||
if (likely(i->nr_segs == 1)) {
|
||||
int left;
|
|
@ -0,0 +1,113 @@
|
|||
From: Steven Rostedt <rostedt@goodmis.org>
|
||||
Subject: x86: Do not disable preemption in int3 on 32bit
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Preemption must be disabled before enabling interrupts in do_trap
|
||||
on x86_64 because the stack in use for int3 and debug is a per CPU
|
||||
stack set by th IST. But 32bit does not have an IST and the stack
|
||||
still belongs to the current task and there is no problem in scheduling
|
||||
out the task.
|
||||
|
||||
Keep preemption enabled on X86_32 when enabling interrupts for
|
||||
do_trap().
|
||||
|
||||
The name of the function is changed from preempt_conditional_sti/cli()
|
||||
to conditional_sti/cli_ist(), to annotate that this function is used
|
||||
when the stack is on the IST.
|
||||
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
arch/x86/kernel/traps.c | 32 +++++++++++++++++++++++---------
|
||||
1 file changed, 23 insertions(+), 9 deletions(-)
|
||||
|
||||
--- a/arch/x86/kernel/traps.c
|
||||
+++ b/arch/x86/kernel/traps.c
|
||||
@@ -86,9 +86,21 @@ static inline void conditional_sti(struc
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
-static inline void preempt_conditional_sti(struct pt_regs *regs)
|
||||
+static inline void conditional_sti_ist(struct pt_regs *regs)
|
||||
{
|
||||
+#ifdef CONFIG_X86_64
|
||||
+ /*
|
||||
+ * X86_64 uses a per CPU stack on the IST for certain traps
|
||||
+ * like int3. The task can not be preempted when using one
|
||||
+ * of these stacks, thus preemption must be disabled, otherwise
|
||||
+ * the stack can be corrupted if the task is scheduled out,
|
||||
+ * and another task comes in and uses this stack.
|
||||
+ *
|
||||
+ * On x86_32 the task keeps its own stack and it is OK if the
|
||||
+ * task schedules out.
|
||||
+ */
|
||||
inc_preempt_count();
|
||||
+#endif
|
||||
if (regs->flags & X86_EFLAGS_IF)
|
||||
local_irq_enable();
|
||||
}
|
||||
@@ -99,11 +111,13 @@ static inline void conditional_cli(struc
|
||||
local_irq_disable();
|
||||
}
|
||||
|
||||
-static inline void preempt_conditional_cli(struct pt_regs *regs)
|
||||
+static inline void conditional_cli_ist(struct pt_regs *regs)
|
||||
{
|
||||
if (regs->flags & X86_EFLAGS_IF)
|
||||
local_irq_disable();
|
||||
+#ifdef CONFIG_X86_64
|
||||
dec_preempt_count();
|
||||
+#endif
|
||||
}
|
||||
|
||||
static int __kprobes
|
||||
@@ -236,9 +250,9 @@ dotraplinkage void do_stack_segment(stru
|
||||
prev_state = exception_enter();
|
||||
if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
|
||||
X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
|
||||
- preempt_conditional_sti(regs);
|
||||
+ conditional_sti_ist(regs);
|
||||
do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
|
||||
- preempt_conditional_cli(regs);
|
||||
+ conditional_cli_ist(regs);
|
||||
}
|
||||
exception_exit(prev_state);
|
||||
}
|
||||
@@ -347,9 +361,9 @@ dotraplinkage void __kprobes notrace do_
|
||||
* as we may switch to the interrupt stack.
|
||||
*/
|
||||
debug_stack_usage_inc();
|
||||
- preempt_conditional_sti(regs);
|
||||
+ conditional_sti_ist(regs);
|
||||
do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
|
||||
- preempt_conditional_cli(regs);
|
||||
+ conditional_cli_ist(regs);
|
||||
debug_stack_usage_dec();
|
||||
exit:
|
||||
exception_exit(prev_state);
|
||||
@@ -455,12 +469,12 @@ dotraplinkage void __kprobes do_debug(st
|
||||
debug_stack_usage_inc();
|
||||
|
||||
/* It's safe to allow irq's after DR6 has been saved */
|
||||
- preempt_conditional_sti(regs);
|
||||
+ conditional_sti_ist(regs);
|
||||
|
||||
if (regs->flags & X86_VM_MASK) {
|
||||
handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
|
||||
X86_TRAP_DB);
|
||||
- preempt_conditional_cli(regs);
|
||||
+ conditional_cli_ist(regs);
|
||||
debug_stack_usage_dec();
|
||||
goto exit;
|
||||
}
|
||||
@@ -480,7 +494,7 @@ dotraplinkage void __kprobes do_debug(st
|
||||
si_code = get_si_code(tsk->thread.debugreg6);
|
||||
if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
|
||||
send_sigtrap(tsk, regs, error_code, si_code);
|
||||
- preempt_conditional_cli(regs);
|
||||
+ conditional_cli_ist(regs);
|
||||
debug_stack_usage_dec();
|
||||
|
||||
exit:
|
|
@ -0,0 +1,67 @@
|
|||
From 53a9508f5983092928b0e6e12f400b686e1f04b1 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Mon, 28 Oct 2013 11:50:06 +0100
|
||||
Subject: [PATCH] a few open coded completions
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/net/wireless/orinoco/orinoco_usb.c | 2 +-
|
||||
drivers/usb/gadget/f_fs.c | 2 +-
|
||||
drivers/usb/gadget/inode.c | 4 ++--
|
||||
include/linux/netdevice.h | 2 +-
|
||||
4 files changed, 5 insertions(+), 5 deletions(-)
|
||||
|
||||
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
|
||||
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
|
||||
@@ -714,7 +714,7 @@ static void ezusb_req_ctx_wait(struct ez
|
||||
while (!ctx->done.done && msecs--)
|
||||
udelay(1000);
|
||||
} else {
|
||||
- wait_event_interruptible(ctx->done.wait,
|
||||
+ swait_event_interruptible(ctx->done.wait,
|
||||
ctx->done.done);
|
||||
}
|
||||
break;
|
||||
--- a/drivers/usb/gadget/f_fs.c
|
||||
+++ b/drivers/usb/gadget/f_fs.c
|
||||
@@ -1282,7 +1282,7 @@ static void ffs_data_put(struct ffs_data
|
||||
pr_info("%s(): freeing\n", __func__);
|
||||
ffs_data_clear(ffs);
|
||||
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
|
||||
- waitqueue_active(&ffs->ep0req_completion.wait));
|
||||
+ swaitqueue_active(&ffs->ep0req_completion.wait));
|
||||
kfree(ffs->dev_name);
|
||||
kfree(ffs);
|
||||
}
|
||||
--- a/drivers/usb/gadget/inode.c
|
||||
+++ b/drivers/usb/gadget/inode.c
|
||||
@@ -340,7 +340,7 @@ ep_io (struct ep_data *epdata, void *buf
|
||||
spin_unlock_irq (&epdata->dev->lock);
|
||||
|
||||
if (likely (value == 0)) {
|
||||
- value = wait_event_interruptible (done.wait, done.done);
|
||||
+ value = swait_event_interruptible (done.wait, done.done);
|
||||
if (value != 0) {
|
||||
spin_lock_irq (&epdata->dev->lock);
|
||||
if (likely (epdata->ep != NULL)) {
|
||||
@@ -349,7 +349,7 @@ ep_io (struct ep_data *epdata, void *buf
|
||||
usb_ep_dequeue (epdata->ep, epdata->req);
|
||||
spin_unlock_irq (&epdata->dev->lock);
|
||||
|
||||
- wait_event (done.wait, done.done);
|
||||
+ swait_event (done.wait, done.done);
|
||||
if (epdata->status == -ECONNRESET)
|
||||
epdata->status = -EINTR;
|
||||
} else {
|
||||
--- a/include/linux/netdevice.h
|
||||
+++ b/include/linux/netdevice.h
|
||||
@@ -1214,7 +1214,7 @@ struct net_device {
|
||||
unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
|
||||
unsigned char addr_assign_type; /* hw address assignment type */
|
||||
unsigned char addr_len; /* hardware address length */
|
||||
- unsigned char neigh_priv_len;
|
||||
+ unsigned short neigh_priv_len;
|
||||
unsigned short dev_id; /* Used to differentiate devices
|
||||
* that share the same link
|
||||
* layer address
|
|
@ -0,0 +1,21 @@
|
|||
Subject: fs-block-rt-support.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Tue, 14 Jun 2011 17:05:09 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
block/blk-core.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
--- a/block/blk-core.c
|
||||
+++ b/block/blk-core.c
|
||||
@@ -232,7 +232,7 @@ EXPORT_SYMBOL(blk_delay_queue);
|
||||
**/
|
||||
void blk_start_queue(struct request_queue *q)
|
||||
{
|
||||
- WARN_ON(!irqs_disabled());
|
||||
+ WARN_ON_NONRT(!irqs_disabled());
|
||||
|
||||
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
|
||||
__blk_run_queue(q);
|
86
debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
vendored
Normal file
86
debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
vendored
Normal file
|
@ -0,0 +1,86 @@
|
|||
Subject: fs: dcache: Use cpu_chill() in trylock loops
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Wed, 07 Mar 2012 21:00:34 +0100
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Retry loops on RT might loop forever when the modifying side was
|
||||
preempted. Use cpu_chill() instead of cpu_relax() to let the system
|
||||
make progress.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
---
|
||||
fs/autofs4/autofs_i.h | 1 +
|
||||
fs/autofs4/expire.c | 2 +-
|
||||
fs/dcache.c | 5 +++--
|
||||
fs/namespace.c | 3 ++-
|
||||
4 files changed, 7 insertions(+), 4 deletions(-)
|
||||
|
||||
--- a/fs/autofs4/autofs_i.h
|
||||
+++ b/fs/autofs4/autofs_i.h
|
||||
@@ -34,6 +34,7 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/namei.h>
|
||||
+#include <linux/delay.h>
|
||||
#include <asm/current.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
--- a/fs/autofs4/expire.c
|
||||
+++ b/fs/autofs4/expire.c
|
||||
@@ -157,7 +157,7 @@ static struct dentry *get_next_positive_
|
||||
parent = p->d_parent;
|
||||
if (!spin_trylock(&parent->d_lock)) {
|
||||
spin_unlock(&p->d_lock);
|
||||
- cpu_relax();
|
||||
+ cpu_chill();
|
||||
goto relock;
|
||||
}
|
||||
spin_unlock(&p->d_lock);
|
||||
--- a/fs/dcache.c
|
||||
+++ b/fs/dcache.c
|
||||
@@ -19,6 +19,7 @@
|
||||
#include <linux/mm.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/fsnotify.h>
|
||||
+#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/hash.h>
|
||||
@@ -554,7 +555,7 @@ dentry_kill(struct dentry *dentry, int u
|
||||
relock:
|
||||
if (unlock_on_failure) {
|
||||
spin_unlock(&dentry->d_lock);
|
||||
- cpu_relax();
|
||||
+ cpu_chill();
|
||||
}
|
||||
return dentry; /* try again with same dentry */
|
||||
}
|
||||
@@ -2391,7 +2392,7 @@ void d_delete(struct dentry * dentry)
|
||||
if (dentry->d_lockref.count == 1) {
|
||||
if (!spin_trylock(&inode->i_lock)) {
|
||||
spin_unlock(&dentry->d_lock);
|
||||
- cpu_relax();
|
||||
+ cpu_chill();
|
||||
goto again;
|
||||
}
|
||||
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
|
||||
--- a/fs/namespace.c
|
||||
+++ b/fs/namespace.c
|
||||
@@ -23,6 +23,7 @@
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/proc_ns.h>
|
||||
#include <linux/magic.h>
|
||||
+#include <linux/delay.h>
|
||||
#include "pnode.h"
|
||||
#include "internal.h"
|
||||
|
||||
@@ -317,7 +318,7 @@ int __mnt_want_write(struct vfsmount *m)
|
||||
smp_mb();
|
||||
while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
|
||||
preempt_enable();
|
||||
- cpu_relax();
|
||||
+ cpu_chill();
|
||||
preempt_disable();
|
||||
}
|
||||
/*
|
30
debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch
vendored
Normal file
30
debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
From: Mike Galbraith <mgalbraith@suse.de>
|
||||
Date: Wed, 11 Jul 2012 22:05:20 +0000
|
||||
Subject: fs, jbd: pull your plug when waiting for space
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
With an -rt kernel, and a heavy sync IO load, tasks can jam
|
||||
up on journal locks without unplugging, which can lead to
|
||||
terminal IO starvation. Unplug and schedule when waiting for space.
|
||||
|
||||
Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
|
||||
Cc: Steven Rostedt <rostedt@goodmis.org>
|
||||
Cc: Theodore Tso <tytso@mit.edu>
|
||||
Link: http://lkml.kernel.org/r/1341812414.7370.73.camel@marge.simpson.net
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
fs/jbd/checkpoint.c | 2 ++
|
||||
1 file changed, 2 insertions(+)
|
||||
|
||||
--- a/fs/jbd/checkpoint.c
|
||||
+++ b/fs/jbd/checkpoint.c
|
||||
@@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *jou
|
||||
if (journal->j_flags & JFS_ABORT)
|
||||
return;
|
||||
spin_unlock(&journal->j_state_lock);
|
||||
+ if (current->plug)
|
||||
+ io_schedule();
|
||||
mutex_lock(&journal->j_checkpoint_mutex);
|
||||
|
||||
/*
|
|
@ -0,0 +1,101 @@
|
|||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Fri, 18 Mar 2011 10:11:25 +0100
|
||||
Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
bit_spin_locks break under RT.
|
||||
|
||||
Based on a previous patch from Steven Rostedt <rostedt@goodmis.org>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
--
|
||||
|
||||
include/linux/buffer_head.h | 10 ++++++++++
|
||||
include/linux/jbd_common.h | 24 ++++++++++++++++++++++++
|
||||
2 files changed, 34 insertions(+)
|
||||
|
||||
--- a/include/linux/buffer_head.h
|
||||
+++ b/include/linux/buffer_head.h
|
||||
@@ -77,6 +77,11 @@ struct buffer_head {
|
||||
atomic_t b_count; /* users using this buffer_head */
|
||||
#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
spinlock_t b_uptodate_lock;
|
||||
+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
|
||||
+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
|
||||
+ spinlock_t b_state_lock;
|
||||
+ spinlock_t b_journal_head_lock;
|
||||
+#endif
|
||||
#endif
|
||||
};
|
||||
|
||||
@@ -108,6 +113,11 @@ static inline void buffer_head_init_lock
|
||||
{
|
||||
#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
spin_lock_init(&bh->b_uptodate_lock);
|
||||
+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
|
||||
+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
|
||||
+ spin_lock_init(&bh->b_state_lock);
|
||||
+ spin_lock_init(&bh->b_journal_head_lock);
|
||||
+#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
--- a/include/linux/jbd_common.h
|
||||
+++ b/include/linux/jbd_common.h
|
||||
@@ -15,32 +15,56 @@ static inline struct journal_head *bh2jh
|
||||
|
||||
static inline void jbd_lock_bh_state(struct buffer_head *bh)
|
||||
{
|
||||
+#ifndef CONFIG_PREEMPT_RT_BASE
|
||||
bit_spin_lock(BH_State, &bh->b_state);
|
||||
+#else
|
||||
+ spin_lock(&bh->b_state_lock);
|
||||
+#endif
|
||||
}
|
||||
|
||||
static inline int jbd_trylock_bh_state(struct buffer_head *bh)
|
||||
{
|
||||
+#ifndef CONFIG_PREEMPT_RT_BASE
|
||||
return bit_spin_trylock(BH_State, &bh->b_state);
|
||||
+#else
|
||||
+ return spin_trylock(&bh->b_state_lock);
|
||||
+#endif
|
||||
}
|
||||
|
||||
static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
|
||||
{
|
||||
+#ifndef CONFIG_PREEMPT_RT_BASE
|
||||
return bit_spin_is_locked(BH_State, &bh->b_state);
|
||||
+#else
|
||||
+ return spin_is_locked(&bh->b_state_lock);
|
||||
+#endif
|
||||
}
|
||||
|
||||
static inline void jbd_unlock_bh_state(struct buffer_head *bh)
|
||||
{
|
||||
+#ifndef CONFIG_PREEMPT_RT_BASE
|
||||
bit_spin_unlock(BH_State, &bh->b_state);
|
||||
+#else
|
||||
+ spin_unlock(&bh->b_state_lock);
|
||||
+#endif
|
||||
}
|
||||
|
||||
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
|
||||
{
|
||||
+#ifndef CONFIG_PREEMPT_RT_BASE
|
||||
bit_spin_lock(BH_JournalHead, &bh->b_state);
|
||||
+#else
|
||||
+ spin_lock(&bh->b_journal_head_lock);
|
||||
+#endif
|
||||
}
|
||||
|
||||
static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
|
||||
{
|
||||
+#ifndef CONFIG_PREEMPT_RT_BASE
|
||||
bit_spin_unlock(BH_JournalHead, &bh->b_state);
|
||||
+#else
|
||||
+ spin_unlock(&bh->b_journal_head_lock);
|
||||
+#endif
|
||||
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1,31 @@
|
|||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Sun, 19 Jul 2009 08:44:27 -0500
|
||||
Subject: fs: namespace preemption fix
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
On RT we cannot loop with preemption disabled here as
|
||||
mnt_make_readonly() might have been preempted. We can safely enable
|
||||
preemption while waiting for MNT_WRITE_HOLD to be cleared. Safe on !RT
|
||||
as well.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
fs/namespace.c | 5 ++++-
|
||||
1 file changed, 4 insertions(+), 1 deletion(-)
|
||||
|
||||
--- a/fs/namespace.c
|
||||
+++ b/fs/namespace.c
|
||||
@@ -315,8 +315,11 @@ int __mnt_want_write(struct vfsmount *m)
|
||||
* incremented count after it has set MNT_WRITE_HOLD.
|
||||
*/
|
||||
smp_mb();
|
||||
- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
|
||||
+ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
|
||||
+ preempt_enable();
|
||||
cpu_relax();
|
||||
+ preempt_disable();
|
||||
+ }
|
||||
/*
|
||||
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
|
||||
* be set to match its requirements. So we must not load that until
|
|
@ -0,0 +1,60 @@
|
|||
From: Mike Galbraith <efault@gmx.de>
|
||||
Date: Fri, 3 Jul 2009 08:44:12 -0500
|
||||
Subject: fs: ntfs: disable interrupt only on !RT
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote:
|
||||
> * Nick Piggin <nickpiggin@yahoo.com.au> wrote:
|
||||
>
|
||||
> > > [10138.175796] [<c0105de3>] show_trace+0x12/0x14
|
||||
> > > [10138.180291] [<c0105dfb>] dump_stack+0x16/0x18
|
||||
> > > [10138.184769] [<c011609f>] native_smp_call_function_mask+0x138/0x13d
|
||||
> > > [10138.191117] [<c0117606>] smp_call_function+0x1e/0x24
|
||||
> > > [10138.196210] [<c012f85c>] on_each_cpu+0x25/0x50
|
||||
> > > [10138.200807] [<c0115c74>] flush_tlb_all+0x1e/0x20
|
||||
> > > [10138.205553] [<c016caaf>] kmap_high+0x1b6/0x417
|
||||
> > > [10138.210118] [<c011ec88>] kmap+0x4d/0x4f
|
||||
> > > [10138.214102] [<c026a9d8>] ntfs_end_buffer_async_read+0x228/0x2f9
|
||||
> > > [10138.220163] [<c01a0e9e>] end_bio_bh_io_sync+0x26/0x3f
|
||||
> > > [10138.225352] [<c01a2b09>] bio_endio+0x42/0x6d
|
||||
> > > [10138.229769] [<c02c2a08>] __end_that_request_first+0x115/0x4ac
|
||||
> > > [10138.235682] [<c02c2da7>] end_that_request_chunk+0x8/0xa
|
||||
> > > [10138.241052] [<c0365943>] ide_end_request+0x55/0x10a
|
||||
> > > [10138.246058] [<c036dae3>] ide_dma_intr+0x6f/0xac
|
||||
> > > [10138.250727] [<c0366d83>] ide_intr+0x93/0x1e0
|
||||
> > > [10138.255125] [<c015afb4>] handle_IRQ_event+0x5c/0xc9
|
||||
> >
|
||||
> > Looks like ntfs is kmap()ing from interrupt context. Should be using
|
||||
> > kmap_atomic instead, I think.
|
||||
>
|
||||
> it's not atomic interrupt context but irq thread context - and -rt
|
||||
> remaps kmap_atomic() to kmap() internally.
|
||||
|
||||
Hm. Looking at the change to mm/bounce.c, perhaps I should do this
|
||||
instead?
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
fs/ntfs/aops.c | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
--- a/fs/ntfs/aops.c
|
||||
+++ b/fs/ntfs/aops.c
|
||||
@@ -144,13 +144,13 @@ static void ntfs_end_buffer_async_read(s
|
||||
recs = PAGE_CACHE_SIZE / rec_size;
|
||||
/* Should have been verified before we got here... */
|
||||
BUG_ON(!recs);
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
kaddr = kmap_atomic(page);
|
||||
for (i = 0; i < recs; i++)
|
||||
post_read_mst_fixup((NTFS_RECORD*)(kaddr +
|
||||
i * rec_size), rec_size);
|
||||
kunmap_atomic(kaddr);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
flush_dcache_page(page);
|
||||
if (likely(page_uptodate && !PageError(page)))
|
||||
SetPageUptodate(page);
|
|
@ -0,0 +1,162 @@
|
|||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Fri, 18 Mar 2011 09:18:52 +0100
|
||||
Subject: buffer_head: Replace bh_uptodate_lock for -rt
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Wrap the bit_spin_lock calls into a separate inline and add the RT
|
||||
replacements with a real spinlock.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
fs/buffer.c | 21 +++++++--------------
|
||||
fs/ntfs/aops.c | 10 +++-------
|
||||
include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++
|
||||
3 files changed, 44 insertions(+), 21 deletions(-)
|
||||
|
||||
--- a/fs/buffer.c
|
||||
+++ b/fs/buffer.c
|
||||
@@ -322,8 +322,7 @@ static void end_buffer_async_read(struct
|
||||
* decide that the page is now completely done.
|
||||
*/
|
||||
first = page_buffers(page);
|
||||
- local_irq_save(flags);
|
||||
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
|
||||
+ flags = bh_uptodate_lock_irqsave(first);
|
||||
clear_buffer_async_read(bh);
|
||||
unlock_buffer(bh);
|
||||
tmp = bh;
|
||||
@@ -336,8 +335,7 @@ static void end_buffer_async_read(struct
|
||||
}
|
||||
tmp = tmp->b_this_page;
|
||||
} while (tmp != bh);
|
||||
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
|
||||
- local_irq_restore(flags);
|
||||
+ bh_uptodate_unlock_irqrestore(first, flags);
|
||||
|
||||
/*
|
||||
* If none of the buffers had errors and they are all
|
||||
@@ -349,9 +347,7 @@ static void end_buffer_async_read(struct
|
||||
return;
|
||||
|
||||
still_busy:
|
||||
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
|
||||
- local_irq_restore(flags);
|
||||
- return;
|
||||
+ bh_uptodate_unlock_irqrestore(first, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -385,8 +381,7 @@ void end_buffer_async_write(struct buffe
|
||||
}
|
||||
|
||||
first = page_buffers(page);
|
||||
- local_irq_save(flags);
|
||||
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
|
||||
+ flags = bh_uptodate_lock_irqsave(first);
|
||||
|
||||
clear_buffer_async_write(bh);
|
||||
unlock_buffer(bh);
|
||||
@@ -398,15 +393,12 @@ void end_buffer_async_write(struct buffe
|
||||
}
|
||||
tmp = tmp->b_this_page;
|
||||
}
|
||||
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
|
||||
- local_irq_restore(flags);
|
||||
+ bh_uptodate_unlock_irqrestore(first, flags);
|
||||
end_page_writeback(page);
|
||||
return;
|
||||
|
||||
still_busy:
|
||||
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
|
||||
- local_irq_restore(flags);
|
||||
- return;
|
||||
+ bh_uptodate_unlock_irqrestore(first, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(end_buffer_async_write);
|
||||
|
||||
@@ -3334,6 +3326,7 @@ struct buffer_head *alloc_buffer_head(gf
|
||||
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
|
||||
if (ret) {
|
||||
INIT_LIST_HEAD(&ret->b_assoc_buffers);
|
||||
+ buffer_head_init_locks(ret);
|
||||
preempt_disable();
|
||||
__this_cpu_inc(bh_accounting.nr);
|
||||
recalc_bh_state();
|
||||
--- a/fs/ntfs/aops.c
|
||||
+++ b/fs/ntfs/aops.c
|
||||
@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(s
|
||||
"0x%llx.", (unsigned long long)bh->b_blocknr);
|
||||
}
|
||||
first = page_buffers(page);
|
||||
- local_irq_save(flags);
|
||||
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
|
||||
+ flags = bh_uptodate_lock_irqsave(first);
|
||||
clear_buffer_async_read(bh);
|
||||
unlock_buffer(bh);
|
||||
tmp = bh;
|
||||
@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(s
|
||||
}
|
||||
tmp = tmp->b_this_page;
|
||||
} while (tmp != bh);
|
||||
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
|
||||
- local_irq_restore(flags);
|
||||
+ bh_uptodate_unlock_irqrestore(first, flags);
|
||||
/*
|
||||
* If none of the buffers had errors then we can set the page uptodate,
|
||||
* but we first have to perform the post read mst fixups, if the
|
||||
@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(s
|
||||
unlock_page(page);
|
||||
return;
|
||||
still_busy:
|
||||
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
|
||||
- local_irq_restore(flags);
|
||||
- return;
|
||||
+ bh_uptodate_unlock_irqrestore(first, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
--- a/include/linux/buffer_head.h
|
||||
+++ b/include/linux/buffer_head.h
|
||||
@@ -75,8 +75,42 @@ struct buffer_head {
|
||||
struct address_space *b_assoc_map; /* mapping this buffer is
|
||||
associated with */
|
||||
atomic_t b_count; /* users using this buffer_head */
|
||||
+#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
+ spinlock_t b_uptodate_lock;
|
||||
+#endif
|
||||
};
|
||||
|
||||
+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
|
||||
+{
|
||||
+ unsigned long flags;
|
||||
+
|
||||
+#ifndef CONFIG_PREEMPT_RT_BASE
|
||||
+ local_irq_save(flags);
|
||||
+ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
|
||||
+#else
|
||||
+ spin_lock_irqsave(&bh->b_uptodate_lock, flags);
|
||||
+#endif
|
||||
+ return flags;
|
||||
+}
|
||||
+
|
||||
+static inline void
|
||||
+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
|
||||
+{
|
||||
+#ifndef CONFIG_PREEMPT_RT_BASE
|
||||
+ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
|
||||
+ local_irq_restore(flags);
|
||||
+#else
|
||||
+ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
|
||||
+#endif
|
||||
+}
|
||||
+
|
||||
+static inline void buffer_head_init_locks(struct buffer_head *bh)
|
||||
+{
|
||||
+#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
+ spin_lock_init(&bh->b_uptodate_lock);
|
||||
+#endif
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
|
||||
* and buffer_foo() functions.
|
|
@ -0,0 +1,82 @@
|
|||
Subject: ftrace-migrate-disable-tracing.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Sun, 17 Jul 2011 21:56:42 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
include/linux/ftrace_event.h | 2 ++
|
||||
kernel/trace/trace.c | 11 +++++++----
|
||||
kernel/trace/trace_events.c | 1 +
|
||||
kernel/trace/trace_output.c | 5 +++++
|
||||
4 files changed, 15 insertions(+), 4 deletions(-)
|
||||
|
||||
--- a/include/linux/ftrace_event.h
|
||||
+++ b/include/linux/ftrace_event.h
|
||||
@@ -56,6 +56,8 @@ struct trace_entry {
|
||||
unsigned char flags;
|
||||
unsigned char preempt_count;
|
||||
int pid;
|
||||
+ unsigned short migrate_disable;
|
||||
+ unsigned short padding;
|
||||
};
|
||||
|
||||
#define FTRACE_MAX_EVENT \
|
||||
--- a/kernel/trace/trace.c
|
||||
+++ b/kernel/trace/trace.c
|
||||
@@ -439,7 +439,7 @@ int __trace_puts(unsigned long ip, const
|
||||
|
||||
local_save_flags(irq_flags);
|
||||
buffer = global_trace.trace_buffer.buffer;
|
||||
- event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
|
||||
+ event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
|
||||
irq_flags, preempt_count());
|
||||
if (!event)
|
||||
return 0;
|
||||
@@ -1510,6 +1510,8 @@ tracing_generic_entry_update(struct trac
|
||||
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
|
||||
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
|
||||
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
|
||||
+
|
||||
+ entry->migrate_disable = (tsk) ? tsk->migrate_disable & 0xFF : 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
|
||||
|
||||
@@ -2404,9 +2406,10 @@ static void print_lat_help_header(struct
|
||||
seq_puts(m, "# | / _----=> need-resched \n");
|
||||
seq_puts(m, "# || / _---=> hardirq/softirq \n");
|
||||
seq_puts(m, "# ||| / _--=> preempt-depth \n");
|
||||
- seq_puts(m, "# |||| / delay \n");
|
||||
- seq_puts(m, "# cmd pid ||||| time | caller \n");
|
||||
- seq_puts(m, "# \\ / ||||| \\ | / \n");
|
||||
+ seq_puts(m, "# |||| / _--=> migrate-disable\n");
|
||||
+ seq_puts(m, "# ||||| / delay \n");
|
||||
+ seq_puts(m, "# cmd pid |||||| time | caller \n");
|
||||
+ seq_puts(m, "# \\ / ||||| \\ | / \n");
|
||||
}
|
||||
|
||||
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
|
||||
--- a/kernel/trace/trace_events.c
|
||||
+++ b/kernel/trace/trace_events.c
|
||||
@@ -166,6 +166,7 @@ static int trace_define_common_fields(vo
|
||||
__common_field(unsigned char, flags);
|
||||
__common_field(unsigned char, preempt_count);
|
||||
__common_field(int, pid);
|
||||
+ __common_field(unsigned short, migrate_disable);
|
||||
|
||||
return ret;
|
||||
}
|
||||
--- a/kernel/trace/trace_output.c
|
||||
+++ b/kernel/trace/trace_output.c
|
||||
@@ -635,6 +635,11 @@ int trace_print_lat_fmt(struct trace_seq
|
||||
else
|
||||
ret = trace_seq_putc(s, '.');
|
||||
|
||||
+ if (entry->migrate_disable)
|
||||
+ ret = trace_seq_printf(s, "%x", entry->migrate_disable);
|
||||
+ else
|
||||
+ ret = trace_seq_putc(s, '.');
|
||||
+
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -0,0 +1,115 @@
|
|||
From: Steven Rostedt <rostedt@goodmis.org>
|
||||
Subject: futex: Fix bug on when a requeued RT task times out
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Requeue with timeout causes a bug with PREEMPT_RT_FULL.
|
||||
|
||||
The bug comes from a timed out condition.
|
||||
|
||||
|
||||
TASK 1 TASK 2
|
||||
------ ------
|
||||
futex_wait_requeue_pi()
|
||||
futex_wait_queue_me()
|
||||
<timed out>
|
||||
|
||||
double_lock_hb();
|
||||
|
||||
raw_spin_lock(pi_lock);
|
||||
if (current->pi_blocked_on) {
|
||||
} else {
|
||||
current->pi_blocked_on = PI_WAKE_INPROGRESS;
|
||||
run_spin_unlock(pi_lock);
|
||||
spin_lock(hb->lock); <-- blocked!
|
||||
|
||||
|
||||
plist_for_each_entry_safe(this) {
|
||||
rt_mutex_start_proxy_lock();
|
||||
task_blocks_on_rt_mutex();
|
||||
BUG_ON(task->pi_blocked_on)!!!!
|
||||
|
||||
The BUG_ON() actually has a check for PI_WAKE_INPROGRESS, but the
|
||||
problem is that, after TASK 1 sets PI_WAKE_INPROGRESS, it then tries to
|
||||
grab the hb->lock, which it fails to do so. As the hb->lock is a mutex,
|
||||
it will block and set the "pi_blocked_on" to the hb->lock.
|
||||
|
||||
When TASK 2 goes to requeue it, the check for PI_WAKE_INPROGESS fails
|
||||
because the task1's pi_blocked_on is no longer set to that, but instead,
|
||||
set to the hb->lock.
|
||||
|
||||
The fix:
|
||||
|
||||
When calling rt_mutex_start_proxy_lock() a check is made to see
|
||||
if the proxy tasks pi_blocked_on is set. If so, exit out early.
|
||||
Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies
|
||||
the proxy task that it is being requeued, and will handle things
|
||||
appropriately.
|
||||
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
|
||||
---
|
||||
kernel/rtmutex.c | 32 +++++++++++++++++++++++++++++++-
|
||||
kernel/rtmutex_common.h | 1 +
|
||||
2 files changed, 32 insertions(+), 1 deletion(-)
|
||||
|
||||
--- a/kernel/rtmutex.c
|
||||
+++ b/kernel/rtmutex.c
|
||||
@@ -70,7 +70,8 @@ static void fixup_rt_mutex_waiters(struc
|
||||
|
||||
static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
|
||||
{
|
||||
- return waiter && waiter != PI_WAKEUP_INPROGRESS;
|
||||
+ return waiter && waiter != PI_WAKEUP_INPROGRESS &&
|
||||
+ waiter != PI_REQUEUE_INPROGRESS;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1007,6 +1008,35 @@ int rt_mutex_start_proxy_lock(struct rt_
|
||||
return 1;
|
||||
}
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+ /*
|
||||
+ * In PREEMPT_RT there's an added race.
|
||||
+ * If the task, that we are about to requeue, times out,
|
||||
+ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
|
||||
+ * to skip this task. But right after the task sets
|
||||
+ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
|
||||
+ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
|
||||
+ * This will replace the PI_WAKEUP_INPROGRESS with the actual
|
||||
+ * lock that it blocks on. We *must not* place this task
|
||||
+ * on this proxy lock in that case.
|
||||
+ *
|
||||
+ * To prevent this race, we first take the task's pi_lock
|
||||
+ * and check if it has updated its pi_blocked_on. If it has,
|
||||
+ * we assume that it woke up and we return -EAGAIN.
|
||||
+ * Otherwise, we set the task's pi_blocked_on to
|
||||
+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
|
||||
+ * it will know that we are in the process of requeuing it.
|
||||
+ */
|
||||
+ raw_spin_lock_irq(&task->pi_lock);
|
||||
+ if (task->pi_blocked_on) {
|
||||
+ raw_spin_unlock_irq(&task->pi_lock);
|
||||
+ raw_spin_unlock(&lock->wait_lock);
|
||||
+ return -EAGAIN;
|
||||
+ }
|
||||
+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
|
||||
+ raw_spin_unlock_irq(&task->pi_lock);
|
||||
+#endif
|
||||
+
|
||||
ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
|
||||
|
||||
if (ret && !rt_mutex_owner(lock)) {
|
||||
--- a/kernel/rtmutex_common.h
|
||||
+++ b/kernel/rtmutex_common.h
|
||||
@@ -104,6 +104,7 @@ static inline struct task_struct *rt_mut
|
||||
* PI-futex support (proxy locking functions, etc.):
|
||||
*/
|
||||
#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
|
||||
+#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
|
||||
|
||||
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
|
||||
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
|
39
debian/patches/features/all/rt/genirq-Set-the-irq-thread-policy-without-checking-CA.patch
vendored
Normal file
|
@ -0,0 +1,39 @@
|
|||
From 7f095a71d6bc49d7c33ed33ebc26daf4867ee4c8 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Pfaff <tpfaff@pcs.com>
|
||||
Date: Fri, 11 Oct 2013 12:42:49 +0200
|
||||
Subject: [PATCH] genirq: Set the irq thread policy without checking
|
||||
CAP_SYS_NICE
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
In commit ee23871389 ("genirq: Set irq thread to RT priority on
|
||||
creation") we moved the assigment of the thread's priority from the
|
||||
thread's function into __setup_irq(). That function may run in user
|
||||
context for instance if the user opens an UART node and then driver
|
||||
calls requests in the ->open() callback. That user may not have
|
||||
CAP_SYS_NICE and so the irq thread won't run with the SCHED_OTHER
|
||||
policy.
|
||||
|
||||
This patch uses sched_setscheduler_nocheck() so we omit the CAP_SYS_NICE
|
||||
check which is otherwise required for the SCHED_OTHER policy.
|
||||
|
||||
Cc: Ivo Sieben <meltedpianoman@gmail.com>
|
||||
Cc: stable@vger.kernel.org
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
Signed-off-by: Thomas Pfaff <tpfaff@pcs.com>
|
||||
[bigeasy: rewrite the changelog]
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
kernel/irq/manage.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
--- a/kernel/irq/manage.c
|
||||
+++ b/kernel/irq/manage.c
|
||||
@@ -956,7 +956,7 @@ static int
|
||||
goto out_mput;
|
||||
}
|
||||
|
||||
- sched_setscheduler(t, SCHED_FIFO, &param);
|
||||
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
|
||||
|
||||
/*
|
||||
* We keep the reference to the task struct even if
|
|
@ -0,0 +1,40 @@
|
|||
From: Ingo Molnar <mingo@elte.hu>
|
||||
Date: Fri, 3 Jul 2009 08:29:57 -0500
|
||||
Subject: genirq: disable irqpoll on -rt
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Creates long latencies for no value
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
kernel/irq/spurious.c | 10 ++++++++++
|
||||
1 file changed, 10 insertions(+)
|
||||
|
||||
--- a/kernel/irq/spurious.c
|
||||
+++ b/kernel/irq/spurious.c
|
||||
@@ -340,6 +340,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
|
||||
|
||||
static int __init irqfixup_setup(char *str)
|
||||
{
|
||||
+#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
+ printk(KERN_WARNING "irqfixup boot option not supported "
|
||||
+ "w/ CONFIG_PREEMPT_RT_BASE\n");
|
||||
+ return 1;
|
||||
+#endif
|
||||
irqfixup = 1;
|
||||
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
|
||||
printk(KERN_WARNING "This may impact system performance.\n");
|
||||
@@ -352,6 +357,11 @@ module_param(irqfixup, int, 0644);
|
||||
|
||||
static int __init irqpoll_setup(char *str)
|
||||
{
|
||||
+#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
+ printk(KERN_WARNING "irqpoll boot option not supported "
|
||||
+ "w/ CONFIG_PREEMPT_RT_BASE\n");
|
||||
+ return 1;
|
||||
+#endif
|
||||
irqfixup = 2;
|
||||
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
|
||||
"enabled\n");
|
146
debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
vendored
Normal file
|
@ -0,0 +1,146 @@
|
|||
From 76666dbbdd40e963e7df84c123fc9aea4a2bcc69 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Wed, 21 Aug 2013 17:48:46 +0200
|
||||
Subject: [PATCH] genirq: do not invoke the affinity callback via a workqueue
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Joe Korty reported, that __irq_set_affinity_locked() schedules a
|
||||
workqueue while holding a rawlock which results in a might_sleep()
|
||||
warning.
|
||||
This patch moves the invokation into a process context so that we only
|
||||
wakeup() a process while holding the lock.
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
include/linux/interrupt.h | 1
|
||||
kernel/irq/manage.c | 79 ++++++++++++++++++++++++++++++++++++++++++++--
|
||||
2 files changed, 77 insertions(+), 3 deletions(-)
|
||||
|
||||
--- a/include/linux/interrupt.h
|
||||
+++ b/include/linux/interrupt.h
|
||||
@@ -220,6 +220,7 @@ struct irq_affinity_notify {
|
||||
unsigned int irq;
|
||||
struct kref kref;
|
||||
struct work_struct work;
|
||||
+ struct list_head list;
|
||||
void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
|
||||
void (*release)(struct kref *ref);
|
||||
};
|
||||
--- a/kernel/irq/manage.c
|
||||
+++ b/kernel/irq/manage.c
|
||||
@@ -164,6 +164,62 @@ int irq_do_set_affinity(struct irq_data
|
||||
return ret;
|
||||
}
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+static void _irq_affinity_notify(struct irq_affinity_notify *notify);
|
||||
+static struct task_struct *set_affinity_helper;
|
||||
+static LIST_HEAD(affinity_list);
|
||||
+static DEFINE_RAW_SPINLOCK(affinity_list_lock);
|
||||
+
|
||||
+static int set_affinity_thread(void *unused)
|
||||
+{
|
||||
+ while (1) {
|
||||
+ struct irq_affinity_notify *notify;
|
||||
+ int empty;
|
||||
+
|
||||
+ set_current_state(TASK_INTERRUPTIBLE);
|
||||
+
|
||||
+ raw_spin_lock_irq(&affinity_list_lock);
|
||||
+ empty = list_empty(&affinity_list);
|
||||
+ raw_spin_unlock_irq(&affinity_list_lock);
|
||||
+
|
||||
+ if (empty)
|
||||
+ schedule();
|
||||
+ if (kthread_should_stop())
|
||||
+ break;
|
||||
+ set_current_state(TASK_RUNNING);
|
||||
+try_next:
|
||||
+ notify = NULL;
|
||||
+
|
||||
+ raw_spin_lock_irq(&affinity_list_lock);
|
||||
+ if (!list_empty(&affinity_list)) {
|
||||
+ notify = list_first_entry(&affinity_list,
|
||||
+ struct irq_affinity_notify, list);
|
||||
+ list_del_init(&notify->list);
|
||||
+ }
|
||||
+ raw_spin_unlock_irq(&affinity_list_lock);
|
||||
+
|
||||
+ if (!notify)
|
||||
+ continue;
|
||||
+ _irq_affinity_notify(notify);
|
||||
+ goto try_next;
|
||||
+ }
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+static void init_helper_thread(void)
|
||||
+{
|
||||
+ if (set_affinity_helper)
|
||||
+ return;
|
||||
+ set_affinity_helper = kthread_run(set_affinity_thread, NULL,
|
||||
+ "affinity-cb");
|
||||
+ WARN_ON(IS_ERR(set_affinity_helper));
|
||||
+}
|
||||
+#else
|
||||
+
|
||||
+static inline void init_helper_thread(void) { }
|
||||
+
|
||||
+#endif
|
||||
+
|
||||
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
|
||||
{
|
||||
struct irq_chip *chip = irq_data_get_irq_chip(data);
|
||||
@@ -182,7 +238,17 @@ int __irq_set_affinity_locked(struct irq
|
||||
|
||||
if (desc->affinity_notify) {
|
||||
kref_get(&desc->affinity_notify->kref);
|
||||
+
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+ raw_spin_lock(&affinity_list_lock);
|
||||
+ if (list_empty(&desc->affinity_notify->list))
|
||||
+ list_add_tail(&affinity_list,
|
||||
+ &desc->affinity_notify->list);
|
||||
+ raw_spin_unlock(&affinity_list_lock);
|
||||
+ wake_up_process(set_affinity_helper);
|
||||
+#else
|
||||
schedule_work(&desc->affinity_notify->work);
|
||||
+#endif
|
||||
}
|
||||
irqd_set(data, IRQD_AFFINITY_SET);
|
||||
|
||||
@@ -223,10 +289,8 @@ int irq_set_affinity_hint(unsigned int i
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
|
||||
|
||||
-static void irq_affinity_notify(struct work_struct *work)
|
||||
+static void _irq_affinity_notify(struct irq_affinity_notify *notify)
|
||||
{
|
||||
- struct irq_affinity_notify *notify =
|
||||
- container_of(work, struct irq_affinity_notify, work);
|
||||
struct irq_desc *desc = irq_to_desc(notify->irq);
|
||||
cpumask_var_t cpumask;
|
||||
unsigned long flags;
|
||||
@@ -248,6 +312,13 @@ static void irq_affinity_notify(struct w
|
||||
kref_put(&notify->kref, notify->release);
|
||||
}
|
||||
|
||||
+static void irq_affinity_notify(struct work_struct *work)
|
||||
+{
|
||||
+ struct irq_affinity_notify *notify =
|
||||
+ container_of(work, struct irq_affinity_notify, work);
|
||||
+ _irq_affinity_notify(notify);
|
||||
+}
|
||||
+
|
||||
/**
|
||||
* irq_set_affinity_notifier - control notification of IRQ affinity changes
|
||||
* @irq: Interrupt for which to enable/disable notification
|
||||
@@ -277,6 +348,8 @@ irq_set_affinity_notifier(unsigned int i
|
||||
notify->irq = irq;
|
||||
kref_init(&notify->kref);
|
||||
INIT_WORK(&notify->work, irq_affinity_notify);
|
||||
+ INIT_LIST_HEAD(&notify->list);
|
||||
+ init_helper_thread();
|
||||
}
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
|
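For context on what the "affinity-cb" kthread above ends up running: a driver opts into these notifications through the existing irq_set_affinity_notifier() interface. A rough driver-side sketch (all names and callback bodies hypothetical, not part of the patch):

/* Hypothetical notifier whose ->notify() callback the patch defers to
 * the "affinity-cb" kthread on PREEMPT_RT_FULL instead of a workqueue. */
static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	/* process context: may allocate memory or take sleeping locks */
}

static void my_affinity_release(struct kref *ref)
{
	/* last reference dropped: free any per-irq state here */
}

static struct irq_affinity_notify my_notify = {
	.notify  = my_affinity_notify,
	.release = my_affinity_release,
};

static int my_setup(unsigned int irq)
{
	return irq_set_affinity_notifier(irq, &my_notify);
}

The .irq, .kref, .work and the new .list members are filled in by irq_set_affinity_notifier() itself, as the last hunk above shows.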
@ -0,0 +1,47 @@
|
|||
Subject: genirq-force-threading.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Sun, 03 Apr 2011 11:57:29 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
include/linux/interrupt.h | 8 ++++++--
|
||||
kernel/irq/manage.c | 2 ++
|
||||
2 files changed, 8 insertions(+), 2 deletions(-)
|
||||
|
||||
--- a/include/linux/interrupt.h
|
||||
+++ b/include/linux/interrupt.h
|
||||
@@ -314,9 +314,13 @@ static inline int disable_irq_wake(unsig
|
||||
|
||||
|
||||
#ifdef CONFIG_IRQ_FORCED_THREADING
|
||||
-extern bool force_irqthreads;
|
||||
+# ifndef CONFIG_PREEMPT_RT_BASE
|
||||
+ extern bool force_irqthreads;
|
||||
+# else
|
||||
+# define force_irqthreads (true)
|
||||
+# endif
|
||||
#else
|
||||
-#define force_irqthreads (0)
|
||||
+#define force_irqthreads (false)
|
||||
#endif
|
||||
|
||||
#ifndef __ARCH_SET_SOFTIRQ_PENDING
|
||||
--- a/kernel/irq/manage.c
|
||||
+++ b/kernel/irq/manage.c
|
||||
@@ -22,6 +22,7 @@
|
||||
#include "internals.h"
|
||||
|
||||
#ifdef CONFIG_IRQ_FORCED_THREADING
|
||||
+# ifndef CONFIG_PREEMPT_RT_BASE
|
||||
__read_mostly bool force_irqthreads;
|
||||
|
||||
static int __init setup_forced_irqthreads(char *arg)
|
||||
@@ -30,6 +31,7 @@ static int __init setup_forced_irqthread
|
||||
return 0;
|
||||
}
|
||||
early_param("threadirqs", setup_forced_irqthreads);
|
||||
+# endif
|
||||
#endif
|
||||
|
||||
/**
|
|
@ -0,0 +1,21 @@
|
|||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Sun, 18 Mar 2011 10:22:04 +0100
|
||||
Subject: genirq: Disable DEBUG_SHIRQ for rt
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
lib/Kconfig.debug | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
--- a/lib/Kconfig.debug
|
||||
+++ b/lib/Kconfig.debug
|
||||
@@ -597,7 +597,7 @@ endmenu # "Memory Debugging"
|
||||
|
||||
config DEBUG_SHIRQ
|
||||
bool "Debug shared IRQ handlers"
|
||||
- depends on DEBUG_KERNEL
|
||||
+ depends on DEBUG_KERNEL && !PREEMPT_RT_BASE
|
||||
help
|
||||
Enable this to generate a spurious interrupt as soon as a shared
|
||||
interrupt handler is registered, and just before one is deregistered.
|
60
debian/patches/features/all/rt/hotplug-call-cpu_unplug_begin-a-little-early.patch
vendored
Normal file
|
@ -0,0 +1,60 @@
|
|||
Subject: hotplug: Call cpu_unplug_begin() before DOWN_PREPARE
|
||||
From: Yong Zhang <yong.zhang0@gmail.com>
|
||||
Date: Sun, 16 Oct 2011 18:56:44 +0800
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
cpu_unplug_begin() should be called before CPU_DOWN_PREPARE, because
|
||||
at CPU_DOWN_PREPARE cpu_active is cleared and sched_domain is
|
||||
rebuilt. Otherwise the 'sync_unplug' thread will be running on the cpu
|
||||
on which it's created and not bound on the cpu which is about to go
|
||||
down.
|
||||
|
||||
I found that by an incorrect warning on smp_processor_id() called by
|
||||
sync_unplug/1, and trace shows below:
|
||||
(echo 1 > /sys/device/system/cpu/cpu1/online)
|
||||
bash-1664 [000] 83.136620: _cpu_down: Bind sync_unplug to cpu 1
|
||||
bash-1664 [000] 83.136623: sched_wait_task: comm=sync_unplug/1 pid=1724 prio=120
|
||||
bash-1664 [000] 83.136624: _cpu_down: Wake sync_unplug
|
||||
bash-1664 [000] 83.136629: sched_wakeup: comm=sync_unplug/1 pid=1724 prio=120 success=1 target_cpu=000
|
||||
|
||||
Wants to be folded back....
|
||||
|
||||
Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
|
||||
Link: http://lkml.kernel.org/r/1318762607-2261-3-git-send-email-yong.zhang0@gmail.com
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
kernel/cpu.c | 16 +++++++---------
|
||||
1 file changed, 7 insertions(+), 9 deletions(-)
|
||||
|
||||
--- a/kernel/cpu.c
|
||||
+++ b/kernel/cpu.c
|
||||
@@ -408,22 +408,20 @@ static int __ref _cpu_down(unsigned int
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
- err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
|
||||
+ cpu_hotplug_begin();
|
||||
+ err = cpu_unplug_begin(cpu);
|
||||
if (err) {
|
||||
- nr_calls--;
|
||||
- __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
|
||||
- printk("%s: attempt to take down CPU %u failed\n",
|
||||
- __func__, cpu);
|
||||
+ printk("cpu_unplug_begin(%d) failed\n", cpu);
|
||||
goto out_cancel;
|
||||
}
|
||||
|
||||
- cpu_hotplug_begin();
|
||||
- err = cpu_unplug_begin(cpu);
|
||||
+ err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
|
||||
if (err) {
|
||||
nr_calls--;
|
||||
__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
|
||||
- printk("cpu_unplug_begin(%d) failed\n", cpu);
|
||||
- goto out_cancel;
|
||||
+ printk("%s: attempt to take down CPU %u failed\n",
|
||||
+ __func__, cpu);
|
||||
+ goto out_release;
|
||||
}
|
||||
smpboot_park_threads(cpu);
|
||||
|
|
@ -0,0 +1,208 @@
|
|||
Subject: hotplug: Lightweight get online cpus
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Wed, 15 Jun 2011 12:36:06 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
get_online_cpus() is a heavy weight function which involves a global
|
||||
mutex. migrate_disable() wants a simpler construct which prevents only
|
||||
a CPU from going doing while a task is in a migrate disabled section.
|
||||
|
||||
Implement a per cpu lockless mechanism, which serializes only in the
|
||||
real unplug case on a global mutex. That serialization affects only
|
||||
tasks on the cpu which should be brought down.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
include/linux/cpu.h | 4 +
|
||||
kernel/cpu.c | 126 ++++++++++++++++++++++++++++++++++++++++++++++++++--
|
||||
2 files changed, 127 insertions(+), 3 deletions(-)
|
||||
|
||||
--- a/include/linux/cpu.h
|
||||
+++ b/include/linux/cpu.h
|
||||
@@ -179,6 +179,8 @@ extern void get_online_cpus(void);
|
||||
extern void put_online_cpus(void);
|
||||
extern void cpu_hotplug_disable(void);
|
||||
extern void cpu_hotplug_enable(void);
|
||||
+extern void pin_current_cpu(void);
|
||||
+extern void unpin_current_cpu(void);
|
||||
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
|
||||
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
|
||||
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
|
||||
@@ -206,6 +208,8 @@ static inline void cpu_hotplug_done(void
|
||||
#define put_online_cpus() do { } while (0)
|
||||
#define cpu_hotplug_disable() do { } while (0)
|
||||
#define cpu_hotplug_enable() do { } while (0)
|
||||
+static inline void pin_current_cpu(void) { }
|
||||
+static inline void unpin_current_cpu(void) { }
|
||||
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
|
||||
/* These aren't inline functions due to a GCC bug. */
|
||||
#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
|
||||
--- a/kernel/cpu.c
|
||||
+++ b/kernel/cpu.c
|
||||
@@ -63,6 +63,101 @@ static struct {
|
||||
.refcount = 0,
|
||||
};
|
||||
|
||||
+struct hotplug_pcp {
|
||||
+ struct task_struct *unplug;
|
||||
+ int refcount;
|
||||
+ struct completion synced;
|
||||
+};
|
||||
+
|
||||
+static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
|
||||
+
|
||||
+/**
|
||||
+ * pin_current_cpu - Prevent the current cpu from being unplugged
|
||||
+ *
|
||||
+ * Lightweight version of get_online_cpus() to prevent cpu from being
|
||||
+ * unplugged when code runs in a migration disabled region.
|
||||
+ *
|
||||
+ * Must be called with preemption disabled (preempt_count = 1)!
|
||||
+ */
|
||||
+void pin_current_cpu(void)
|
||||
+{
|
||||
+ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
|
||||
+
|
||||
+retry:
|
||||
+ if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
|
||||
+ hp->unplug == current) {
|
||||
+ hp->refcount++;
|
||||
+ return;
|
||||
+ }
|
||||
+ preempt_enable();
|
||||
+ mutex_lock(&cpu_hotplug.lock);
|
||||
+ mutex_unlock(&cpu_hotplug.lock);
|
||||
+ preempt_disable();
|
||||
+ goto retry;
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * unpin_current_cpu - Allow unplug of current cpu
|
||||
+ *
|
||||
+ * Must be called with preemption or interrupts disabled!
|
||||
+ */
|
||||
+void unpin_current_cpu(void)
|
||||
+{
|
||||
+ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
|
||||
+
|
||||
+ WARN_ON(hp->refcount <= 0);
|
||||
+
|
||||
+ /* This is safe. sync_unplug_thread is pinned to this cpu */
|
||||
+ if (!--hp->refcount && hp->unplug && hp->unplug != current)
|
||||
+ wake_up_process(hp->unplug);
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * FIXME: Is this really correct under all circumstances ?
|
||||
+ */
|
||||
+static int sync_unplug_thread(void *data)
|
||||
+{
|
||||
+ struct hotplug_pcp *hp = data;
|
||||
+
|
||||
+ preempt_disable();
|
||||
+ hp->unplug = current;
|
||||
+ set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
+ while (hp->refcount) {
|
||||
+ schedule_preempt_disabled();
|
||||
+ set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
+ }
|
||||
+ set_current_state(TASK_RUNNING);
|
||||
+ preempt_enable();
|
||||
+ complete(&hp->synced);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Start the sync_unplug_thread on the target cpu and wait for it to
|
||||
+ * complete.
|
||||
+ */
|
||||
+static int cpu_unplug_begin(unsigned int cpu)
|
||||
+{
|
||||
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
|
||||
+ struct task_struct *tsk;
|
||||
+
|
||||
+ init_completion(&hp->synced);
|
||||
+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
|
||||
+ if (IS_ERR(tsk))
|
||||
+ return (PTR_ERR(tsk));
|
||||
+ kthread_bind(tsk, cpu);
|
||||
+ wake_up_process(tsk);
|
||||
+ wait_for_completion(&hp->synced);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+static void cpu_unplug_done(unsigned int cpu)
|
||||
+{
|
||||
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
|
||||
+
|
||||
+ hp->unplug = NULL;
|
||||
+}
|
||||
+
|
||||
void get_online_cpus(void)
|
||||
{
|
||||
might_sleep();
|
||||
@@ -282,13 +377,14 @@ static int __ref take_cpu_down(void *_pa
|
||||
/* Requires cpu_add_remove_lock to be held */
|
||||
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
|
||||
{
|
||||
- int err, nr_calls = 0;
|
||||
+ int mycpu, err, nr_calls = 0;
|
||||
void *hcpu = (void *)(long)cpu;
|
||||
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
|
||||
struct take_cpu_down_param tcd_param = {
|
||||
.mod = mod,
|
||||
.hcpu = hcpu,
|
||||
};
|
||||
+ cpumask_var_t cpumask;
|
||||
|
||||
if (num_online_cpus() == 1)
|
||||
return -EBUSY;
|
||||
@@ -296,7 +392,20 @@ static int __ref _cpu_down(unsigned int
|
||||
if (!cpu_online(cpu))
|
||||
return -EINVAL;
|
||||
|
||||
- cpu_hotplug_begin();
|
||||
+ /* Move the downtaker off the unplug cpu */
|
||||
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
|
||||
+ return -ENOMEM;
|
||||
+ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
|
||||
+ set_cpus_allowed_ptr(current, cpumask);
|
||||
+ free_cpumask_var(cpumask);
|
||||
+ preempt_disable();
|
||||
+ mycpu = smp_processor_id();
|
||||
+ if (mycpu == cpu) {
|
||||
+ printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
|
||||
+ preempt_enable();
|
||||
+ return -EBUSY;
|
||||
+ }
|
||||
+ preempt_enable();
|
||||
|
||||
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
|
||||
if (err) {
|
||||
@@ -304,7 +413,16 @@ static int __ref _cpu_down(unsigned int
|
||||
__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
|
||||
printk("%s: attempt to take down CPU %u failed\n",
|
||||
__func__, cpu);
|
||||
- goto out_release;
|
||||
+ goto out_cancel;
|
||||
+ }
|
||||
+
|
||||
+ cpu_hotplug_begin();
|
||||
+ err = cpu_unplug_begin(cpu);
|
||||
+ if (err) {
|
||||
+ nr_calls--;
|
||||
+ __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
|
||||
+ printk("cpu_unplug_begin(%d) failed\n", cpu);
|
||||
+ goto out_cancel;
|
||||
}
|
||||
smpboot_park_threads(cpu);
|
||||
|
||||
@@ -336,6 +454,8 @@ static int __ref _cpu_down(unsigned int
|
||||
check_for_tasks(cpu);
|
||||
|
||||
out_release:
|
||||
+ cpu_unplug_done(cpu);
|
||||
+out_cancel:
|
||||
cpu_hotplug_done();
|
||||
if (!err)
|
||||
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
|
25
debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
vendored
Normal file
|
@ -0,0 +1,25 @@
|
|||
Subject: hotplug: sync_unplug: No "\n" in task name
|
||||
From: Yong Zhang <yong.zhang0@gmail.com>
|
||||
Date: Sun, 16 Oct 2011 18:56:43 +0800
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Otherwise the output will look a little odd.
|
||||
|
||||
Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
|
||||
Link: http://lkml.kernel.org/r/1318762607-2261-2-git-send-email-yong.zhang0@gmail.com
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
kernel/cpu.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
--- a/kernel/cpu.c
|
||||
+++ b/kernel/cpu.c
|
||||
@@ -142,7 +142,7 @@ static int cpu_unplug_begin(unsigned int
|
||||
struct task_struct *tsk;
|
||||
|
||||
init_completion(&hp->synced);
|
||||
- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
|
||||
+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
|
||||
if (IS_ERR(tsk))
|
||||
return (PTR_ERR(tsk));
|
||||
kthread_bind(tsk, cpu);
|
|
@ -0,0 +1,37 @@
|
|||
Subject: hotplug-use-migrate-disable.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Sun, 17 Jul 2011 19:35:29 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
kernel/cpu.c | 6 +++---
|
||||
1 file changed, 3 insertions(+), 3 deletions(-)
|
||||
|
||||
--- a/kernel/cpu.c
|
||||
+++ b/kernel/cpu.c
|
||||
@@ -400,14 +400,13 @@ static int __ref _cpu_down(unsigned int
|
||||
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
|
||||
set_cpus_allowed_ptr(current, cpumask);
|
||||
free_cpumask_var(cpumask);
|
||||
- preempt_disable();
|
||||
+ migrate_disable();
|
||||
mycpu = smp_processor_id();
|
||||
if (mycpu == cpu) {
|
||||
printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
|
||||
- preempt_enable();
|
||||
+ migrate_enable();
|
||||
return -EBUSY;
|
||||
}
|
||||
- preempt_enable();
|
||||
|
||||
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
|
||||
if (err) {
|
||||
@@ -458,6 +457,7 @@ static int __ref _cpu_down(unsigned int
|
||||
out_release:
|
||||
cpu_unplug_done(cpu);
|
||||
out_cancel:
|
||||
+ migrate_enable();
|
||||
cpu_hotplug_done();
|
||||
if (!err)
|
||||
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
|
119
debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
vendored
Normal file
|
@ -0,0 +1,119 @@
|
|||
From 180cdb93d796bf52c919f5e3df30af83aa6d46ca Mon Sep 17 00:00:00 2001
|
||||
From: Yang Shi <yang.shi@windriver.com>
|
||||
Date: Mon, 16 Sep 2013 14:09:19 -0700
|
||||
Subject: [PATCH] hrtimer: Move schedule_work call to helper thread
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
When run ltp leapsec_timer test, the following call trace is caught:
|
||||
|
||||
BUG: sleeping function called from invalid context at kernel/rtmutex.c:659
|
||||
in_atomic(): 1, irqs_disabled(): 1, pid: 0, name: swapper/1
|
||||
Preemption disabled at:[<ffffffff810857f3>] cpu_startup_entry+0x133/0x310
|
||||
|
||||
CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.10.10-rt3 #2
|
||||
Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010
|
||||
ffffffff81c2f800 ffff880076843e40 ffffffff8169918d ffff880076843e58
|
||||
ffffffff8106db31 ffff88007684b4a0 ffff880076843e70 ffffffff8169d9c0
|
||||
ffff88007684b4a0 ffff880076843eb0 ffffffff81059da1 0000001876851200
|
||||
Call Trace:
|
||||
<IRQ> [<ffffffff8169918d>] dump_stack+0x19/0x1b
|
||||
[<ffffffff8106db31>] __might_sleep+0xf1/0x170
|
||||
[<ffffffff8169d9c0>] rt_spin_lock+0x20/0x50
|
||||
[<ffffffff81059da1>] queue_work_on+0x61/0x100
|
||||
[<ffffffff81065aa1>] clock_was_set_delayed+0x21/0x30
|
||||
[<ffffffff810883be>] do_timer+0x40e/0x660
|
||||
[<ffffffff8108f487>] tick_do_update_jiffies64+0xf7/0x140
|
||||
[<ffffffff8108fe42>] tick_check_idle+0x92/0xc0
|
||||
[<ffffffff81044327>] irq_enter+0x57/0x70
|
||||
[<ffffffff816a040e>] smp_apic_timer_interrupt+0x3e/0x9b
|
||||
[<ffffffff8169f80a>] apic_timer_interrupt+0x6a/0x70
|
||||
<EOI> [<ffffffff8155ea1c>] ? cpuidle_enter_state+0x4c/0xc0
|
||||
[<ffffffff8155eb68>] cpuidle_idle_call+0xd8/0x2d0
|
||||
[<ffffffff8100b59e>] arch_cpu_idle+0xe/0x30
|
||||
[<ffffffff8108585e>] cpu_startup_entry+0x19e/0x310
|
||||
[<ffffffff8168efa2>] start_secondary+0x1ad/0x1b0
|
||||
|
||||
The clock_was_set_delayed is called in hard IRQ handler (timer interrupt), which
|
||||
calls schedule_work.
|
||||
|
||||
Under PREEMPT_RT_FULL, schedule_work calls spinlocks which could sleep, so it's
|
||||
not safe to call schedule_work in interrupt context.
|
||||
|
||||
Reference upstream commit b68d61c705ef02384c0538b8d9374545097899ca
|
||||
(rt,ntp: Move call to schedule_delayed_work() to helper thread)
|
||||
from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git, which
|
||||
makes a similar change.
|
||||
|
||||
add a helper thread which does the call to schedule_work and wake up that
|
||||
thread instead of calling schedule_work directly.
|
||||
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
Signed-off-by: Yang Shi <yang.shi@windriver.com>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
kernel/hrtimer.c | 40 ++++++++++++++++++++++++++++++++++++++++
|
||||
1 file changed, 40 insertions(+)
|
||||
|
||||
--- a/kernel/hrtimer.c
|
||||
+++ b/kernel/hrtimer.c
|
||||
@@ -47,6 +47,7 @@
|
||||
#include <linux/sched/sysctl.h>
|
||||
#include <linux/sched/rt.h>
|
||||
#include <linux/timer.h>
|
||||
+#include <linux/kthread.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
@@ -741,6 +742,44 @@ static void clock_was_set_work(struct wo
|
||||
|
||||
static DECLARE_WORK(hrtimer_work, clock_was_set_work);
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+/*
|
||||
+ * RT can not call schedule_work from real interrupt context.
|
||||
+ * Need to make a thread to do the real work.
|
||||
+ */
|
||||
+static struct task_struct *clock_set_delay_thread;
|
||||
+static bool do_clock_set_delay;
|
||||
+
|
||||
+static int run_clock_set_delay(void *ignore)
|
||||
+{
|
||||
+ while (!kthread_should_stop()) {
|
||||
+ set_current_state(TASK_INTERRUPTIBLE);
|
||||
+ if (do_clock_set_delay) {
|
||||
+ do_clock_set_delay = false;
|
||||
+ schedule_work(&hrtimer_work);
|
||||
+ }
|
||||
+ schedule();
|
||||
+ }
|
||||
+ __set_current_state(TASK_RUNNING);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+void clock_was_set_delayed(void)
|
||||
+{
|
||||
+ do_clock_set_delay = true;
|
||||
+ /* Make visible before waking up process */
|
||||
+ smp_wmb();
|
||||
+ wake_up_process(clock_set_delay_thread);
|
||||
+}
|
||||
+
|
||||
+static __init int create_clock_set_delay_thread(void)
|
||||
+{
|
||||
+ clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd");
|
||||
+ BUG_ON(!clock_set_delay_thread);
|
||||
+ return 0;
|
||||
+}
|
||||
+early_initcall(create_clock_set_delay_thread);
|
||||
+#else /* PREEMPT_RT_FULL */
|
||||
/*
|
||||
* Called from timekeeping and resume code to reprogramm the hrtimer
|
||||
* interrupt device on all cpus.
|
||||
@@ -749,6 +788,7 @@ void clock_was_set_delayed(void)
|
||||
{
|
||||
schedule_work(&hrtimer_work);
|
||||
}
|
||||
+#endif
|
||||
|
||||
#else
|
||||
|
453
debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
vendored
Normal file
|
@ -0,0 +1,453 @@
|
|||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Fri, 3 Jul 2009 08:44:31 -0500
|
||||
Subject: hrtimer: fixup hrtimer callback changes for preempt-rt
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
In preempt-rt we can not call the callbacks which take sleeping locks
|
||||
from the timer interrupt context.
|
||||
|
||||
Bring back the softirq split for now, until we fixed the signal
|
||||
delivery problem for real.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
|
||||
---
|
||||
include/linux/hrtimer.h | 3
|
||||
kernel/hrtimer.c | 216 ++++++++++++++++++++++++++++++++++++++++-------
|
||||
kernel/sched/core.c | 1
|
||||
kernel/sched/rt.c | 1
|
||||
kernel/time/tick-sched.c | 1
|
||||
kernel/watchdog.c | 1
|
||||
6 files changed, 195 insertions(+), 28 deletions(-)
|
||||
|
||||
--- a/include/linux/hrtimer.h
|
||||
+++ b/include/linux/hrtimer.h
|
||||
@@ -111,6 +111,8 @@ struct hrtimer {
|
||||
enum hrtimer_restart (*function)(struct hrtimer *);
|
||||
struct hrtimer_clock_base *base;
|
||||
unsigned long state;
|
||||
+ struct list_head cb_entry;
|
||||
+ int irqsafe;
|
||||
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
|
||||
ktime_t praecox;
|
||||
#endif
|
||||
@@ -150,6 +152,7 @@ struct hrtimer_clock_base {
|
||||
int index;
|
||||
clockid_t clockid;
|
||||
struct timerqueue_head active;
|
||||
+ struct list_head expired;
|
||||
ktime_t resolution;
|
||||
ktime_t (*get_time)(void);
|
||||
ktime_t softirq_time;
|
||||
--- a/kernel/hrtimer.c
|
||||
+++ b/kernel/hrtimer.c
|
||||
@@ -608,8 +608,7 @@ static int hrtimer_reprogram(struct hrti
|
||||
* When the callback is running, we do not reprogram the clock event
|
||||
* device. The timer callback is either running on a different CPU or
|
||||
* the callback is executed in the hrtimer_interrupt context. The
|
||||
- * reprogramming is handled either by the softirq, which called the
|
||||
- * callback or at the end of the hrtimer_interrupt.
|
||||
+ * reprogramming is handled at the end of the hrtimer_interrupt.
|
||||
*/
|
||||
if (hrtimer_callback_running(timer))
|
||||
return 0;
|
||||
@@ -644,6 +643,9 @@ static int hrtimer_reprogram(struct hrti
|
||||
return res;
|
||||
}
|
||||
|
||||
+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now);
|
||||
+static int hrtimer_rt_defer(struct hrtimer *timer);
|
||||
+
|
||||
/*
|
||||
* Initialize the high resolution related parts of cpu_base
|
||||
*/
|
||||
@@ -660,9 +662,18 @@ static inline void hrtimer_init_hres(str
|
||||
* and expiry check is done in the hrtimer_interrupt or in the softirq.
|
||||
*/
|
||||
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
|
||||
- struct hrtimer_clock_base *base)
|
||||
+ struct hrtimer_clock_base *base,
|
||||
+ int wakeup)
|
||||
{
|
||||
- return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
|
||||
+ if (!(base->cpu_base->hres_active && hrtimer_reprogram(timer, base)))
|
||||
+ return 0;
|
||||
+ if (!wakeup)
|
||||
+ return -ETIME;
|
||||
+#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
+ if (!hrtimer_rt_defer(timer))
|
||||
+ return -ETIME;
|
||||
+#endif
|
||||
+ return 1;
|
||||
}
|
||||
|
||||
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
|
||||
@@ -747,12 +758,18 @@ static inline int hrtimer_switch_to_hres
|
||||
static inline void
|
||||
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
|
||||
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
|
||||
- struct hrtimer_clock_base *base)
|
||||
+ struct hrtimer_clock_base *base,
|
||||
+ int wakeup)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
|
||||
static inline void retrigger_next_event(void *arg) { }
|
||||
+static inline int hrtimer_reprogram(struct hrtimer *timer,
|
||||
+ struct hrtimer_clock_base *base)
|
||||
+{
|
||||
+ return 0;
|
||||
+}
|
||||
|
||||
#endif /* CONFIG_HIGH_RES_TIMERS */
|
||||
|
||||
@@ -888,9 +905,9 @@ void hrtimer_wait_for_timer(const struct
|
||||
{
|
||||
struct hrtimer_clock_base *base = timer->base;
|
||||
|
||||
- if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base))
|
||||
+ if (base && base->cpu_base && !timer->irqsafe)
|
||||
wait_event(base->cpu_base->wait,
|
||||
- !(timer->state & HRTIMER_STATE_CALLBACK));
|
||||
+ !(timer->state & HRTIMER_STATE_CALLBACK));
|
||||
}
|
||||
|
||||
#else
|
||||
@@ -940,6 +957,11 @@ static void __remove_hrtimer(struct hrti
|
||||
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
|
||||
goto out;
|
||||
|
||||
+ if (unlikely(!list_empty(&timer->cb_entry))) {
|
||||
+ list_del_init(&timer->cb_entry);
|
||||
+ goto out;
|
||||
+ }
|
||||
+
|
||||
next_timer = timerqueue_getnext(&base->active);
|
||||
timerqueue_del(&base->active, &timer->node);
|
||||
if (&timer->node == next_timer) {
|
||||
@@ -1047,9 +1069,19 @@ int __hrtimer_start_range_ns(struct hrti
|
||||
*
|
||||
* XXX send_remote_softirq() ?
|
||||
*/
|
||||
- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
|
||||
- && hrtimer_enqueue_reprogram(timer, new_base)) {
|
||||
- if (wakeup) {
|
||||
+ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) {
|
||||
+ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup);
|
||||
+ if (ret < 0) {
|
||||
+ /*
|
||||
+ * In case we failed to reprogram the timer (mostly
|
||||
+ * because out current timer is already elapsed),
|
||||
+ * remove it again and report a failure. This avoids
|
||||
+ * stale base->first entries.
|
||||
+ */
|
||||
+ debug_deactivate(timer);
|
||||
+ __remove_hrtimer(timer, new_base,
|
||||
+ timer->state & HRTIMER_STATE_CALLBACK, 0);
|
||||
+ } else if (ret > 0) {
|
||||
/*
|
||||
* We need to drop cpu_base->lock to avoid a
|
||||
* lock ordering issue vs. rq->lock.
|
||||
@@ -1057,9 +1089,7 @@ int __hrtimer_start_range_ns(struct hrti
|
||||
raw_spin_unlock(&new_base->cpu_base->lock);
|
||||
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
||||
local_irq_restore(flags);
|
||||
- return ret;
|
||||
- } else {
|
||||
- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
||||
+ return 0;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1228,6 +1258,7 @@ static void __hrtimer_init(struct hrtime
|
||||
|
||||
base = hrtimer_clockid_to_base(clock_id);
|
||||
timer->base = &cpu_base->clock_base[base];
|
||||
+ INIT_LIST_HEAD(&timer->cb_entry);
|
||||
timerqueue_init(&timer->node);
|
||||
|
||||
#ifdef CONFIG_TIMER_STATS
|
||||
@@ -1311,10 +1342,128 @@ static void __run_hrtimer(struct hrtimer
|
||||
timer->state &= ~HRTIMER_STATE_CALLBACK;
|
||||
}
|
||||
|
||||
-#ifdef CONFIG_HIGH_RES_TIMERS
|
||||
-
|
||||
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
|
||||
+ struct hrtimer_clock_base *base)
|
||||
+{
|
||||
+ /*
|
||||
+ * Note, we clear the callback flag before we requeue the
|
||||
+ * timer otherwise we trigger the callback_running() check
|
||||
+ * in hrtimer_reprogram().
|
||||
+ */
|
||||
+ timer->state &= ~HRTIMER_STATE_CALLBACK;
|
||||
+
|
||||
+ if (restart != HRTIMER_NORESTART) {
|
||||
+ BUG_ON(hrtimer_active(timer));
|
||||
+ /*
|
||||
+ * Enqueue the timer, if it's the leftmost timer then
|
||||
+ * we need to reprogram it.
|
||||
+ */
|
||||
+ if (!enqueue_hrtimer(timer, base))
|
||||
+ return;
|
||||
+
|
||||
+#ifndef CONFIG_HIGH_RES_TIMERS
|
||||
+ }
|
||||
+#else
|
||||
+ if (base->cpu_base->hres_active &&
|
||||
+ hrtimer_reprogram(timer, base))
|
||||
+ goto requeue;
|
||||
+
|
||||
+ } else if (hrtimer_active(timer)) {
|
||||
+ /*
|
||||
+ * If the timer was rearmed on another CPU, reprogram
|
||||
+ * the event device.
|
||||
+ */
|
||||
+ if (&timer->node == base->active.next &&
|
||||
+ base->cpu_base->hres_active &&
|
||||
+ hrtimer_reprogram(timer, base))
|
||||
+ goto requeue;
|
||||
+ }
|
||||
+ return;
|
||||
+
|
||||
+requeue:
|
||||
+ /*
|
||||
+ * Timer is expired. Thus move it from tree to pending list
|
||||
+ * again.
|
||||
+ */
|
||||
+ __remove_hrtimer(timer, base, timer->state, 0);
|
||||
+ list_add_tail(&timer->cb_entry, &base->expired);
|
||||
+#endif
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * The changes in mainline which removed the callback modes from
|
||||
+ * hrtimer are not yet working with -rt. The non wakeup_process()
|
||||
+ * based callbacks which involve sleeping locks need to be treated
|
||||
+ * seperately.
|
||||
+ */
|
||||
+static void hrtimer_rt_run_pending(void)
|
||||
+{
|
||||
+ enum hrtimer_restart (*fn)(struct hrtimer *);
|
||||
+ struct hrtimer_cpu_base *cpu_base;
|
||||
+ struct hrtimer_clock_base *base;
|
||||
+ struct hrtimer *timer;
|
||||
+ int index, restart;
|
||||
+
|
||||
+ local_irq_disable();
|
||||
+ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
|
||||
+
|
||||
+ raw_spin_lock(&cpu_base->lock);
|
||||
+
|
||||
+ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
|
||||
+ base = &cpu_base->clock_base[index];
|
||||
+
|
||||
+ while (!list_empty(&base->expired)) {
|
||||
+ timer = list_first_entry(&base->expired,
|
||||
+ struct hrtimer, cb_entry);
|
||||
+
|
||||
+ /*
|
||||
+ * Same as the above __run_hrtimer function
|
||||
+ * just we run with interrupts enabled.
|
||||
+ */
|
||||
+ debug_hrtimer_deactivate(timer);
|
||||
+ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
|
||||
+ timer_stats_account_hrtimer(timer);
|
||||
+ fn = timer->function;
|
||||
+
|
||||
+ raw_spin_unlock_irq(&cpu_base->lock);
|
||||
+ restart = fn(timer);
|
||||
+ raw_spin_lock_irq(&cpu_base->lock);
|
||||
+
|
||||
+ hrtimer_rt_reprogram(restart, timer, base);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ raw_spin_unlock_irq(&cpu_base->lock);
|
||||
+
|
||||
+ wake_up_timer_waiters(cpu_base);
|
||||
+}
|
||||
+
|
||||
+static int hrtimer_rt_defer(struct hrtimer *timer)
|
||||
+{
|
||||
+ if (timer->irqsafe)
|
||||
+ return 0;
|
||||
+
|
||||
+ __remove_hrtimer(timer, timer->base, timer->state, 0);
|
||||
+ list_add_tail(&timer->cb_entry, &timer->base->expired);
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
+#else
|
||||
+
|
||||
+static inline void hrtimer_rt_run_pending(void)
|
||||
+{
|
||||
+ hrtimer_peek_ahead_timers();
|
||||
+}
|
||||
+
|
||||
+static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
|
||||
+
|
||||
+#endif
|
||||
+
|
||||
+#ifdef CONFIG_HIGH_RES_TIMERS
|
||||
+
|
||||
/*
|
||||
* High resolution timer interrupt
|
||||
* Called with interrupts disabled
|
||||
@@ -1323,7 +1472,7 @@ void hrtimer_interrupt(struct clock_even
|
||||
{
|
||||
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
|
||||
ktime_t expires_next, now, entry_time, delta;
|
||||
- int i, retries = 0;
|
||||
+ int i, retries = 0, raise = 0;
|
||||
|
||||
BUG_ON(!cpu_base->hres_active);
|
||||
cpu_base->nr_events++;
|
||||
@@ -1392,7 +1541,10 @@ void hrtimer_interrupt(struct clock_even
|
||||
break;
|
||||
}
|
||||
|
||||
- __run_hrtimer(timer, &basenow);
|
||||
+ if (!hrtimer_rt_defer(timer))
|
||||
+ __run_hrtimer(timer, &basenow);
|
||||
+ else
|
||||
+ raise = 1;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1407,6 +1559,10 @@ void hrtimer_interrupt(struct clock_even
|
||||
if (expires_next.tv64 == KTIME_MAX ||
|
||||
!tick_program_event(expires_next, 0)) {
|
||||
cpu_base->hang_detected = 0;
|
||||
+
|
||||
+ if (raise)
|
||||
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
||||
+
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1486,18 +1642,18 @@ void hrtimer_peek_ahead_timers(void)
|
||||
__hrtimer_peek_ahead_timers();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
-
|
||||
-static void run_hrtimer_softirq(struct softirq_action *h)
|
||||
-{
|
||||
- hrtimer_peek_ahead_timers();
|
||||
-}
|
||||
-
|
||||
#else /* CONFIG_HIGH_RES_TIMERS */
|
||||
|
||||
static inline void __hrtimer_peek_ahead_timers(void) { }
|
||||
|
||||
#endif /* !CONFIG_HIGH_RES_TIMERS */
|
||||
|
||||
+
|
||||
+static void run_hrtimer_softirq(struct softirq_action *h)
|
||||
+{
|
||||
+ hrtimer_rt_run_pending();
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Called from timer softirq every jiffy, expire hrtimers:
|
||||
*
|
||||
@@ -1530,7 +1686,7 @@ void hrtimer_run_queues(void)
|
||||
struct timerqueue_node *node;
|
||||
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
|
||||
struct hrtimer_clock_base *base;
|
||||
- int index, gettime = 1;
|
||||
+ int index, gettime = 1, raise = 0;
|
||||
|
||||
if (hrtimer_hres_active())
|
||||
return;
|
||||
@@ -1555,12 +1711,16 @@ void hrtimer_run_queues(void)
|
||||
hrtimer_get_expires_tv64(timer))
|
||||
break;
|
||||
|
||||
- __run_hrtimer(timer, &base->softirq_time);
|
||||
+ if (!hrtimer_rt_defer(timer))
|
||||
+ __run_hrtimer(timer, &base->softirq_time);
|
||||
+ else
|
||||
+ raise = 1;
|
||||
}
|
||||
raw_spin_unlock(&cpu_base->lock);
|
||||
}
|
||||
|
||||
- wake_up_timer_waiters(cpu_base);
|
||||
+ if (raise)
|
||||
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1582,6 +1742,7 @@ static enum hrtimer_restart hrtimer_wake
|
||||
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
|
||||
{
|
||||
sl->timer.function = hrtimer_wakeup;
|
||||
+ sl->timer.irqsafe = 1;
|
||||
sl->task = task;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
|
||||
@@ -1718,6 +1879,7 @@ static void init_hrtimers_cpu(int cpu)
|
||||
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
|
||||
cpu_base->clock_base[i].cpu_base = cpu_base;
|
||||
timerqueue_init_head(&cpu_base->clock_base[i].active);
|
||||
+ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
|
||||
}
|
||||
|
||||
hrtimer_init_hres(cpu_base);
|
||||
@@ -1836,9 +1998,7 @@ void __init hrtimers_init(void)
|
||||
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
|
||||
(void *)(long)smp_processor_id());
|
||||
register_cpu_notifier(&hrtimers_nb);
|
||||
-#ifdef CONFIG_HIGH_RES_TIMERS
|
||||
open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
|
||||
-#endif
|
||||
}
|
||||
|
||||
/**
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -491,6 +491,7 @@ static void init_rq_hrtick(struct rq *rq
|
||||
|
||||
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
rq->hrtick_timer.function = hrtick;
|
||||
+ rq->hrtick_timer.irqsafe = 1;
|
||||
}
|
||||
#else /* CONFIG_SCHED_HRTICK */
|
||||
static inline void hrtick_clear(struct rq *rq)
|
||||
--- a/kernel/sched/rt.c
|
||||
+++ b/kernel/sched/rt.c
|
||||
@@ -43,6 +43,7 @@ void init_rt_bandwidth(struct rt_bandwid
|
||||
|
||||
hrtimer_init(&rt_b->rt_period_timer,
|
||||
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
+ rt_b->rt_period_timer.irqsafe = 1;
|
||||
rt_b->rt_period_timer.function = sched_rt_period_timer;
|
||||
}
|
||||
|
||||
--- a/kernel/time/tick-sched.c
|
||||
+++ b/kernel/time/tick-sched.c
|
||||
@@ -1119,6 +1119,7 @@ void tick_setup_sched_timer(void)
|
||||
* Emulate tick processing via per-CPU hrtimers:
|
||||
*/
|
||||
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
|
||||
+ ts->sched_timer.irqsafe = 1;
|
||||
ts->sched_timer.function = tick_sched_timer;
|
||||
|
||||
/* Get the next period (per cpu) */
|
||||
--- a/kernel/watchdog.c
|
||||
+++ b/kernel/watchdog.c
|
||||
@@ -357,6 +357,7 @@ static void watchdog_enable(unsigned int
|
||||
/* kick off the timer for the hardlockup detector */
|
||||
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
hrtimer->function = watchdog_timer_fn;
|
||||
+ hrtimer->irqsafe = 1;
|
||||
|
||||
/* Enable the perf event */
|
||||
watchdog_nmi_enable(cpu);
|
38
debian/patches/features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch
vendored
Normal file
|
@ -0,0 +1,38 @@
|
|||
Subject: hrtimer: Raise softirq if hrtimer irq stalled
|
||||
From: Watanabe <shunsuke.watanabe@tel.com>
|
||||
Date: Sun, 28 Oct 2012 11:13:44 +0100
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
When the hrtimer stall detection hits the softirq is not raised.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
---
|
||||
kernel/hrtimer.c | 9 ++++-----
|
||||
1 file changed, 4 insertions(+), 5 deletions(-)
|
||||
|
||||
--- a/kernel/hrtimer.c
|
||||
+++ b/kernel/hrtimer.c
|
||||
@@ -1559,11 +1559,7 @@ void hrtimer_interrupt(struct clock_even
|
||||
if (expires_next.tv64 == KTIME_MAX ||
|
||||
!tick_program_event(expires_next, 0)) {
|
||||
cpu_base->hang_detected = 0;
|
||||
-
|
||||
- if (raise)
|
||||
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
||||
-
|
||||
- return;
|
||||
+ goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1607,6 +1603,9 @@ void hrtimer_interrupt(struct clock_even
|
||||
tick_program_event(expires_next, 1);
|
||||
printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
|
||||
ktime_to_ns(delta));
|
||||
+out:
|
||||
+ if (raise)
|
||||
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
||||
}
|
||||
|
||||
/*
|
|
@ -0,0 +1,196 @@
|
|||
From: Ingo Molnar <mingo@elte.hu>
|
||||
Date: Fri, 3 Jul 2009 08:29:34 -0500
|
||||
Subject: hrtimers: prepare full preemption
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Make cancellation of a running callback in softirq context safe
|
||||
against preemption.
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
include/linux/hrtimer.h | 10 ++++++++++
|
||||
kernel/hrtimer.c | 33 ++++++++++++++++++++++++++++++++-
|
||||
kernel/itimer.c | 1 +
|
||||
kernel/posix-timers.c | 33 +++++++++++++++++++++++++++++++++
|
||||
4 files changed, 76 insertions(+), 1 deletion(-)
|
||||
|
||||
--- a/include/linux/hrtimer.h
|
||||
+++ b/include/linux/hrtimer.h
|
||||
@@ -193,6 +193,9 @@ struct hrtimer_cpu_base {
|
||||
unsigned long nr_hangs;
|
||||
ktime_t max_hang_time;
|
||||
#endif
|
||||
+#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
+ wait_queue_head_t wait;
|
||||
+#endif
|
||||
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
|
||||
};
|
||||
|
||||
@@ -388,6 +391,13 @@ static inline int hrtimer_restart(struct
|
||||
return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
|
||||
}
|
||||
|
||||
+/* Softirq preemption could deadlock timer removal */
|
||||
+#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
+ extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
|
||||
+#else
|
||||
+# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
|
||||
+#endif
|
||||
+
|
||||
/* Query timers: */
|
||||
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
|
||||
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
|
||||
--- a/kernel/hrtimer.c
|
||||
+++ b/kernel/hrtimer.c
|
||||
@@ -871,6 +871,32 @@ u64 hrtimer_forward(struct hrtimer *time
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hrtimer_forward);
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
+# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
|
||||
+
|
||||
+/**
|
||||
+ * hrtimer_wait_for_timer - Wait for a running timer
|
||||
+ *
|
||||
+ * @timer: timer to wait for
|
||||
+ *
|
||||
+ * The function waits in case the timers callback function is
|
||||
+ * currently executed on the waitqueue of the timer base. The
|
||||
+ * waitqueue is woken up after the timer callback function has
|
||||
+ * finished execution.
|
||||
+ */
|
||||
+void hrtimer_wait_for_timer(const struct hrtimer *timer)
|
||||
+{
|
||||
+ struct hrtimer_clock_base *base = timer->base;
|
||||
+
|
||||
+ if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base))
|
||||
+ wait_event(base->cpu_base->wait,
|
||||
+ !(timer->state & HRTIMER_STATE_CALLBACK));
|
||||
+}
|
||||
+
|
||||
+#else
|
||||
+# define wake_up_timer_waiters(b) do { } while (0)
|
||||
+#endif
|
||||
+
|
||||
/*
|
||||
* enqueue_hrtimer - internal function to (re)start a timer
|
||||
*
|
||||
@@ -1123,7 +1149,7 @@ int hrtimer_cancel(struct hrtimer *timer
|
||||
|
||||
if (ret >= 0)
|
||||
return ret;
|
||||
- cpu_relax();
|
||||
+ hrtimer_wait_for_timer(timer);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hrtimer_cancel);
|
||||
@@ -1533,6 +1559,8 @@ void hrtimer_run_queues(void)
|
||||
}
|
||||
raw_spin_unlock(&cpu_base->lock);
|
||||
}
|
||||
+
|
||||
+ wake_up_timer_waiters(cpu_base);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1693,6 +1721,9 @@ static void init_hrtimers_cpu(int cpu)
|
||||
}
|
||||
|
||||
hrtimer_init_hres(cpu_base);
|
||||
+#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
+ init_waitqueue_head(&cpu_base->wait);
|
||||
+#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
--- a/kernel/itimer.c
|
||||
+++ b/kernel/itimer.c
|
||||
@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime
|
||||
/* We are sharing ->siglock with it_real_fn() */
|
||||
if (hrtimer_try_to_cancel(timer) < 0) {
|
||||
spin_unlock_irq(&tsk->sighand->siglock);
|
||||
+ hrtimer_wait_for_timer(&tsk->signal->real_timer);
|
||||
goto again;
|
||||
}
|
||||
expires = timeval_to_ktime(value->it_value);
|
||||
--- a/kernel/posix-timers.c
|
||||
+++ b/kernel/posix-timers.c
|
||||
@@ -818,6 +818,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
|
||||
return overrun;
|
||||
}
|
||||
|
||||
+/*
|
||||
+ * Protected by RCU!
|
||||
+ */
|
||||
+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
|
||||
+{
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+ if (kc->timer_set == common_timer_set)
|
||||
+ hrtimer_wait_for_timer(&timr->it.real.timer);
|
||||
+ else
|
||||
+ /* FIXME: Whacky hack for posix-cpu-timers */
|
||||
+ schedule_timeout(1);
|
||||
+#endif
|
||||
+}
|
||||
+
|
||||
/* Set a POSIX.1b interval timer. */
|
||||
/* timr->it_lock is taken. */
|
||||
static int
|
||||
@@ -895,6 +909,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
|
||||
if (!timr)
|
||||
return -EINVAL;
|
||||
|
||||
+ rcu_read_lock();
|
||||
kc = clockid_to_kclock(timr->it_clock);
|
||||
if (WARN_ON_ONCE(!kc || !kc->timer_set))
|
||||
error = -EINVAL;
|
||||
@@ -903,9 +918,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
|
||||
|
||||
unlock_timer(timr, flag);
|
||||
if (error == TIMER_RETRY) {
|
||||
+ timer_wait_for_callback(kc, timr);
|
||||
rtn = NULL; // We already got the old time...
|
||||
+ rcu_read_unlock();
|
||||
goto retry;
|
||||
}
|
||||
+ rcu_read_unlock();
|
||||
|
||||
if (old_setting && !error &&
|
||||
copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
|
||||
@@ -943,10 +961,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
|
||||
if (!timer)
|
||||
return -EINVAL;
|
||||
|
||||
+ rcu_read_lock();
|
||||
if (timer_delete_hook(timer) == TIMER_RETRY) {
|
||||
unlock_timer(timer, flags);
|
||||
+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
|
||||
+ timer);
|
||||
+ rcu_read_unlock();
|
||||
goto retry_delete;
|
||||
}
|
||||
+ rcu_read_unlock();
|
||||
|
||||
spin_lock(¤t->sighand->siglock);
|
||||
list_del(&timer->list);
|
||||
@@ -972,8 +995,18 @@ static void itimer_delete(struct k_itime
|
||||
retry_delete:
|
||||
spin_lock_irqsave(&timer->it_lock, flags);
|
||||
|
||||
+ /* On RT we can race with a deletion */
|
||||
+ if (!timer->it_signal) {
|
||||
+ unlock_timer(timer, flags);
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
if (timer_delete_hook(timer) == TIMER_RETRY) {
|
||||
+ rcu_read_lock();
|
||||
unlock_timer(timer, flags);
|
||||
+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
|
||||
+ timer);
|
||||
+ rcu_read_unlock();
|
||||
goto retry_delete;
|
||||
}
|
||||
list_del(&timer->list);
|
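The core of the change above is that cancelling a timer now sleeps on a per-base waitqueue instead of spinning with cpu_relax() while the callback runs. A condensed sketch of that wait/wake pattern (illustrative only; struct my_timer, try_to_cancel() and callback_running() are placeholders, not kernel APIs):

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(base_wait);	/* one per timer base */

static void cancel_timer_sleeping(struct my_timer *t)
{
	/* try_to_cancel() returns < 0 while the callback is still running */
	while (try_to_cancel(t) < 0)
		wait_event(base_wait, !callback_running(t));
}

static void run_expired_callbacks(void)
{
	/* ... run the expired callbacks ... */
	wake_up(&base_wait);	/* unblock any sleeping cancellers */
}
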
27
debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch
vendored
Normal file
|
@@ -0,0 +1,27 @@
|
|||
From c19bf3baaa55918486b868ab17aae0c0c220e51f Mon Sep 17 00:00:00 2001
|
||||
From: Mike Galbraith <bitbucket@online.de>
|
||||
Date: Fri, 30 Aug 2013 07:57:25 +0200
|
||||
Subject: [PATCH] hwlat-detector: Don't ignore threshold module parameter
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
If the user specified a threshold at module load time, use it.
|
||||
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
Acked-by: Steven Rostedt <rostedt@goodmis.org>
|
||||
Signed-off-by: Mike Galbraith <bitbucket@online.de>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/misc/hwlat_detector.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
--- a/drivers/misc/hwlat_detector.c
|
||||
+++ b/drivers/misc/hwlat_detector.c
|
||||
@@ -413,7 +413,7 @@ static int init_stats(void)
|
||||
goto out;
|
||||
|
||||
__reset_stats();
|
||||
- data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */
|
||||
+ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */
|
||||
data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
|
||||
data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */
|
||||
|
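The one-line change relies on GCC's conditional operator with an omitted middle operand, where "a ?: b" yields a when a is non-zero and b otherwise; it is shorthand for:

	data.threshold = threshold ? threshold : DEFAULT_LAT_THRESHOLD;
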
128
debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch
vendored
Normal file
|
@@ -0,0 +1,128 @@
|
|||
From 7a036d4dfcf3f2d3247ff7f739284f4b5056bdcb Mon Sep 17 00:00:00 2001
|
||||
From: Steven Rostedt <rostedt@goodmis.org>
|
||||
Date: Mon, 19 Aug 2013 17:33:25 -0400
|
||||
Subject: [PATCH 1/3] hwlat-detector: Update hwlat_detector to add outer loop
|
||||
detection
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
The hwlat_detector reads two timestamps in a row, then reports any
|
||||
gap between those calls. The problem is, it misses everything between
|
||||
the second reading of the time stamp and the first reading of the time stamp
|
||||
in the next loop. That's where most of the time is spent, which means
|
||||
chances are it will miss all hardware latencies. This
|
||||
defeats the purpose.
|
||||
|
||||
By also testing the first time stamp against the previous loop's second
|
||||
time stamp (the outer loop), we are more likely to find a latency.
|
||||
|
||||
Setting the threshold to 1, here's what the report now looks like:
|
||||
|
||||
1347415723.0232202770 0 2
|
||||
1347415725.0234202822 0 2
|
||||
1347415727.0236202875 0 2
|
||||
1347415729.0238202928 0 2
|
||||
1347415731.0240202980 0 2
|
||||
1347415734.0243203061 0 2
|
||||
1347415736.0245203113 0 2
|
||||
1347415738.0247203166 2 0
|
||||
1347415740.0249203219 0 3
|
||||
1347415742.0251203272 0 3
|
||||
1347415743.0252203299 0 3
|
||||
1347415745.0254203351 0 2
|
||||
1347415747.0256203404 0 2
|
||||
1347415749.0258203457 0 2
|
||||
1347415751.0260203510 0 2
|
||||
1347415754.0263203589 0 2
|
||||
1347415756.0265203642 0 2
|
||||
1347415758.0267203695 0 2
|
||||
1347415760.0269203748 0 2
|
||||
1347415762.0271203801 0 2
|
||||
1347415764.0273203853 2 0
|
||||
|
||||
There's some hardware latency that takes 2 microseconds to run.
|
||||
|
||||
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/misc/hwlat_detector.c | 32 ++++++++++++++++++++++++++------
|
||||
1 file changed, 26 insertions(+), 6 deletions(-)
|
||||
|
||||
--- a/drivers/misc/hwlat_detector.c
|
||||
+++ b/drivers/misc/hwlat_detector.c
|
||||
@@ -143,6 +143,7 @@ static void detector_exit(void);
|
||||
struct sample {
|
||||
u64 seqnum; /* unique sequence */
|
||||
u64 duration; /* ktime delta */
|
||||
+ u64 outer_duration; /* ktime delta (outer loop) */
|
||||
struct timespec timestamp; /* wall time */
|
||||
unsigned long lost;
|
||||
};
|
||||
@@ -219,11 +220,13 @@ static struct sample *buffer_get_sample(
|
||||
*/
|
||||
static int get_sample(void *unused)
|
||||
{
|
||||
- ktime_t start, t1, t2;
|
||||
+ ktime_t start, t1, t2, last_t2;
|
||||
s64 diff, total = 0;
|
||||
u64 sample = 0;
|
||||
+ u64 outer_sample = 0;
|
||||
int ret = 1;
|
||||
|
||||
+ last_t2.tv64 = 0;
|
||||
start = ktime_get(); /* start timestamp */
|
||||
|
||||
do {
|
||||
@@ -231,7 +234,22 @@ static int get_sample(void *unused)
|
||||
t1 = ktime_get(); /* we'll look for a discontinuity */
|
||||
t2 = ktime_get();
|
||||
|
||||
+ if (last_t2.tv64) {
|
||||
+ /* Check the delta from the outer loop (t2 to next t1) */
|
||||
+ diff = ktime_to_us(ktime_sub(t1, last_t2));
|
||||
+ /* This shouldn't happen */
|
||||
+ if (diff < 0) {
|
||||
+ printk(KERN_ERR BANNER "time running backwards\n");
|
||||
+ goto out;
|
||||
+ }
|
||||
+ if (diff > outer_sample)
|
||||
+ outer_sample = diff;
|
||||
+ }
|
||||
+ last_t2 = t2;
|
||||
+
|
||||
total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
|
||||
+
|
||||
+ /* This checks the inner loop (t1 to t2) */
|
||||
diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */
|
||||
|
||||
/* This shouldn't happen */
|
||||
@@ -246,12 +264,13 @@ static int get_sample(void *unused)
|
||||
} while (total <= data.sample_width);
|
||||
|
||||
/* If we exceed the threshold value, we have found a hardware latency */
|
||||
- if (sample > data.threshold) {
|
||||
+ if (sample > data.threshold || outer_sample > data.threshold) {
|
||||
struct sample s;
|
||||
|
||||
data.count++;
|
||||
s.seqnum = data.count;
|
||||
s.duration = sample;
|
||||
+ s.outer_duration = outer_sample;
|
||||
s.timestamp = CURRENT_TIME;
|
||||
__buffer_add_sample(&s);
|
||||
|
||||
@@ -738,10 +757,11 @@ static ssize_t debug_sample_fread(struct
|
||||
}
|
||||
}
|
||||
|
||||
- len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n",
|
||||
- sample->timestamp.tv_sec,
|
||||
- sample->timestamp.tv_nsec,
|
||||
- sample->duration);
|
||||
+ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
|
||||
+ sample->timestamp.tv_sec,
|
||||
+ sample->timestamp.tv_nsec,
|
||||
+ sample->duration,
|
||||
+ sample->outer_duration);
|
||||
|
||||
|
||||
/* handling partial reads is more trouble than it's worth */
|
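Stripped of the buffering details, the sampling loop after this patch measures two deltas per iteration, roughly as follows (a fragment for illustration; sample_width stands in for data.sample_width):

	ktime_t start, t1, t2, last_t2;
	s64 inner, outer;

	last_t2.tv64 = 0;
	start = ktime_get();
	do {
		t1 = ktime_get();
		t2 = ktime_get();

		/* outer gap: previous iteration's t2 up to this iteration's t1 */
		if (last_t2.tv64)
			outer = ktime_to_us(ktime_sub(t1, last_t2));
		last_t2 = t2;

		/* inner gap: between the two back-to-back reads */
		inner = ktime_to_us(ktime_sub(t2, t1));
	} while (ktime_to_us(ktime_sub(t2, start)) <= sample_width);
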
184
debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch
vendored
Normal file
|
@@ -0,0 +1,184 @@
|
|||
From 42b3963c5d3dcdb54226fc6bbb6b5fbcf3f2ddee Mon Sep 17 00:00:00 2001
|
||||
From: Steven Rostedt <rostedt@goodmis.org>
|
||||
Date: Mon, 19 Aug 2013 17:33:27 -0400
|
||||
Subject: [PATCH 3/3] hwlat-detector: Use thread instead of stop machine
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
There's no reason to use stop machine to search for hardware latency.
|
||||
Simply disabling interrupts while running the loop will do enough to
|
||||
check if something comes in that wasn't disabled by interrupts being
|
||||
off, which is exactly what stop machine does.
|
||||
|
||||
Instead of using stop machine, just have the thread disable interrupts
|
||||
while it checks for hardware latency.
|
||||
|
||||
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/misc/hwlat_detector.c | 59 +++++++++++++++++-------------------------
|
||||
1 file changed, 25 insertions(+), 34 deletions(-)
|
||||
|
||||
--- a/drivers/misc/hwlat_detector.c
|
||||
+++ b/drivers/misc/hwlat_detector.c
|
||||
@@ -41,7 +41,6 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/ring_buffer.h>
|
||||
-#include <linux/stop_machine.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/kthread.h>
|
||||
@@ -107,7 +106,6 @@ struct data; /* Global state */
|
||||
/* Sampling functions */
|
||||
static int __buffer_add_sample(struct sample *sample);
|
||||
static struct sample *buffer_get_sample(struct sample *sample);
|
||||
-static int get_sample(void *unused);
|
||||
|
||||
/* Threading and state */
|
||||
static int kthread_fn(void *unused);
|
||||
@@ -149,7 +147,7 @@ struct sample {
|
||||
unsigned long lost;
|
||||
};
|
||||
|
||||
-/* keep the global state somewhere. Mostly used under stop_machine. */
|
||||
+/* keep the global state somewhere. */
|
||||
static struct data {
|
||||
|
||||
struct mutex lock; /* protect changes */
|
||||
@@ -172,7 +170,7 @@ static struct data {
|
||||
* @sample: The new latency sample value
|
||||
*
|
||||
* This receives a new latency sample and records it in a global ring buffer.
|
||||
- * No additional locking is used in this case - suited for stop_machine use.
|
||||
+ * No additional locking is used in this case.
|
||||
*/
|
||||
static int __buffer_add_sample(struct sample *sample)
|
||||
{
|
||||
@@ -229,18 +227,17 @@ static struct sample *buffer_get_sample(
|
||||
#endif
|
||||
/**
|
||||
* get_sample - sample the CPU TSC and look for likely hardware latencies
|
||||
- * @unused: This is not used but is a part of the stop_machine API
|
||||
*
|
||||
* Used to repeatedly capture the CPU TSC (or similar), looking for potential
|
||||
- * hardware-induced latency. Called under stop_machine, with data.lock held.
|
||||
+ * hardware-induced latency. Called with interrupts disabled and with data.lock held.
|
||||
*/
|
||||
-static int get_sample(void *unused)
|
||||
+static int get_sample(void)
|
||||
{
|
||||
time_type start, t1, t2, last_t2;
|
||||
s64 diff, total = 0;
|
||||
u64 sample = 0;
|
||||
u64 outer_sample = 0;
|
||||
- int ret = 1;
|
||||
+ int ret = -1;
|
||||
|
||||
init_time(last_t2, 0);
|
||||
start = time_get(); /* start timestamp */
|
||||
@@ -279,10 +276,14 @@ static int get_sample(void *unused)
|
||||
|
||||
} while (total <= data.sample_width);
|
||||
|
||||
+ ret = 0;
|
||||
+
|
||||
/* If we exceed the threshold value, we have found a hardware latency */
|
||||
if (sample > data.threshold || outer_sample > data.threshold) {
|
||||
struct sample s;
|
||||
|
||||
+ ret = 1;
|
||||
+
|
||||
data.count++;
|
||||
s.seqnum = data.count;
|
||||
s.duration = sample;
|
||||
@@ -295,7 +296,6 @@ static int get_sample(void *unused)
|
||||
data.max_sample = sample;
|
||||
}
|
||||
|
||||
- ret = 0;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
@@ -305,32 +305,30 @@ static int get_sample(void *unused)
|
||||
* @unused: A required part of the kthread API.
|
||||
*
|
||||
* Used to periodically sample the CPU TSC via a call to get_sample. We
|
||||
- * use stop_machine, whith does (intentionally) introduce latency since we
|
||||
+ * disable interrupts, which does (intentionally) introduce latency since we
|
||||
* need to ensure nothing else might be running (and thus pre-empting).
|
||||
* Obviously this should never be used in production environments.
|
||||
*
|
||||
- * stop_machine will schedule us typically only on CPU0 which is fine for
|
||||
- * almost every real-world hardware latency situation - but we might later
|
||||
- * generalize this if we find there are any actualy systems with alternate
|
||||
- * SMI delivery or other non CPU0 hardware latencies.
|
||||
+ * Currently this runs on which ever CPU it was scheduled on, but most
|
||||
+ * real-worald hardware latency situations occur across several CPUs,
|
||||
+ * but we might later generalize this if we find there are any actualy
|
||||
+ * systems with alternate SMI delivery or other hardware latencies.
|
||||
*/
|
||||
static int kthread_fn(void *unused)
|
||||
{
|
||||
- int err = 0;
|
||||
- u64 interval = 0;
|
||||
+ int ret;
|
||||
+ u64 interval;
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
|
||||
mutex_lock(&data.lock);
|
||||
|
||||
- err = stop_machine(get_sample, unused, 0);
|
||||
- if (err) {
|
||||
- /* Houston, we have a problem */
|
||||
- mutex_unlock(&data.lock);
|
||||
- goto err_out;
|
||||
- }
|
||||
+ local_irq_disable();
|
||||
+ ret = get_sample();
|
||||
+ local_irq_enable();
|
||||
|
||||
- wake_up(&data.wq); /* wake up reader(s) */
|
||||
+ if (ret > 0)
|
||||
+ wake_up(&data.wq); /* wake up reader(s) */
|
||||
|
||||
interval = data.sample_window - data.sample_width;
|
||||
do_div(interval, USEC_PER_MSEC); /* modifies interval value */
|
||||
@@ -338,15 +336,10 @@ static int kthread_fn(void *unused)
|
||||
mutex_unlock(&data.lock);
|
||||
|
||||
if (msleep_interruptible(interval))
|
||||
- goto out;
|
||||
+ break;
|
||||
}
|
||||
- goto out;
|
||||
-err_out:
|
||||
- printk(KERN_ERR BANNER "could not call stop_machine, disabling\n");
|
||||
- enabled = 0;
|
||||
-out:
|
||||
- return err;
|
||||
|
||||
+ return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -442,8 +435,7 @@ static int init_stats(void)
|
||||
* This function provides a generic read implementation for the global state
|
||||
* "data" structure debugfs filesystem entries. It would be nice to use
|
||||
* simple_attr_read directly, but we need to make sure that the data.lock
|
||||
- * spinlock is held during the actual read (even though we likely won't ever
|
||||
- * actually race here as the updater runs under a stop_machine context).
|
||||
+ * is held during the actual read.
|
||||
*/
|
||||
static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
|
||||
size_t cnt, loff_t *ppos, const u64 *entry)
|
||||
@@ -478,8 +470,7 @@ static ssize_t simple_data_read(struct f
|
||||
* This function provides a generic write implementation for the global state
|
||||
* "data" structure debugfs filesystem entries. It would be nice to use
|
||||
* simple_attr_write directly, but we need to make sure that the data.lock
|
||||
- * spinlock is held during the actual write (even though we likely won't ever
|
||||
- * actually race here as the updater runs under a stop_machine context).
|
||||
+ * is held during the actual write.
|
||||
*/
|
||||
static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
|
||||
size_t cnt, loff_t *ppos, u64 *entry)
|
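After this patch the sampling thread therefore reduces to roughly the following shape (condensed from the hunks above, not the literal driver source):

static int kthread_fn(void *unused)
{
	int ret;
	u64 interval;

	while (!kthread_should_stop()) {
		mutex_lock(&data.lock);

		local_irq_disable();		/* replaces stop_machine() */
		ret = get_sample();
		local_irq_enable();

		if (ret > 0)
			wake_up(&data.wq);	/* a latency was recorded */

		interval = data.sample_window - data.sample_width;
		do_div(interval, USEC_PER_MSEC);
		mutex_unlock(&data.lock);

		if (msleep_interruptible(interval))
			break;
	}
	return 0;
}
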
94
debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch
vendored
Normal file
|
@@ -0,0 +1,94 @@
|
|||
From 4aaca90c0255caee9a55371afaecb32365123762 Mon Sep 17 00:00:00 2001
|
||||
From: Steven Rostedt <rostedt@goodmis.org>
|
||||
Date: Mon, 19 Aug 2013 17:33:26 -0400
|
||||
Subject: [PATCH 2/3] hwlat-detector: Use trace_clock_local if available
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
As ktime_get() calls into the timing code which does a read_seq(), it
|
||||
may be affected by other CPUs that touch that lock. To remove this
|
||||
dependency, use the trace_clock_local() which is already exported
|
||||
for module use. If CONFIG_TRACING is enabled, use that as the clock,
|
||||
otherwise use ktime_get().
|
||||
|
||||
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/misc/hwlat_detector.c | 34 +++++++++++++++++++++++++---------
|
||||
1 file changed, 25 insertions(+), 9 deletions(-)
|
||||
|
||||
--- a/drivers/misc/hwlat_detector.c
|
||||
+++ b/drivers/misc/hwlat_detector.c
|
||||
@@ -51,6 +51,7 @@
|
||||
#include <linux/version.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
+#include <linux/trace_clock.h>
|
||||
|
||||
#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
|
||||
#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
|
||||
@@ -211,6 +212,21 @@ static struct sample *buffer_get_sample(
|
||||
return sample;
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_TRACING
|
||||
+#define time_type ktime_t
|
||||
+#define time_get() ktime_get()
|
||||
+#define time_to_us(x) ktime_to_us(x)
|
||||
+#define time_sub(a, b) ktime_sub(a, b)
|
||||
+#define init_time(a, b) (a).tv64 = b
|
||||
+#define time_u64(a) (a).tv64
|
||||
+#else
|
||||
+#define time_type u64
|
||||
+#define time_get() trace_clock_local()
|
||||
+#define time_to_us(x) div_u64(x, 1000)
|
||||
+#define time_sub(a, b) ((a) - (b))
|
||||
+#define init_time(a, b) a = b
|
||||
+#define time_u64(a) a
|
||||
+#endif
|
||||
/**
|
||||
* get_sample - sample the CPU TSC and look for likely hardware latencies
|
||||
* @unused: This is not used but is a part of the stop_machine API
|
||||
@@ -220,23 +236,23 @@ static struct sample *buffer_get_sample(
|
||||
*/
|
||||
static int get_sample(void *unused)
|
||||
{
|
||||
- ktime_t start, t1, t2, last_t2;
|
||||
+ time_type start, t1, t2, last_t2;
|
||||
s64 diff, total = 0;
|
||||
u64 sample = 0;
|
||||
u64 outer_sample = 0;
|
||||
int ret = 1;
|
||||
|
||||
- last_t2.tv64 = 0;
|
||||
- start = ktime_get(); /* start timestamp */
|
||||
+ init_time(last_t2, 0);
|
||||
+ start = time_get(); /* start timestamp */
|
||||
|
||||
do {
|
||||
|
||||
- t1 = ktime_get(); /* we'll look for a discontinuity */
|
||||
- t2 = ktime_get();
|
||||
+ t1 = time_get(); /* we'll look for a discontinuity */
|
||||
+ t2 = time_get();
|
||||
|
||||
- if (last_t2.tv64) {
|
||||
+ if (time_u64(last_t2)) {
|
||||
/* Check the delta from the outer loop (t2 to next t1) */
|
||||
- diff = ktime_to_us(ktime_sub(t1, last_t2));
|
||||
+ diff = time_to_us(time_sub(t1, last_t2));
|
||||
/* This shouldn't happen */
|
||||
if (diff < 0) {
|
||||
printk(KERN_ERR BANNER "time running backwards\n");
|
||||
@@ -247,10 +263,10 @@ static int get_sample(void *unused)
|
||||
}
|
||||
last_t2 = t2;
|
||||
|
||||
- total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
|
||||
+ total = time_to_us(time_sub(t2, start)); /* sample width */
|
||||
|
||||
/* This checks the inner loop (t1 to t2) */
|
||||
- diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */
|
||||
+ diff = time_to_us(time_sub(t2, t1)); /* current diff */
|
||||
|
||||
/* This shouldn't happen */
|
||||
if (diff < 0) {
|
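The practical difference between the two clock sources can be seen in a small fragment like this (both clock calls are stock kernel APIs; the surrounding code is only illustrative):

#ifdef CONFIG_TRACING
	u64 t0 = trace_clock_local();		/* lockless, per-cpu */
	/* ... section being measured ... */
	u64 delta_us = div_u64(trace_clock_local() - t0, 1000);
#else
	ktime_t k0 = ktime_get();		/* may hit the timekeeping seqlock */
	/* ... section being measured ... */
	s64 delta_us = ktime_to_us(ktime_sub(ktime_get(), k0));
#endif
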
File diff suppressed because it is too large
35
debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch
vendored
Normal file
|
@@ -0,0 +1,35 @@
|
|||
From 5145351047b216cca13aaca99f939a9a594c6c4d Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Thu, 21 Mar 2013 11:35:49 +0100
|
||||
Subject: [PATCH 2/3] i2c/omap: drop the lock hard irq context
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
The lock is taken while reading two registers. On RT the first lock is
|
||||
taken in hard irq, where it might sleep, and the second in the threaded irq.
|
||||
The threaded irq runs in oneshot mode so the hard irq does not run until
|
||||
the thread completes, so there is no reason to grab the lock.
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/i2c/busses/i2c-omap.c | 5 +----
|
||||
1 file changed, 1 insertion(+), 4 deletions(-)
|
||||
|
||||
--- a/drivers/i2c/busses/i2c-omap.c
|
||||
+++ b/drivers/i2c/busses/i2c-omap.c
|
||||
@@ -879,15 +879,12 @@ omap_i2c_isr(int irq, void *dev_id)
|
||||
u16 mask;
|
||||
u16 stat;
|
||||
|
||||
- spin_lock(&dev->lock);
|
||||
- mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
|
||||
stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
|
||||
+ mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
|
||||
|
||||
if (stat & mask)
|
||||
ret = IRQ_WAKE_THREAD;
|
||||
|
||||
- spin_unlock(&dev->lock);
|
||||
-
|
||||
return ret;
|
||||
}
|
||||
|
|
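For reference, the oneshot threaded-IRQ model this relies on looks like the following in a driver (hypothetical names; request_threaded_irq() and IRQF_ONESHOT are the stock kernel API):

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	/* Hard-irq half: with IRQF_ONESHOT the line stays masked until
	 * the thread below returns, so the two halves never overlap. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread(int irq, void *dev_id)
{
	/* Register accesses and the real work happen here, in a sleepable context. */
	return IRQ_HANDLED;
}

	/* in probe(): */
	err = request_threaded_irq(irq, foo_hardirq, foo_thread,
				   IRQF_ONESHOT, "foo", dev);
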
@@ -0,0 +1,23 @@
|
|||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Subject: gpu/i915: don't open code these things
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
The open-coded part is gone in 1f83fee0 ("drm/i915: clear up wedged transitions"), but
|
||||
the owner check is still there.
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/gpu/drm/i915/i915_gem.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
--- a/drivers/gpu/drm/i915/i915_gem.c
|
||||
+++ b/drivers/gpu/drm/i915/i915_gem.c
|
||||
@@ -4778,7 +4778,7 @@ static bool mutex_is_locked_by(struct mu
|
||||
if (!mutex_is_locked(mutex))
|
||||
return false;
|
||||
|
||||
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
|
||||
+#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
|
||||
return mutex->owner == task;
|
||||
#else
|
||||
/* Since UP may be pre-empted, we cannot assume that we own the lock */
|
|
@@ -0,0 +1,170 @@
|
|||
From: Ingo Molnar <mingo@elte.hu>
|
||||
Date: Fri, 3 Jul 2009 08:30:16 -0500
|
||||
Subject: ide: Do not disable interrupts for PREEMPT-RT
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Use the local_irq_*_nort variants.
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
drivers/ide/alim15x3.c | 4 ++--
|
||||
drivers/ide/hpt366.c | 4 ++--
|
||||
drivers/ide/ide-io-std.c | 8 ++++----
|
||||
drivers/ide/ide-io.c | 2 +-
|
||||
drivers/ide/ide-iops.c | 4 ++--
|
||||
drivers/ide/ide-probe.c | 4 ++--
|
||||
drivers/ide/ide-taskfile.c | 6 +++---
|
||||
7 files changed, 16 insertions(+), 16 deletions(-)
|
||||
|
||||
--- a/drivers/ide/alim15x3.c
|
||||
+++ b/drivers/ide/alim15x3.c
|
||||
@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct p
|
||||
|
||||
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
|
||||
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
|
||||
if (m5229_revision < 0xC2) {
|
||||
/*
|
||||
@@ -325,7 +325,7 @@ static int init_chipset_ali15x3(struct p
|
||||
}
|
||||
pci_dev_put(north);
|
||||
pci_dev_put(isa_dev);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
--- a/drivers/ide/hpt366.c
|
||||
+++ b/drivers/ide/hpt366.c
|
||||
@@ -1241,7 +1241,7 @@ static int init_dma_hpt366(ide_hwif_t *h
|
||||
|
||||
dma_old = inb(base + 2);
|
||||
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
|
||||
dma_new = dma_old;
|
||||
pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
|
||||
@@ -1252,7 +1252,7 @@ static int init_dma_hpt366(ide_hwif_t *h
|
||||
if (dma_new != dma_old)
|
||||
outb(dma_new, base + 2);
|
||||
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
|
||||
printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
|
||||
hwif->name, base, base + 7);
|
||||
--- a/drivers/ide/ide-io-std.c
|
||||
+++ b/drivers/ide/ide-io-std.c
|
||||
@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive,
|
||||
unsigned long uninitialized_var(flags);
|
||||
|
||||
if ((io_32bit & 2) && !mmio) {
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
ata_vlb_sync(io_ports->nsect_addr);
|
||||
}
|
||||
|
||||
@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive,
|
||||
insl(data_addr, buf, words);
|
||||
|
||||
if ((io_32bit & 2) && !mmio)
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
|
||||
if (((len + 1) & 3) < 2)
|
||||
return;
|
||||
@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive,
|
||||
unsigned long uninitialized_var(flags);
|
||||
|
||||
if ((io_32bit & 2) && !mmio) {
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
ata_vlb_sync(io_ports->nsect_addr);
|
||||
}
|
||||
|
||||
@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive,
|
||||
outsl(data_addr, buf, words);
|
||||
|
||||
if ((io_32bit & 2) && !mmio)
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
|
||||
if (((len + 1) & 3) < 2)
|
||||
return;
|
||||
--- a/drivers/ide/ide-io.c
|
||||
+++ b/drivers/ide/ide-io.c
|
||||
@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long dat
|
||||
/* disable_irq_nosync ?? */
|
||||
disable_irq(hwif->irq);
|
||||
/* local CPU only, as if we were handling an interrupt */
|
||||
- local_irq_disable();
|
||||
+ local_irq_disable_nort();
|
||||
if (hwif->polling) {
|
||||
startstop = handler(drive);
|
||||
} else if (drive_is_ready(drive)) {
|
||||
--- a/drivers/ide/ide-iops.c
|
||||
+++ b/drivers/ide/ide-iops.c
|
||||
@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive,
|
||||
if ((stat & ATA_BUSY) == 0)
|
||||
break;
|
||||
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
*rstat = stat;
|
||||
return -EBUSY;
|
||||
}
|
||||
}
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
}
|
||||
/*
|
||||
* Allow status to settle, then read it again.
|
||||
--- a/drivers/ide/ide-probe.c
|
||||
+++ b/drivers/ide/ide-probe.c
|
||||
@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *dri
|
||||
int bswap = 1;
|
||||
|
||||
/* local CPU only; some systems need this */
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
/* read 512 bytes of id info */
|
||||
hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
|
||||
drive->dev_flags |= IDE_DFLAG_ID_READ;
|
||||
#ifdef DEBUG
|
||||
--- a/drivers/ide/ide-taskfile.c
|
||||
+++ b/drivers/ide/ide-taskfile.c
|
||||
@@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
|
||||
|
||||
page_is_high = PageHighMem(page);
|
||||
if (page_is_high)
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
|
||||
buf = kmap_atomic(page) + offset;
|
||||
|
||||
@@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
|
||||
kunmap_atomic(buf);
|
||||
|
||||
if (page_is_high)
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
|
||||
len -= nr_bytes;
|
||||
}
|
||||
@@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr
|
||||
}
|
||||
|
||||
if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
|
||||
- local_irq_disable();
|
||||
+ local_irq_disable_nort();
|
||||
|
||||
ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
|
||||
|
|
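The local_irq_*_nort() helpers used throughout this patch come from elsewhere in the RT series; their behaviour is approximately this (paraphrased, not the exact upstream definitions):

#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_disable_nort()	do { } while (0)
# define local_irq_enable_nort()	do { } while (0)
# define local_irq_save_nort(flags)	local_save_flags(flags)
# define local_irq_restore_nort(flags)	(void)(flags)
#else
# define local_irq_disable_nort()	local_irq_disable()
# define local_irq_enable_nort()	local_irq_enable()
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif
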
@@ -0,0 +1,20 @@
|
|||
Subject: sched: Init idle->on_rq in init_idle()
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Wed, 09 Jan 2013 23:03:29 +0100
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
kernel/sched/core.c | 1 +
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -4206,6 +4206,7 @@ void init_idle(struct task_struct *idle,
|
||||
rcu_read_unlock();
|
||||
|
||||
rq->curr = rq->idle = idle;
|
||||
+ idle->on_rq = 1;
|
||||
#if defined(CONFIG_SMP)
|
||||
idle->on_cpu = 1;
|
||||
#endif
|
|
@@ -0,0 +1,97 @@
|
|||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Subject: idr: Use local lock instead of preempt enable/disable
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
We need to protect the per cpu variable and prevent migration.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
include/linux/idr.h | 4 ++++
|
||||
lib/idr.c | 37 ++++++++++++++++++++++++++++++++++---
|
||||
2 files changed, 38 insertions(+), 3 deletions(-)
|
||||
|
||||
--- a/include/linux/idr.h
|
||||
+++ b/include/linux/idr.h
|
||||
@@ -92,10 +92,14 @@ void idr_init(struct idr *idp);
|
||||
* Each idr_preload() should be matched with an invocation of this
|
||||
* function. See idr_preload() for details.
|
||||
*/
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+void idr_preload_end(void);
|
||||
+#else
|
||||
static inline void idr_preload_end(void)
|
||||
{
|
||||
preempt_enable();
|
||||
}
|
||||
+#endif
|
||||
|
||||
/**
|
||||
* idr_find - return pointer for given id
|
||||
--- a/lib/idr.c
|
||||
+++ b/lib/idr.c
|
||||
@@ -37,6 +37,7 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/hardirq.h>
|
||||
+#include <linux/locallock.h>
|
||||
|
||||
#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
|
||||
#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
|
||||
@@ -389,6 +390,36 @@ int __idr_get_new_above(struct idr *idp,
|
||||
}
|
||||
EXPORT_SYMBOL(__idr_get_new_above);
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
+static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
|
||||
+
|
||||
+static inline void idr_preload_lock(void)
|
||||
+{
|
||||
+ local_lock(idr_lock);
|
||||
+}
|
||||
+
|
||||
+static inline void idr_preload_unlock(void)
|
||||
+{
|
||||
+ local_unlock(idr_lock);
|
||||
+}
|
||||
+
|
||||
+void idr_preload_end(void)
|
||||
+{
|
||||
+ idr_preload_unlock();
|
||||
+}
|
||||
+EXPORT_SYMBOL(idr_preload_end);
|
||||
+#else
|
||||
+static inline void idr_preload_lock(void)
|
||||
+{
|
||||
+ preempt_disable();
|
||||
+}
|
||||
+
|
||||
+static inline void idr_preload_unlock(void)
|
||||
+{
|
||||
+ preempt_enable();
|
||||
+}
|
||||
+#endif
|
||||
+
|
||||
/**
|
||||
* idr_preload - preload for idr_alloc()
|
||||
* @gfp_mask: allocation mask to use for preloading
|
||||
@@ -423,7 +454,7 @@ void idr_preload(gfp_t gfp_mask)
|
||||
WARN_ON_ONCE(in_interrupt());
|
||||
might_sleep_if(gfp_mask & __GFP_WAIT);
|
||||
|
||||
- preempt_disable();
|
||||
+ idr_preload_lock();
|
||||
|
||||
/*
|
||||
* idr_alloc() is likely to succeed w/o full idr_layer buffer and
|
||||
@@ -435,9 +466,9 @@ void idr_preload(gfp_t gfp_mask)
|
||||
while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
|
||||
struct idr_layer *new;
|
||||
|
||||
- preempt_enable();
|
||||
+ idr_preload_unlock();
|
||||
new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
|
||||
- preempt_disable();
|
||||
+ idr_preload_lock();
|
||||
if (!new)
|
||||
break;
|
||||
|
|
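The calling convention being preserved is the usual preload pattern; a typical caller (hypothetical my_idr, my_lock and ptr) does:

	int id;

	idr_preload(GFP_KERNEL);		/* may sleep and preallocate layers */
	spin_lock(&my_lock);
	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();			/* local_unlock() on RT, preempt_enable() otherwise */
	if (id < 0)
		return id;
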
@@ -0,0 +1,41 @@
|
|||
From: Sven-Thorsten Dietrich <sdietrich@novell.com>
|
||||
Date: Fri, 3 Jul 2009 08:30:35 -0500
|
||||
Subject: infiniband: Mellanox IB driver patch use _nort() primitives
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Fixes an in_atomic stack-dump when the Mellanox module is loaded into the RT
|
||||
Kernel.
|
||||
|
||||
Michael S. Tsirkin <mst@dev.mellanox.co.il> sayeth:
|
||||
"Basically, if you just make spin_lock_irqsave (and spin_lock_irq) not disable
|
||||
interrupts for non-raw spinlocks, I think all of infiniband will be fine without
|
||||
changes."
|
||||
|
||||
Signed-off-by: Sven-Thorsten Dietrich <sven@thebigcorporation.com>
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
|
||||
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
|
||||
@@ -783,7 +783,7 @@ void ipoib_mcast_restart_task(struct wor
|
||||
|
||||
ipoib_mcast_stop_thread(dev, 0);
|
||||
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
netif_addr_lock(dev);
|
||||
spin_lock(&priv->lock);
|
||||
|
||||
@@ -865,7 +865,7 @@ void ipoib_mcast_restart_task(struct wor
|
||||
|
||||
spin_unlock(&priv->lock);
|
||||
netif_addr_unlock(dev);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
|
||||
/* We have to cancel outside of the spinlock */
|
||||
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
|
|
@@ -0,0 +1,45 @@
|
|||
From: Ingo Molnar <mingo@elte.hu>
|
||||
Date: Fri, 3 Jul 2009 08:30:16 -0500
|
||||
Subject: input: gameport: Do not disable interrupts on PREEMPT_RT
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Use the _nort() primitives.
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
drivers/input/gameport/gameport.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
--- a/drivers/input/gameport/gameport.c
|
||||
+++ b/drivers/input/gameport/gameport.c
|
||||
@@ -87,12 +87,12 @@ static int gameport_measure_speed(struct
|
||||
tx = 1 << 30;
|
||||
|
||||
for(i = 0; i < 50; i++) {
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
GET_TIME(t1);
|
||||
for (t = 0; t < 50; t++) gameport_read(gameport);
|
||||
GET_TIME(t2);
|
||||
GET_TIME(t3);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
udelay(i * 10);
|
||||
if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
|
||||
}
|
||||
@@ -111,11 +111,11 @@ static int gameport_measure_speed(struct
|
||||
tx = 1 << 30;
|
||||
|
||||
for(i = 0; i < 50; i++) {
|
||||
- local_irq_save(flags);
|
||||
+ local_irq_save_nort(flags);
|
||||
rdtscl(t1);
|
||||
for (t = 0; t < 50; t++) gameport_read(gameport);
|
||||
rdtscl(t2);
|
||||
- local_irq_restore(flags);
|
||||
+ local_irq_restore_nort(flags);
|
||||
udelay(i * 10);
|
||||
if (t2 - t1 < tx) tx = t2 - t1;
|
||||
}
|
|
@@ -0,0 +1,84 @@
|
|||
From: Ingo Molnar <mingo@elte.hu>
|
||||
Date: Fri, 3 Jul 2009 08:30:12 -0500
|
||||
Subject: ipc: Make the ipc code -rt aware
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
RT serializes the code with the (rt)spinlock but keeps preemption
|
||||
enabled. Some parts of the code need to be atomic nevertheless.
|
||||
|
||||
Protect it with preempt_disable/enable_rt pairs.
|
||||
|
||||
Signed-off-by: Ingo Molnar <mingo@elte.hu>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
||||
---
|
||||
ipc/mqueue.c | 5 +++++
|
||||
ipc/msg.c | 16 ++++++++++++++++
|
||||
2 files changed, 21 insertions(+)
|
||||
|
||||
--- a/ipc/mqueue.c
|
||||
+++ b/ipc/mqueue.c
|
||||
@@ -923,12 +923,17 @@ static inline void pipelined_send(struct
|
||||
struct msg_msg *message,
|
||||
struct ext_wait_queue *receiver)
|
||||
{
|
||||
+ /*
|
||||
+ * Keep them in one critical section for PREEMPT_RT:
|
||||
+ */
|
||||
+ preempt_disable_rt();
|
||||
receiver->msg = message;
|
||||
list_del(&receiver->list);
|
||||
receiver->state = STATE_PENDING;
|
||||
wake_up_process(receiver->task);
|
||||
smp_wmb();
|
||||
receiver->state = STATE_READY;
|
||||
+ preempt_enable_rt();
|
||||
}
|
||||
|
||||
/* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
|
||||
--- a/ipc/msg.c
|
||||
+++ b/ipc/msg.c
|
||||
@@ -253,10 +253,18 @@ static void expunge_all(struct msg_queue
|
||||
struct msg_receiver *msr, *t;
|
||||
|
||||
list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
|
||||
+ /*
|
||||
+ * Make sure that the wakeup doesnt preempt
|
||||
+ * this CPU prematurely. (on PREEMPT_RT)
|
||||
+ */
|
||||
+ preempt_disable_rt();
|
||||
+
|
||||
msr->r_msg = NULL;
|
||||
wake_up_process(msr->r_tsk);
|
||||
smp_mb();
|
||||
msr->r_msg = ERR_PTR(res);
|
||||
+
|
||||
+ preempt_enable_rt();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -636,6 +644,12 @@ static inline int pipelined_send(struct
|
||||
!security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
|
||||
msr->r_msgtype, msr->r_mode)) {
|
||||
|
||||
+ /*
|
||||
+ * Make sure that the wakeup doesnt preempt
|
||||
+ * this CPU prematurely. (on PREEMPT_RT)
|
||||
+ */
|
||||
+ preempt_disable_rt();
|
||||
+
|
||||
list_del(&msr->r_list);
|
||||
if (msr->r_maxsize < msg->m_ts) {
|
||||
msr->r_msg = NULL;
|
||||
@@ -649,9 +663,11 @@ static inline int pipelined_send(struct
|
||||
wake_up_process(msr->r_tsk);
|
||||
smp_mb();
|
||||
msr->r_msg = msg;
|
||||
+ preempt_enable_rt();
|
||||
|
||||
return 1;
|
||||
}
|
||||
+ preempt_enable_rt();
|
||||
}
|
||||
}
|
||||
return 0;
|
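The preempt_disable_rt()/preempt_enable_rt() pair is defined elsewhere in the RT series; its effect is approximately (paraphrased):

#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_disable_rt()		preempt_disable()
# define preempt_enable_rt()		preempt_enable()
#else
# define preempt_disable_rt()		do { } while (0)
# define preempt_enable_rt()		do { } while (0)
#endif
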
65
debian/patches/features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch
vendored
Normal file
|
@@ -0,0 +1,65 @@
|
|||
Subject: ipc/mqueue: Add a critical section to avoid a deadlock
|
||||
From: KOBAYASHI Yoshitake <yoshitake.kobayashi@toshiba.co.jp>
|
||||
Date: Sat, 23 Jul 2011 11:57:36 +0900
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
(Repost for v3.0-rt1 and changed the destination addresses)
|
||||
I have tested the following patch on v3.0-rt1 with PREEMPT_RT_FULL.
|
||||
In POSIX message queue, if a sender process uses SCHED_FIFO and
|
||||
has a higher priority than a receiver process, the sender will
|
||||
be stuck at ipc/mqueue.c:452
|
||||
|
||||
452 while (ewp->state == STATE_PENDING)
|
||||
453 cpu_relax();
|
||||
|
||||
Description of the problem
|
||||
(receiver process)
|
||||
1. receiver changes sender's state to STATE_PENDING (mqueue.c:846)
|
||||
2. wake up sender process and "switch to sender" (mqueue.c:847)
|
||||
Note: This context switch only happens in PREEMPT_RT_FULL kernel.
|
||||
(sender process)
|
||||
3. sender check the own state in above loop (mqueue.c:452-453)
|
||||
*. receiver will never wake up and cannot change sender's state to
|
||||
STATE_READY because sender has higher priority
|
||||
|
||||
|
||||
Signed-off-by: Yoshitake Kobayashi <yoshitake.kobayashi@toshiba.co.jp>
|
||||
Cc: viro@zeniv.linux.org.uk
|
||||
Cc: dchinner@redhat.com
|
||||
Cc: npiggin@kernel.dk
|
||||
Cc: hch@lst.de
|
||||
Cc: arnd@arndb.de
|
||||
Link: http://lkml.kernel.org/r/4E2A38A0.1090601@toshiba.co.jp
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
ipc/mqueue.c | 19 ++++++++++++-------
|
||||
1 file changed, 12 insertions(+), 7 deletions(-)
|
||||
|
||||
--- a/ipc/mqueue.c
|
||||
+++ b/ipc/mqueue.c
|
||||
@@ -947,13 +947,18 @@ static inline void pipelined_receive(str
|
||||
wake_up_interruptible(&info->wait_q);
|
||||
return;
|
||||
}
|
||||
- if (msg_insert(sender->msg, info))
|
||||
- return;
|
||||
- list_del(&sender->list);
|
||||
- sender->state = STATE_PENDING;
|
||||
- wake_up_process(sender->task);
|
||||
- smp_wmb();
|
||||
- sender->state = STATE_READY;
|
||||
+ /*
|
||||
+ * Keep them in one critical section for PREEMPT_RT:
|
||||
+ */
|
||||
+ preempt_disable_rt();
|
||||
+ if (!msg_insert(sender->msg, info)) {
|
||||
+ list_del(&sender->list);
|
||||
+ sender->state = STATE_PENDING;
|
||||
+ wake_up_process(sender->task);
|
||||
+ smp_wmb();
|
||||
+ sender->state = STATE_READY;
|
||||
+ }
|
||||
+ preempt_enable_rt();
|
||||
}
|
||||
|
||||
SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
|
|
@@ -0,0 +1,70 @@
|
|||
Subject: ipc/sem: Rework semaphore wakeups
|
||||
From: Peter Zijlstra <peterz@infradead.org>
|
||||
Date: Wed, 14 Sep 2011 11:57:04 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Current sysv sems have a weird ass wakeup scheme that involves keeping
|
||||
preemption disabled over a potential O(n^2) loop and busy waiting on
|
||||
that on other CPUs.
|
||||
|
||||
Kill this and simply wake the task directly from under the sem_lock.
|
||||
|
||||
This was discovered by a migrate_disable() debug feature that
|
||||
disallows:
|
||||
|
||||
spin_lock();
|
||||
preempt_disable();
|
||||
spin_unlock()
|
||||
preempt_enable();
|
||||
|
||||
Cc: Manfred Spraul <manfred@colorfullife.com>
|
||||
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Reported-by: Mike Galbraith <efault@gmx.de>
|
||||
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
|
||||
Cc: Manfred Spraul <manfred@colorfullife.com>
|
||||
Link: http://lkml.kernel.org/r/1315994224.5040.1.camel@twins
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
ipc/sem.c | 10 ++++++++++
|
||||
1 file changed, 10 insertions(+)
|
||||
|
||||
--- a/ipc/sem.c
|
||||
+++ b/ipc/sem.c
|
||||
@@ -666,6 +666,13 @@ static int perform_atomic_semop(struct s
|
||||
static void wake_up_sem_queue_prepare(struct list_head *pt,
|
||||
struct sem_queue *q, int error)
|
||||
{
|
||||
+#ifdef CONFIG_PREEMPT_RT_BASE
|
||||
+ struct task_struct *p = q->sleeper;
|
||||
+ get_task_struct(p);
|
||||
+ q->status = error;
|
||||
+ wake_up_process(p);
|
||||
+ put_task_struct(p);
|
||||
+#else
|
||||
if (list_empty(pt)) {
|
||||
/*
|
||||
* Hold preempt off so that we don't get preempted and have the
|
||||
@@ -677,6 +684,7 @@ static void wake_up_sem_queue_prepare(st
|
||||
q->pid = error;
|
||||
|
||||
list_add_tail(&q->list, pt);
|
||||
+#endif
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -690,6 +698,7 @@ static void wake_up_sem_queue_prepare(st
|
||||
*/
|
||||
static void wake_up_sem_queue_do(struct list_head *pt)
|
||||
{
|
||||
+#ifndef CONFIG_PREEMPT_RT_BASE
|
||||
struct sem_queue *q, *t;
|
||||
int did_something;
|
||||
|
||||
@@ -702,6 +711,7 @@ static void wake_up_sem_queue_do(struct
|
||||
}
|
||||
if (did_something)
|
||||
preempt_enable();
|
||||
+#endif
|
||||
}
|
||||
|
||||
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
|
145
debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
vendored
Normal file
|
@@ -0,0 +1,145 @@
|
|||
Subject: genirq: Allow disabling of softirq processing in irq thread context
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Tue, 31 Jan 2012 13:01:27 +0100
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
The processing of softirqs in irq thread context is a performance gain
|
||||
for the non-rt workloads of a system, but it's counterproductive for
|
||||
interrupts which are explicitly related to the realtime
|
||||
workload. Allow such interrupts to prevent softirq processing in their
|
||||
thread context.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Cc: stable-rt@vger.kernel.org
|
||||
---
|
||||
include/linux/interrupt.h | 2 ++
|
||||
include/linux/irq.h | 5 ++++-
|
||||
kernel/irq/manage.c | 13 ++++++++++++-
|
||||
kernel/irq/settings.h | 12 ++++++++++++
|
||||
kernel/softirq.c | 7 +++++++
|
||||
5 files changed, 37 insertions(+), 2 deletions(-)
|
||||
|
||||
--- a/include/linux/interrupt.h
|
||||
+++ b/include/linux/interrupt.h
|
||||
@@ -58,6 +58,7 @@
|
||||
* IRQF_NO_THREAD - Interrupt cannot be threaded
|
||||
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
|
||||
* resume time.
|
||||
+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
|
||||
*/
|
||||
#define IRQF_DISABLED 0x00000020
|
||||
#define IRQF_SHARED 0x00000080
|
||||
@@ -71,6 +72,7 @@
|
||||
#define IRQF_FORCE_RESUME 0x00008000
|
||||
#define IRQF_NO_THREAD 0x00010000
|
||||
#define IRQF_EARLY_RESUME 0x00020000
|
||||
+#define IRQF_NO_SOFTIRQ_CALL 0x00040000
|
||||
|
||||
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
|
||||
|
||||
--- a/include/linux/irq.h
|
||||
+++ b/include/linux/irq.h
|
||||
@@ -70,6 +70,7 @@ typedef void (*irq_preflow_handler_t)(st
|
||||
* IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
|
||||
* IRQ_NESTED_TRHEAD - Interrupt nests into another thread
|
||||
* IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
|
||||
+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
|
||||
*/
|
||||
enum {
|
||||
IRQ_TYPE_NONE = 0x00000000,
|
||||
@@ -94,12 +95,14 @@ enum {
|
||||
IRQ_NESTED_THREAD = (1 << 15),
|
||||
IRQ_NOTHREAD = (1 << 16),
|
||||
IRQ_PER_CPU_DEVID = (1 << 17),
|
||||
+ IRQ_NO_SOFTIRQ_CALL = (1 << 18),
|
||||
};
|
||||
|
||||
#define IRQF_MODIFY_MASK \
|
||||
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
|
||||
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
|
||||
- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
|
||||
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
|
||||
+ IRQ_NO_SOFTIRQ_CALL)
|
||||
|
||||
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
|
||||
|
||||
--- a/kernel/irq/manage.c
|
||||
+++ b/kernel/irq/manage.c
|
||||
@@ -856,7 +856,15 @@ irq_forced_thread_fn(struct irq_desc *de
|
||||
local_bh_disable();
|
||||
ret = action->thread_fn(action->irq, action->dev_id);
|
||||
irq_finalize_oneshot(desc, action);
|
||||
- local_bh_enable();
|
||||
+ /*
|
||||
+ * Interrupts which have real time requirements can be set up
|
||||
+ * to avoid softirq processing in the thread handler. This is
|
||||
+ * safe as these interrupts do not raise soft interrupts.
|
||||
+ */
|
||||
+ if (irq_settings_no_softirq_call(desc))
|
||||
+ _local_bh_enable();
|
||||
+ else
|
||||
+ local_bh_enable();
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1202,6 +1210,9 @@ static int
|
||||
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
|
||||
}
|
||||
|
||||
+ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
|
||||
+ irq_settings_set_no_softirq_call(desc);
|
||||
+
|
||||
/* Set default affinity mask once everything is setup */
|
||||
setup_affinity(irq, desc, mask);
|
||||
|
||||
--- a/kernel/irq/settings.h
|
||||
+++ b/kernel/irq/settings.h
|
||||
@@ -14,6 +14,7 @@ enum {
|
||||
_IRQ_NO_BALANCING = IRQ_NO_BALANCING,
|
||||
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
|
||||
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
|
||||
+ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
|
||||
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
|
||||
};
|
||||
|
||||
@@ -26,6 +27,7 @@ enum {
|
||||
#define IRQ_NOAUTOEN GOT_YOU_MORON
|
||||
#define IRQ_NESTED_THREAD GOT_YOU_MORON
|
||||
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
|
||||
+#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
|
||||
#undef IRQF_MODIFY_MASK
|
||||
#define IRQF_MODIFY_MASK GOT_YOU_MORON
|
||||
|
||||
@@ -36,6 +38,16 @@ irq_settings_clr_and_set(struct irq_desc
|
||||
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
|
||||
}
|
||||
|
||||
+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
|
||||
+{
|
||||
+ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
|
||||
+}
|
||||
+
|
||||
+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
|
||||
+{
|
||||
+ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
|
||||
+}
|
||||
+
|
||||
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
|
||||
{
|
||||
return desc->status_use_accessors & _IRQ_PER_CPU;
|
||||
--- a/kernel/softirq.c
|
||||
+++ b/kernel/softirq.c
|
||||
@@ -423,6 +423,13 @@ void local_bh_enable_ip(unsigned long ip
|
||||
}
|
||||
EXPORT_SYMBOL(local_bh_enable_ip);
|
||||
|
||||
+void _local_bh_enable(void)
|
||||
+{
|
||||
+ current->softirq_nestcnt--;
|
||||
+ migrate_enable();
|
||||
+}
|
||||
+EXPORT_SYMBOL(_local_bh_enable);
|
||||
+
|
||||
/* For tracing */
|
||||
int notrace __in_softirq(void)
|
||||
{
|
|
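With the flag in place, a latency-critical driver opts out of softirq processing in its handler thread like this (hypothetical device; IRQF_NO_SOFTIRQ_CALL exists only with this patch applied):

	err = request_threaded_irq(irq, NULL, my_rt_thread_fn,
				   IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
				   "my-rt-dev", dev);
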
@@ -0,0 +1,22 @@
|
|||
Subject: jump-label-rt.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Wed, 13 Jul 2011 11:03:16 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
include/linux/jump_label.h | 3 ++-
|
||||
1 file changed, 2 insertions(+), 1 deletion(-)
|
||||
|
||||
--- a/include/linux/jump_label.h
|
||||
+++ b/include/linux/jump_label.h
|
||||
@@ -49,7 +49,8 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
|
||||
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \
|
||||
+ !defined(CONFIG_PREEMPT_BASE)
|
||||
|
||||
struct static_key {
|
||||
atomic_t enabled;
|
|
@@ -0,0 +1,45 @@
|
|||
Subject: kconfig-disable-a-few-options-rt.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Sun, 24 Jul 2011 12:11:43 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Disable stuff which is known to have issues on RT
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
arch/Kconfig | 1 +
|
||||
drivers/net/Kconfig | 1 +
|
||||
mm/Kconfig | 2 +-
|
||||
3 files changed, 3 insertions(+), 1 deletion(-)
|
||||
|
||||
--- a/arch/Kconfig
|
||||
+++ b/arch/Kconfig
|
||||
@@ -6,6 +6,7 @@ config OPROFILE
|
||||
tristate "OProfile system profiling"
|
||||
depends on PROFILING
|
||||
depends on HAVE_OPROFILE
|
||||
+ depends on !PREEMPT_RT_FULL
|
||||
select RING_BUFFER
|
||||
select RING_BUFFER_ALLOW_SWAP
|
||||
help
|
||||
--- a/drivers/net/Kconfig
|
||||
+++ b/drivers/net/Kconfig
|
||||
@@ -160,6 +160,7 @@ config VXLAN
|
||||
|
||||
config NETCONSOLE
|
||||
tristate "Network console logging support"
|
||||
+ depends on !PREEMPT_RT_FULL
|
||||
---help---
|
||||
If you want to log kernel messages over the network, enable this.
|
||||
See <file:Documentation/networking/netconsole.txt> for details.
|
||||
--- a/mm/Kconfig
|
||||
+++ b/mm/Kconfig
|
||||
@@ -384,7 +384,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
|
||||
|
||||
config TRANSPARENT_HUGEPAGE
|
||||
bool "Transparent Hugepage Support"
|
||||
- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
|
||||
+ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
|
||||
select COMPACTION
|
||||
help
|
||||
Transparent Hugepages allows the kernel to use huge pages and
|
|
@@ -0,0 +1,57 @@
|
|||
Subject: kconfig-preempt-rt-full.patch
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Wed, 29 Jun 2011 14:58:57 +0200
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
init/Makefile | 2 +-
|
||||
kernel/Kconfig.preempt | 8 ++++++++
|
||||
scripts/mkcompile_h | 4 +++-
|
||||
3 files changed, 12 insertions(+), 2 deletions(-)
|
||||
|
||||
--- a/init/Makefile
|
||||
+++ b/init/Makefile
|
||||
@@ -33,4 +33,4 @@ mounts-$(CONFIG_BLK_DEV_MD) += do_mounts
|
||||
include/generated/compile.h: FORCE
|
||||
@$($(quiet)chk_compile.h)
|
||||
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
|
||||
- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
|
||||
+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
|
||||
--- a/kernel/Kconfig.preempt
|
||||
+++ b/kernel/Kconfig.preempt
|
||||
@@ -67,6 +67,14 @@ config PREEMPT_RTB
|
||||
enables changes which are preliminary for the full preemptible
|
||||
RT kernel.
|
||||
|
||||
+config PREEMPT_RT_FULL
|
||||
+ bool "Fully Preemptible Kernel (RT)"
|
||||
+ depends on IRQ_FORCED_THREADING
|
||||
+ select PREEMPT_RT_BASE
|
||||
+ select PREEMPT_RCU
|
||||
+ help
|
||||
+ All and everything
|
||||
+
|
||||
endchoice
|
||||
|
||||
config PREEMPT_COUNT
|
||||
--- a/scripts/mkcompile_h
|
||||
+++ b/scripts/mkcompile_h
|
||||
@@ -4,7 +4,8 @@ TARGET=$1
|
||||
ARCH=$2
|
||||
SMP=$3
|
||||
PREEMPT=$4
|
||||
-CC=$5
|
||||
+RT=$5
|
||||
+CC=$6
|
||||
|
||||
vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
|
||||
|
||||
@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
|
||||
CONFIG_FLAGS=""
|
||||
if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
|
||||
if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
|
||||
+if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
|
||||
UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
|
||||
|
||||
# Truncate to maximum length
|
89
debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
vendored
Normal file
|
@@ -0,0 +1,89 @@
|
|||
From 24136a819693ae36039d6b4286bf1f775e062bcc Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Fri, 7 Jun 2013 22:37:06 +0200
|
||||
Subject: [PATCH] kernel/cpu: fix cpu down problem if kthread's cpu is
|
||||
going down
|
||||
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz
|
||||
|
||||
If kthread is pinned to CPUx and CPUx is going down then we get into
|
||||
trouble:
|
||||
- first the unplug thread is created
|
||||
- it will set itself to hp->unplug. As a result, every task that is
|
||||
going to take a lock has to leave the CPU.
|
||||
- the CPU_DOWN_PREPARE notifiers are started. The worker thread will
|
||||
start a new process for the "high priority worker".
|
||||
Now kthread would like to take a lock but since it can't leave the CPU
|
||||
it will never complete its task.
|
||||
|
||||
We could fire the unplug thread after the notifier but then the cpu is
|
||||
no longer marked "online" and the unplug thread will run on CPU0 which
|
||||
was fixed before :)
|
||||
|
||||
So instead the unplug thread is started and kept waiting until the
|
||||
notfier complete their work.
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
kernel/cpu.c | 16 ++++++++++++++--
|
||||
1 file changed, 14 insertions(+), 2 deletions(-)
|
||||
|
||||
--- a/kernel/cpu.c
|
||||
+++ b/kernel/cpu.c
|
||||
@@ -83,6 +83,7 @@ struct hotplug_pcp {
|
||||
int refcount;
|
||||
int grab_lock;
|
||||
struct completion synced;
|
||||
+ struct completion unplug_wait;
|
||||
#ifdef CONFIG_PREEMPT_RT_FULL
|
||||
spinlock_t lock;
|
||||
#else
|
||||
@@ -178,6 +179,7 @@ static int sync_unplug_thread(void *data
|
||||
{
|
||||
struct hotplug_pcp *hp = data;
|
||||
|
||||
+ wait_for_completion(&hp->unplug_wait);
|
||||
preempt_disable();
|
||||
hp->unplug = current;
|
||||
wait_for_pinned_cpus(hp);
|
||||
@@ -243,6 +245,14 @@ static void __cpu_unplug_sync(struct hot
|
||||
wait_for_completion(&hp->synced);
|
||||
}
|
||||
|
||||
+static void __cpu_unplug_wait(unsigned int cpu)
|
||||
+{
|
||||
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
|
||||
+
|
||||
+ complete(&hp->unplug_wait);
|
||||
+ wait_for_completion(&hp->synced);
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Start the sync_unplug_thread on the target cpu and wait for it to
|
||||
* complete.
|
||||
@@ -266,6 +276,7 @@ static int cpu_unplug_begin(unsigned int
|
||||
tell_sched_cpu_down_begin(cpu);
|
||||
|
||||
init_completion(&hp->synced);
|
||||
+ init_completion(&hp->unplug_wait);
|
||||
|
||||
hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
|
||||
if (IS_ERR(hp->sync_tsk)) {
|
||||
@@ -281,8 +292,7 @@ static int cpu_unplug_begin(unsigned int
|
||||
* wait for tasks that are going to enter these sections and
|
||||
* we must not have them block.
|
||||
*/
|
||||
- __cpu_unplug_sync(hp);
|
||||
-
|
||||
+ wake_up_process(hp->sync_tsk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -591,6 +601,8 @@ static int __ref _cpu_down(unsigned int
|
||||
__func__, cpu);
|
||||
goto out_release;
|
||||
}
|
||||
+
|
||||
+ __cpu_unplug_wait(cpu);
|
||||
smpboot_park_threads(cpu);
|
||||
|
||||
/* Notifiers are done. Don't let any more tasks pin this CPU. */
|
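
The fix above is, at its core, a two-completion handshake: the helper kthread is created early but parks on one completion until the notifiers are done, and the caller later releases it and waits on a second completion for it to finish. A minimal sketch of that pattern in isolation follows; struct helper, helper_thread() and the other names are invented for illustration and are not part of the patch or of kernel/cpu.c.

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>

struct helper {
	struct completion go;	/* caller -> thread: notifiers are done, start */
	struct completion done;	/* thread -> caller: work finished */
	struct task_struct *tsk;
};

static int helper_thread(void *data)
{
	struct helper *h = data;

	wait_for_completion(&h->go);	/* parked until the caller releases us */
	/* ... work that must only run after the notifiers have completed ... */
	complete(&h->done);
	return 0;
}

static int helper_start(struct helper *h, unsigned int cpu)
{
	init_completion(&h->go);
	init_completion(&h->done);

	h->tsk = kthread_create(helper_thread, h, "helper/%u", cpu);
	if (IS_ERR(h->tsk))
		return PTR_ERR(h->tsk);
	wake_up_process(h->tsk);	/* starts running, then blocks on ->go */
	return 0;
}

static void helper_release_and_wait(struct helper *h)
{
	complete(&h->go);		/* plays the role of __cpu_unplug_wait() */
	wait_for_completion(&h->done);
}
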
debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch (new file, 60 lines, vendored)
@@ -0,0 +1,60 @@
From 4c6df3d78817c20a147c0291f6600d002c0910d3 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 14 Jun 2013 17:16:35 +0200
Subject: [PATCH] kernel/hotplug: restore original cpu mask on cpu/down
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

If a task which is allowed to run only on CPU X puts CPU Y down then it
will be allowed to run on all CPUs except CPU Y after it comes back from
the kernel. This patch ensures that we don't lose the initial setting
unless the CPU the task is running on is going down.

Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/cpu.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -565,6 +565,7 @@ static int __ref _cpu_down(unsigned int
 		.hcpu = hcpu,
 	};
 	cpumask_var_t cpumask;
+	cpumask_var_t cpumask_org;
 
 	if (num_online_cpus() == 1)
 		return -EBUSY;
@@ -575,6 +576,12 @@ static int __ref _cpu_down(unsigned int
 	/* Move the downtaker off the unplug cpu */
 	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
 		return -ENOMEM;
+	if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
+		free_cpumask_var(cpumask);
+		return -ENOMEM;
+	}
+
+	cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
 	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
 	set_cpus_allowed_ptr(current, cpumask);
 	free_cpumask_var(cpumask);
@@ -583,7 +590,8 @@ static int __ref _cpu_down(unsigned int
 	if (mycpu == cpu) {
 		printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
 		migrate_enable();
-		return -EBUSY;
+		err = -EBUSY;
+		goto restore_cpus;
 	}
 
 	cpu_hotplug_begin();
@@ -642,6 +650,9 @@ static int __ref _cpu_down(unsigned int
 	cpu_hotplug_done();
 	if (!err)
 		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
+restore_cpus:
+	set_cpus_allowed_ptr(current, cpumask_org);
+	free_cpumask_var(cpumask_org);
 	return err;
 }
 
@@ -0,0 +1,103 @@
From: Jason Wessel <jason.wessel@windriver.com>
Date: Thu, 28 Jul 2011 12:42:23 -0500
Subject: kgdb/serial: Short term workaround
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

On 07/27/2011 04:37 PM, Thomas Gleixner wrote:
>  - KGDB (not yet disabled) is reportedly unusable on -rt right now due
>    to missing hacks in the console locking which I dropped on purpose.
>

To work around this in the short term you can use this patch, in
addition to the clocksource watchdog patch that Thomas brewed up.

Comments are welcome of course.  Ultimately the right solution is to
change separation between the console and the HW to have a polled mode
+ work queue so as not to introduce any kind of latency.

Thanks,
Jason.

---
 drivers/tty/serial/8250/8250_core.c | 3 ++-
 include/linux/kdb.h                 | 3 ++-
 kernel/debug/kdb/kdb_io.c           | 6 ++----
 3 files changed, 6 insertions(+), 6 deletions(-)

--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -38,6 +38,7 @@
 #include <linux/nmi.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/kdb.h>
 #ifdef CONFIG_SPARC
 #include <linux/sunserialcore.h>
 #endif
@@ -2869,7 +2870,7 @@ serial8250_console_write(struct console
 
 	touch_nmi_watchdog();
 
-	if (port->sysrq || oops_in_progress)
+	if (port->sysrq || oops_in_progress || in_kdb_printk())
 		locked = spin_trylock_irqsave(&port->lock, flags);
 	else
 		spin_lock_irqsave(&port->lock, flags);
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -115,7 +115,7 @@ extern int kdb_trap_printk;
 extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
 extern __printf(1, 2) int kdb_printf(const char *, ...);
 typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
-
+#define in_kdb_printk() (kdb_trap_printk)
 extern void kdb_init(int level);
 
 /* Access to kdb specific polling devices */
@@ -150,6 +150,7 @@ extern int kdb_register_repeat(char *, k
 extern int kdb_unregister(char *);
 #else /* ! CONFIG_KGDB_KDB */
 static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
+#define in_kdb_printk() (0)
 static inline void kdb_init(int level) {}
 static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
 			       char *help, short minlen) { return 0; }
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -554,7 +554,6 @@ int vkdb_printf(const char *fmt, va_list
 	int linecount;
 	int colcount;
 	int logging, saved_loglevel = 0;
-	int saved_trap_printk;
 	int got_printf_lock = 0;
 	int retlen = 0;
 	int fnd, len;
@@ -565,8 +564,6 @@ int vkdb_printf(const char *fmt, va_list
 	unsigned long uninitialized_var(flags);
 
 	preempt_disable();
-	saved_trap_printk = kdb_trap_printk;
-	kdb_trap_printk = 0;
 
 	/* Serialize kdb_printf if multiple cpus try to write at once.
 	 * But if any cpu goes recursive in kdb, just print the output,
@@ -833,7 +830,6 @@ int vkdb_printf(const char *fmt, va_list
 	} else {
 		__release(kdb_printf_lock);
 	}
-	kdb_trap_printk = saved_trap_printk;
 	preempt_enable();
 	return retlen;
 }
@@ -843,9 +839,11 @@ int kdb_printf(const char *fmt, ...)
 	va_list ap;
 	int r;
 
+	kdb_trap_printk++;
 	va_start(ap, fmt);
 	r = vkdb_printf(fmt, ap);
 	va_end(ap);
+	kdb_trap_printk--;
 
 	return r;
 }
(File diff suppressed because it is too large.)
@@ -0,0 +1,174 @@
Subject: lglocks-rt.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 15 Jun 2011 11:02:21 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/lglock.h | 19 +++++++++++++++--
 kernel/lglock.c        | 54 ++++++++++++++++++++++++++++++++-----------------
 2 files changed, 53 insertions(+), 20 deletions(-)

--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -42,22 +42,37 @@
 #endif
 
 struct lglock {
+#ifndef CONFIG_PREEMPT_RT_FULL
 	arch_spinlock_t __percpu *lock;
+#else
+	struct rt_mutex __percpu *lock;
+#endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lock_class_key lock_key;
 	struct lockdep_map lock_dep_map;
 #endif
 };
 
-#define DEFINE_LGLOCK(name) \
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define DEFINE_LGLOCK(name) \
 	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
 	= __ARCH_SPIN_LOCK_UNLOCKED; \
 	struct lglock name = { .lock = &name ## _lock }
 
-#define DEFINE_STATIC_LGLOCK(name) \
+# define DEFINE_STATIC_LGLOCK(name) \
 	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
 	= __ARCH_SPIN_LOCK_UNLOCKED; \
 	static struct lglock name = { .lock = &name ## _lock }
+#else
+
+# define DEFINE_LGLOCK(name) \
+	static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \
+	struct lglock name = { .lock = &name ## _lock }
+
+# define DEFINE_STATIC_LGLOCK(name) \
+	static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \
+	static struct lglock name = { .lock = &name ## _lock }
+#endif
 
 void lg_lock_init(struct lglock *lg, char *name);
 void lg_local_lock(struct lglock *lg);
--- a/kernel/lglock.c
+++ b/kernel/lglock.c
@@ -4,6 +4,15 @@
 #include <linux/cpu.h>
 #include <linux/string.h>
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define lg_lock_ptr		arch_spinlock_t
+# define lg_do_lock(l)		arch_spin_lock(l)
+# define lg_do_unlock(l)	arch_spin_unlock(l)
+#else
+# define lg_lock_ptr		struct rt_mutex
+# define lg_do_lock(l)		__rt_spin_lock(l)
+# define lg_do_unlock(l)	__rt_spin_unlock(l)
+#endif
 /*
  * Note there is no uninit, so lglocks cannot be defined in
  * modules (but it's fine to use them from there)
@@ -12,51 +21,60 @@
 
 void lg_lock_init(struct lglock *lg, char *name)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
+
+		rt_mutex_init(lock);
+	}
+#endif
 	LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
 }
 EXPORT_SYMBOL(lg_lock_init);
 
 void lg_local_lock(struct lglock *lg)
 {
-	arch_spinlock_t *lock;
+	lg_lock_ptr *lock;
 
-	preempt_disable();
+	migrate_disable();
 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
-	arch_spin_lock(lock);
+	lg_do_lock(lock);
 }
 EXPORT_SYMBOL(lg_local_lock);
 
 void lg_local_unlock(struct lglock *lg)
 {
-	arch_spinlock_t *lock;
+	lg_lock_ptr *lock;
 
 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
-	arch_spin_unlock(lock);
-	preempt_enable();
+	lg_do_unlock(lock);
+	migrate_enable();
 }
 EXPORT_SYMBOL(lg_local_unlock);
 
 void lg_local_lock_cpu(struct lglock *lg, int cpu)
 {
-	arch_spinlock_t *lock;
+	lg_lock_ptr *lock;
 
-	preempt_disable();
+	preempt_disable_nort();
 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
-	arch_spin_lock(lock);
+	lg_do_lock(lock);
 }
 EXPORT_SYMBOL(lg_local_lock_cpu);
 
 void lg_local_unlock_cpu(struct lglock *lg, int cpu)
 {
-	arch_spinlock_t *lock;
+	lg_lock_ptr *lock;
 
 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
-	arch_spin_unlock(lock);
-	preempt_enable();
+	lg_do_unlock(lock);
+	preempt_enable_nort();
 }
 EXPORT_SYMBOL(lg_local_unlock_cpu);
 
@@ -64,12 +82,12 @@ void lg_global_lock(struct lglock *lg)
 {
 	int i;
 
-	preempt_disable();
+	preempt_disable_nort();
 	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	for_each_possible_cpu(i) {
-		arch_spinlock_t *lock;
+		lg_lock_ptr *lock;
 		lock = per_cpu_ptr(lg->lock, i);
-		arch_spin_lock(lock);
+		lg_do_lock(lock);
 	}
 }
 EXPORT_SYMBOL(lg_global_lock);
@@ -80,10 +98,10 @@ void lg_global_unlock(struct lglock *lg)
 
 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	for_each_possible_cpu(i) {
-		arch_spinlock_t *lock;
+		lg_lock_ptr *lock;
 		lock = per_cpu_ptr(lg->lock, i);
-		arch_spin_unlock(lock);
+		lg_do_unlock(lock);
 	}
-	preempt_enable();
+	preempt_enable_nort();
 }
 EXPORT_SYMBOL(lg_global_unlock);
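
For context, lglocks expose a cheap per-CPU path (lg_local_lock/lg_local_unlock) and a rare, expensive global path that takes every CPU's lock (lg_global_lock/lg_global_unlock); the patch above only swaps the lock type underneath on PREEMPT_RT_FULL, the caller-visible API is unchanged. A hypothetical user might look like the sketch below; example_lock and the functions around it are invented for illustration.

#include <linux/lglock.h>

static DEFINE_LGLOCK(example_lock);

static void example_init(void)
{
	lg_lock_init(&example_lock, "example_lock");
}

static void example_fast_path(void)
{
	/* Per-CPU fast path: on RT this now takes a per-CPU rt_mutex under
	 * migrate_disable() instead of an arch spinlock under
	 * preempt_disable(), so the section may sleep.
	 */
	lg_local_lock(&example_lock);
	/* ... touch this CPU's share of the data ... */
	lg_local_unlock(&example_lock);
}

static void example_slow_path(void)
{
	/* Rare global path: takes every CPU's lock for a consistent view. */
	lg_global_lock(&example_lock);
	/* ... walk all per-CPU data ... */
	lg_global_unlock(&example_lock);
}
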
@@ -0,0 +1,30 @@
Subject: list-add-list-last-entry.patch
From: Peter Zijlstra <peterz@infradead.org>
Date: Tue, 21 Jun 2011 11:22:36 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/list.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -373,6 +373,17 @@ static inline void list_splice_tail_init
 	(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
 
 /**
+ * list_last_entry - get the last element from a list
+ * @ptr:	the list head to take the element from.
+ * @type:	the type of the struct this is embedded in.
+ * @member:	the name of the list_struct within the struct.
+ *
+ * Note, that list is expected to be not empty.
+ */
+#define list_last_entry(ptr, type, member) \
+	list_entry((ptr)->prev, type, member)
+
+/**
 * list_for_each - iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch (new file, 115 lines, vendored)
@@ -0,0 +1,115 @@
From: Paul Gortmaker <paul.gortmaker@windriver.com>
Date: Fri, 21 Jun 2013 15:07:25 -0400
Subject: [PATCH] list_bl.h: make list head locking RT safe
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

As per changes in include/linux/jbd_common.h for avoiding the
bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal
head lock rt safe") we do the same thing here.

We use the non atomic __set_bit and __clear_bit inside the scope of
the lock to preserve the ability of the existing LIST_DEBUG code to
use the zero'th bit in the sanity checks.

As a bit spinlock, we had no lockdep visibility into the usage
of the list head locking.  Now, if we were to implement it as a
standard non-raw spinlock, we would see:

BUG: sleeping function called from invalid context at kernel/rtmutex.c:658
in_atomic(): 1, irqs_disabled(): 0, pid: 122, name: udevd
5 locks held by udevd/122:
 #0:  (&sb->s_type->i_mutex_key#7/1){+.+.+.}, at: [<ffffffff811967e8>] lock_rename+0xe8/0xf0
 #1:  (rename_lock){+.+...}, at: [<ffffffff811a277c>] d_move+0x2c/0x60
 #2:  (&dentry->d_lock){+.+...}, at: [<ffffffff811a0763>] dentry_lock_for_move+0xf3/0x130
 #3:  (&dentry->d_lock/2){+.+...}, at: [<ffffffff811a0734>] dentry_lock_for_move+0xc4/0x130
 #4:  (&dentry->d_lock/3){+.+...}, at: [<ffffffff811a0747>] dentry_lock_for_move+0xd7/0x130
Pid: 122, comm: udevd Not tainted 3.4.47-rt62 #7
Call Trace:
 [<ffffffff810b9624>] __might_sleep+0x134/0x1f0
 [<ffffffff817a24d4>] rt_spin_lock+0x24/0x60
 [<ffffffff811a0c4c>] __d_shrink+0x5c/0xa0
 [<ffffffff811a1b2d>] __d_drop+0x1d/0x40
 [<ffffffff811a24be>] __d_move+0x8e/0x320
 [<ffffffff811a278e>] d_move+0x3e/0x60
 [<ffffffff81199598>] vfs_rename+0x198/0x4c0
 [<ffffffff8119b093>] sys_renameat+0x213/0x240
 [<ffffffff817a2de5>] ? _raw_spin_unlock+0x35/0x60
 [<ffffffff8107781c>] ? do_page_fault+0x1ec/0x4b0
 [<ffffffff817a32ca>] ? retint_swapgs+0xe/0x13
 [<ffffffff813eb0e6>] ? trace_hardirqs_on_thunk+0x3a/0x3f
 [<ffffffff8119b0db>] sys_rename+0x1b/0x20
 [<ffffffff817a3b96>] system_call_fastpath+0x1a/0x1f

Since we are only taking the lock during short lived list operations,
let's assume for now that it being raw won't be a significant latency
concern.

Cc: stable-rt@vger.kernel.org
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/list_bl.h | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -2,6 +2,7 @@
 #define _LINUX_LIST_BL_H
 
 #include <linux/list.h>
+#include <linux/spinlock.h>
 #include <linux/bit_spinlock.h>
 
 /*
@@ -32,13 +33,22 @@
 
 struct hlist_bl_head {
 	struct hlist_bl_node *first;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	raw_spinlock_t lock;
+#endif
 };
 
 struct hlist_bl_node {
 	struct hlist_bl_node *next, **pprev;
 };
-#define INIT_HLIST_BL_HEAD(ptr) \
-	((ptr)->first = NULL)
+
+static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
+{
+	h->first = NULL;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	raw_spin_lock_init(&h->lock);
+#endif
+}
 
 static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
 {
@@ -117,12 +127,26 @@ static inline void hlist_bl_del_init(str
 
 static inline void hlist_bl_lock(struct hlist_bl_head *b)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_lock(0, (unsigned long *)b);
+#else
+	raw_spin_lock(&b->lock);
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__set_bit(0, (unsigned long *)b);
+#endif
+#endif
 }
 
 static inline void hlist_bl_unlock(struct hlist_bl_head *b)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	__bit_spin_unlock(0, (unsigned long *)b);
+#else
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__clear_bit(0, (unsigned long *)b);
+#endif
+	raw_spin_unlock(&b->lock);
+#endif
 }
 
 static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
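
The caller-visible API is unchanged by the patch above; only the lock behind hlist_bl_lock()/hlist_bl_unlock() differs when PREEMPT_RT_BASE is set. A hypothetical user still looks like the sketch below; struct obj, the hash table and the functions are made up for illustration.

#include <linux/list_bl.h>

struct obj {
	unsigned long key;
	struct hlist_bl_node hash_node;
};

#define EXAMPLE_HASH_SIZE 64
static struct hlist_bl_head example_hash[EXAMPLE_HASH_SIZE];

static void example_hash_init(void)
{
	int i;

	/* INIT_HLIST_BL_HEAD() is now a function so that, on RT, it can
	 * also initialise the per-bucket raw spinlock.
	 */
	for (i = 0; i < EXAMPLE_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&example_hash[i]);
}

static void example_hash_insert(struct obj *o)
{
	struct hlist_bl_head *b = &example_hash[o->key % EXAMPLE_HASH_SIZE];

	hlist_bl_lock(b);	/* bit spinlock on mainline, raw spinlock on RT */
	hlist_bl_add_head(&o->hash_node, b);
	hlist_bl_unlock(b);
}
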
@@ -0,0 +1,53 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 21 Jul 2009 22:34:14 +0200
Subject: rt: local_irq_* variants depending on RT/!RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

Add local_irq_*_(no)rt variants which are mainly used to break
interrupt disabled sections on PREEMPT_RT or to explicitly disable
interrupts on PREEMPT_RT.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

---
 include/linux/interrupt.h |  2 +-
 include/linux/irqflags.h  | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -176,7 +176,7 @@ extern void devm_free_irq(struct device
 #ifdef CONFIG_LOCKDEP
 # define local_irq_enable_in_hardirq()	do { } while (0)
 #else
-# define local_irq_enable_in_hardirq()	local_irq_enable()
+# define local_irq_enable_in_hardirq()	local_irq_enable_nort()
 #endif
 
 extern void disable_irq_nosync(unsigned int irq);
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -147,4 +147,23 @@
 
 #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
 
+/*
+ * local_irq* variants depending on RT/!RT
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define local_irq_disable_nort()	do { } while (0)
+# define local_irq_enable_nort()	do { } while (0)
+# define local_irq_save_nort(flags)	do { local_save_flags(flags); } while (0)
+# define local_irq_restore_nort(flags)	do { (void)(flags); } while (0)
+# define local_irq_disable_rt()		local_irq_disable()
+# define local_irq_enable_rt()		local_irq_enable()
+#else
+# define local_irq_disable_nort()	local_irq_disable()
+# define local_irq_enable_nort()	local_irq_enable()
+# define local_irq_save_nort(flags)	local_irq_save(flags)
+# define local_irq_restore_nort(flags)	local_irq_restore(flags)
+# define local_irq_disable_rt()		do { } while (0)
+# define local_irq_enable_rt()		do { } while (0)
+#endif
+
 #endif
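
The _nort variants added above are meant for sections that need hard interrupts disabled on a stock kernel but are already covered by sleeping locks on PREEMPT_RT_FULL, where disabling interrupts would only add latency. A hypothetical caller looks like the sketch below; the lock, counter and function are made up for illustration.

#include <linux/irqflags.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_counter;

static void example_update(void)
{
	unsigned long flags;

	/* On !RT this disables hard interrupts around the section; on RT
	 * the save/restore compile away and the spinlock, which is a
	 * sleeping rt_mutex there, is the real protection.
	 */
	local_irq_save_nort(flags);
	spin_lock(&example_lock);
	example_counter++;
	spin_unlock(&example_lock);
	local_irq_restore_nort(flags);
}
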
@@ -0,0 +1,24 @@
Subject: local-var.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 24 Jun 2011 18:40:37 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/percpu.h | 5 +++++
 1 file changed, 5 insertions(+)

--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,6 +48,11 @@
 	preempt_enable();			\
 } while (0)
 
+#define get_local_var(var)	get_cpu_var(var)
+#define put_local_var(var)	put_cpu_var(var)
+#define get_local_ptr(var)	get_cpu_ptr(var)
+#define put_local_ptr(var)	put_cpu_ptr(var)
+
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
 
@@ -0,0 +1,47 @@
Subject: local-vars-migrate-disable.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 28 Jun 2011 20:42:16 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/percpu.h | 28 ++++++++++++++++++++++++----
 1 file changed, 24 insertions(+), 4 deletions(-)

--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,10 +48,30 @@
 	preempt_enable();			\
 } while (0)
 
-#define get_local_var(var)	get_cpu_var(var)
-#define put_local_var(var)	put_cpu_var(var)
-#define get_local_ptr(var)	get_cpu_ptr(var)
-#define put_local_ptr(var)	put_cpu_ptr(var)
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define get_local_var(var)	get_cpu_var(var)
+# define put_local_var(var)	put_cpu_var(var)
+# define get_local_ptr(var)	get_cpu_ptr(var)
+# define put_local_ptr(var)	put_cpu_ptr(var)
+#else
+# define get_local_var(var) (*({		\
+	migrate_disable();			\
+	&__get_cpu_var(var); }))
+
+# define put_local_var(var) do {		\
+	(void)&(var);				\
+	migrate_enable();			\
+} while (0)
+
+# define get_local_ptr(var) ({			\
+	migrate_disable();			\
+	this_cpu_ptr(var); })
+
+# define put_local_ptr(var) do {		\
+	(void)(var);				\
+	migrate_enable();			\
+} while (0)
+#endif
 
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
 
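
With the RT definitions above, get_local_var()/put_local_var() only guarantee that the section stays on one CPU (migration is disabled); unlike get_cpu_var() the section may still be preempted, so any cross-task serialization has to come from elsewhere. A hypothetical use is sketched below; the per-CPU variable and function are made up for illustration.

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_events);

static void example_account_event(void)
{
	unsigned long *cnt;

	cnt = &get_local_var(example_events);	/* migrate_disable() on RT */
	(*cnt)++;	/* stays on this CPU; may be preempted on RT */
	put_local_var(example_events);		/* migrate_enable() on RT */
}
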
@@ -0,0 +1,16 @@
Subject: localversion.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 20:25:16 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org
---
 localversion-rt | 1 +
 1 file changed, 1 insertion(+)

--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
+-rt2
Some files were not shown because too many files have changed in this diff.