[rt] Update to 4.19-rt1

Romain Perier 2018-10-30 13:40:05 +01:00
parent 83e21a57f8
commit 8608aa3901
273 changed files with 1599 additions and 2771 deletions

debian/changelog
View File

@@ -23,6 +23,9 @@ linux (4.19-1~exp1) UNRELEASED; urgency=medium
[ Noah Meyerhans ]
* [cloud-amd64] Enable Amazon ENA ethernet driver (Closes: #910049)
+  [ Romain Perier ]
+  * [rt] Update to 4.19-rt1
-- Ben Hutchings <ben@decadent.org.uk> Mon, 08 Oct 2018 18:45:06 +0100
linux (4.19~rc7-1~exp1) experimental; urgency=medium

View File

@@ -122,7 +122,7 @@ debug-info: true
signed-code: false
[featureset-rt_base]
-enabled: false
+enabled: true
[description]
part-long-up: This kernel is not suitable for SMP (multi-processor,

View File

@@ -1,7 +1,7 @@
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Wed, 18 Apr 2018 12:51:38 +0200
Subject: [PATCH 1/6] ARM: at91: add TCB registers definitions
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Add registers and bits definitions for the timer counter blocks found on
Atmel ARM SoCs.

View File

@@ -2,7 +2,7 @@ From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 28 May 2018 15:24:20 +0200
Subject: [PATCH 1/4] Split IRQ-off and zone->lock while freeing pages from PCP
list #1
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Split the IRQ-off section while accessing the PCP list from zone->lock
while freeing pages.

View File

@@ -1,103 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 7 May 2018 16:51:09 +0200
Subject: [PATCH] bdi: use refcount_t for reference counting instead atomic_t
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
refcount_t type and corresponding API should be used instead of atomic_t when
the variable is used as a reference counter. This helps avoid accidental
refcounter overflows that might lead to use-after-free situations.
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/backing-dev-defs.h | 3 ++-
include/linux/backing-dev.h | 4 ++--
mm/backing-dev.c | 12 ++++++------
3 files changed, 10 insertions(+), 9 deletions(-)
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -12,6 +12,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
+#include <linux/refcount.h>
struct page;
struct device;
@@ -75,7 +76,7 @@ enum wb_reason {
*/
struct bdi_writeback_congested {
unsigned long state; /* WB_[a]sync_congested flags */
- atomic_t refcnt; /* nr of attached wb's and blkg */
+ refcount_t refcnt; /* nr of attached wb's and blkg */
#ifdef CONFIG_CGROUP_WRITEBACK
struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -404,13 +404,13 @@ static inline bool inode_cgwb_enabled(st
static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
- atomic_inc(&bdi->wb_congested->refcnt);
+ refcount_inc(&bdi->wb_congested->refcnt);
return bdi->wb_congested;
}
static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
- if (atomic_dec_and_test(&congested->refcnt))
+ if (refcount_dec_and_test(&congested->refcnt))
kfree(congested);
}
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -438,10 +438,10 @@ wb_congested_get_create(struct backing_d
if (new_congested) {
/* !found and storage for new one already allocated, insert */
congested = new_congested;
- new_congested = NULL;
rb_link_node(&congested->rb_node, parent, node);
rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
- goto found;
+ spin_unlock_irqrestore(&cgwb_lock, flags);
+ return congested;
}
spin_unlock_irqrestore(&cgwb_lock, flags);
@@ -451,13 +451,13 @@ wb_congested_get_create(struct backing_d
if (!new_congested)
return NULL;
- atomic_set(&new_congested->refcnt, 0);
+ refcount_set(&new_congested->refcnt, 1);
new_congested->__bdi = bdi;
new_congested->blkcg_id = blkcg_id;
goto retry;
found:
- atomic_inc(&congested->refcnt);
+ refcount_inc(&congested->refcnt);
spin_unlock_irqrestore(&cgwb_lock, flags);
kfree(new_congested);
return congested;
@@ -474,7 +474,7 @@ void wb_congested_put(struct bdi_writeba
unsigned long flags;
local_irq_save(flags);
- if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
+ if (!refcount_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
local_irq_restore(flags);
return;
}
@@ -804,7 +804,7 @@ static int cgwb_bdi_init(struct backing_
if (!bdi->wb_congested)
return -ENOMEM;
- atomic_set(&bdi->wb_congested->refcnt, 1);
+ refcount_set(&bdi->wb_congested->refcnt, 1);
err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
if (err) {
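
The conversion above follows a fixed pattern that recurs in the userns patch later in this series. A minimal kernel-style sketch of the pattern, with illustrative (non-kernel) names; note the initial count becomes 1 because refcount_t treats 0 as "already freed":

/* Sketch only: "struct foo" and its helpers are invented for illustration. */
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refcnt;			/* was: atomic_t refcnt */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refcnt, 1);	/* was: atomic_set(&f->refcnt, 1) */
	return f;
}

static struct foo *foo_get(struct foo *f)
{
	refcount_inc(&f->refcnt);	/* saturates and warns instead of overflowing */
	return f;
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refcnt))	/* was: atomic_dec_and_test() */
		kfree(f);
}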

View File

@@ -1,31 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Jul 2018 12:56:19 +0200
Subject: [PATCH 1/4] mm/list_lru: use list_lru_walk_one() in
list_lru_walk_node()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
list_lru_walk_node() invokes __list_lru_walk_one() with -1 as the
memcg_idx parameter. The same can be achieved by list_lru_walk_one() and
passing NULL as memcg argument which then gets converted into -1. This
is a preparation step when the spin_lock() function is lifted to the
caller of __list_lru_walk_one().
Invoke list_lru_walk_one() instead of __list_lru_walk_one() when possible.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
mm/list_lru.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -272,8 +272,8 @@ unsigned long list_lru_walk_node(struct
long isolated = 0;
int memcg_idx;
- isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
- nr_to_walk);
+ isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
+ nr_to_walk);
if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
for_each_memcg_cache_index(memcg_idx) {
isolated += __list_lru_walk_one(lru, nid, memcg_idx,

View File

@@ -1,46 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 22 Jun 2018 10:48:51 +0200
Subject: [PATCH 1/3] mm: workingset: remove local_irq_disable() from
count_shadow_nodes()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
In commit 0c7c1bed7e13 ("mm: make counting of list_lru_one::nr_items
lockless") the
spin_lock(&nlru->lock);
statement was replaced with
rcu_read_lock();
in __list_lru_count_one(). The comment in count_shadow_nodes() says that
the local_irq_disable() is required because the lock must be acquired
with disabled interrupts and (spin_lock()) does not do so.
Since the lock is replaced with rcu_read_lock() the local_irq_disable()
is no longer needed. The code path is
list_lru_shrink_count()
-> list_lru_count_one()
-> __list_lru_count_one()
-> rcu_read_lock()
-> list_lru_from_memcg_idx()
-> rcu_read_unlock()
Remove the local_irq_disable() statement.
Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
mm/workingset.c | 3 ---
1 file changed, 3 deletions(-)
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -366,10 +366,7 @@ static unsigned long count_shadow_nodes(
unsigned long nodes;
unsigned long cache;
- /* list_lru lock nests inside the IRQ-safe i_pages lock */
- local_irq_disable();
nodes = list_lru_shrink_count(&shadow_nodes, sc);
- local_irq_enable();
/*
* Approximate a reasonable limit for the radix tree nodes

View File

@@ -2,7 +2,7 @@ From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 28 May 2018 15:24:21 +0200
Subject: [PATCH 2/4] Split IRQ-off and zone->lock while freeing pages from PCP
list #2
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Split the IRQ-off section while accessing the PCP list from zone->lock
while freeing pages.

View File

@@ -2,7 +2,7 @@ From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Wed, 18 Apr 2018 12:51:39 +0200
Subject: [PATCH 2/6] clocksource/drivers: Add a new driver for the Atmel ARM
TC blocks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Add a driver for the Atmel Timer Counter Blocks. This driver provides a
clocksource and two clockevent devices.

View File

@@ -1,37 +0,0 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
Date: Fri, 4 May 2018 17:45:32 +0200
Subject: [PATCH 2/3] drivers/md/raid5: Use irqsave variant of
atomic_dec_and_lock()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
The irqsave variant of atomic_dec_and_lock handles irqsave/restore when
taking/releasing the spin lock. With this variant the call to
local_irq_save is no longer required.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/md/raid5.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -409,16 +409,15 @@ void raid5_release_stripe(struct stripe_
md_wakeup_thread(conf->mddev->thread);
return;
slow_path:
- local_irq_save(flags);
/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
- if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
+ if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) {
INIT_LIST_HEAD(&list);
hash = sh->hash_lock_index;
do_release_stripe(conf, sh, &list);
spin_unlock(&conf->device_lock);
release_inactive_stripe_list(conf, &list, hash);
+ local_irq_restore(flags);
}
- local_irq_restore(flags);
}
static inline void remove_hash(struct stripe_head *sh)
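
In sketch form (hypothetical object and lock; atomic_dec_and_lock_irqsave() itself is the real interface the patch switches to), the irqsave variant folds the local_irq_save()/restore() pair into the dec-and-lock step:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	atomic_t count;
};

static DEFINE_SPINLOCK(obj_lock);

static void obj_put(struct obj *o)
{
	unsigned long flags;

	/* The lock is only taken - with IRQs off - when count really hits zero. */
	if (atomic_dec_and_lock_irqsave(&o->count, &obj_lock, flags)) {
		/* last reference: tear down under the lock, then free */
		spin_unlock_irqrestore(&obj_lock, flags);
		kfree(o);
	}
}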

View File

@@ -1,65 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Jul 2018 13:06:07 +0200
Subject: [PATCH 2/4] mm/list_lru: Move locking from __list_lru_walk_one() to
its caller
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Move the locking inside __list_lru_walk_one() to its caller. This is a
preparation step in order to introduce list_lru_walk_one_irq() which
does spin_lock_irq() instead of spin_lock() for the locking.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
mm/list_lru.c | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -204,7 +204,6 @@ static unsigned long
struct list_head *item, *n;
unsigned long isolated = 0;
- spin_lock(&nlru->lock);
l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
list_for_each_safe(item, n, &l->list) {
@@ -250,8 +249,6 @@ static unsigned long
BUG();
}
}
-
- spin_unlock(&nlru->lock);
return isolated;
}
@@ -260,8 +257,14 @@ list_lru_walk_one(struct list_lru *lru,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk)
{
- return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
- isolate, cb_arg, nr_to_walk);
+ struct list_lru_node *nlru = &lru->node[nid];
+ unsigned long ret;
+
+ spin_lock(&nlru->lock);
+ ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
+ isolate, cb_arg, nr_to_walk);
+ spin_unlock(&nlru->lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
@@ -276,8 +279,13 @@ unsigned long list_lru_walk_node(struct
nr_to_walk);
if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
for_each_memcg_cache_index(memcg_idx) {
+ struct list_lru_node *nlru = &lru->node[nid];
+
+ spin_lock(&nlru->lock);
isolated += __list_lru_walk_one(lru, nid, memcg_idx,
isolate, cb_arg, nr_to_walk);
+ spin_unlock(&nlru->lock);
+
if (*nr_to_walk <= 0)
break;
}
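
Reduced to a sketch (names invented), the shape of this refactoring is: once the helper no longer locks, each caller chooses its own acquisition, which is what later allows a spin_lock_irq() caller to be added:

#include <linux/list.h>
#include <linux/spinlock.h>

/* caller must hold the node lock */
static unsigned long walk_locked(struct list_head *list)
{
	/* isolate/dispose items here */
	return 0;
}

static unsigned long walk(spinlock_t *lock, struct list_head *list)
{
	unsigned long ret;

	spin_lock(lock);		/* locking now lives in the caller */
	ret = walk_locked(list);
	spin_unlock(lock);
	return ret;
}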

View File

@@ -1,45 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 22 Jun 2018 11:43:35 +0200
Subject: [PATCH 2/3] mm: workingset: make shadow_lru_isolate() use locking
suffix
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
shadow_lru_isolate() disables interrupts and acquires a lock. It could
use spin_lock_irq() instead. It also uses local_irq_enable() while it
could use spin_unlock_irq()/xa_unlock_irq().
Use proper suffix for lock/unlock in order to enable/disable interrupts
during release/acquire of a lock.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
mm/workingset.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -431,7 +431,7 @@ static enum lru_status shadow_lru_isolat
/* Coming from the list, invert the lock order */
if (!xa_trylock(&mapping->i_pages)) {
- spin_unlock(lru_lock);
+ spin_unlock_irq(lru_lock);
ret = LRU_RETRY;
goto out;
}
@@ -469,13 +469,11 @@ static enum lru_status shadow_lru_isolat
workingset_lookup_update(mapping));
out_invalid:
- xa_unlock(&mapping->i_pages);
+ xa_unlock_irq(&mapping->i_pages);
ret = LRU_REMOVED_RETRY;
out:
- local_irq_enable();
cond_resched();
- local_irq_disable();
- spin_lock(lru_lock);
+ spin_lock_irq(lru_lock);
return ret;
}
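
In sketch form (demo lock, not the workingset code), the suffix change swaps an open-coded sequence for the combined primitive, which RT can then substitute as a whole:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo(void)
{
	/* Before: local_irq_disable(); spin_lock(&demo_lock); */
	spin_lock_irq(&demo_lock);
	/* critical section runs with interrupts disabled */
	/* Before: spin_unlock(&demo_lock); local_irq_enable(); */
	spin_unlock_irq(&demo_lock);
}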

View File

@@ -1,83 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 7 May 2018 17:09:42 +0200
Subject: [PATCH] userns: use refcount_t for reference counting instead
atomic_t
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
refcount_t type and corresponding API should be used instead of atomic_t when
the variable is used as a reference counter. This helps avoid accidental
refcounter overflows that might lead to use-after-free situations.
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/sched/user.h | 5 +++--
kernel/user.c | 8 ++++----
2 files changed, 7 insertions(+), 6 deletions(-)
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -4,6 +4,7 @@
#include <linux/uidgid.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/ratelimit.h>
struct key;
@@ -12,7 +13,7 @@ struct key;
* Some day this will be a full-fledged user tracking system..
*/
struct user_struct {
- atomic_t __count; /* reference count */
+ refcount_t __count; /* reference count */
atomic_t processes; /* How many processes does this user have? */
atomic_t sigpending; /* How many pending signals does this user have? */
#ifdef CONFIG_FANOTIFY
@@ -59,7 +60,7 @@ extern struct user_struct root_user;
extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
- atomic_inc(&u->__count);
+ refcount_inc(&u->__count);
return u;
}
extern void free_uid(struct user_struct *);
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -96,7 +96,7 @@ static DEFINE_SPINLOCK(uidhash_lock);
/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
- .__count = ATOMIC_INIT(1),
+ .__count = REFCOUNT_INIT(1),
.processes = ATOMIC_INIT(1),
.sigpending = ATOMIC_INIT(0),
.locked_shm = 0,
@@ -123,7 +123,7 @@ static struct user_struct *uid_hash_find
hlist_for_each_entry(user, hashent, uidhash_node) {
if (uid_eq(user->uid, uid)) {
- atomic_inc(&user->__count);
+ refcount_inc(&user->__count);
return user;
}
}
@@ -170,7 +170,7 @@ void free_uid(struct user_struct *up)
return;
local_irq_save(flags);
- if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+ if (refcount_dec_and_lock(&up->__count, &uidhash_lock))
free_user(up, flags);
else
local_irq_restore(flags);
@@ -191,7 +191,7 @@ struct user_struct *alloc_uid(kuid_t uid
goto out_unlock;
new->uid = uid;
- atomic_set(&new->__count, 1);
+ refcount_set(&new->__count, 1);
ratelimit_state_init(&new->ratelimit, HZ, 100);
ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

View File

@@ -1,7 +1,7 @@
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Wed, 18 Apr 2018 12:51:40 +0200
Subject: [PATCH 3/6] clocksource/drivers: atmel-pit: make option silent
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
To conform with the other option, make the ATMEL_PIT option silent so it
can be selected from the platform

View File

@@ -1,31 +0,0 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
Date: Fri, 4 May 2018 17:45:33 +0200
Subject: [PATCH 3/3] drivers/md/raid5: Do not disable irq on
release_inactive_stripe_list() call
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
There is no need to invoke release_inactive_stripe_list() with interrupts
disabled. All call sites, except raid5_release_stripe(), unlock
->device_lock and enable interrupts before invoking the function.
Make it consistent.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/md/raid5.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -414,9 +414,8 @@ void raid5_release_stripe(struct stripe_
INIT_LIST_HEAD(&list);
hash = sh->hash_lock_index;
do_release_stripe(conf, sh, &list);
- spin_unlock(&conf->device_lock);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
release_inactive_stripe_list(conf, &list, hash);
- local_irq_restore(flags);
}
}

View File

@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 28 May 2018 15:24:22 +0200
Subject: [PATCH 3/4] mm/SLxB: change list_lock to raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
The list_lock is used with IRQs off on RT. Make it a raw_spinlock_t,
otherwise the interrupts won't be disabled on -RT. The locking rules remain
@@ -395,7 +395,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct list_head slabs_partial; /* partial list first, better asm code */
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1169,7 +1169,7 @@ static noinline int free_debug_processin
+@@ -1167,7 +1167,7 @@ static noinline int free_debug_processin
unsigned long uninitialized_var(flags);
int ret = 0;
@@ -404,7 +404,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slab_lock(page);
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1204,7 +1204,7 @@ static noinline int free_debug_processin
+@@ -1202,7 +1202,7 @@ static noinline int free_debug_processin
bulk_cnt, cnt);
slab_unlock(page);
@@ -413,7 +413,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
return ret;
-@@ -1804,7 +1804,7 @@ static void *get_partial_node(struct kme
+@@ -1802,7 +1802,7 @@ static void *get_partial_node(struct kme
if (!n || !n->nr_partial)
return NULL;
@@ -422,7 +422,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
-@@ -1829,7 +1829,7 @@ static void *get_partial_node(struct kme
+@@ -1827,7 +1827,7 @@ static void *get_partial_node(struct kme
break;
}
@@ -431,7 +431,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return object;
}
-@@ -2075,7 +2075,7 @@ static void deactivate_slab(struct kmem_
+@@ -2073,7 +2073,7 @@ static void deactivate_slab(struct kmem_
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -440,7 +440,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
} else {
m = M_FULL;
-@@ -2086,7 +2086,7 @@ static void deactivate_slab(struct kmem_
+@@ -2084,7 +2084,7 @@ static void deactivate_slab(struct kmem_
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
@@ -449,7 +449,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2121,7 +2121,7 @@ static void deactivate_slab(struct kmem_
+@@ -2119,7 +2119,7 @@ static void deactivate_slab(struct kmem_
goto redo;
if (lock)
@@ -458,7 +458,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
-@@ -2156,10 +2156,10 @@ static void unfreeze_partials(struct kme
+@@ -2154,10 +2154,10 @@ static void unfreeze_partials(struct kme
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
@@ -471,7 +471,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
do {
-@@ -2188,7 +2188,7 @@ static void unfreeze_partials(struct kme
+@@ -2186,7 +2186,7 @@ static void unfreeze_partials(struct kme
}
if (n)
@@ -480,7 +480,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (discard_page) {
page = discard_page;
-@@ -2357,10 +2357,10 @@ static unsigned long count_partial(struc
+@@ -2355,10 +2355,10 @@ static unsigned long count_partial(struc
unsigned long x = 0;
struct page *page;
@@ -493,7 +493,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2795,7 +2795,7 @@ static void __slab_free(struct kmem_cach
+@@ -2793,7 +2793,7 @@ static void __slab_free(struct kmem_cach
do {
if (unlikely(n)) {
@@ -502,7 +502,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
n = NULL;
}
prior = page->freelist;
-@@ -2827,7 +2827,7 @@ static void __slab_free(struct kmem_cach
+@@ -2825,7 +2825,7 @@ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
@@ -511,7 +511,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2869,7 +2869,7 @@ static void __slab_free(struct kmem_cach
+@@ -2867,7 +2867,7 @@ static void __slab_free(struct kmem_cach
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -520,7 +520,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
slab_empty:
-@@ -2884,7 +2884,7 @@ static void __slab_free(struct kmem_cach
+@@ -2882,7 +2882,7 @@ static void __slab_free(struct kmem_cach
remove_full(s, n, page);
}
@@ -529,7 +529,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -3271,7 +3271,7 @@ static void
+@@ -3269,7 +3269,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
@@ -538,7 +538,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3655,7 +3655,7 @@ static void free_partial(struct kmem_cac
+@@ -3653,7 +3653,7 @@ static void free_partial(struct kmem_cac
struct page *page, *h;
BUG_ON(irqs_disabled());
@@ -547,7 +547,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
remove_partial(n, page);
-@@ -3665,7 +3665,7 @@ static void free_partial(struct kmem_cac
+@@ -3663,7 +3663,7 @@ static void free_partial(struct kmem_cac
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
@@ -556,7 +556,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
list_for_each_entry_safe(page, h, &discard, lru)
discard_slab(s, page);
-@@ -3938,7 +3938,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3936,7 +3936,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
@@ -565,7 +565,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Build lists of slabs to discard or promote.
-@@ -3969,7 +3969,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3967,7 +3967,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
@@ -574,7 +574,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4383,7 +4383,7 @@ static int validate_slab_node(struct kme
+@@ -4381,7 +4381,7 @@ static int validate_slab_node(struct kme
struct page *page;
unsigned long flags;
@@ -583,7 +583,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
-@@ -4405,7 +4405,7 @@ static int validate_slab_node(struct kme
+@@ -4403,7 +4403,7 @@ static int validate_slab_node(struct kme
s->name, count, atomic_long_read(&n->nr_slabs));
out:
@@ -592,7 +592,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return count;
}
-@@ -4595,12 +4595,12 @@ static int list_locations(struct kmem_ca
+@@ -4593,12 +4593,12 @@ static int list_locations(struct kmem_ca
if (!atomic_long_read(&n->nr_slabs))
continue;
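
The conversion is mechanical; what it means, as an illustrative sketch: on PREEMPT_RT a spinlock_t becomes a sleeping lock, so a lock that must genuinely be held with interrupts disabled has to be declared raw_spinlock_t:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_node {
	raw_spinlock_t list_lock;	/* was: spinlock_t list_lock */
	struct list_head partial;
};

static void demo_init(struct demo_node *n)
{
	raw_spin_lock_init(&n->list_lock);
	INIT_LIST_HEAD(&n->partial);
}

static void demo_touch(struct demo_node *n)
{
	unsigned long flags;

	/* A raw lock really spins with IRQs off, even on RT, so the
	 * critical section must stay short and bounded. */
	raw_spin_lock_irqsave(&n->list_lock, flags);
	raw_spin_unlock_irqrestore(&n->list_lock, flags);
}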

View File

@@ -1,56 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Jul 2018 13:08:56 +0200
Subject: [PATCH 3/4] mm/list_lru: Pass struct list_lru_node as an argument
__list_lru_walk_one()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
__list_lru_walk_one() is invoked with struct list_lru *lru, int nid as
the first two arguments. Those two are only used to retrieve struct
list_lru_node. Since this is already done by the caller of the function
for the locking, we can pass struct list_lru_node directly and avoid the
dance around it.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
mm/list_lru.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -194,12 +194,11 @@ unsigned long list_lru_count_node(struct
EXPORT_SYMBOL_GPL(list_lru_count_node);
static unsigned long
-__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
+__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk)
{
- struct list_lru_node *nlru = &lru->node[nid];
struct list_lru_one *l;
struct list_head *item, *n;
unsigned long isolated = 0;
@@ -261,8 +260,8 @@ list_lru_walk_one(struct list_lru *lru,
unsigned long ret;
spin_lock(&nlru->lock);
- ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
- isolate, cb_arg, nr_to_walk);
+ ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
+ nr_to_walk);
spin_unlock(&nlru->lock);
return ret;
}
@@ -282,8 +281,9 @@ unsigned long list_lru_walk_node(struct
struct list_lru_node *nlru = &lru->node[nid];
spin_lock(&nlru->lock);
- isolated += __list_lru_walk_one(lru, nid, memcg_idx,
- isolate, cb_arg, nr_to_walk);
+ isolated += __list_lru_walk_one(nlru, memcg_idx,
+ isolate, cb_arg,
+ nr_to_walk);
spin_unlock(&nlru->lock);
if (*nr_to_walk <= 0)

View File

@@ -1,7 +1,7 @@
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Wed, 18 Apr 2018 12:51:41 +0200
Subject: [PATCH 4/6] ARM: at91: Implement clocksource selection
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Allow selecting and unselecting the PIT clocksource driver so it doesn't
have to be compiled when unused.
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
-@@ -106,6 +106,31 @@ config SOC_AT91SAM9
+@@ -107,6 +107,31 @@ config SOC_AT91SAM9
AT91SAM9X35
AT91SAM9XE

View File

@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 21 Jun 2018 17:29:19 +0200
Subject: [PATCH 4/4] mm/SLUB: delay giving back empty slubs to IRQ enabled
regions
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
__free_slab() is invoked with disabled interrupts which increases the
irq-off time while __free_pages() is doing the work.
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1332,6 +1332,12 @@ static inline void dec_slabs_node(struct
+@@ -1330,6 +1330,12 @@ static inline void dec_slabs_node(struct
#endif /* CONFIG_SLUB_DEBUG */
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
-@@ -1686,6 +1692,16 @@ static void __free_slab(struct kmem_cach
+@@ -1684,6 +1690,16 @@ static void __free_slab(struct kmem_cach
__free_pages(page, order);
}
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void rcu_free_slab(struct rcu_head *h)
{
struct page *page = container_of(h, struct page, rcu_head);
-@@ -1697,6 +1713,12 @@ static void free_slab(struct kmem_cache
+@@ -1695,6 +1711,12 @@ static void free_slab(struct kmem_cache
{
if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
call_rcu(&page->rcu_head, rcu_free_slab);
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else
__free_slab(s, page);
}
-@@ -2225,14 +2247,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2223,14 +2245,21 @@ static void put_cpu_partial(struct kmem_
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
oldpage = NULL;
pobjects = 0;
pages = 0;
-@@ -2302,7 +2331,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2300,7 +2329,22 @@ static bool has_cpu_slab(int cpu, void *
static void flush_all(struct kmem_cache *s)
{
@@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2500,8 +2544,10 @@ static inline void *get_freelist(struct
+@@ -2498,8 +2542,10 @@ static inline void *get_freelist(struct
* already disabled (which is the case for bulk allocation).
*/
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void *freelist;
struct page *page;
-@@ -2557,6 +2603,13 @@ static void *___slab_alloc(struct kmem_c
+@@ -2555,6 +2601,13 @@ static void *___slab_alloc(struct kmem_c
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return freelist;
new_slab:
-@@ -2572,7 +2625,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2570,7 +2623,7 @@ static void *___slab_alloc(struct kmem_c
if (unlikely(!freelist)) {
slab_out_of_memory(s, gfpflags, node);
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
page = c->page;
-@@ -2585,7 +2638,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2583,7 +2636,7 @@ static void *___slab_alloc(struct kmem_c
goto new_slab; /* Slab failed checks. Next slab needed */
deactivate_slab(s, page, get_freepointer(s, freelist), c);
@@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2597,6 +2650,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2595,6 +2648,7 @@ static void *__slab_alloc(struct kmem_ca
{
void *p;
unsigned long flags;
@@ -158,7 +158,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
-@@ -2608,8 +2662,9 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2606,8 +2660,9 @@ static void *__slab_alloc(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
#endif
@@ -169,7 +169,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return p;
}
-@@ -3087,6 +3142,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3085,6 +3140,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
void **p)
{
struct kmem_cache_cpu *c;
@@ -177,7 +177,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int i;
/* memcg and kmem_cache debug support */
-@@ -3110,7 +3166,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3108,7 +3164,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* of re-populating per CPU c->freelist
*/
p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -186,7 +186,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(!p[i]))
goto error;
-@@ -3122,6 +3178,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3120,6 +3176,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
}
c->tid = next_tid(c->tid);
local_irq_enable();
@@ -194,7 +194,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Clear memory outside IRQ disabled fastpath loop */
if (unlikely(flags & __GFP_ZERO)) {
-@@ -3136,6 +3193,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3134,6 +3191,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
return i;
error:
local_irq_enable();
@@ -202,7 +202,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slab_post_alloc_hook(s, flags, i, p);
__kmem_cache_free_bulk(s, i, p);
return 0;
-@@ -4182,6 +4240,12 @@ void __init kmem_cache_init(void)
+@@ -4180,6 +4238,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
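
The core idea in sketch form (hypothetical item type, not the slub code): while interrupts are off, the expensive free is merely queued; the queue is drained once interrupts are enabled again:

#include <linux/irqflags.h>
#include <linux/list.h>
#include <linux/slab.h>

struct deferred_item {
	struct list_head list;
};

static void drain_deferred(struct list_head *pending)
{
	struct deferred_item *it, *tmp;
	LIST_HEAD(tofree);
	unsigned long flags;

	local_irq_save(flags);
	list_splice_init(pending, &tofree);	/* cheap while IRQs are off */
	local_irq_restore(flags);

	list_for_each_entry_safe(it, tmp, &tofree, list) {
		list_del(&it->list);
		kfree(it);	/* stands in for the expensive __free_slab() path */
	}
}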

View File

@@ -1,107 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Jul 2018 13:17:27 +0200
Subject: [PATCH 4/4] mm/list_lru: Introduce list_lru_shrink_walk_irq()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Provide list_lru_shrink_walk_irq() and let it behave like
list_lru_walk_one() except that it locks the spinlock with
spin_lock_irq(). This is used by scan_shadow_nodes() because its lock
nests within the i_pages lock, which is acquired with IRQs disabled.
This change makes it possible to use proper locking primitives instead of
hand-crafted local_irq_disable() plus spin_lock().
There is no EXPORT_SYMBOL provided because the current user is in-kernel
only.
Add list_lru_shrink_walk_irq() which acquires the spinlock with the
proper locking primitives.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/list_lru.h | 25 +++++++++++++++++++++++++
mm/list_lru.c | 15 +++++++++++++++
mm/workingset.c | 8 ++------
3 files changed, 42 insertions(+), 6 deletions(-)
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -162,6 +162,23 @@ unsigned long list_lru_walk_one(struct l
int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
+/**
+ * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
+ * @lru: the lru pointer.
+ * @nid: the node id to scan from.
+ * @memcg: the cgroup to scan from.
+ * @isolate: callback function that is resposible for deciding what to do with
+ * the item currently being scanned
+ * @cb_arg: opaque type that will be passed to @isolate
+ * @nr_to_walk: how many items to scan.
+ *
+ * Same as @list_lru_walk_one except that the spinlock is acquired with
+ * spin_lock_irq().
+ */
+unsigned long list_lru_walk_one_irq(struct list_lru *lru,
+ int nid, struct mem_cgroup *memcg,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
@@ -175,6 +192,14 @@ list_lru_shrink_walk(struct list_lru *lr
}
static inline unsigned long
+list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
+ list_lru_walk_cb isolate, void *cb_arg)
+{
+ return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
+ &sc->nr_to_scan);
+}
+
+static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
void *cb_arg, unsigned long nr_to_walk)
{
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -267,6 +267,21 @@ list_lru_walk_one(struct list_lru *lru,
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
+unsigned long
+list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk)
+{
+ struct list_lru_node *nlru = &lru->node[nid];
+ unsigned long ret;
+
+ spin_lock_irq(&nlru->lock);
+ ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
+ nr_to_walk);
+ spin_unlock_irq(&nlru->lock);
+ return ret;
+}
+
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk)
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -480,13 +480,9 @@ static enum lru_status shadow_lru_isolat
static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
struct shrink_control *sc)
{
- unsigned long ret;
-
/* list_lru lock nests inside the IRQ-safe i_pages lock */
- local_irq_disable();
- ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
- local_irq_enable();
- return ret;
+ return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
+ NULL);
}
static struct shrinker workingset_shadow_shrinker = {

View File

@@ -1,7 +1,7 @@
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Wed, 18 Apr 2018 12:51:42 +0200
Subject: [PATCH 5/6] ARM: configs: at91: use new TCB timer driver
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Unselecting ATMEL_TCLIB switches the TCB timer driver from tcb_clksrc to
timer-atmel-tcb.

View File

@@ -1,31 +0,0 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
Date: Wed, 4 Apr 2018 11:43:56 +0200
Subject: [PATCH] bdi: Use irqsave variant of refcount_dec_and_lock()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
The irqsave variant of refcount_dec_and_lock handles irqsave/restore when
taking/releasing the spin lock. With this variant the call to
local_irq_save/restore is no longer required.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
[bigeasy: s@atomic_dec_and_lock@refcount_dec_and_lock@g ]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
mm/backing-dev.c | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -473,11 +473,8 @@ void wb_congested_put(struct bdi_writeba
{
unsigned long flags;
- local_irq_save(flags);
- if (!refcount_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
- local_irq_restore(flags);
+ if (!refcount_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags))
return;
- }
/* bdi might already have been destroyed leaving @congested unlinked */
if (congested->__bdi) {
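
A sketch of the resulting put-path shape (hypothetical object; refcount_dec_and_lock_irqsave() is the real interface and, unlike the atomic_t variant, takes &flags):

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	refcount_t refcnt;
};

static DEFINE_SPINLOCK(tree_lock);

static void obj_put(struct obj *o)
{
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&o->refcnt, &tree_lock, &flags))
		return;			/* not the last reference: lock never taken */

	/* last reference: unlink under tree_lock, then free */
	spin_unlock_irqrestore(&tree_lock, flags);
	kfree(o);
}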

View File

@@ -1,7 +1,7 @@
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Wed, 18 Apr 2018 12:51:43 +0200
Subject: [PATCH 6/6] ARM: configs: at91: unselect PIT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
The PIT is no longer required for a successful boot and may actually cause
harm when preempt-rt is used, because the PIT interrupt is shared.

View File

@@ -1,31 +0,0 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
Date: Wed, 4 Apr 2018 11:43:57 +0200
Subject: [PATCH] userns: Use irqsave variant of refcount_dec_and_lock()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
The irqsave variant of refcount_dec_and_lock handles irqsave/restore when
taking/releasing the spin lock. With this variant the call to
local_irq_save/restore is no longer required.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
[bigeasy: s@atomic_dec_and_lock@refcount_dec_and_lock@g ]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/user.c | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -169,11 +169,8 @@ void free_uid(struct user_struct *up)
if (!up)
return;
- local_irq_save(flags);
- if (refcount_dec_and_lock(&up->__count, &uidhash_lock))
+ if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
free_user(up, flags);
- else
- local_irq_restore(flags);
}
struct user_struct *alloc_uid(kuid_t uid)

View File

@@ -1,7 +1,7 @@
From: "Yadi.hu" <yadi.hu@windriver.com>
Date: Wed, 10 Dec 2014 10:32:09 +0800
Subject: ARM: enable irq in translation/section permission fault handlers
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Probably happens on all ARM, with
CONFIG_PREEMPT_RT_FULL
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
-@@ -438,6 +438,9 @@ do_translation_fault(unsigned long addr,
+@@ -439,6 +439,9 @@ do_translation_fault(unsigned long addr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (user_mode(regs))
goto bad_area;
-@@ -505,6 +508,9 @@ do_translation_fault(unsigned long addr,
+@@ -506,6 +509,9 @@ do_translation_fault(unsigned long addr,
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{

View File

@@ -4,7 +4,7 @@ Subject: [PATCH] Drivers: hv: vmbus: include header for get_irq_regs()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
On !RT the header file for get_irq_regs() gets pulled in via other header
files. On RT it does not, and the build fails:

View File

@@ -0,0 +1,44 @@
From: Paul E. McKenney <paulmck@linux.ibm.com>
Date: Mon, 29 Oct 2018 11:53:01 +0100
Subject: [PATCH] EXP rcu: Revert expedited GP parallelization cleverness
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
(Commit 258ba8e089db23f760139266c232f01bad73f85c from linux-rcu)
This commit reverts a series of commits starting with fcc635436501 ("rcu:
Make expedited GPs handle CPU 0 being offline") and its successors, thus
queueing each rcu_node structure's expedited grace-period initialization
work on the first CPU of that rcu_node structure.
Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/rcu/tree_exp.h | 9 +--------
1 file changed, 1 insertion(+), 8 deletions(-)
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -472,7 +472,6 @@ static void sync_rcu_exp_select_node_cpu
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
smp_call_func_t func)
{
- int cpu;
struct rcu_node *rnp;
trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -494,13 +493,7 @@ static void sync_rcu_exp_select_cpus(str
continue;
}
INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
- preempt_disable();
- cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
- /* If all offline, queue the work on an unbound CPU. */
- if (unlikely(cpu > rnp->grphi))
- cpu = WORK_CPU_UNBOUND;
- queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
- preempt_enable();
+ queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
rnp->exp_need_flush = true;
}
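
A sketch of the pattern the revert lands on (illustrative types): queue_work_on() pins each node's initialization work to a known CPU, replacing the preempt_disable()-guarded CPU hunt removed above:

#include <linux/workqueue.h>

struct node_state {
	struct work_struct rew_work;
	int first_cpu;			/* plays the role of rnp->grplo */
};

static void node_init_fn(struct work_struct *work)
{
	/* per-node expedited grace-period setup would run here */
}

static void kick_node(struct workqueue_struct *wq, struct node_state *ns)
{
	INIT_WORK(&ns->rew_work, node_init_fn);
	queue_work_on(ns->first_cpu, wq, &ns->rew_work);
}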

View File

@@ -1,73 +1,73 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 21 Mar 2013 19:01:05 +0100
Subject: printk: Drop the logbuf_lock more often
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
The lock is held with irqs off. The latency drops 500us+ on my ARM box
with a "full" buffer after executing "dmesg" on the shell.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
-kernel/printk/printk.c | 27 +++++++++++++++++++++++++++
-1 file changed, 27 insertions(+)
+kernel/printk/printk.c | 28 ++++++++++++++++++++++++++++
+1 file changed, 28 insertions(+)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1411,6 +1411,8 @@ static int syslog_print_all(char __user
{
char *text;
int len = 0;
@@ -1415,12 +1415,23 @@ static int syslog_print_all(char __user
u64 next_seq;
u64 seq;
u32 idx;
+ int attempts = 0;
+ int num_msg;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
@@ -1422,6 +1424,14 @@ static int syslog_print_all(char __user
u64 seq;
u32 idx;
return -ENOMEM;
+try_again:
+ attempts++;
+ if (attempts > 10) {
+ len = -EBUSY;
+ goto out;
+ }
+ num_msg = 0;
logbuf_lock_irq();
+
/*
* Find first record that fits, including all following records,
* into the user-provided buffer for this dump.
@@ -1434,6 +1444,14 @@ static int syslog_print_all(char __user
len += msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
+ num_msg++;
+ if (num_msg > 5) {
+ num_msg = 0;
+ logbuf_unlock_irq();
+ logbuf_lock_irq();
+ if (clear_seq < log_first_seq)
+ goto try_again;
+ }
}
+try_again:
+ attempts++;
+ if (attempts > 10) {
+ len = -EBUSY;
+ goto out;
+ }
+ num_msg = 0;
+
/*
* Find first record that fits, including all following records,
* into the user-provided buffer for this dump.
@@ -1433,6 +1444,14 @@ static int syslog_print_all(char __user
len += msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
+ num_msg++;
+ if (num_msg > 5) {
+ num_msg = 0;
+ logbuf_unlock_irq();
+ logbuf_lock_irq();
+ if (clear_seq < log_first_seq)
+ goto try_again;
+ }
}
/* move first record forward until length fits into the buffer */
@@ -1445,6 +1463,14 @@ static int syslog_print_all(char __user
len -= msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
+ num_msg++;
+ if (num_msg > 5) {
+ num_msg = 0;
+ logbuf_unlock_irq();
+ logbuf_lock_irq();
+ if (clear_seq < log_first_seq)
+ goto try_again;
+ }
}
/* move first record forward until length fits into the buffer */
@@ -1444,6 +1463,14 @@ static int syslog_print_all(char __user
len -= msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
+ num_msg++;
+ if (num_msg > 5) {
+ num_msg = 0;
+ logbuf_unlock_irq();
+ logbuf_lock_irq();
+ if (clear_seq < log_first_seq)
+ goto try_again;
+ }
}
/* last message fitting into this dump */
@@ -1483,6 +1509,7 @@ static int syslog_print_all(char __user
/* last message fitting into this dump */
@@ -1481,6 +1508,7 @@ static int syslog_print_all(char __user
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
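
Reduced to a sketch (invented lock, counters and records_expired() helper), the structure the hunks above add is: bound the IRQs-off section to a few records at a time, and restart from scratch if a writer invalidated the snapshot while the lock was dropped:

#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(buf_lock);

static bool records_expired(void)	/* did a writer overtake us? */
{
	return false;			/* stub for the sketch */
}

static int copy_records(int total)
{
	int copied, burst, attempts = 0;

try_again:
	if (++attempts > 10)
		return -EBUSY;		/* writers keep winning: give up */
	copied = 0;
	spin_lock_irq(&buf_lock);
	for (burst = 0; copied < total; copied++) {
		/* ... copy one record ... */
		if (++burst > 5) {	/* breathe every few records */
			burst = 0;
			spin_unlock_irq(&buf_lock);
			spin_lock_irq(&buf_lock);
			if (records_expired()) {
				spin_unlock_irq(&buf_lock);
				goto try_again;
			}
		}
	}
	spin_unlock_irq(&buf_lock);
	return copied;
}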

View File

@@ -1,7 +1,7 @@
From: Josh Cartwright <joshc@ni.com>
Date: Thu, 11 Feb 2016 11:54:01 -0600
Subject: KVM: arm/arm64: downgrade preempt_disable()d region to migrate_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating
the vgic and timer states to prevent the calling task from migrating to
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
-@@ -694,7 +694,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -700,7 +700,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
-@@ -743,7 +743,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -749,7 +749,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
}
-@@ -821,7 +821,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -827,7 +827,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, run, ret);

View File

@@ -5,7 +5,7 @@ Cc: Anna Schumaker <anna.schumaker@netapp.com>,
linux-nfs@vger.kernel.org, linux-kernel@vger.kernel.org,
tglx@linutronix.de
Subject: NFSv4: replace seqcount_t with a seqlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
The raw_write_seqcount_begin() in nfs4_reclaim_open_state() bugs me
because it maps to preempt_disable() in -RT which I can't have at this
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
-@@ -2818,7 +2818,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2859,7 +2859,7 @@ static int _nfs4_open_and_get_state(stru
unsigned int seq;
int ret;
@@ -67,15 +67,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = _nfs4_proc_open(opendata, ctx);
if (ret != 0)
-@@ -2856,7 +2856,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2900,7 +2900,7 @@ static int _nfs4_open_and_get_state(stru
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
- if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
+ if (read_seqretry(&sp->so_reclaim_seqlock, seq))
nfs4_schedule_stateid_recovery(server, state);
else
pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
}
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -511,7 +511,7 @@ nfs4_alloc_state_owner(struct nfs_server
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_init(&sp->so_delegreturn_mutex);
return sp;
}
-@@ -1560,8 +1560,12 @@ static int nfs4_reclaim_open_state(struc
+@@ -1563,8 +1563,12 @@ static int nfs4_reclaim_open_state(struc
* recovering after a network partition or a reboot from a
* server that doesn't support a grace period.
*/
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
restart:
list_for_each_entry(state, &sp->so_states, open_states) {
if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
-@@ -1630,14 +1634,20 @@ static int nfs4_reclaim_open_state(struc
+@@ -1651,14 +1655,20 @@ static int nfs4_reclaim_open_state(struc
spin_lock(&sp->so_lock);
goto restart;
}
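
For contrast, a self-contained sketch of the two primitives on demo state (not the NFS code): a seqlock_t embeds a spinlock for the write side, which RT can turn into a preemptible lock, while readers stay lockless and simply retry:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(state_seqlock);
static unsigned long state_value;

static void state_update(unsigned long v)	/* writer side */
{
	write_seqlock(&state_seqlock);		/* real lock: serializes writers */
	state_value = v;
	write_sequnlock(&state_seqlock);
}

static unsigned long state_read(void)		/* lockless reader */
{
	unsigned long v;
	unsigned int seq;

	do {
		seq = read_seqbegin(&state_seqlock);
		v = state_value;
	} while (read_seqretry(&state_seqlock, seq));
	return v;
}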

View File

@@ -1,49 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 12 Apr 2018 09:16:22 +0200
Subject: [PATCH] [SCSI] libsas: remove irq save in sas_ata_qc_issue()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
[ upstream commit 2da11d4262639dc0e2fabc6a70886db57af25c43 ]
Since commit 312d3e56119a ("[SCSI] libsas: remove ata_port.lock
management duties from lldds") the sas_ata_qc_issue() function unlocks
the ata_port.lock and disables interrupts before doing so.
That lock is always taken with disabled interrupts so at this point, the
interrupts are already disabled. There is no need to disable the
interrupts before the unlock operation because they are already
disabled.
Restoring the interrupt state later does not change anything because
they were disabled and remain disabled. Therefore remove the operations
which do not change the behaviour.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/scsi/libsas/sas_ata.c | 3 ---
1 file changed, 3 deletions(-)
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -176,7 +176,6 @@ static void sas_ata_task_done(struct sas
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
{
- unsigned long flags;
struct sas_task *task;
struct scatterlist *sg;
int ret = AC_ERR_SYSTEM;
@@ -190,7 +189,6 @@ static unsigned int sas_ata_qc_issue(str
/* TODO: audit callers to ensure they are ready for qc_issue to
* unconditionally re-enable interrupts
*/
- local_irq_save(flags);
spin_unlock(ap->lock);
/* If the device fell off, no sense in issuing commands */
@@ -252,7 +250,6 @@ static unsigned int sas_ata_qc_issue(str
out:
spin_lock(ap->lock);
- local_irq_restore(flags);
return ret;
}

View File

@@ -1,42 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 12 Apr 2018 09:55:25 +0200
Subject: [PATCH] [SCSI] qla2xxx: remove irq save in qla2x00_poll()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
[ upstream commit b3a8aa90c46095cbad454eb068bfb5a8eb56d4e3 ]
Commit d2ba5675d899 ("[SCSI] qla2xxx: Disable local-interrupts while
polling for RISC status.") added a local_irq_disable() before invoking
the ->intr_handler callback. The function, which was used in this
callback, did not disable interrupts while acquiring the spin_lock so a
deadlock was possible and this change was one possible solution.
The function in question was qla2300_intr_handler() and is using
spin_lock_irqsave() since commit 43fac4d97a1a ("[SCSI] qla2xxx: Resolve
a performance issue in interrupt").
I checked all other ->intr_handler callbacks and all of them use the
irqsave variant so it is safe to remove the local_irq_save() block now.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/scsi/qla2xxx/qla_inline.h | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -58,14 +58,12 @@ qla2x00_debounce_register(volatile uint1
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
- unsigned long flags;
struct qla_hw_data *ha = rsp->hw;
- local_irq_save(flags);
+
if (IS_P3P_TYPE(ha))
qla82xx_poll(0, rsp);
else
ha->isp_ops->intr_handler(0, rsp);
- local_irq_restore(flags);
}
static inline uint8_t *

View File

@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat, 27 May 2017 19:02:06 +0200
Subject: kernel/sched/core: add migrate_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
---
include/linux/preempt.h | 23 ++++++++
@@ -80,7 +80,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4
* boot command line:
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1030,7 +1030,15 @@ void set_cpus_allowed_common(struct task
@@ -1007,7 +1007,15 @@ void set_cpus_allowed_common(struct task
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
@@ -97,7 +97,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4
{
struct rq *rq = task_rq(p);
bool queued, running;
-@@ -1059,6 +1067,20 @@ void do_set_cpus_allowed(struct task_str
+@@ -1036,6 +1044,20 @@ void do_set_cpus_allowed(struct task_str
set_curr_task(rq, p);
}
@@ -118,7 +118,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
-@@ -1117,9 +1139,16 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1094,9 +1116,16 @@ static int __set_cpus_allowed_ptr(struct
}
/* Can the task run on the task's current CPU? If so, we're done */
@@ -136,7 +136,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
-@@ -7076,3 +7105,100 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7053,3 +7082,100 @@ const u32 sched_prio_to_wmult[40] = {
};
#undef CREATE_TRACE_POINTS
@@ -239,7 +239,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4
+#endif
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -979,6 +979,10 @@ void proc_sched_show_task(struct task_st
+@@ -978,6 +978,10 @@ void proc_sched_show_task(struct task_st
P(dl.runtime);
P(dl.deadline);
}
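
A hypothetical usage sketch of the new interface (the per-CPU counter is invented): unlike preempt_disable(), the section may be preempted, but the task cannot migrate, so per-CPU pointers stay valid; exclusion against other tasks on the same CPU still needs a lock (see the apparmor local_lock patch below):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_bump(void)
{
	migrate_disable();
	/* this_cpu_ptr() keeps pointing at the same CPU's data in here */
	(*this_cpu_ptr(&demo_counter))++;
	migrate_enable();
}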

View File

@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 11 Oct 2017 17:43:49 +0200
Subject: apparmor: use a locallock instead preempt_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
get_buffers() disables preemption which acts as a lock for the per-CPU
variable. Since we can't disable preemption here on RT, a local_lock is
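
A hedged sketch of the locallock API this refers to (include/linux/locallock.h exists in the -rt tree only, not mainline; the buffer variable is invented): on !RT local_lock() collapses to preempt_disable(), on RT it is a per-CPU sleeping lock:

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(char *, demo_buffer);
static DEFINE_LOCAL_IRQ_LOCK(demo_buffer_lock);

static char *demo_get_buffer(void)
{
	/* pins the task to this CPU and excludes same-CPU users */
	local_lock(demo_buffer_lock);
	return this_cpu_read(demo_buffer);
}

static void demo_put_buffer(void)
{
	local_unlock(demo_buffer_lock);
}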

View File

@@ -1,7 +1,7 @@
From: Anders Roxell <anders.roxell@linaro.org>
Date: Thu, 14 May 2015 17:52:17 +0200
Subject: arch/arm64: Add lazy preempt support
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
arm64 is missing support for PREEMPT_RT. The main feature which is
lacking is support for lazy preemption. The arch-specific entry code,
@ -21,14 +21,14 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -125,6 +125,7 @@ config ARM64
@@ -140,6 +140,7 @@ config ARM64
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RCU_TABLE_FREE
select HAVE_STACKPROTECTOR
select HAVE_RSEQ
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -43,6 +43,7 @@ struct thread_info {
@ -78,7 +78,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -633,11 +633,16 @@ ENDPROC(el1_sync)
@@ -623,11 +623,16 @@ ENDPROC(el1_sync)
#ifdef CONFIG_PREEMPT
ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
@ -98,7 +98,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
@@ -651,6 +656,7 @@ ENDPROC(el1_irq)
@@ -641,6 +646,7 @@ ENDPROC(el1_irq)
1: bl preempt_schedule_irq // irq en/disable is done inside
ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
@ -108,7 +108,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -923,7 +923,7 @@ asmlinkage void do_notify_resume(struct
@@ -926,7 +926,7 @@ asmlinkage void do_notify_resume(struct
/* Check valid user FS if needed */
addr_limit_user_check();


@ -1,7 +1,7 @@
From: Frank Rowand <frank.rowand@am.sony.com>
Date: Mon, 19 Sep 2011 14:51:14 -0700
Subject: arm: Convert arm boot_lock to raw
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
The arm boot_lock is used by the secondary processor startup code. The locking
task is the idle thread, which has idle->sched_class == &idle_sched_class.


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 1 Dec 2017 10:42:03 +0100
Subject: [PATCH] arm*: disable NEON in kernel mode
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
NEON in kernel mode is used by the crypto algorithms and raid6 code.
While the raid6 code looks okay, the crypto algorithms do not: NEON
@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2169,7 +2169,7 @@ config NEON
@@ -2160,7 +2160,7 @@ config NEON
config KERNEL_MODE_NEON
bool "Support for NEON in kernel mode"


@ -1,7 +1,7 @@
Subject: arm: Enable highmem for rt
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 13 Feb 2013 11:03:11 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
fixup highmem for ARM.


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Mar 2013 21:37:27 +0100
Subject: arm/highmem: Flush tlb on unmap
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
The TLB should be flushed on unmap and thus make the mapping entry
invalid. This is only done in the non-debug case which does not look


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 22 Dec 2016 17:28:33 +0100
Subject: [PATCH] arm: include definition for cpumask_t
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
This definition gets pulled in by other files. With the (later) split of
RCU and spinlock.h it won't compile anymore.


@ -1,7 +1,7 @@
From: Yang Shi <yang.shi@linaro.org>
Date: Thu, 10 Nov 2016 16:17:55 -0800
Subject: [PATCH] arm: kprobe: replace patch_lock to raw lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
When running kprobe on -rt kernel, the below bug is caught:


@ -1,7 +1,7 @@
Subject: arm: Add support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 31 Oct 2012 12:04:11 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Implement the arm pieces for lazy preempt.
@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -89,6 +89,7 @@ config ARM
@@ -90,6 +90,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* __ASM_ARM_THREAD_INFO_H */
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -67,6 +67,7 @@ int main(void)
@@ -56,6 +56,7 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 20 Sep 2013 14:31:54 +0200
Subject: arm/unwind: use a raw_spin_lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Mostly unwind is done with irqs enabled however SLUB may call it with
irqs disabled while creating a new SLUB cache.
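
Background for this and the similar conversions above: on RT, spinlock_t turns into a sleeping rtmutex, so a lock taken in genuinely atomic context (here: the unwinder, called by SLUB with IRQs off) must become a raw_spinlock_t, which always spins. Sketch with illustrative names:

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(ex_lock);   /* stays a spinning lock even on RT */

    static void ex_atomic_path(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&ex_lock, flags);
            /* short, never-sleeping critical section */
            raw_spin_unlock_irqrestore(&ex_lock, flags);
    }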


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 09:13:42 +0200
Subject: [PATCH] arm64: KVM: compute_layout before alternatives are applied
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
compute_layout() is invoked as part of an alternative fixup under
stop_machine() and needs a sleeping lock as part of get_random_long().


@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 25 Jul 2018 14:02:38 +0200
Subject: [PATCH] arm64: fpsimd: use preempt_disable in addition to
local_bh_disable()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
In v4.16-RT I noticed a number of warnings from task_fpsimd_load(). The
code disables BH and expects that it is not preemptible. On -RT the
@ -33,10 +33,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ return sve_state;
+}
+
static void change_cpacr(u64 val, u64 mask)
{
u64 cpacr = read_sysreg(CPACR_EL1);
@@ -566,6 +576,7 @@ int sve_set_vector_length(struct task_st
/*
* TIF_SVE controls whether a task can use SVE without trapping while
* in userspace, and also the way a task's FPSIMD/SVE state is stored
@@ -547,6 +557,7 @@ int sve_set_vector_length(struct task_st
* non-SVE thread.
*/
if (task == current) {
@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
fpsimd_save();
@@ -576,8 +587,10 @@ int sve_set_vector_length(struct task_st
@@ -557,8 +568,10 @@ int sve_set_vector_length(struct task_st
if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
sve_to_fpsimd(task);
@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Force reallocation of task SVE state to the correct size
@@ -832,6 +845,7 @@ asmlinkage void do_sve_acc(unsigned int
@@ -813,6 +826,7 @@ asmlinkage void do_sve_acc(unsigned int
sve_alloc(current);
@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
fpsimd_save();
@@ -845,6 +859,7 @@ asmlinkage void do_sve_acc(unsigned int
@@ -826,6 +840,7 @@ asmlinkage void do_sve_acc(unsigned int
WARN_ON(1); /* SVE access shouldn't have trapped */
local_bh_enable();
@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
@@ -911,10 +926,12 @@ void fpsimd_thread_switch(struct task_st
@@ -892,10 +907,12 @@ void fpsimd_thread_switch(struct task_st
void fpsimd_flush_thread(void)
{
int vl, supported_vl;
@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
memset(&current->thread.uw.fpsimd_state, 0,
@@ -923,7 +940,7 @@ void fpsimd_flush_thread(void)
@@ -904,7 +921,7 @@ void fpsimd_flush_thread(void)
if (system_supports_sve()) {
clear_thread_flag(TIF_SVE);
@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Reset the task vector length as required.
@@ -959,6 +976,8 @@ void fpsimd_flush_thread(void)
@@ -940,6 +957,8 @@ void fpsimd_flush_thread(void)
set_thread_flag(TIF_FOREIGN_FPSTATE);
local_bh_enable();
@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
@@ -970,9 +989,11 @@ void fpsimd_preserve_current_state(void)
@@ -951,9 +970,11 @@ void fpsimd_preserve_current_state(void)
if (!system_supports_fpsimd())
return;
@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
@@ -1030,6 +1051,7 @@ void fpsimd_restore_current_state(void)
@@ -1011,6 +1032,7 @@ void fpsimd_restore_current_state(void)
if (!system_supports_fpsimd())
return;
@ -123,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -1038,6 +1060,7 @@ void fpsimd_restore_current_state(void)
@@ -1019,6 +1041,7 @@ void fpsimd_restore_current_state(void)
}
local_bh_enable();
@ -131,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
@@ -1050,6 +1073,7 @@ void fpsimd_update_current_state(struct
@@ -1031,6 +1054,7 @@ void fpsimd_update_current_state(struct
if (!system_supports_fpsimd())
return;
@ -139,7 +139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
current->thread.uw.fpsimd_state = *state;
@@ -1062,6 +1086,7 @@ void fpsimd_update_current_state(struct
@@ -1043,6 +1067,7 @@ void fpsimd_update_current_state(struct
clear_thread_flag(TIF_FOREIGN_FPSTATE);
local_bh_enable();
@ -147,7 +147,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
@@ -1107,6 +1132,7 @@ void kernel_neon_begin(void)
@@ -1088,6 +1113,7 @@ void kernel_neon_begin(void)
BUG_ON(!may_use_simd());
@ -155,7 +155,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
__this_cpu_write(kernel_neon_busy, true);
@@ -1120,6 +1146,7 @@ void kernel_neon_begin(void)
@@ -1101,6 +1127,7 @@ void kernel_neon_begin(void)
preempt_disable();
local_bh_enable();
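
The recurring shape of the hunks above: every BH-protected FPSIMD/SVE section gains an explicit preempt_disable()/preempt_enable() pair, because on RT local_bh_disable() no longer implies that the task cannot be preempted. Condensed sketch, not a verbatim excerpt:

    #include <linux/preempt.h>
    #include <linux/bottom_half.h>

    static void ex_touch_fpsimd_state(void)
    {
            preempt_disable();      /* on RT, BH-off alone keeps us preemptible */
            local_bh_disable();
            /* ... save/load this CPU's FPSIMD/SVE registers ... */
            local_bh_enable();
            preempt_enable();
    }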


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 09 Mar 2016 10:51:06 +0100
Subject: arm: at91: do not disable/enable clocks in a row
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Currently the driver will disable the clock and enable it one line later
if it is switching from periodic mode into one-shot mode.


@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 13 Mar 2018 13:49:16 +0100
Subject: [PATCH] block: blk-mq: move blk_queue_usage_counter_release()
into process context
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
| BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
| in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6
@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -965,12 +965,21 @@ void blk_queue_exit(struct request_queue
@@ -969,12 +969,21 @@ void blk_queue_exit(struct request_queue
percpu_ref_put(&q->q_usage_counter);
}
@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void blk_rq_timed_out_timer(struct timer_list *t)
@@ -1064,6 +1073,7 @@ struct request_queue *blk_alloc_queue_no
@@ -1067,6 +1076,7 @@ struct request_queue *blk_alloc_queue_no
queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
init_waitqueue_head(&q->mq_freeze_wq);
@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
@@ -3948,6 +3958,8 @@ int __init blk_dev_init(void)
@@ -3957,6 +3967,8 @@ int __init blk_dev_init(void)
if (!kblockd_workqueue)
panic("Failed to create kblockd\n");
@ -99,10 +99,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
+#include <linux/swork.h>
#include <linux/seqlock.h>
#include <linux/u64_stats_sync.h>
@@ -651,6 +652,7 @@ struct request_queue {
struct module;
struct scsi_ioctl_command;
@@ -649,6 +650,7 @@ struct request_queue {
#endif
struct rcu_head rcu_head;
wait_queue_head_t mq_freeze_wq;
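
swork ("simple work") is an RT-only helper added earlier in this queue; blk_queue_usage_counter_release() can run from IRQ context, where waking a regular waitqueue would take a sleeping lock on RT, so the wakeup is bounced to a kernel thread. Hedged sketch of the deferral pattern (event and function names invented; swork_get() must have succeeded once at init, as the blk_dev_init() hunk arranges):

    #include <linux/swork.h>   /* -rt addition */

    static void ex_release_fn(struct swork_event *ev)
    {
            /* runs in the swork kthread: process context, may sleep */
    }

    static struct swork_event ex_release_event;

    static void ex_notify_from_irq(void)
    {
            INIT_SWORK(&ex_release_event, ex_release_fn);
            swork_queue(&ex_release_event);   /* safe from IRQ context */
    }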


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 29 Jan 2015 15:10:08 +0100
Subject: block/mq: don't complete requests via IPI
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
The IPI runs in hardirq context and there are sleeping locks. This patch
moves the completion into a workqueue.
@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
@@ -545,12 +548,24 @@ void blk_mq_end_request(struct request *
@@ -547,12 +550,24 @@ void blk_mq_end_request(struct request *
}
EXPORT_SYMBOL(blk_mq_end_request);
@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void __blk_mq_complete_request(struct request *rq)
{
@@ -573,10 +588,18 @@ static void __blk_mq_complete_request(st
@@ -575,10 +590,18 @@ static void __blk_mq_complete_request(st
shared = cpus_share_cache(cpu, ctx->cpu);
if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -247,7 +247,7 @@ static inline u16 blk_mq_unique_tag_to_t
@@ -249,7 +249,7 @@ static inline u16 blk_mq_unique_tag_to_t
return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void blk_mq_end_request(struct request *rq, blk_status_t error);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -151,6 +151,9 @@ enum mq_rq_state {
@@ -149,6 +149,9 @@ enum mq_rq_state {
*/
struct request {
struct request_queue *q;


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: block/mq: do not invoke preempt_disable()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
preempt_disable() and get_cpu() don't play well together with the sleeping
locks it tries to allocate later.
@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -568,7 +568,7 @@ static void __blk_mq_complete_request(st
@@ -570,7 +570,7 @@ static void __blk_mq_complete_request(st
return;
}
@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);
@@ -580,7 +580,7 @@ static void __blk_mq_complete_request(st
@@ -582,7 +582,7 @@ static void __blk_mq_complete_request(st
} else {
rq->q->softirq_done_fn(rq);
}
@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
@@ -1324,14 +1324,14 @@ static void __blk_mq_delay_run_hw_queue(
@@ -1360,14 +1360,14 @@ static void __blk_mq_delay_run_hw_queue(
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 9 Apr 2014 10:37:23 +0200
Subject: block: mq: use cpu_light()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
There is a might-sleep splat because get_cpu() disables preemption and
later we grab a lock. As a workaround for this we use get_cpu_light().
@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -112,12 +112,12 @@ static inline struct blk_mq_ctx *__blk_m
@@ -113,12 +113,12 @@ static inline struct blk_mq_ctx *__blk_m
*/
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
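
get_cpu_light() is another RT-series helper: it is built on migrate_disable() rather than preempt_disable(), so code between get and put may still take sleeping locks. A sketch of the substitution this patch makes (wrapper name invented; __blk_mq_get_ctx() is the existing helper in blk-mq.h):

    static void ex_use_ctx(struct request_queue *q)
    {
            struct blk_mq_ctx *ctx;

            ctx = __blk_mq_get_ctx(q, get_cpu_light());   /* was: get_cpu() */
            /* ... use ctx; sleeping locks are still legal here on RT ... */
            put_cpu_light();                              /* was: put_cpu() */
    }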


@ -1,7 +1,7 @@
Subject: block: Use cpu_chill() for retry loops
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 20 Dec 2012 18:28:26 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Steven also observed a live lock when there was a
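
cpu_chill() (provided by linux/delay.h on -rt) sleeps for roughly a tick instead of busy-waiting, so a preempted lock holder gets to run. Sketch of the retry-loop conversion this patch performs, with an invented lock:

    #include <linux/spinlock.h>
    #include <linux/delay.h>   /* cpu_chill() on -rt kernels */

    struct ex_obj { spinlock_t lock; };

    static void ex_lock_with_chill(struct ex_obj *o)
    {
            while (!spin_trylock(&o->lock))
                    cpu_chill();   /* was: cpu_relax(); now the holder can run */
            /* ... critical section ... */
            spin_unlock(&o->lock);
    }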


@ -1,271 +0,0 @@
From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
Date: Mon, 9 Jul 2018 17:48:54 -0400
Subject: [PATCH] cgroup/tracing: Move taking of spin lock out of trace event
handlers
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
[ Upstream commit e4f8d81c738db6d3ffdabfb8329aa2feaa310699 ]
It is unwise to take spin locks from the handlers of trace events.
Mainly, because they can introduce lockups, because it introduces locks
in places that are normally not tested. Worse yet, because trace events
are tucked away in the include/trace/events/ directory, locks that are
taken there are forgotten about.
As a general rule, I tell people never to take any locks in a trace
event handler.
Several cgroup trace event handlers call cgroup_path() which eventually
takes the kernfs_rename_lock spinlock. This injects the spinlock in the
code without people realizing it. It also can cause issues for the
PREEMPT_RT patch, as the spinlock becomes a mutex, and the trace event
handlers are called with preemption disabled.
By moving the calculation of the cgroup_path() out of the trace event
handlers and into a macro (surrounded by a
trace_cgroup_##type##_enabled()), then we could place the cgroup_path
into a string, and pass that to the trace event. Not only does this
remove the taking of the spinlock out of the trace event handler, but
it also means that the cgroup_path() only needs to be called once (it
is currently called twice, once to get the length to reserve the
buffer for, and once again to get the path itself). Now it only needs to
be done once.
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/trace/events/cgroup.h | 47 +++++++++++++++++++---------------------
kernel/cgroup/cgroup-internal.h | 26 ++++++++++++++++++++++
kernel/cgroup/cgroup-v1.c | 4 +--
kernel/cgroup/cgroup.c | 12 +++++-----
4 files changed, 58 insertions(+), 31 deletions(-)
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -53,24 +53,22 @@ DEFINE_EVENT(cgroup_root, cgroup_remount
DECLARE_EVENT_CLASS(cgroup,
- TP_PROTO(struct cgroup *cgrp),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgrp),
+ TP_ARGS(cgrp, path),
TP_STRUCT__entry(
__field( int, root )
__field( int, id )
__field( int, level )
- __dynamic_array(char, path,
- cgroup_path(cgrp, NULL, 0) + 1)
+ __string( path, path )
),
TP_fast_assign(
__entry->root = cgrp->root->hierarchy_id;
__entry->id = cgrp->id;
__entry->level = cgrp->level;
- cgroup_path(cgrp, __get_dynamic_array(path),
- __get_dynamic_array_len(path));
+ __assign_str(path, path);
),
TP_printk("root=%d id=%d level=%d path=%s",
@@ -79,45 +77,45 @@ DECLARE_EVENT_CLASS(cgroup,
DEFINE_EVENT(cgroup, cgroup_mkdir,
- TP_PROTO(struct cgroup *cgroup),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgroup)
+ TP_ARGS(cgrp, path)
);
DEFINE_EVENT(cgroup, cgroup_rmdir,
- TP_PROTO(struct cgroup *cgroup),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgroup)
+ TP_ARGS(cgrp, path)
);
DEFINE_EVENT(cgroup, cgroup_release,
- TP_PROTO(struct cgroup *cgroup),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgroup)
+ TP_ARGS(cgrp, path)
);
DEFINE_EVENT(cgroup, cgroup_rename,
- TP_PROTO(struct cgroup *cgroup),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgroup)
+ TP_ARGS(cgrp, path)
);
DECLARE_EVENT_CLASS(cgroup_migrate,
- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+ TP_PROTO(struct cgroup *dst_cgrp, const char *path,
+ struct task_struct *task, bool threadgroup),
- TP_ARGS(dst_cgrp, task, threadgroup),
+ TP_ARGS(dst_cgrp, path, task, threadgroup),
TP_STRUCT__entry(
__field( int, dst_root )
__field( int, dst_id )
__field( int, dst_level )
- __dynamic_array(char, dst_path,
- cgroup_path(dst_cgrp, NULL, 0) + 1)
__field( int, pid )
+ __string( dst_path, path )
__string( comm, task->comm )
),
@@ -125,8 +123,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
__entry->dst_root = dst_cgrp->root->hierarchy_id;
__entry->dst_id = dst_cgrp->id;
__entry->dst_level = dst_cgrp->level;
- cgroup_path(dst_cgrp, __get_dynamic_array(dst_path),
- __get_dynamic_array_len(dst_path));
+ __assign_str(dst_path, path);
__entry->pid = task->pid;
__assign_str(comm, task->comm);
),
@@ -138,16 +135,18 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
DEFINE_EVENT(cgroup_migrate, cgroup_attach_task,
- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+ TP_PROTO(struct cgroup *dst_cgrp, const char *path,
+ struct task_struct *task, bool threadgroup),
- TP_ARGS(dst_cgrp, task, threadgroup)
+ TP_ARGS(dst_cgrp, path, task, threadgroup)
);
DEFINE_EVENT(cgroup_migrate, cgroup_transfer_tasks,
- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+ TP_PROTO(struct cgroup *dst_cgrp, const char *path,
+ struct task_struct *task, bool threadgroup),
- TP_ARGS(dst_cgrp, task, threadgroup)
+ TP_ARGS(dst_cgrp, path, task, threadgroup)
);
#endif /* _TRACE_CGROUP_H */
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -8,6 +8,32 @@
#include <linux/list.h>
#include <linux/refcount.h>
+#define TRACE_CGROUP_PATH_LEN 1024
+extern spinlock_t trace_cgroup_path_lock;
+extern char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
+
+/*
+ * cgroup_path() takes a spin lock. It is good practice not to take
+ * spin locks within trace point handlers, as they are mostly hidden
+ * from normal view. As cgroup_path() can take the kernfs_rename_lock
+ * spin lock, it is best to not call that function from the trace event
+ * handler.
+ *
+ * Note: trace_cgroup_##type##_enabled() is a static branch that will only
+ * be set when the trace event is enabled.
+ */
+#define TRACE_CGROUP_PATH(type, cgrp, ...) \
+ do { \
+ if (trace_cgroup_##type##_enabled()) { \
+ spin_lock(&trace_cgroup_path_lock); \
+ cgroup_path(cgrp, trace_cgroup_path, \
+ TRACE_CGROUP_PATH_LEN); \
+ trace_cgroup_##type(cgrp, trace_cgroup_path, \
+ ##__VA_ARGS__); \
+ spin_unlock(&trace_cgroup_path_lock); \
+ } \
+ } while (0)
+
/*
* A cgroup can be associated with multiple css_sets as different tasks may
* belong to different cgroups on different hierarchies. In the other
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -135,7 +135,7 @@ int cgroup_transfer_tasks(struct cgroup
if (task) {
ret = cgroup_migrate(task, false, &mgctx);
if (!ret)
- trace_cgroup_transfer_tasks(to, task, false);
+ TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
put_task_struct(task);
}
} while (task && !ret);
@@ -865,7 +865,7 @@ static int cgroup1_rename(struct kernfs_
ret = kernfs_rename(kn, new_parent, new_name_str);
if (!ret)
- trace_cgroup_rename(cgrp);
+ TRACE_CGROUP_PATH(rename, cgrp);
mutex_unlock(&cgroup_mutex);
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -83,6 +83,9 @@ EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_lock);
#endif
+DEFINE_SPINLOCK(trace_cgroup_path_lock);
+char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
+
/*
* Protects cgroup_idr and css_idr so that IDs can be released without
* grabbing cgroup_mutex.
@@ -2638,7 +2641,7 @@ int cgroup_attach_task(struct cgroup *ds
cgroup_migrate_finish(&mgctx);
if (!ret)
- trace_cgroup_attach_task(dst_cgrp, leader, threadgroup);
+ TRACE_CGROUP_PATH(attach_task, dst_cgrp, leader, threadgroup);
return ret;
}
@@ -4634,7 +4637,7 @@ static void css_release_work_fn(struct w
struct cgroup *tcgrp;
/* cgroup release path */
- trace_cgroup_release(cgrp);
+ TRACE_CGROUP_PATH(release, cgrp);
if (cgroup_on_dfl(cgrp))
cgroup_rstat_flush(cgrp);
@@ -4977,7 +4980,7 @@ int cgroup_mkdir(struct kernfs_node *par
if (ret)
goto out_destroy;
- trace_cgroup_mkdir(cgrp);
+ TRACE_CGROUP_PATH(mkdir, cgrp);
/* let's create and online css's */
kernfs_activate(kn);
@@ -5165,9 +5168,8 @@ int cgroup_rmdir(struct kernfs_node *kn)
return 0;
ret = cgroup_destroy_locked(cgrp);
-
if (!ret)
- trace_cgroup_rmdir(cgrp);
+ TRACE_CGROUP_PATH(rmdir, cgrp);
cgroup_kn_unlock(kn);
return ret;


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Jul 2018 18:19:48 +0200
Subject: [PATCH] cgroup: use irqsave in cgroup_rstat_flush_locked()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock
either with spin_lock_irq() or spin_lock_irqsave().
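
The change is mechanical: since every path into the flush already runs with interrupts off, the per-CPU lock taken inside the flush loop moves to the irqsave/irqrestore variants so the function never unconditionally re-enables interrupts. Simplified sketch:

    #include <linux/spinlock.h>

    static void ex_flush_one_cpu(raw_spinlock_t *cpu_lock)
    {
            unsigned long flags;

            /* was: raw_spin_lock(cpu_lock) ... raw_spin_unlock(cpu_lock) */
            raw_spin_lock_irqsave(cpu_lock, flags);
            /* ... splice and flush this CPU's rstat updates ... */
            raw_spin_unlock_irqrestore(cpu_lock, flags);
    }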


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 13 Feb 2015 15:52:24 +0100
Subject: cgroups: use simple wait in css_release()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
To avoid:
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4611,10 +4611,10 @@ static void css_free_rwork_fn(struct wor
@@ -4620,10 +4620,10 @@ static void css_free_rwork_fn(struct wor
}
}
@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
@@ -4674,8 +4674,8 @@ static void css_release(struct percpu_re
@@ -4683,8 +4683,8 @@ static void css_release(struct percpu_re
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void init_and_link_css(struct cgroup_subsys_state *css,
@@ -5397,6 +5397,7 @@ static int __init cgroup_wq_init(void)
@@ -5406,6 +5406,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);


@ -1,7 +1,7 @@
From: Benedikt Spranger <b.spranger@linutronix.de>
Date: Mon, 8 Mar 2010 18:57:04 +0100
Subject: clocksource: TCLIB: Allow higher clock rates for clock events
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
By default the TCLIB uses the 32KiHz base clock rate for clock events.
Add a compile-time selection to allow higher clock resolution.


@ -1,7 +1,7 @@
Subject: completion: Use simple wait queues
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 11 Jan 2013 11:23:51 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Completions have no long-lasting callbacks and therefore do not need
the complex waitqueue variant. Use simple waitqueues which reduces the
@ -9,30 +9,46 @@ contention on the waitqueue lock.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 2 -
arch/powerpc/platforms/ps3/device-init.c | 4 +-
drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 4 +-
drivers/usb/gadget/function/f_fs.c | 2 -
drivers/usb/gadget/legacy/inode.c | 4 +-
include/linux/completion.h | 8 ++--
include/linux/suspend.h | 6 +++
include/linux/swait.h | 1
include/linux/swait.h | 2 +
kernel/power/hibernate.c | 7 ++++
kernel/power/suspend.c | 4 ++
kernel/sched/completion.c | 34 ++++++++++----------
kernel/sched/core.c | 10 ++++-
kernel/sched/swait.c | 19 +++++++++++
11 files changed, 70 insertions(+), 27 deletions(-)
kernel/sched/swait.c | 21 +++++++++++-
12 files changed, 75 insertions(+), 31 deletions(-)
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -752,8 +752,8 @@ static int ps3_notification_read_write(s
}
pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
- res = wait_event_interruptible(dev->done.wait,
- dev->done.done || kthread_should_stop());
+ res = swait_event_interruptible_exclusive(dev->done.wait,
+ dev->done.done || kthread_should_stop());
if (kthread_should_stop())
res = -EINTR;
if (res) {
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ez
@@ -697,8 +697,8 @@ static void ezusb_req_ctx_wait(struct ez
while (!ctx->done.done && msecs--)
udelay(1000);
} else {
- wait_event_interruptible(ctx->done.wait,
+ swait_event_interruptible(ctx->done.wait,
ctx->done.done);
- ctx->done.done);
+ swait_event_interruptible_exclusive(ctx->done.wait,
+ ctx->done.done);
}
break;
default:
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1633,7 +1633,7 @@ static void ffs_data_put(struct ffs_data
@ -51,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (likely (value == 0)) {
- value = wait_event_interruptible (done.wait, done.done);
+ value = swait_event_interruptible (done.wait, done.done);
+ value = swait_event_interruptible_exclusive(done.wait, done.done);
if (value != 0) {
spin_lock_irq (&epdata->dev->lock);
if (likely (epdata->ep != NULL)) {
@ -60,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_unlock_irq (&epdata->dev->lock);
- wait_event (done.wait, done.done);
+ swait_event (done.wait, done.done);
+ swait_event_exclusive(done.wait, done.done);
if (epdata->status == -ECONNRESET)
epdata->status = -EINTR;
} else {
@ -119,17 +135,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern suspend_state_t mem_sleep_default;
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -160,6 +160,7 @@ static inline bool swq_has_sleeper(struc
extern void swake_up(struct swait_queue_head *q);
@@ -160,7 +160,9 @@ static inline bool swq_has_sleeper(struc
extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);
+extern void swake_up_all_locked(struct swait_queue_head *q);
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -679,6 +679,10 @@ static int load_image_and_restore(void)
@@ -681,6 +681,10 @@ static int load_image_and_restore(void)
return error;
}
@ -140,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* hibernate - Carry out system hibernation, including saving the image.
*/
@@ -692,6 +696,8 @@ int hibernate(void)
@@ -694,6 +698,8 @@ int hibernate(void)
return -EPERM;
}
@ -149,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lock_system_sleep();
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
@@ -770,6 +776,7 @@ int hibernate(void)
@@ -772,6 +778,7 @@ int hibernate(void)
atomic_inc(&snapshot_device_available);
Unlock:
unlock_system_sleep();
@ -159,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return error;
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -594,6 +594,8 @@ static int enter_state(suspend_state_t s
@@ -600,6 +600,8 @@ static int enter_state(suspend_state_t s
return error;
}
@ -168,7 +186,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* pm_suspend - Externally visible function for suspending the system.
* @state: System sleep state to enter.
@@ -608,6 +610,7 @@ int pm_suspend(suspend_state_t state)
@@ -614,6 +616,7 @@ int pm_suspend(suspend_state_t state)
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
@ -176,7 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
error = enter_state(state);
if (error) {
@@ -617,6 +620,7 @@ int pm_suspend(suspend_state_t state)
@@ -623,6 +626,7 @@ int pm_suspend(suspend_state_t state)
suspend_stats.success++;
}
pr_info("suspend exit\n");
@ -282,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7124,7 +7124,10 @@ void migrate_disable(void)
@@ -7102,7 +7102,10 @@ void migrate_disable(void)
return;
}
#ifdef CONFIG_SCHED_DEBUG
@ -294,7 +312,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
@@ -7154,7 +7157,10 @@ void migrate_enable(void)
@@ -7132,7 +7135,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
@ -331,6 +349,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+EXPORT_SYMBOL(swake_up_all_locked);
+
void swake_up(struct swait_queue_head *q)
void swake_up_one(struct swait_queue_head *q)
{
unsigned long flags;
@@ -69,7 +88,7 @@ void swake_up_all(struct swait_queue_hea
}
EXPORT_SYMBOL(swake_up_all);
-static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
wait->task = current;
if (list_empty(&wait->task_list))
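
For orientation, the 4.19 simple-waitqueue API the completions are converted to (usage sketch; the done flag is illustrative and memory ordering is elided):

    #include <linux/swait.h>

    static DECLARE_SWAIT_QUEUE_HEAD(ex_wq);
    static bool ex_done;

    static int ex_wait(void)
    {
            /* swait is raw-lock based, hence usable on RT from atomic wakers */
            return swait_event_interruptible_exclusive(ex_wq, ex_done);
    }

    static void ex_complete(void)
    {
            ex_done = true;
            swake_up_one(&ex_wq);   /* wakes a single exclusive waiter */
    }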


@ -1,7 +1,7 @@
Subject: sched: Use the proper LOCK_OFFSET for cond_resched()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 22:51:33 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
RT does not increment preempt count when a 'sleeping' spinlock is
locked. Update PREEMPT_LOCK_OFFSET for that case.

View File

@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sun, 16 Oct 2016 05:11:54 +0200
Subject: [PATCH] connector/cn_proc: Protect send_msg() with a local lock
on RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931
|in_atomic(): 1, irqs_disabled(): 0, pid: 31807, name: sleep


@ -1,7 +1,7 @@
Subject: cpu/hotplug: Implement CPU pinning
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 19 Jul 2017 17:31:20 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
@@ -288,7 +293,30 @@ static int cpu_hotplug_disabled;
@@ -285,7 +290,30 @@ static int cpu_hotplug_disabled;
*/
void pin_current_cpu(void)
{
@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
@@ -296,6 +324,13 @@ void pin_current_cpu(void)
@@ -293,6 +321,13 @@ void pin_current_cpu(void)
*/
void unpin_current_cpu(void)
{
@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
@@ -854,6 +889,7 @@ static int take_cpu_down(void *_param)
@@ -846,6 +881,7 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
@@ -866,11 +902,14 @@ static int takedown_cpu(unsigned int cpu
@@ -858,11 +894,14 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
@@ -889,6 +928,7 @@ static int takedown_cpu(unsigned int cpu
@@ -881,6 +920,7 @@ static int takedown_cpu(unsigned int cpu
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
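
pin_current_cpu()/unpin_current_cpu() are the RT hooks invoked from migrate_disable()/migrate_enable(); with this patch they take a per-CPU hotplug lock so that a pinned task holds off takedown_cpu(). Heavily simplified sketch of the relationship (the real code handles nesting and the hotplug thread itself):

    #include <linux/cpu.h>
    #include <linux/sched.h>

    static void ex_migrate_disable(void)
    {
            pin_current_cpu();              /* blocks takedown_cpu() for this CPU */
            current->migrate_disable++;     /* task_struct field from this queue */
    }

    static void ex_migrate_enable(void)
    {
            current->migrate_disable--;
            unpin_current_cpu();            /* CPU unplug may proceed again */
    }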


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 9 Apr 2015 15:23:01 +0200
Subject: cpufreq: drop K8's driver from being selected
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Ralf posted a picture of a backtrace from


@ -1,7 +1,7 @@
Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 14 Dec 2011 01:03:49 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
There are "valid" GFP_ATOMIC allocations such as
@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -940,7 +940,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
@@ -938,7 +938,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
If unsure, say N.
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -434,6 +434,7 @@ config CHECK_SIGNATURE
@@ -441,6 +441,7 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS


@ -1,7 +1,7 @@
From: Mike Galbraith <efault@gmx.de>
Date: Sun, 8 Jan 2017 09:32:25 +0100
Subject: [PATCH] cpuset: Convert callback_lock to raw_spinlock_t
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
The two commits below add up to a cpuset might_sleep() splat for RT:


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 21 Feb 2014 17:24:04 +0100
Subject: crypto: Reduce preempt disabled regions, more algos
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Don Estabrook reported
| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100()


@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 18:52:00 +0200
Subject: [PATCH] crypto: cryptd - add a lock instead
preempt_disable/local_bh_disable
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
cryptd has a per-CPU lock which is protected with local_bh_disable() and
preempt_disable().


@ -4,7 +4,7 @@ Subject: [PATCH] crypto: limit more FPU-enabled sections
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Those crypto drivers use SSE/AVX/… for their crypto work and in order to
do so in kernel they need to enable the "FPU" in kernel mode which


@ -2,7 +2,7 @@ From: Mike Galbraith <efault@gmx.de>
Date: Wed, 11 Jul 2018 17:14:47 +0200
Subject: [PATCH] crypto: scompress - serialize RT percpu scratch buffer
access with a local lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
| BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:974
| in_atomic(): 1, irqs_disabled(): 0, pid: 1401, name: cryptomgr_test


@ -1,7 +1,7 @@
Subject: debugobjects: Make RT aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:41:35 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Avoid filling the pool / allocating memory with irqs off().
@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -374,7 +374,10 @@ static void
@@ -377,7 +377,10 @@ static void
struct debug_obj *obj;
unsigned long flags;


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 27 Mar 2018 16:24:15 +0200
Subject: [PATCH] dm rq: remove BUG_ON(!irqs_disabled) check
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
In commit 052189a2ec95 ("dm: remove superfluous irq disablement in
dm_request_fn") the spin_lock_irq() was replaced with spin_lock() + a


@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Thu, 31 Mar 2016 04:08:28 +0200
Subject: [PATCH] drivers/block/zram: Replace bit spinlocks with rtmutex
for -rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
They're nondeterministic, and lead to ___might_sleep() splats in -rt.
OTOH, they're a lot less wasteful than an rtmutex per page.


@ -1,7 +1,7 @@
Subject: tty/serial/omap: Make the locking RT aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 28 Jul 2011 13:32:57 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
The lock is a sleeping lock and local_irq_save() is not the
optimisation we are looking for. Redo it to make it work on -RT and
non-RT.


@ -1,7 +1,7 @@
Subject: tty/serial/pl011: Make the locking work on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 08 Jan 2013 21:36:51 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
The lock is a sleeping lock and local_irq_save() is not the optimisation
we are looking for. Redo it to make it work on -RT and non-RT.


@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Thu, 20 Oct 2016 11:15:22 +0200
Subject: [PATCH] drivers/zram: Don't disable preemption in
zcomp_stream_get/put()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
In v4.7, the driver switched to percpu compression streams, disabling
preemption via get/put_cpu_ptr(). Use a per-zcomp_strm lock here. We


@ -2,7 +2,7 @@ From: Mike Galbraith <efault@gmx.de>
Date: Wed, 23 Aug 2017 11:57:29 +0200
Subject: [PATCH] drivers/zram: fix zcomp_stream_get() smp_processor_id() use
in preemptible code
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Use get_local_ptr() instead of this_cpu_ptr() to avoid a warning regarding
smp_processor_id() in preemptible code.
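
get_local_ptr()/put_local_ptr() are -rt additions (linux/locallock.h) that pin the task via migrate_disable() instead of disabling preemption, so the per-CPU pointer stays valid while sleeping locks remain usable. Sketch with an invented stream type; the real zram code pairs this with the per-stream spinlock introduced two patches above:

    #include <linux/locallock.h>
    #include <linux/percpu.h>

    struct ex_strm { spinlock_t lock; /* ... buffers ... */ };
    static DEFINE_PER_CPU(struct ex_strm, ex_streams);

    static struct ex_strm *ex_stream_get(void)
    {
            /* was: this_cpu_ptr(), which warns in preemptible code */
            struct ex_strm *s = get_local_ptr(&ex_streams);

            spin_lock(&s->lock);   /* per-stream lock does the serialization */
            return s;
    }

    static void ex_stream_put(struct ex_strm *s)
    {
            spin_unlock(&s->lock);
            put_local_ptr(s);      /* re-enables migration */
    }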


@ -1,7 +1,7 @@
Subject: drm,i915: Use local_lock/unlock_irq() in intel_pipe_update_start/end()
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sat, 27 Feb 2016 09:01:42 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
[ 8.014039] BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:918
@ -57,8 +57,8 @@ Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: linux-rt-users <linux-rt-users@vger.kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/gpu/drm/i915/intel_sprite.c | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
drivers/gpu/drm/i915/intel_sprite.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
@@ -74,6 +75,8 @@ int intel_usecs_to_scanlines(const struc
@@ -60,6 +61,8 @@ int intel_usecs_to_scanlines(const struc
#define VBLANK_EVASION_TIME_US 100
#endif
@ -80,15 +80,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* intel_pipe_update_start() - start update of a set of display registers
* @new_crtc_state: the new crtc state
@@ -107,7 +110,7 @@ void intel_pipe_update_start(const struc
VBLANK_EVASION_TIME_US);
max = vblank_start - 1;
if (intel_psr_wait_for_idle(new_crtc_state))
DRM_ERROR("PSR idle timed out, atomic update may fail\n");
- local_irq_disable();
+ local_lock_irq(pipe_update_lock);
if (min <= 0 || max <= 0)
return;
@@ -137,11 +140,11 @@ void intel_pipe_update_start(const struc
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
@@ -131,11 +134,11 @@ void intel_pipe_update_start(const struc
break;
}
@ -102,7 +102,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
finish_wait(wq, &wait);
@@ -206,7 +209,7 @@ void intel_pipe_update_end(struct intel_
@@ -168,7 +171,7 @@ void intel_pipe_update_start(const struc
return;
irq_disable:
- local_irq_disable();
+ local_lock_irq(pipe_update_lock);
}
/**
@@ -204,7 +207,7 @@ void intel_pipe_update_end(struct intel_
new_crtc_state->base.event = NULL;
}


@ -1,7 +1,7 @@
Subject: drm,radeon,i915: Use preempt_disable/enable_rt() where recommended
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sat, 27 Feb 2016 08:09:11 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
DRM folks identified the spots, so use them.
@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1009,6 +1009,7 @@ static bool i915_get_crtc_scanoutpos(str
@@ -1025,6 +1025,7 @@ static bool i915_get_crtc_scanoutpos(str
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Get optional system timestamp before query. */
if (stime)
@@ -1060,6 +1061,7 @@ static bool i915_get_crtc_scanoutpos(str
@@ -1076,6 +1077,7 @@ static bool i915_get_crtc_scanoutpos(str
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 15:06:10 +0200
Subject: [PATCH] efi: Allow efi=runtime
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
In case the option "efi=noruntime" is the default at build time, the user
could override its state with `efi=runtime' and allow it again.
@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -110,6 +110,9 @@ static int __init parse_efi_cmdline(char
@@ -113,6 +113,9 @@ static int __init parse_efi_cmdline(char
if (parse_option_str(str, "noruntime"))
disable_runtime = true;
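
The added branch is elided in this view; given the description above it presumably mirrors the existing "noruntime" case:

    if (parse_option_str(str, "runtime"))
            disable_runtime = false;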


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 15:03:16 +0200
Subject: [PATCH] efi: Disable runtime services on RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Based on measurements, the EFI functions get_variable /
get_next_variable take up to 2us which looks okay.
@ -29,9 +29,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -84,7 +84,7 @@ struct mm_struct efi_mm = {
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
};
@@ -87,7 +87,7 @@ struct mm_struct efi_mm = {
struct workqueue_struct *efi_rts_wq;
-static bool disable_runtime;
+static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT_BASE);


@ -1,7 +1,7 @@
Subject: fs/epoll: Do not disable preemption on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 16:35:35 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
ep_call_nested() takes a sleeping lock so we can't disable preemption.
The light version is enough since ep_call_nested() doesn't mind being
@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -563,12 +563,12 @@ static int ep_poll_wakeup_proc(void *pri
@@ -571,12 +571,12 @@ static int ep_poll_wakeup_proc(void *pri
static void ep_poll_safewake(wait_queue_head_t *wq)
{


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 16 Feb 2015 18:49:10 +0100
Subject: fs/aio: simple simple work
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768
|in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2
@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -40,6 +40,7 @@
@@ -42,6 +42,7 @@
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/kmap_types.h>
#include <linux/uaccess.h>
@@ -118,6 +119,7 @@ struct kioctx {
@@ -120,6 +121,7 @@ struct kioctx {
long nr_pages;
struct rcu_work free_rwork; /* see free_ioctx() */
@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* signals when all in-flight requests are done
@@ -256,6 +258,7 @@ static int __init aio_setup(void)
@@ -254,6 +256,7 @@ static int __init aio_setup(void)
.mount = aio_mount,
.kill_sb = kill_anon_super,
};
@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
@@ -597,9 +600,9 @@ static void free_ioctx_reqs(struct percp
@@ -595,9 +598,9 @@ static void free_ioctx_reqs(struct percp
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
@@ -617,6 +620,14 @@ static void free_ioctx_users(struct perc
@@ -615,6 +618,14 @@ static void free_ioctx_users(struct perc
percpu_ref_put(&ctx->reqs);
}


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 13 Sep 2017 12:32:34 +0200
Subject: [PATCH] fs/dcache: bring back explicit INIT_HLIST_BL_HEAD init
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Commit 3d375d78593c ("mm: update callers to use HASH_ZERO flag") removed
INIT_HLIST_BL_HEAD and uses the ZERO flag instead for the init. However
@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3115,6 +3115,8 @@ static int __init set_dhash_entries(char
@@ -3062,6 +3062,8 @@ static int __init set_dhash_entries(char
static void __init dcache_init_early(void)
{
@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available.
*/
@@ -3131,11 +3133,16 @@ static void __init dcache_init_early(voi
@@ -3078,11 +3080,16 @@ static void __init dcache_init_early(voi
NULL,
0,
0);
@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* A constructor could be added for stable state like the lists,
* but it is probably not worth it because of the cache nature
@@ -3159,6 +3166,10 @@ static void __init dcache_init(void)
@@ -3106,6 +3113,10 @@ static void __init dcache_init(void)
NULL,
0,
0);
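
The added lines are elided here; judging by the description they restore the explicit init loop, executed before d_hash_shift is inverted into the form d_hash() uses (a sketch under that assumption):

    unsigned int loop;

    for (loop = 0; loop < (1U << d_hash_shift); loop++)
            INIT_HLIST_BL_HEAD(dentry_hashtable + loop);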


@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 20 Oct 2017 11:29:53 +0200
Subject: [PATCH] fs/dcache: disable preemption on i_dir_seq's write side
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
i_dir_seq is an open-coded seqcounter. Based on the code it looks like we
could have two writers in parallel despite the fact that the d_lock is
@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2430,9 +2430,10 @@ EXPORT_SYMBOL(d_rehash);
@@ -2404,9 +2404,10 @@ EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{
@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return n;
cpu_relax();
}
-@@ -2440,7 +2441,8 @@ static inline unsigned start_dir_add(str
+@@ -2414,7 +2415,8 @@ static inline unsigned start_dir_add(str
static inline void end_dir_add(struct inode *dir, unsigned n)
{
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void d_wait_lookup(struct dentry *dentry)
-@@ -2473,7 +2475,7 @@ struct dentry *d_alloc_parallel(struct d
+@@ -2447,7 +2449,7 @@ struct dentry *d_alloc_parallel(struct d
retry:
rcu_read_lock();
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
r_seq = read_seqbegin(&rename_lock);
dentry = __d_lookup_rcu(parent, name, &d_seq);
if (unlikely(dentry)) {
-@@ -2501,7 +2503,7 @@ struct dentry *d_alloc_parallel(struct d
+@@ -2475,7 +2477,7 @@ struct dentry *d_alloc_parallel(struct d
}
hlist_bl_lock(b);
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
-@@ -657,7 +657,7 @@ struct inode {
+@@ -669,7 +669,7 @@ struct inode {
struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
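Pieced together from the hunk context above, the write side ends up roughly as follows. This is a sketch, not the verbatim patch; preempt_disable_rt()/preempt_enable_rt() are assumed to be the -rt helpers used as the guard:

static inline unsigned start_dir_add(struct inode *dir)
{
	/* Keep this writer on the CPU: a preempting second writer can
	 * no longer enter the open-coded seqcount write section. */
	preempt_disable_rt();
	for (;;) {
		unsigned n = dir->i_dir_seq;
		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
			return n;
		cpu_relax();
	}
}

static inline void end_dir_add(struct inode *dir, unsigned n)
{
	smp_store_release(&dir->i_dir_seq, n + 2);
	preempt_enable_rt();
}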


@@ -1,7 +1,7 @@
Subject: fs: dcache: Use cpu_chill() in trylock loops
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 07 Mar 2012 21:00:34 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Use cpu_chill() instead of cpu_relax() to let the system
@@ -23,8 +23,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#include <linux/delay.h>
#include "autofs_i.h"
static unsigned long now;
-@@ -148,7 +149,7 @@ static struct dentry *get_next_positive_
/* Check if a dentry can be expired */
+@@ -153,7 +154,7 @@ static struct dentry *get_next_positive_
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
-@@ -353,8 +354,11 @@ int __mnt_want_write(struct vfsmount *m)
+@@ -327,8 +328,11 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
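The conversion pattern is the same at every call site; a sketch of the autofs case above (the retry label is hypothetical, added here only to make the fragment self-contained):

relock:
	parent = p->d_parent;
	if (!spin_trylock(&parent->d_lock)) {
		spin_unlock(&p->d_lock);
		/* cpu_chill() sleeps briefly on -rt so a preempted lock
		 * holder can run; cpu_relax() would spin against it,
		 * possibly forever. */
		cpu_chill();		/* was: cpu_relax() */
		spin_lock(&p->d_lock);
		goto relock;
	}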


@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 14 Sep 2016 14:35:49 +0200
Subject: [PATCH] fs/dcache: use swait_queue instead of waitqueue
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
__d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock()
which disables preemption. As a workaround convert it to swait.
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -2447,21 +2447,24 @@ static inline void end_dir_add(struct in
+@@ -2421,21 +2421,24 @@ static inline void end_dir_add(struct in
static void d_wait_lookup(struct dentry *dentry)
{
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+ INIT_LIST_HEAD(&__wait.task_list);
+ do {
-+ prepare_to_swait(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
++ prepare_to_swait_exclusive(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock(&dentry->d_lock);
+ schedule();
+ spin_lock(&dentry->d_lock);
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned int hash = name->hash;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
-@@ -2576,7 +2579,7 @@ void __d_lookup_done(struct dentry *dent
+@@ -2550,7 +2553,7 @@ void __d_lookup_done(struct dentry *dent
hlist_bl_lock(b);
dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
INIT_HLIST_NODE(&dentry->d_u.d_alias);
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
-@@ -1196,7 +1196,7 @@ static int fuse_direntplus_link(struct f
+@@ -1203,7 +1203,7 @@ static int fuse_direntplus_link(struct f
struct inode *dir = d_inode(parent);
struct fuse_conn *fc;
struct inode *inode;
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/fs/namei.c
+++ b/fs/namei.c
-@@ -1604,7 +1604,7 @@ static struct dentry *__lookup_slow(cons
+@@ -1645,7 +1645,7 @@ static struct dentry *__lookup_slow(cons
{
struct dentry *dentry, *old;
struct inode *inode = dir->d_inode;
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Don't go there if it's already dead */
if (unlikely(IS_DEADDIR(inode)))
-@@ -3121,7 +3121,7 @@ static int lookup_open(struct nameidata
+@@ -3135,7 +3135,7 @@ static int lookup_open(struct nameidata
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
@@ -121,9 +121,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct dentry *dentry;
struct dentry *alias;
struct inode *dir = d_inode(parent);
-@@ -1454,7 +1454,7 @@ int nfs_atomic_open(struct inode *dir, s
+@@ -1459,7 +1459,7 @@ int nfs_atomic_open(struct inode *dir, s
struct file *file, unsigned open_flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock(&dentry->d_lock);
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
-@@ -1864,7 +1864,7 @@ bool proc_fill_cache(struct file *file,
+@@ -1876,7 +1876,7 @@ bool proc_fill_cache(struct file *file,
child = d_hash_and_lookup(dir, &qname);
if (!child) {
@@ -183,7 +183,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
struct list_head d_child; /* child of parent list */
struct list_head d_subdirs; /* our children */
-@@ -238,7 +238,7 @@ extern struct dentry * d_alloc(struct de
+@@ -236,7 +236,7 @@ extern struct dentry * d_alloc(struct de
extern struct dentry * d_alloc_anon(struct super_block *);
extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
@@ -194,7 +194,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
-@@ -1536,7 +1536,7 @@ struct nfs_unlinkdata {
+@@ -1549,7 +1549,7 @@ struct nfs_unlinkdata {
struct nfs_removeargs args;
struct nfs_removeres res;
struct dentry *dentry;
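On the caller side the conversion is mechanical, as the nfs_atomic_open hunk above shows. A sketch, with parent and name standing in for the caller's dentry and qstr (the third d_alloc_parallel() parameter is the swait queue per the prototype change above):

	/* The on-stack waitqueue becomes a simple-wait queue. */
	DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
	struct dentry *res;

	res = d_alloc_parallel(parent, &name, &wq);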


@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 10:11:25 +0100
Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
bit_spin_locks break under RT.


@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 15 Sep 2016 10:51:27 +0200
Subject: [PATCH] fs/nfs: turn rmdir_sem into a semaphore
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
The RW semaphore had a reader side which used the _non_owner version
because it most likely took the reader lock in one thread and released it
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
-@@ -1775,7 +1775,11 @@ int nfs_rmdir(struct inode *dir, struct
+@@ -1786,7 +1786,11 @@ int nfs_rmdir(struct inode *dir, struct
trace_nfs_rmdir_enter(dir, dentry);
if (d_really_is_positive(dentry)) {
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
/* Ensure the VFS deletes this inode */
switch (error) {
-@@ -1785,7 +1789,11 @@ int nfs_rmdir(struct inode *dir, struct
+@@ -1796,7 +1800,11 @@ int nfs_rmdir(struct inode *dir, struct
case -ENOENT:
nfs_dentry_handle_enoent(dentry);
}
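The four added lines per hunk are presumably a compile-time switch around the lock type; a sketch (CONFIG_PREEMPT_RT_BASE as the guard is an assumption):

#ifdef CONFIG_PREEMPT_RT_BASE
		/* plain semaphore on -rt: the release may happen in a
		 * different thread than the acquirer */
		down(&NFS_I(dir)->rmdir_sem);
#else
		down_write(&NFS_I(dir)->rmdir_sem);
#endif
		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);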


@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 09:18:52 +0100
Subject: buffer_head: Replace bh_uptodate_lock for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Wrap the bit_spin_lock calls into a separate inline and add the RT
replacements with a real spinlock.
@@ -11,9 +11,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
fs/buffer.c | 21 +++++++--------------
fs/ext4/page-io.c | 6 ++----
fs/ntfs/aops.c | 10 +++-------
- fs/xfs/xfs_aops.c | 6 ++----
include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++
- 5 files changed, 48 insertions(+), 29 deletions(-)
+ 4 files changed, 46 insertions(+), 25 deletions(-)
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -76,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(end_buffer_async_write);
-@@ -3349,6 +3341,7 @@ struct buffer_head *alloc_buffer_head(gf
+@@ -3360,6 +3352,7 @@ struct buffer_head *alloc_buffer_head(gf
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
@@ -139,28 +138,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -108,8 +108,7 @@ xfs_finish_page_writeback(
ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
+ flags = bh_uptodate_lock_irqsave(head);
do {
if (off >= bvec->bv_offset &&
off < bvec->bv_offset + bvec->bv_len) {
@@ -131,8 +130,7 @@ xfs_finish_page_writeback(
}
off += bh->b_size;
} while ((bh = bh->b_this_page) != head);
- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(head, flags);
if (!busy)
end_page_writeback(bvec->bv_page);
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -76,8 +76,42 @@ struct buffer_head {
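From the xfs hunk above, the wrapper pair added to buffer_head.h plausibly looks like the following sketch. The RT branch assumes a b_uptodate_lock spinlock added to struct buffer_head; the non-RT branch keeps the old bit-spinlock behaviour:

static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
{
	unsigned long flags;

#ifndef CONFIG_PREEMPT_RT_BASE
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
#else
	spin_lock_irqsave(&bh->b_uptodate_lock, flags);
#endif
	return flags;
}

static inline void bh_uptodate_unlock_irqrestore(struct buffer_head *bh,
						 unsigned long flags)
{
#ifndef CONFIG_PREEMPT_RT_BASE
	bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
	local_irq_restore(flags);
#else
	spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
#endif
}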


@@ -1,7 +1,7 @@
From: Clark Williams <williams@redhat.com>
Date: Tue, 3 Jul 2018 13:34:30 -0500
Subject: [PATCH] fscache: initialize cookie hash table raw spinlocks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
The fscache cookie mechanism uses a hash table of hlist_bl_head structures. The
PREEMPT_RT patchset adds a raw spinlock to this structure and so on PREEMPT_RT
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
-@@ -973,3 +973,11 @@ int __fscache_check_consistency(struct f
+@@ -962,3 +962,11 @@ int __fscache_check_consistency(struct f
return -ESTALE;
}
EXPORT_SYMBOL(__fscache_check_consistency);
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+}
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
-@@ -151,6 +151,7 @@ static int __init fscache_init(void)
+@@ -149,6 +149,7 @@ static int __init fscache_init(void)
ret = -ENOMEM;
goto error_cookie_jar;
}
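The +} above closes a new init helper in cookie.c. Reconstructed from the description, it presumably walks the cookie hash table; fscache_cookie_hash as the array name is an assumption:

void __init fscache_cookie_init(void)
{
	int i;

	/* Give each bucket a defined state; on PREEMPT_RT the
	 * hlist_bl_head carries a raw spinlock that must be
	 * initialised before first use. */
	for (i = 0; i < ARRAY_SIZE(fscache_cookie_hash); i++)
		INIT_HLIST_BL_HEAD(&fscache_cookie_hash[i]);
}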


@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sun, 16 Oct 2016 05:08:30 +0200
Subject: [PATCH] ftrace: Fix trace header alignment
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Line up helper arrows to the right column.
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -3347,17 +3347,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3348,17 +3348,17 @@ get_total_entries(struct trace_buffer *b
static void print_lat_help_header(struct seq_file *m)
{


@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:56:42 +0200
Subject: trace: Add migrate-disabled counter to tracing output
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_EVENT_TYPE_MAX \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2145,6 +2145,8 @@ tracing_generic_entry_update(struct trac
+@@ -2146,6 +2146,8 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -3348,9 +3350,10 @@ static void print_lat_help_header(struct
+@@ -3349,9 +3351,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
-@@ -187,6 +187,8 @@ static int trace_define_common_fields(vo
+@@ -188,6 +188,8 @@ static int trace_define_common_fields(vo
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
-@@ -493,6 +493,11 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -494,6 +494,11 @@ int trace_print_lat_fmt(struct trace_seq
else
trace_seq_putc(s, '.');
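The trace_output.c hunk adds five lines after the preempt-depth output; a sketch of what they plausibly print (the migrate_disable field name follows the __common_field addition above):

	/* Print the migrate-disable depth next to preempt-depth,
	 * '.' when the task is not migrate-disabled. */
	if (entry->migrate_disable)
		trace_seq_printf(s, "%x", entry->migrate_disable);
	else
		trace_seq_putc(s, '.');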


@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 1 Mar 2013 11:17:42 +0100
Subject: futex: Ensure lock/unlock symmetry versus pi_lock and hash bucket lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
In exit_pi_state_list() we have the following locking construct:


@@ -1,7 +1,7 @@
From: Steven Rostedt <rostedt@goodmis.org>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: futex: Fix bug on when a requeued RT task times out
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Requeue with timeout causes a bug with PREEMPT_RT_FULL.


@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 8 Mar 2017 14:23:35 +0100
Subject: [PATCH] futex: workaround migrate_disable/enable in different context
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
migrate_disable()/migrate_enable() take a different path in atomic() vs
!atomic() context. These little hacks ensure that we don't underflow / overflow


@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:57 -0500
Subject: genirq: Disable irqpoll on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Creates long latencies for no value


@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 21 Aug 2013 17:48:46 +0200
Subject: genirq: Do not invoke the affinity callback via a workqueue on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Joe Korty reported that __irq_set_affinity_locked() schedules a
workqueue while holding a rawlock which results in a might_sleep()


@@ -1,7 +1,7 @@
Subject: genirq: Force interrupt thread on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 03 Apr 2011 11:57:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Force threaded IRQs and optimize the code (force_irqthreads) accordingly.


@@ -1,7 +1,7 @@
From: Josh Cartwright <joshc@ni.com>
Date: Thu, 11 Feb 2016 11:54:00 -0600
Subject: genirq: update irq_set_irqchip_state documentation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
On -rt kernels, the use of migrate_disable()/migrate_enable() is
sufficient to guarantee a task isn't moved to another CPU. Update the
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -2261,7 +2261,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
+@@ -2270,7 +2270,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*


@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 4 Aug 2017 18:31:00 +0200
Subject: [PATCH] hotplug: duct-tape RT-rwlock usage for non-RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
This type is only available on -RT. We need to craft something for
non-RT. Since the only migrate_disable() user is -RT, there is no
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \
__RWLOCK_RT_INITIALIZER(cpuhp_pin_lock);
#endif
-@@ -293,6 +293,7 @@ static int cpu_hotplug_disabled;
+@@ -290,6 +290,7 @@ static int cpu_hotplug_disabled;
*/
void pin_current_cpu(void)
{
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_rw_lock *cpuhp_pin;
unsigned int cpu;
int ret;
-@@ -317,6 +318,7 @@ void pin_current_cpu(void)
+@@ -314,6 +315,7 @@ void pin_current_cpu(void)
goto again;
}
current->pinned_on_cpu = cpu;
@@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -324,6 +326,7 @@ void pin_current_cpu(void)
+@@ -321,6 +323,7 @@ void pin_current_cpu(void)
*/
void unpin_current_cpu(void)
{
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
if (WARN_ON(current->pinned_on_cpu != smp_processor_id()))
-@@ -331,6 +334,7 @@ void unpin_current_cpu(void)
+@@ -328,6 +331,7 @@ void unpin_current_cpu(void)
current->pinned_on_cpu = -1;
__read_rt_unlock(cpuhp_pin);
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
-@@ -889,7 +893,9 @@ static int take_cpu_down(void *_param)
+@@ -881,7 +885,9 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
-@@ -902,14 +908,18 @@ static int takedown_cpu(unsigned int cpu
+@@ -894,14 +900,18 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
-@@ -928,7 +938,9 @@ static int takedown_cpu(unsigned int cpu
+@@ -920,7 +930,9 @@ static int takedown_cpu(unsigned int cpu
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
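For orientation, the guarded code that the hunks above wrap is roughly the following sketch; CONFIG_PREEMPT_RT_FULL as the guard is an assumption, matching "only available on -RT" from the description:

#ifdef CONFIG_PREEMPT_RT_FULL
	struct rt_rw_lock *cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, cpu);

	/* Block new pinners and wait for current ones to drain before
	 * the CPU is actually taken down. */
	__write_rt_lock(cpuhp_pin);
#endif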


@@ -1,7 +1,7 @@
Subject: hotplug: Lightweight get online cpus
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 15 Jun 2011 12:36:06 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
get_online_cpus() is a heavyweight function which involves a global
mutex. migrate_disable() wants a simpler construct which prevents only
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -110,6 +110,8 @@ extern void cpu_hotplug_disable(void);
+@@ -111,6 +111,8 @@ extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else /* CONFIG_HOTPLUG_CPU */
-@@ -120,6 +122,9 @@ static inline void cpus_read_unlock(void
+@@ -122,6 +124,9 @@ static inline int cpus_read_trylock(voi
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Wrappers which go away once all code is converted */
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -283,6 +283,21 @@ static int cpu_hotplug_disabled;
+@@ -280,6 +280,21 @@ static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void cpus_read_lock(void)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7219,6 +7219,7 @@ void migrate_disable(void)
+@@ -7197,6 +7197,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
migrate_disable_update_cpus_allowed(p);
p->migrate_disable = 1;
-@@ -7284,12 +7285,15 @@ void migrate_enable(void)
+@@ -7262,12 +7263,15 @@ void migrate_enable(void)
arg.task = p;
arg.dest_cpu = dest_cpu;
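The sched/core.c hunks above show where the pin nests; the resulting migrate_disable() fast path is roughly the following sketch, pieced together from the visible context:

	/* pin_current_cpu() only blocks unplug of this CPU instead of
	 * taking the global hotplug mutex, which keeps
	 * migrate_disable() cheap. */
	preempt_disable();
	pin_current_cpu();
	migrate_disable_update_cpus_allowed(p);
	p->migrate_disable = 1;
	preempt_enable();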


@@ -1,7 +1,7 @@
From: Yang Shi <yang.shi@windriver.com>
Date: Mon, 16 Sep 2013 14:09:19 -0700
Subject: hrtimer: Move schedule_work call to helper thread
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
When running the ltp leapsec_timer test, the following call trace is caught:


@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:31 -0500
Subject: hrtimer: move timers by default into the softirq context
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
We can't have hrtimer callbacks running in hardirq context on RT. Therefore
the timers are deferred to the softirq context by default.
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
-@@ -2176,7 +2176,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
+@@ -2245,7 +2245,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
apic->vcpu = vcpu;
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
timer->function = perf_mux_hrtimer_handler;
}
-@@ -9166,7 +9166,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -9173,7 +9173,7 @@ static void perf_swevent_init_hrtimer(st
if (!is_sampling_event(event))
return;
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -321,7 +321,7 @@ static void hrtick_rq_init(struct rq *rq
+@@ -315,7 +315,7 @@ static void hrtick_rq_init(struct rq *rq
rq->hrtick_csd.info = rq;
#endif
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else /* CONFIG_SCHED_HRTICK */
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -1053,7 +1053,7 @@ void init_dl_task_timer(struct sched_dl_
+@@ -1054,7 +1054,7 @@ void init_dl_task_timer(struct sched_dl_
{
struct hrtimer *timer = &dl_se->dl_timer;
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -5182,9 +5182,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+@@ -4878,9 +4878,9 @@ void init_cfs_bandwidth(struct cfs_bandw
cfs_b->period = ns_to_ktime(default_cfs_period());
INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
@@ -113,11 +113,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
cfs_b->slack_timer.function = sched_cfs_slack_timer;
cfs_b->distribute_running = 0;
}
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -43,8 +43,8 @@ void init_rt_bandwidth(struct rt_bandwid
+@@ -45,8 +45,8 @@ void init_rt_bandwidth(struct rt_bandwid
raw_spin_lock_init(&rt_b->rt_runtime_lock);
@ -160,7 +160,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
memset(timer, 0, sizeof(struct hrtimer));
cpu_base = raw_cpu_ptr(&hrtimer_bases);
@@ -1682,6 +1691,14 @@ static void __hrtimer_init_sleeper(struc
@@ -1681,6 +1690,14 @@ static void __hrtimer_init_sleeper(struc
enum hrtimer_mode mode,
struct task_struct *task)
{
@@ -199,7 +199,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Get the next period (per-CPU) */
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
-@@ -463,7 +463,7 @@ static void watchdog_enable(unsigned int
+@@ -483,7 +483,7 @@ static void watchdog_enable(unsigned int
* Start the timer first to prevent the NMI watchdog triggering
* before the timer has a chance to fire.
*/

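The conversion pattern is uniform across all files touched by this patch: timers stay in softirq context by default, and the few that must keep firing in hard IRQ context opt in explicitly. A sketch, using the watchdog timer from the last hunk as the assumed example:

	/* timer is assumed to be the watchdog's per-CPU hrtimer; the
	 * _HARD mode variant keeps it expiring in hard IRQ context
	 * even on -rt. */
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);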