diff --git a/debian/changelog b/debian/changelog index 2b8d89f8d..a87fe5d5b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -23,6 +23,9 @@ linux (4.19-1~exp1) UNRELEASED; urgency=medium [ Noah Meyerhans ] * [cloud-amd64] Enable Amazon ENA ethernet driver (Closes: #910049) + [ Romain Perier ] + * [rt] Update to 4.19-rt1 + -- Ben Hutchings Mon, 08 Oct 2018 18:45:06 +0100 linux (4.19~rc7-1~exp1) experimental; urgency=medium diff --git a/debian/config/defines b/debian/config/defines index e5f7a77f5..fa52ae2a6 100644 --- a/debian/config/defines +++ b/debian/config/defines @@ -122,7 +122,7 @@ debug-info: true signed-code: false [featureset-rt_base] -enabled: false +enabled: true [description] part-long-up: This kernel is not suitable for SMP (multi-processor, diff --git a/debian/patches-rt/0001-ARM-at91-add-TCB-registers-definitions.patch b/debian/patches-rt/0001-ARM-at91-add-TCB-registers-definitions.patch index c13bd25e7..b48e619dc 100644 --- a/debian/patches-rt/0001-ARM-at91-add-TCB-registers-definitions.patch +++ b/debian/patches-rt/0001-ARM-at91-add-TCB-registers-definitions.patch @@ -1,7 +1,7 @@ From: Alexandre Belloni Date: Wed, 18 Apr 2018 12:51:38 +0200 Subject: [PATCH 1/6] ARM: at91: add TCB registers definitions -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Add registers and bits definitions for the timer counter blocks found on Atmel ARM SoCs. diff --git a/debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch index 1eeec05d4..bd19309a3 100644 --- a/debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch +++ b/debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch @@ -2,7 +2,7 @@ From: Peter Zijlstra Date: Mon, 28 May 2018 15:24:20 +0200 Subject: [PATCH 1/4] Split IRQ-off and zone->lock while freeing pages from PCP list #1 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Split the IRQ-off section while accessing the PCP list from zone->lock while freeing pages. diff --git a/debian/patches-rt/0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch b/debian/patches-rt/0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch deleted file mode 100644 index c914464de..000000000 --- a/debian/patches-rt/0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch +++ /dev/null @@ -1,103 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Mon, 7 May 2018 16:51:09 +0200 -Subject: [PATCH] bdi: use refcount_t for reference counting instead atomic_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -refcount_t type and corresponding API should be used instead of atomic_t when -the variable is used as a reference counter. This allows to avoid accidental -refcounter overflows that might lead to use-after-free situations. 
- -Suggested-by: Peter Zijlstra -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/backing-dev-defs.h | 3 ++- - include/linux/backing-dev.h | 4 ++-- - mm/backing-dev.c | 12 ++++++------ - 3 files changed, 10 insertions(+), 9 deletions(-) - ---- a/include/linux/backing-dev-defs.h -+++ b/include/linux/backing-dev-defs.h -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - - struct page; - struct device; -@@ -75,7 +76,7 @@ enum wb_reason { - */ - struct bdi_writeback_congested { - unsigned long state; /* WB_[a]sync_congested flags */ -- atomic_t refcnt; /* nr of attached wb's and blkg */ -+ refcount_t refcnt; /* nr of attached wb's and blkg */ - - #ifdef CONFIG_CGROUP_WRITEBACK - struct backing_dev_info *__bdi; /* the associated bdi, set to NULL ---- a/include/linux/backing-dev.h -+++ b/include/linux/backing-dev.h -@@ -404,13 +404,13 @@ static inline bool inode_cgwb_enabled(st - static inline struct bdi_writeback_congested * - wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) - { -- atomic_inc(&bdi->wb_congested->refcnt); -+ refcount_inc(&bdi->wb_congested->refcnt); - return bdi->wb_congested; - } - - static inline void wb_congested_put(struct bdi_writeback_congested *congested) - { -- if (atomic_dec_and_test(&congested->refcnt)) -+ if (refcount_dec_and_test(&congested->refcnt)) - kfree(congested); - } - ---- a/mm/backing-dev.c -+++ b/mm/backing-dev.c -@@ -438,10 +438,10 @@ wb_congested_get_create(struct backing_d - if (new_congested) { - /* !found and storage for new one already allocated, insert */ - congested = new_congested; -- new_congested = NULL; - rb_link_node(&congested->rb_node, parent, node); - rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree); -- goto found; -+ spin_unlock_irqrestore(&cgwb_lock, flags); -+ return congested; - } - - spin_unlock_irqrestore(&cgwb_lock, flags); -@@ -451,13 +451,13 @@ wb_congested_get_create(struct backing_d - if (!new_congested) - return NULL; - -- atomic_set(&new_congested->refcnt, 0); -+ refcount_set(&new_congested->refcnt, 1); - new_congested->__bdi = bdi; - new_congested->blkcg_id = blkcg_id; - goto retry; - - found: -- atomic_inc(&congested->refcnt); -+ refcount_inc(&congested->refcnt); - spin_unlock_irqrestore(&cgwb_lock, flags); - kfree(new_congested); - return congested; -@@ -474,7 +474,7 @@ void wb_congested_put(struct bdi_writeba - unsigned long flags; - - local_irq_save(flags); -- if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) { -+ if (!refcount_dec_and_lock(&congested->refcnt, &cgwb_lock)) { - local_irq_restore(flags); - return; - } -@@ -804,7 +804,7 @@ static int cgwb_bdi_init(struct backing_ - if (!bdi->wb_congested) - return -ENOMEM; - -- atomic_set(&bdi->wb_congested->refcnt, 1); -+ refcount_set(&bdi->wb_congested->refcnt, 1); - - err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); - if (err) { diff --git a/debian/patches-rt/0001-mm-list_lru-use-list_lru_walk_one-in-list_lru_walk_n.patch b/debian/patches-rt/0001-mm-list_lru-use-list_lru_walk_one-in-list_lru_walk_n.patch deleted file mode 100644 index 4844b9577..000000000 --- a/debian/patches-rt/0001-mm-list_lru-use-list_lru_walk_one-in-list_lru_walk_n.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Tue, 3 Jul 2018 12:56:19 +0200 -Subject: [PATCH 1/4] mm/list_lru: use list_lru_walk_one() in - list_lru_walk_node() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -list_lru_walk_node() invokes __list_lru_walk_one() with -1 as the -memcg_idx 
parameter. The same can be achieved by list_lru_walk_one() and -passing NULL as memcg argument which then gets converted into -1. This -is a preparation step when the spin_lock() function is lifted to the -caller of __list_lru_walk_one(). -Invoke list_lru_walk_one() instead __list_lru_walk_one() when possible. - -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/list_lru.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/mm/list_lru.c -+++ b/mm/list_lru.c -@@ -272,8 +272,8 @@ unsigned long list_lru_walk_node(struct - long isolated = 0; - int memcg_idx; - -- isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg, -- nr_to_walk); -+ isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg, -+ nr_to_walk); - if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) { - for_each_memcg_cache_index(memcg_idx) { - isolated += __list_lru_walk_one(lru, nid, memcg_idx, diff --git a/debian/patches-rt/0001-mm-workingset-remove-local_irq_disable-from-count_sh.patch b/debian/patches-rt/0001-mm-workingset-remove-local_irq_disable-from-count_sh.patch deleted file mode 100644 index 6e8359113..000000000 --- a/debian/patches-rt/0001-mm-workingset-remove-local_irq_disable-from-count_sh.patch +++ /dev/null @@ -1,46 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 22 Jun 2018 10:48:51 +0200 -Subject: [PATCH 1/3] mm: workingset: remove local_irq_disable() from - count_shadow_nodes() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -In commit 0c7c1bed7e13 ("mm: make counting of list_lru_one::nr_items -lockless") the - spin_lock(&nlru->lock); - -statement was replaced with - rcu_read_lock(); - -in __list_lru_count_one(). The comment in count_shadow_nodes() says that -the local_irq_disable() is required because the lock must be acquired -with disabled interrupts and (spin_lock()) does not do so. -Since the lock is replaced with rcu_read_lock() the local_irq_disable() -is no longer needed. The code path is - list_lru_shrink_count() - -> list_lru_count_one() - -> __list_lru_count_one() - -> rcu_read_lock() - -> list_lru_from_memcg_idx() - -> rcu_read_unlock() - -Remove the local_irq_disable() statement. - -Cc: Kirill Tkhai -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/workingset.c | 3 --- - 1 file changed, 3 deletions(-) - ---- a/mm/workingset.c -+++ b/mm/workingset.c -@@ -366,10 +366,7 @@ static unsigned long count_shadow_nodes( - unsigned long nodes; - unsigned long cache; - -- /* list_lru lock nests inside the IRQ-safe i_pages lock */ -- local_irq_disable(); - nodes = list_lru_shrink_count(&shadow_nodes, sc); -- local_irq_enable(); - - /* - * Approximate a reasonable limit for the radix tree nodes diff --git a/debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch index f314ceb66..99fd19222 100644 --- a/debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch +++ b/debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch @@ -2,7 +2,7 @@ From: Peter Zijlstra Date: Mon, 28 May 2018 15:24:21 +0200 Subject: [PATCH 2/4] Split IRQ-off and zone->lock while freeing pages from PCP list #2 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Split the IRQ-off section while accessing the PCP list from zone->lock while freeing pages. 
diff --git a/debian/patches-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch b/debian/patches-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch index 1c7fcd6ac..138baac2d 100644 --- a/debian/patches-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch +++ b/debian/patches-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch @@ -2,7 +2,7 @@ From: Alexandre Belloni Date: Wed, 18 Apr 2018 12:51:39 +0200 Subject: [PATCH 2/6] clocksource/drivers: Add a new driver for the Atmel ARM TC blocks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Add a driver for the Atmel Timer Counter Blocks. This driver provides a clocksource and two clockevent devices. diff --git a/debian/patches-rt/0002-drivers-md-raid5-Use-irqsave-variant-of-atomic_dec_a.patch b/debian/patches-rt/0002-drivers-md-raid5-Use-irqsave-variant-of-atomic_dec_a.patch deleted file mode 100644 index e1f280345..000000000 --- a/debian/patches-rt/0002-drivers-md-raid5-Use-irqsave-variant-of-atomic_dec_a.patch +++ /dev/null @@ -1,37 +0,0 @@ -From: Anna-Maria Gleixner -Date: Fri, 4 May 2018 17:45:32 +0200 -Subject: [PATCH 2/3] drivers/md/raid5: Use irqsave variant of - atomic_dec_and_lock() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -The irqsave variant of atomic_dec_and_lock handles irqsave/restore when -taking/releasing the spin lock. With this variant the call of -local_irq_save is no longer required. - -Signed-off-by: Anna-Maria Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/md/raid5.c | 5 ++--- - 1 file changed, 2 insertions(+), 3 deletions(-) - ---- a/drivers/md/raid5.c -+++ b/drivers/md/raid5.c -@@ -409,16 +409,15 @@ void raid5_release_stripe(struct stripe_ - md_wakeup_thread(conf->mddev->thread); - return; - slow_path: -- local_irq_save(flags); - /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */ -- if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { -+ if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { - INIT_LIST_HEAD(&list); - hash = sh->hash_lock_index; - do_release_stripe(conf, sh, &list); - spin_unlock(&conf->device_lock); - release_inactive_stripe_list(conf, &list, hash); -+ local_irq_restore(flags); - } -- local_irq_restore(flags); - } - - static inline void remove_hash(struct stripe_head *sh) diff --git a/debian/patches-rt/0002-mm-list_lru-Move-locking-from-__list_lru_walk_one-to.patch b/debian/patches-rt/0002-mm-list_lru-Move-locking-from-__list_lru_walk_one-to.patch deleted file mode 100644 index c9c73ee8e..000000000 --- a/debian/patches-rt/0002-mm-list_lru-Move-locking-from-__list_lru_walk_one-to.patch +++ /dev/null @@ -1,65 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Tue, 3 Jul 2018 13:06:07 +0200 -Subject: [PATCH 2/4] mm/list_lru: Move locking from __list_lru_walk_one() to - its caller -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -Move the locking inside __list_lru_walk_one() to its caller. This is a -preparation step in order to introduce list_lru_walk_one_irq() which -does spin_lock_irq() instead of spin_lock() for the locking. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/list_lru.c | 18 +++++++++++++----- - 1 file changed, 13 insertions(+), 5 deletions(-) - ---- a/mm/list_lru.c -+++ b/mm/list_lru.c -@@ -204,7 +204,6 @@ static unsigned long - struct list_head *item, *n; - unsigned long isolated = 0; - -- spin_lock(&nlru->lock); - l = list_lru_from_memcg_idx(nlru, memcg_idx); - restart: - list_for_each_safe(item, n, &l->list) { -@@ -250,8 +249,6 @@ static unsigned long - BUG(); - } - } -- -- spin_unlock(&nlru->lock); - return isolated; - } - -@@ -260,8 +257,14 @@ list_lru_walk_one(struct list_lru *lru, - list_lru_walk_cb isolate, void *cb_arg, - unsigned long *nr_to_walk) - { -- return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), -- isolate, cb_arg, nr_to_walk); -+ struct list_lru_node *nlru = &lru->node[nid]; -+ unsigned long ret; -+ -+ spin_lock(&nlru->lock); -+ ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), -+ isolate, cb_arg, nr_to_walk); -+ spin_unlock(&nlru->lock); -+ return ret; - } - EXPORT_SYMBOL_GPL(list_lru_walk_one); - -@@ -276,8 +279,13 @@ unsigned long list_lru_walk_node(struct - nr_to_walk); - if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) { - for_each_memcg_cache_index(memcg_idx) { -+ struct list_lru_node *nlru = &lru->node[nid]; -+ -+ spin_lock(&nlru->lock); - isolated += __list_lru_walk_one(lru, nid, memcg_idx, - isolate, cb_arg, nr_to_walk); -+ spin_unlock(&nlru->lock); -+ - if (*nr_to_walk <= 0) - break; - } diff --git a/debian/patches-rt/0002-mm-workingset-make-shadow_lru_isolate-use-locking-su.patch b/debian/patches-rt/0002-mm-workingset-make-shadow_lru_isolate-use-locking-su.patch deleted file mode 100644 index d961adc46..000000000 --- a/debian/patches-rt/0002-mm-workingset-make-shadow_lru_isolate-use-locking-su.patch +++ /dev/null @@ -1,45 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 22 Jun 2018 11:43:35 +0200 -Subject: [PATCH 2/3] mm: workingset: make shadow_lru_isolate() use locking - suffix -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -shadow_lru_isolate() disables interrupts and acquires a lock. It could -use spin_lock_irq() instead. It also uses local_irq_enable() while it -could use spin_unlock_irq()/xa_unlock_irq(). - -Use proper suffix for lock/unlock in order to enable/disable interrupts -during release/acquire of a lock. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/workingset.c | 8 +++----- - 1 file changed, 3 insertions(+), 5 deletions(-) - ---- a/mm/workingset.c -+++ b/mm/workingset.c -@@ -431,7 +431,7 @@ static enum lru_status shadow_lru_isolat - - /* Coming from the list, invert the lock order */ - if (!xa_trylock(&mapping->i_pages)) { -- spin_unlock(lru_lock); -+ spin_unlock_irq(lru_lock); - ret = LRU_RETRY; - goto out; - } -@@ -469,13 +469,11 @@ static enum lru_status shadow_lru_isolat - workingset_lookup_update(mapping)); - - out_invalid: -- xa_unlock(&mapping->i_pages); -+ xa_unlock_irq(&mapping->i_pages); - ret = LRU_REMOVED_RETRY; - out: -- local_irq_enable(); - cond_resched(); -- local_irq_disable(); -- spin_lock(lru_lock); -+ spin_lock_irq(lru_lock); - return ret; - } - diff --git a/debian/patches-rt/0002-userns-use-refcount_t-for-reference-counting-instead.patch b/debian/patches-rt/0002-userns-use-refcount_t-for-reference-counting-instead.patch deleted file mode 100644 index 0de4fb5da..000000000 --- a/debian/patches-rt/0002-userns-use-refcount_t-for-reference-counting-instead.patch +++ /dev/null @@ -1,83 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Mon, 7 May 2018 17:09:42 +0200 -Subject: [PATCH] userns: use refcount_t for reference counting instead - atomic_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -refcount_t type and corresponding API should be used instead of atomic_t when -the variable is used as a reference counter. This allows to avoid accidental -refcounter overflows that might lead to use-after-free situations. - -Suggested-by: Peter Zijlstra -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/sched/user.h | 5 +++-- - kernel/user.c | 8 ++++---- - 2 files changed, 7 insertions(+), 6 deletions(-) - ---- a/include/linux/sched/user.h -+++ b/include/linux/sched/user.h -@@ -4,6 +4,7 @@ - - #include - #include -+#include - #include - - struct key; -@@ -12,7 +13,7 @@ struct key; - * Some day this will be a full-fledged user tracking system.. - */ - struct user_struct { -- atomic_t __count; /* reference count */ -+ refcount_t __count; /* reference count */ - atomic_t processes; /* How many processes does this user have? */ - atomic_t sigpending; /* How many pending signals does this user have? 
*/ - #ifdef CONFIG_FANOTIFY -@@ -59,7 +60,7 @@ extern struct user_struct root_user; - extern struct user_struct * alloc_uid(kuid_t); - static inline struct user_struct *get_uid(struct user_struct *u) - { -- atomic_inc(&u->__count); -+ refcount_inc(&u->__count); - return u; - } - extern void free_uid(struct user_struct *); ---- a/kernel/user.c -+++ b/kernel/user.c -@@ -96,7 +96,7 @@ static DEFINE_SPINLOCK(uidhash_lock); - - /* root_user.__count is 1, for init task cred */ - struct user_struct root_user = { -- .__count = ATOMIC_INIT(1), -+ .__count = REFCOUNT_INIT(1), - .processes = ATOMIC_INIT(1), - .sigpending = ATOMIC_INIT(0), - .locked_shm = 0, -@@ -123,7 +123,7 @@ static struct user_struct *uid_hash_find - - hlist_for_each_entry(user, hashent, uidhash_node) { - if (uid_eq(user->uid, uid)) { -- atomic_inc(&user->__count); -+ refcount_inc(&user->__count); - return user; - } - } -@@ -170,7 +170,7 @@ void free_uid(struct user_struct *up) - return; - - local_irq_save(flags); -- if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) -+ if (refcount_dec_and_lock(&up->__count, &uidhash_lock)) - free_user(up, flags); - else - local_irq_restore(flags); -@@ -191,7 +191,7 @@ struct user_struct *alloc_uid(kuid_t uid - goto out_unlock; - - new->uid = uid; -- atomic_set(&new->__count, 1); -+ refcount_set(&new->__count, 1); - ratelimit_state_init(&new->ratelimit, HZ, 100); - ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE); - diff --git a/debian/patches-rt/0003-clocksource-drivers-atmel-pit-make-option-silent.patch b/debian/patches-rt/0003-clocksource-drivers-atmel-pit-make-option-silent.patch index b63dd91ca..0706da0b3 100644 --- a/debian/patches-rt/0003-clocksource-drivers-atmel-pit-make-option-silent.patch +++ b/debian/patches-rt/0003-clocksource-drivers-atmel-pit-make-option-silent.patch @@ -1,7 +1,7 @@ From: Alexandre Belloni Date: Wed, 18 Apr 2018 12:51:40 +0200 Subject: [PATCH 3/6] clocksource/drivers: atmel-pit: make option silent -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz To conform with the other option, make the ATMEL_PIT option silent so it can be selected from the platform diff --git a/debian/patches-rt/0003-drivers-md-raid5-Do-not-disable-irq-on-release_inact.patch b/debian/patches-rt/0003-drivers-md-raid5-Do-not-disable-irq-on-release_inact.patch deleted file mode 100644 index 4dbdfbfca..000000000 --- a/debian/patches-rt/0003-drivers-md-raid5-Do-not-disable-irq-on-release_inact.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Anna-Maria Gleixner -Date: Fri, 4 May 2018 17:45:33 +0200 -Subject: [PATCH 3/3] drivers/md/raid5: Do not disable irq on - release_inactive_stripe_list() call -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -There is no need to invoke release_inactive_stripe_list() with interrupts -disabled. All call sites, except raid5_release_stripe(), unlock -->device_lock and enable interrupts before invoking the function. - -Make it consistent. 
- -Signed-off-by: Anna-Maria Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/md/raid5.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - ---- a/drivers/md/raid5.c -+++ b/drivers/md/raid5.c -@@ -414,9 +414,8 @@ void raid5_release_stripe(struct stripe_ - INIT_LIST_HEAD(&list); - hash = sh->hash_lock_index; - do_release_stripe(conf, sh, &list); -- spin_unlock(&conf->device_lock); -+ spin_unlock_irqrestore(&conf->device_lock, flags); - release_inactive_stripe_list(conf, &list, hash); -- local_irq_restore(flags); - } - } - diff --git a/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch index 3f2bcd3d1..dbcc2d0d9 100644 --- a/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch +++ b/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Mon, 28 May 2018 15:24:22 +0200 Subject: [PATCH 3/4] mm/SLxB: change list_lock to raw_spinlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The list_lock is used with used with IRQs off on RT. Make it a raw_spinlock_t otherwise the interrupts won't be disabled on -RT. The locking rules remain @@ -395,7 +395,7 @@ Signed-off-by: Sebastian Andrzej Siewior struct list_head slabs_partial; /* partial list first, better asm code */ --- a/mm/slub.c +++ b/mm/slub.c -@@ -1169,7 +1169,7 @@ static noinline int free_debug_processin +@@ -1167,7 +1167,7 @@ static noinline int free_debug_processin unsigned long uninitialized_var(flags); int ret = 0; @@ -404,7 +404,7 @@ Signed-off-by: Sebastian Andrzej Siewior slab_lock(page); if (s->flags & SLAB_CONSISTENCY_CHECKS) { -@@ -1204,7 +1204,7 @@ static noinline int free_debug_processin +@@ -1202,7 +1202,7 @@ static noinline int free_debug_processin bulk_cnt, cnt); slab_unlock(page); @@ -413,7 +413,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (!ret) slab_fix(s, "Object at 0x%p not freed", object); return ret; -@@ -1804,7 +1804,7 @@ static void *get_partial_node(struct kme +@@ -1802,7 +1802,7 @@ static void *get_partial_node(struct kme if (!n || !n->nr_partial) return NULL; @@ -422,7 +422,7 @@ Signed-off-by: Sebastian Andrzej Siewior list_for_each_entry_safe(page, page2, &n->partial, lru) { void *t; -@@ -1829,7 +1829,7 @@ static void *get_partial_node(struct kme +@@ -1827,7 +1827,7 @@ static void *get_partial_node(struct kme break; } @@ -431,7 +431,7 @@ Signed-off-by: Sebastian Andrzej Siewior return object; } -@@ -2075,7 +2075,7 @@ static void deactivate_slab(struct kmem_ +@@ -2073,7 +2073,7 @@ static void deactivate_slab(struct kmem_ * that acquire_slab() will see a slab page that * is frozen */ @@ -440,7 +440,7 @@ Signed-off-by: Sebastian Andrzej Siewior } } else { m = M_FULL; -@@ -2086,7 +2086,7 @@ static void deactivate_slab(struct kmem_ +@@ -2084,7 +2084,7 @@ static void deactivate_slab(struct kmem_ * slabs from diagnostic functions will not see * any frozen slabs. 
*/ @@ -449,7 +449,7 @@ Signed-off-by: Sebastian Andrzej Siewior } } -@@ -2121,7 +2121,7 @@ static void deactivate_slab(struct kmem_ +@@ -2119,7 +2119,7 @@ static void deactivate_slab(struct kmem_ goto redo; if (lock) @@ -458,7 +458,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (m == M_FREE) { stat(s, DEACTIVATE_EMPTY); -@@ -2156,10 +2156,10 @@ static void unfreeze_partials(struct kme +@@ -2154,10 +2154,10 @@ static void unfreeze_partials(struct kme n2 = get_node(s, page_to_nid(page)); if (n != n2) { if (n) @@ -471,7 +471,7 @@ Signed-off-by: Sebastian Andrzej Siewior } do { -@@ -2188,7 +2188,7 @@ static void unfreeze_partials(struct kme +@@ -2186,7 +2186,7 @@ static void unfreeze_partials(struct kme } if (n) @@ -480,7 +480,7 @@ Signed-off-by: Sebastian Andrzej Siewior while (discard_page) { page = discard_page; -@@ -2357,10 +2357,10 @@ static unsigned long count_partial(struc +@@ -2355,10 +2355,10 @@ static unsigned long count_partial(struc unsigned long x = 0; struct page *page; @@ -493,7 +493,7 @@ Signed-off-by: Sebastian Andrzej Siewior return x; } #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ -@@ -2795,7 +2795,7 @@ static void __slab_free(struct kmem_cach +@@ -2793,7 +2793,7 @@ static void __slab_free(struct kmem_cach do { if (unlikely(n)) { @@ -502,7 +502,7 @@ Signed-off-by: Sebastian Andrzej Siewior n = NULL; } prior = page->freelist; -@@ -2827,7 +2827,7 @@ static void __slab_free(struct kmem_cach +@@ -2825,7 +2825,7 @@ static void __slab_free(struct kmem_cach * Otherwise the list_lock will synchronize with * other processors updating the list of slabs. */ @@ -511,7 +511,7 @@ Signed-off-by: Sebastian Andrzej Siewior } } -@@ -2869,7 +2869,7 @@ static void __slab_free(struct kmem_cach +@@ -2867,7 +2867,7 @@ static void __slab_free(struct kmem_cach add_partial(n, page, DEACTIVATE_TO_TAIL); stat(s, FREE_ADD_PARTIAL); } @@ -520,7 +520,7 @@ Signed-off-by: Sebastian Andrzej Siewior return; slab_empty: -@@ -2884,7 +2884,7 @@ static void __slab_free(struct kmem_cach +@@ -2882,7 +2882,7 @@ static void __slab_free(struct kmem_cach remove_full(s, n, page); } @@ -529,7 +529,7 @@ Signed-off-by: Sebastian Andrzej Siewior stat(s, FREE_SLAB); discard_slab(s, page); } -@@ -3271,7 +3271,7 @@ static void +@@ -3269,7 +3269,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n) { n->nr_partial = 0; @@ -538,7 +538,7 @@ Signed-off-by: Sebastian Andrzej Siewior INIT_LIST_HEAD(&n->partial); #ifdef CONFIG_SLUB_DEBUG atomic_long_set(&n->nr_slabs, 0); -@@ -3655,7 +3655,7 @@ static void free_partial(struct kmem_cac +@@ -3653,7 +3653,7 @@ static void free_partial(struct kmem_cac struct page *page, *h; BUG_ON(irqs_disabled()); @@ -547,7 +547,7 @@ Signed-off-by: Sebastian Andrzej Siewior list_for_each_entry_safe(page, h, &n->partial, lru) { if (!page->inuse) { remove_partial(n, page); -@@ -3665,7 +3665,7 @@ static void free_partial(struct kmem_cac +@@ -3663,7 +3663,7 @@ static void free_partial(struct kmem_cac "Objects remaining in %s on __kmem_cache_shutdown()"); } } @@ -556,7 +556,7 @@ Signed-off-by: Sebastian Andrzej Siewior list_for_each_entry_safe(page, h, &discard, lru) discard_slab(s, page); -@@ -3938,7 +3938,7 @@ int __kmem_cache_shrink(struct kmem_cach +@@ -3936,7 +3936,7 @@ int __kmem_cache_shrink(struct kmem_cach for (i = 0; i < SHRINK_PROMOTE_MAX; i++) INIT_LIST_HEAD(promote + i); @@ -565,7 +565,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Build lists of slabs to discard or promote. 
-@@ -3969,7 +3969,7 @@ int __kmem_cache_shrink(struct kmem_cach +@@ -3967,7 +3967,7 @@ int __kmem_cache_shrink(struct kmem_cach for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) list_splice(promote + i, &n->partial); @@ -574,7 +574,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* Release empty slabs */ list_for_each_entry_safe(page, t, &discard, lru) -@@ -4383,7 +4383,7 @@ static int validate_slab_node(struct kme +@@ -4381,7 +4381,7 @@ static int validate_slab_node(struct kme struct page *page; unsigned long flags; @@ -583,7 +583,7 @@ Signed-off-by: Sebastian Andrzej Siewior list_for_each_entry(page, &n->partial, lru) { validate_slab_slab(s, page, map); -@@ -4405,7 +4405,7 @@ static int validate_slab_node(struct kme +@@ -4403,7 +4403,7 @@ static int validate_slab_node(struct kme s->name, count, atomic_long_read(&n->nr_slabs)); out: @@ -592,7 +592,7 @@ Signed-off-by: Sebastian Andrzej Siewior return count; } -@@ -4595,12 +4595,12 @@ static int list_locations(struct kmem_ca +@@ -4593,12 +4593,12 @@ static int list_locations(struct kmem_ca if (!atomic_long_read(&n->nr_slabs)) continue; diff --git a/debian/patches-rt/0003-mm-list_lru-Pass-struct-list_lru_node-as-an-argument.patch b/debian/patches-rt/0003-mm-list_lru-Pass-struct-list_lru_node-as-an-argument.patch deleted file mode 100644 index b6732b4a7..000000000 --- a/debian/patches-rt/0003-mm-list_lru-Pass-struct-list_lru_node-as-an-argument.patch +++ /dev/null @@ -1,56 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Tue, 3 Jul 2018 13:08:56 +0200 -Subject: [PATCH 3/4] mm/list_lru: Pass struct list_lru_node as an argument - __list_lru_walk_one() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -__list_lru_walk_one() is invoked with struct list_lru *lru, int nid as -the first two argument. Those two are only used to retrieve struct -list_lru_node. Since this is already done by the caller of the function -for the locking, we can pass struct list_lru_node directly and avoid the -dance around it. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/list_lru.c | 12 ++++++------ - 1 file changed, 6 insertions(+), 6 deletions(-) - ---- a/mm/list_lru.c -+++ b/mm/list_lru.c -@@ -194,12 +194,11 @@ unsigned long list_lru_count_node(struct - EXPORT_SYMBOL_GPL(list_lru_count_node); - - static unsigned long --__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx, -+__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx, - list_lru_walk_cb isolate, void *cb_arg, - unsigned long *nr_to_walk) - { - -- struct list_lru_node *nlru = &lru->node[nid]; - struct list_lru_one *l; - struct list_head *item, *n; - unsigned long isolated = 0; -@@ -261,8 +260,8 @@ list_lru_walk_one(struct list_lru *lru, - unsigned long ret; - - spin_lock(&nlru->lock); -- ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), -- isolate, cb_arg, nr_to_walk); -+ ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg, -+ nr_to_walk); - spin_unlock(&nlru->lock); - return ret; - } -@@ -282,8 +281,9 @@ unsigned long list_lru_walk_node(struct - struct list_lru_node *nlru = &lru->node[nid]; - - spin_lock(&nlru->lock); -- isolated += __list_lru_walk_one(lru, nid, memcg_idx, -- isolate, cb_arg, nr_to_walk); -+ isolated += __list_lru_walk_one(nlru, memcg_idx, -+ isolate, cb_arg, -+ nr_to_walk); - spin_unlock(&nlru->lock); - - if (*nr_to_walk <= 0) diff --git a/debian/patches-rt/0004-ARM-at91-Implement-clocksource-selection.patch b/debian/patches-rt/0004-ARM-at91-Implement-clocksource-selection.patch index 7e0ea6b77..e4be9cc1e 100644 --- a/debian/patches-rt/0004-ARM-at91-Implement-clocksource-selection.patch +++ b/debian/patches-rt/0004-ARM-at91-Implement-clocksource-selection.patch @@ -1,7 +1,7 @@ From: Alexandre Belloni Date: Wed, 18 Apr 2018 12:51:41 +0200 Subject: [PATCH 4/6] ARM: at91: Implement clocksource selection -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Allow selecting and unselecting the PIT clocksource driver so it doesn't have to be compile when unused. @@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/arch/arm/mach-at91/Kconfig +++ b/arch/arm/mach-at91/Kconfig -@@ -106,6 +106,31 @@ config SOC_AT91SAM9 +@@ -107,6 +107,31 @@ config SOC_AT91SAM9 AT91SAM9X35 AT91SAM9XE diff --git a/debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch b/debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch index 556eed81c..02def07d8 100644 --- a/debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch +++ b/debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch @@ -2,7 +2,7 @@ From: Thomas Gleixner Date: Thu, 21 Jun 2018 17:29:19 +0200 Subject: [PATCH 4/4] mm/SLUB: delay giving back empty slubs to IRQ enabled regions -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz __free_slab() is invoked with disabled interrupts which increases the irq-off time while __free_pages() is doing the work. 
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/mm/slub.c +++ b/mm/slub.c -@@ -1332,6 +1332,12 @@ static inline void dec_slabs_node(struct +@@ -1330,6 +1330,12 @@ static inline void dec_slabs_node(struct #endif /* CONFIG_SLUB_DEBUG */ @@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Hooks for other subsystems that check memory allocations. In a typical * production configuration these hooks all should produce no code at all. -@@ -1686,6 +1692,16 @@ static void __free_slab(struct kmem_cach +@@ -1684,6 +1690,16 @@ static void __free_slab(struct kmem_cach __free_pages(page, order); } @@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior static void rcu_free_slab(struct rcu_head *h) { struct page *page = container_of(h, struct page, rcu_head); -@@ -1697,6 +1713,12 @@ static void free_slab(struct kmem_cache +@@ -1695,6 +1711,12 @@ static void free_slab(struct kmem_cache { if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { call_rcu(&page->rcu_head, rcu_free_slab); @@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior } else __free_slab(s, page); } -@@ -2225,14 +2247,21 @@ static void put_cpu_partial(struct kmem_ +@@ -2223,14 +2245,21 @@ static void put_cpu_partial(struct kmem_ pobjects = oldpage->pobjects; pages = oldpage->pages; if (drain && pobjects > s->cpu_partial) { @@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior oldpage = NULL; pobjects = 0; pages = 0; -@@ -2302,7 +2331,22 @@ static bool has_cpu_slab(int cpu, void * +@@ -2300,7 +2329,22 @@ static bool has_cpu_slab(int cpu, void * static void flush_all(struct kmem_cache *s) { @@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior } /* -@@ -2500,8 +2544,10 @@ static inline void *get_freelist(struct +@@ -2498,8 +2542,10 @@ static inline void *get_freelist(struct * already disabled (which is the case for bulk allocation). */ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, @@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior void *freelist; struct page *page; -@@ -2557,6 +2603,13 @@ static void *___slab_alloc(struct kmem_c +@@ -2555,6 +2601,13 @@ static void *___slab_alloc(struct kmem_c VM_BUG_ON(!c->page->frozen); c->freelist = get_freepointer(s, freelist); c->tid = next_tid(c->tid); @@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior return freelist; new_slab: -@@ -2572,7 +2625,7 @@ static void *___slab_alloc(struct kmem_c +@@ -2570,7 +2623,7 @@ static void *___slab_alloc(struct kmem_c if (unlikely(!freelist)) { slab_out_of_memory(s, gfpflags, node); @@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior } page = c->page; -@@ -2585,7 +2638,7 @@ static void *___slab_alloc(struct kmem_c +@@ -2583,7 +2636,7 @@ static void *___slab_alloc(struct kmem_c goto new_slab; /* Slab failed checks. 
Next slab needed */ deactivate_slab(s, page, get_freepointer(s, freelist), c); @@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior } /* -@@ -2597,6 +2650,7 @@ static void *__slab_alloc(struct kmem_ca +@@ -2595,6 +2648,7 @@ static void *__slab_alloc(struct kmem_ca { void *p; unsigned long flags; @@ -158,7 +158,7 @@ Signed-off-by: Sebastian Andrzej Siewior local_irq_save(flags); #ifdef CONFIG_PREEMPT -@@ -2608,8 +2662,9 @@ static void *__slab_alloc(struct kmem_ca +@@ -2606,8 +2660,9 @@ static void *__slab_alloc(struct kmem_ca c = this_cpu_ptr(s->cpu_slab); #endif @@ -169,7 +169,7 @@ Signed-off-by: Sebastian Andrzej Siewior return p; } -@@ -3087,6 +3142,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca +@@ -3085,6 +3140,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca void **p) { struct kmem_cache_cpu *c; @@ -177,7 +177,7 @@ Signed-off-by: Sebastian Andrzej Siewior int i; /* memcg and kmem_cache debug support */ -@@ -3110,7 +3166,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca +@@ -3108,7 +3164,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca * of re-populating per CPU c->freelist */ p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, @@ -186,7 +186,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (unlikely(!p[i])) goto error; -@@ -3122,6 +3178,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca +@@ -3120,6 +3176,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca } c->tid = next_tid(c->tid); local_irq_enable(); @@ -194,7 +194,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* Clear memory outside IRQ disabled fastpath loop */ if (unlikely(flags & __GFP_ZERO)) { -@@ -3136,6 +3193,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca +@@ -3134,6 +3191,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca return i; error: local_irq_enable(); @@ -202,7 +202,7 @@ Signed-off-by: Sebastian Andrzej Siewior slab_post_alloc_hook(s, flags, i, p); __kmem_cache_free_bulk(s, i, p); return 0; -@@ -4182,6 +4240,12 @@ void __init kmem_cache_init(void) +@@ -4180,6 +4238,12 @@ void __init kmem_cache_init(void) { static __initdata struct kmem_cache boot_kmem_cache, boot_kmem_cache_node; diff --git a/debian/patches-rt/0004-mm-list_lru-Introduce-list_lru_shrink_walk_irq.patch b/debian/patches-rt/0004-mm-list_lru-Introduce-list_lru_shrink_walk_irq.patch deleted file mode 100644 index b88968f63..000000000 --- a/debian/patches-rt/0004-mm-list_lru-Introduce-list_lru_shrink_walk_irq.patch +++ /dev/null @@ -1,107 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Tue, 3 Jul 2018 13:17:27 +0200 -Subject: [PATCH 4/4] mm/list_lru: Introduce list_lru_shrink_walk_irq() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -Provide list_lru_shrink_walk_irq() and let it behave like -list_lru_walk_one() except that it locks the spinlock with -spin_lock_irq(). This is used by scan_shadow_nodes() because its lock -nests within the i_pages lock which is acquired with IRQ. -This change allows to use proper locking promitives instead hand crafted -lock_irq_disable() plus spin_lock(). -There is no EXPORT_SYMBOL provided because the current user is in-KERNEL -only. - -Add list_lru_shrink_walk_irq() which acquires the spinlock with the -proper locking primitives. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/list_lru.h | 25 +++++++++++++++++++++++++ - mm/list_lru.c | 15 +++++++++++++++ - mm/workingset.c | 8 ++------ - 3 files changed, 42 insertions(+), 6 deletions(-) - ---- a/include/linux/list_lru.h -+++ b/include/linux/list_lru.h -@@ -162,6 +162,23 @@ unsigned long list_lru_walk_one(struct l - int nid, struct mem_cgroup *memcg, - list_lru_walk_cb isolate, void *cb_arg, - unsigned long *nr_to_walk); -+/** -+ * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items. -+ * @lru: the lru pointer. -+ * @nid: the node id to scan from. -+ * @memcg: the cgroup to scan from. -+ * @isolate: callback function that is resposible for deciding what to do with -+ * the item currently being scanned -+ * @cb_arg: opaque type that will be passed to @isolate -+ * @nr_to_walk: how many items to scan. -+ * -+ * Same as @list_lru_walk_one except that the spinlock is acquired with -+ * spin_lock_irq(). -+ */ -+unsigned long list_lru_walk_one_irq(struct list_lru *lru, -+ int nid, struct mem_cgroup *memcg, -+ list_lru_walk_cb isolate, void *cb_arg, -+ unsigned long *nr_to_walk); - unsigned long list_lru_walk_node(struct list_lru *lru, int nid, - list_lru_walk_cb isolate, void *cb_arg, - unsigned long *nr_to_walk); -@@ -175,6 +192,14 @@ list_lru_shrink_walk(struct list_lru *lr - } - - static inline unsigned long -+list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc, -+ list_lru_walk_cb isolate, void *cb_arg) -+{ -+ return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg, -+ &sc->nr_to_scan); -+} -+ -+static inline unsigned long - list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate, - void *cb_arg, unsigned long nr_to_walk) - { ---- a/mm/list_lru.c -+++ b/mm/list_lru.c -@@ -267,6 +267,21 @@ list_lru_walk_one(struct list_lru *lru, - } - EXPORT_SYMBOL_GPL(list_lru_walk_one); - -+unsigned long -+list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg, -+ list_lru_walk_cb isolate, void *cb_arg, -+ unsigned long *nr_to_walk) -+{ -+ struct list_lru_node *nlru = &lru->node[nid]; -+ unsigned long ret; -+ -+ spin_lock_irq(&nlru->lock); -+ ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg, -+ nr_to_walk); -+ spin_unlock_irq(&nlru->lock); -+ return ret; -+} -+ - unsigned long list_lru_walk_node(struct list_lru *lru, int nid, - list_lru_walk_cb isolate, void *cb_arg, - unsigned long *nr_to_walk) ---- a/mm/workingset.c -+++ b/mm/workingset.c -@@ -480,13 +480,9 @@ static enum lru_status shadow_lru_isolat - static unsigned long scan_shadow_nodes(struct shrinker *shrinker, - struct shrink_control *sc) - { -- unsigned long ret; -- - /* list_lru lock nests inside the IRQ-safe i_pages lock */ -- local_irq_disable(); -- ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL); -- local_irq_enable(); -- return ret; -+ return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate, -+ NULL); - } - - static struct shrinker workingset_shadow_shrinker = { diff --git a/debian/patches-rt/0005-ARM-configs-at91-use-new-TCB-timer-driver.patch b/debian/patches-rt/0005-ARM-configs-at91-use-new-TCB-timer-driver.patch index 8ca0704e3..81630a60f 100644 --- a/debian/patches-rt/0005-ARM-configs-at91-use-new-TCB-timer-driver.patch +++ b/debian/patches-rt/0005-ARM-configs-at91-use-new-TCB-timer-driver.patch @@ -1,7 +1,7 @@ From: Alexandre Belloni Date: Wed, 18 Apr 2018 12:51:42 +0200 Subject: [PATCH 5/6] ARM: configs: at91: use new TCB timer driver -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Unselecting ATMEL_TCLIB switches the TCB timer driver from tcb_clksrc to timer-atmel-tcb. diff --git a/debian/patches-rt/0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch b/debian/patches-rt/0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch deleted file mode 100644 index fbe585332..000000000 --- a/debian/patches-rt/0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Anna-Maria Gleixner -Date: Wed, 4 Apr 2018 11:43:56 +0200 -Subject: [PATCH] bdi: Use irqsave variant of refcount_dec_and_lock() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -The irqsave variant of refcount_dec_and_lock handles irqsave/restore when -taking/releasing the spin lock. With this variant the call of -local_irq_save/restore is no longer required. - -Signed-off-by: Anna-Maria Gleixner -[bigeasy: s@atomic_dec_and_lock@refcount_dec_and_lock@g ] -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/backing-dev.c | 5 +---- - 1 file changed, 1 insertion(+), 4 deletions(-) - ---- a/mm/backing-dev.c -+++ b/mm/backing-dev.c -@@ -473,11 +473,8 @@ void wb_congested_put(struct bdi_writeba - { - unsigned long flags; - -- local_irq_save(flags); -- if (!refcount_dec_and_lock(&congested->refcnt, &cgwb_lock)) { -- local_irq_restore(flags); -+ if (!refcount_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags)) - return; -- } - - /* bdi might already have been destroyed leaving @congested unlinked */ - if (congested->__bdi) { diff --git a/debian/patches-rt/0006-ARM-configs-at91-unselect-PIT.patch b/debian/patches-rt/0006-ARM-configs-at91-unselect-PIT.patch index e0519e731..8b0da1d0f 100644 --- a/debian/patches-rt/0006-ARM-configs-at91-unselect-PIT.patch +++ b/debian/patches-rt/0006-ARM-configs-at91-unselect-PIT.patch @@ -1,7 +1,7 @@ From: Alexandre Belloni Date: Wed, 18 Apr 2018 12:51:43 +0200 Subject: [PATCH 6/6] ARM: configs: at91: unselect PIT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The PIT is not required anymore to successfully boot and may actually harm in case preempt-rt is used because the PIT interrupt is shared. diff --git a/debian/patches-rt/0006-userns-Use-irqsave-variant-of-refcount_dec_and_lock.patch b/debian/patches-rt/0006-userns-Use-irqsave-variant-of-refcount_dec_and_lock.patch deleted file mode 100644 index 83535c574..000000000 --- a/debian/patches-rt/0006-userns-Use-irqsave-variant-of-refcount_dec_and_lock.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Anna-Maria Gleixner -Date: Wed, 4 Apr 2018 11:43:57 +0200 -Subject: [PATCH] userns: Use irqsave variant of refcount_dec_and_lock() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -The irqsave variant of refcount_dec_and_lock handles irqsave/restore when -taking/releasing the spin lock. With this variant the call of -local_irq_save/restore is no longer required. 
- -Signed-off-by: Anna-Maria Gleixner -[bigeasy: s@atomic_dec_and_lock@refcount_dec_and_lock@g ] -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/user.c | 5 +---- - 1 file changed, 1 insertion(+), 4 deletions(-) - ---- a/kernel/user.c -+++ b/kernel/user.c -@@ -169,11 +169,8 @@ void free_uid(struct user_struct *up) - if (!up) - return; - -- local_irq_save(flags); -- if (refcount_dec_and_lock(&up->__count, &uidhash_lock)) -+ if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags)) - free_user(up, flags); -- else -- local_irq_restore(flags); - } - - struct user_struct *alloc_uid(kuid_t uid) diff --git a/debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch b/debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch index a3520acf6..e6796451f 100644 --- a/debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch +++ b/debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch @@ -1,7 +1,7 @@ From: "Yadi.hu" Date: Wed, 10 Dec 2014 10:32:09 +0800 Subject: ARM: enable irq in translation/section permission fault handlers -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Probably happens on all ARM, with CONFIG_PREEMPT_RT_FULL @@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c -@@ -438,6 +438,9 @@ do_translation_fault(unsigned long addr, +@@ -439,6 +439,9 @@ do_translation_fault(unsigned long addr, if (addr < TASK_SIZE) return do_page_fault(addr, fsr, regs); @@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (user_mode(regs)) goto bad_area; -@@ -505,6 +508,9 @@ do_translation_fault(unsigned long addr, +@@ -506,6 +509,9 @@ do_translation_fault(unsigned long addr, static int do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { diff --git a/debian/patches-rt/Drivers-hv-vmbus-include-header-for-get_irq_regs.patch b/debian/patches-rt/Drivers-hv-vmbus-include-header-for-get_irq_regs.patch index 3d7f84d4d..6036d7e68 100644 --- a/debian/patches-rt/Drivers-hv-vmbus-include-header-for-get_irq_regs.patch +++ b/debian/patches-rt/Drivers-hv-vmbus-include-header-for-get_irq_regs.patch @@ -4,7 +4,7 @@ Subject: [PATCH] Drivers: hv: vmbus: include header for get_irq_regs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz On !RT the header file get_irq_regs() gets pulled in via other header files. On RT it does not and the build fails: diff --git a/debian/patches-rt/EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch b/debian/patches-rt/EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch new file mode 100644 index 000000000..631e1b12f --- /dev/null +++ b/debian/patches-rt/EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch @@ -0,0 +1,44 @@ +From: Paul E. 
McKenney +Date: Mon, 29 Oct 2018 11:53:01 +0100 +Subject: [PATCH] EXP rcu: Revert expedited GP parallelization cleverness +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz + +(Commit 258ba8e089db23f760139266c232f01bad73f85c from linux-rcu) + +This commit reverts a series of commits starting with fcc635436501 ("rcu: +Make expedited GPs handle CPU 0 being offline") and its successors, thus +queueing each rcu_node structure's expedited grace-period initialization +work on the first CPU of that rcu_node structure. + +Suggested-by: Sebastian Andrzej Siewior +Signed-off-by: Paul E. McKenney +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/rcu/tree_exp.h | 9 +-------- + 1 file changed, 1 insertion(+), 8 deletions(-) + +--- a/kernel/rcu/tree_exp.h ++++ b/kernel/rcu/tree_exp.h +@@ -472,7 +472,6 @@ static void sync_rcu_exp_select_node_cpu + static void sync_rcu_exp_select_cpus(struct rcu_state *rsp, + smp_call_func_t func) + { +- int cpu; + struct rcu_node *rnp; + + trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset")); +@@ -494,13 +493,7 @@ static void sync_rcu_exp_select_cpus(str + continue; + } + INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); +- preempt_disable(); +- cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask); +- /* If all offline, queue the work on an unbound CPU. */ +- if (unlikely(cpu > rnp->grphi)) +- cpu = WORK_CPU_UNBOUND; +- queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); +- preempt_enable(); ++ queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work); + rnp->exp_need_flush = true; + } + diff --git a/debian/patches-rt/HACK-printk-drop-the-logbuf_lock-more-often.patch b/debian/patches-rt/HACK-printk-drop-the-logbuf_lock-more-often.patch index 21afee898..f3e564889 100644 --- a/debian/patches-rt/HACK-printk-drop-the-logbuf_lock-more-often.patch +++ b/debian/patches-rt/HACK-printk-drop-the-logbuf_lock-more-often.patch @@ -1,73 +1,73 @@ From: Sebastian Andrzej Siewior Date: Thu, 21 Mar 2013 19:01:05 +0100 Subject: printk: Drop the logbuf_lock more often -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The lock is hold with irgs off. The latency drops 500us+ on my arm bugs with a "full" buffer after executing "dmesg" on the shell. Signed-off-by: Sebastian Andrzej Siewior --- - kernel/printk/printk.c | 27 +++++++++++++++++++++++++++ - 1 file changed, 27 insertions(+) + kernel/printk/printk.c | 28 ++++++++++++++++++++++++++++ + 1 file changed, 28 insertions(+) --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c -@@ -1411,6 +1411,8 @@ static int syslog_print_all(char __user - { - char *text; - int len = 0; +@@ -1415,12 +1415,23 @@ static int syslog_print_all(char __user + u64 next_seq; + u64 seq; + u32 idx; + int attempts = 0; + int num_msg; text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); if (!text) -@@ -1422,6 +1424,14 @@ static int syslog_print_all(char __user - u64 seq; - u32 idx; + return -ENOMEM; -+try_again: -+ attempts++; -+ if (attempts > 10) { -+ len = -EBUSY; -+ goto out; -+ } -+ num_msg = 0; + logbuf_lock_irq(); + - /* - * Find first record that fits, including all following records, - * into the user-provided buffer for this dump. 
-@@ -1434,6 +1444,14 @@ static int syslog_print_all(char __user - len += msg_print_text(msg, true, NULL, 0); - idx = log_next(idx); - seq++; -+ num_msg++; -+ if (num_msg > 5) { -+ num_msg = 0; -+ logbuf_unlock_irq(); -+ logbuf_lock_irq(); -+ if (clear_seq < log_first_seq) -+ goto try_again; -+ } - } ++try_again: ++ attempts++; ++ if (attempts > 10) { ++ len = -EBUSY; ++ goto out; ++ } ++ num_msg = 0; ++ + /* + * Find first record that fits, including all following records, + * into the user-provided buffer for this dump. +@@ -1433,6 +1444,14 @@ static int syslog_print_all(char __user + len += msg_print_text(msg, true, NULL, 0); + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ logbuf_unlock_irq(); ++ logbuf_lock_irq(); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } - /* move first record forward until length fits into the buffer */ -@@ -1445,6 +1463,14 @@ static int syslog_print_all(char __user - len -= msg_print_text(msg, true, NULL, 0); - idx = log_next(idx); - seq++; -+ num_msg++; -+ if (num_msg > 5) { -+ num_msg = 0; -+ logbuf_unlock_irq(); -+ logbuf_lock_irq(); -+ if (clear_seq < log_first_seq) -+ goto try_again; -+ } - } + /* move first record forward until length fits into the buffer */ +@@ -1444,6 +1463,14 @@ static int syslog_print_all(char __user + len -= msg_print_text(msg, true, NULL, 0); + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ logbuf_unlock_irq(); ++ logbuf_lock_irq(); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } - /* last message fitting into this dump */ -@@ -1483,6 +1509,7 @@ static int syslog_print_all(char __user + /* last message fitting into this dump */ +@@ -1481,6 +1508,7 @@ static int syslog_print_all(char __user clear_seq = log_next_seq; clear_idx = log_next_idx; } diff --git a/debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch index 81f8de2f7..5e06bbaa6 100644 --- a/debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch +++ b/debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch @@ -1,7 +1,7 @@ From: Josh Cartwright Date: Thu, 11 Feb 2016 11:54:01 -0600 Subject: KVM: arm/arm64: downgrade preempt_disable()d region to migrate_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating the vgic and timer states to prevent the calling task from migrating to @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c -@@ -694,7 +694,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v +@@ -700,7 +700,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v * involves poking the GIC, which must be done in a * non-preemptible context. 
*/ @@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior kvm_pmu_flush_hwstate(vcpu); -@@ -743,7 +743,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v +@@ -749,7 +749,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v kvm_timer_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu); local_irq_enable(); @@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior continue; } -@@ -821,7 +821,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v +@@ -827,7 +827,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v /* Exit types that need handling before we can be preempted */ handle_exit_early(vcpu, run, ret); diff --git a/debian/patches-rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/debian/patches-rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch index c9b5895ba..049a318b7 100644 --- a/debian/patches-rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch +++ b/debian/patches-rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch @@ -5,7 +5,7 @@ Cc: Anna Schumaker , linux-nfs@vger.kernel.org, linux-kernel@vger.kernel.org, tglx@linutronix.de Subject: NFSv4: replace seqcount_t with a seqlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The raw_write_seqcount_begin() in nfs4_reclaim_open_state() bugs me because it maps to preempt_disable() in -RT which I can't have at this @@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c -@@ -2818,7 +2818,7 @@ static int _nfs4_open_and_get_state(stru +@@ -2859,7 +2859,7 @@ static int _nfs4_open_and_get_state(stru unsigned int seq; int ret; @@ -67,15 +67,15 @@ Signed-off-by: Sebastian Andrzej Siewior ret = _nfs4_proc_open(opendata, ctx); if (ret != 0) -@@ -2856,7 +2856,7 @@ static int _nfs4_open_and_get_state(stru +@@ -2900,7 +2900,7 @@ static int _nfs4_open_and_get_state(stru if (d_inode(dentry) == state->inode) { nfs_inode_attach_open_context(ctx); - if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) + if (read_seqretry(&sp->so_reclaim_seqlock, seq)) nfs4_schedule_stateid_recovery(server, state); - else - pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); + } + --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -511,7 +511,7 @@ nfs4_alloc_state_owner(struct nfs_server @@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior mutex_init(&sp->so_delegreturn_mutex); return sp; } -@@ -1560,8 +1560,12 @@ static int nfs4_reclaim_open_state(struc +@@ -1563,8 +1563,12 @@ static int nfs4_reclaim_open_state(struc * recovering after a network partition or a reboot from a * server that doesn't support a grace period. 
*/ @@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior restart: list_for_each_entry(state, &sp->so_states, open_states) { if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) -@@ -1630,14 +1634,20 @@ static int nfs4_reclaim_open_state(struc +@@ -1651,14 +1655,20 @@ static int nfs4_reclaim_open_state(struc spin_lock(&sp->so_lock); goto restart; } diff --git a/debian/patches-rt/SCSI-libsas-remove-irq-save-in-sas_ata_qc_issue.patch b/debian/patches-rt/SCSI-libsas-remove-irq-save-in-sas_ata_qc_issue.patch deleted file mode 100644 index 0e4033329..000000000 --- a/debian/patches-rt/SCSI-libsas-remove-irq-save-in-sas_ata_qc_issue.patch +++ /dev/null @@ -1,49 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 12 Apr 2018 09:16:22 +0200 -Subject: [PATCH] [SCSI] libsas: remove irq save in sas_ata_qc_issue() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -[ upstream commit 2da11d4262639dc0e2fabc6a70886db57af25c43 ] - -Since commit 312d3e56119a ("[SCSI] libsas: remove ata_port.lock -management duties from lldds") the sas_ata_qc_issue() function unlocks -the ata_port.lock and disables interrupts before doing so. -That lock is always taken with disabled interrupts so at this point, the -interrupts are already disabled. There is no need to disable the -interrupts before the unlock operation because they are already -disabled. -Restoring the interrupt state later does not change anything because -they were disabled and remain disabled. Therefore remove the operations -which do not change the behaviour. - -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/scsi/libsas/sas_ata.c | 3 --- - 1 file changed, 3 deletions(-) - ---- a/drivers/scsi/libsas/sas_ata.c -+++ b/drivers/scsi/libsas/sas_ata.c -@@ -176,7 +176,6 @@ static void sas_ata_task_done(struct sas - - static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) - { -- unsigned long flags; - struct sas_task *task; - struct scatterlist *sg; - int ret = AC_ERR_SYSTEM; -@@ -190,7 +189,6 @@ static unsigned int sas_ata_qc_issue(str - /* TODO: audit callers to ensure they are ready for qc_issue to - * unconditionally re-enable interrupts - */ -- local_irq_save(flags); - spin_unlock(ap->lock); - - /* If the device fell off, no sense in issuing commands */ -@@ -252,7 +250,6 @@ static unsigned int sas_ata_qc_issue(str - - out: - spin_lock(ap->lock); -- local_irq_restore(flags); - return ret; - } - diff --git a/debian/patches-rt/SCSI-qla2xxx-remove-irq-save-in-qla2x00_poll.patch b/debian/patches-rt/SCSI-qla2xxx-remove-irq-save-in-qla2x00_poll.patch deleted file mode 100644 index 560bb1edf..000000000 --- a/debian/patches-rt/SCSI-qla2xxx-remove-irq-save-in-qla2x00_poll.patch +++ /dev/null @@ -1,42 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 12 Apr 2018 09:55:25 +0200 -Subject: [PATCH] [SCSI] qla2xxx: remove irq save in qla2x00_poll() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -[ upstream commit b3a8aa90c46095cbad454eb068bfb5a8eb56d4e3 ] - -In commit d2ba5675d899 ("[SCSI] qla2xxx: Disable local-interrupts while -polling for RISC status.") added a local_irq_disable() before invoking -the ->intr_handler callback. The function, which was used in this -callback, did not disable interrupts while acquiring the spin_lock so a -deadlock was possible and this change was one possible solution. 
- -The function in question was qla2300_intr_handler() and is using -spin_lock_irqsave() since commit 43fac4d97a1a ("[SCSI] qla2xxx: Resolve -a performance issue in interrupt"). -I checked all other ->intr_handler callbacks and all of them use the -irqsave variant so it is safe to remove the local_irq_save() block now. - -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/scsi/qla2xxx/qla_inline.h | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - ---- a/drivers/scsi/qla2xxx/qla_inline.h -+++ b/drivers/scsi/qla2xxx/qla_inline.h -@@ -58,14 +58,12 @@ qla2x00_debounce_register(volatile uint1 - static inline void - qla2x00_poll(struct rsp_que *rsp) - { -- unsigned long flags; - struct qla_hw_data *ha = rsp->hw; -- local_irq_save(flags); -+ - if (IS_P3P_TYPE(ha)) - qla82xx_poll(0, rsp); - else - ha->isp_ops->intr_handler(0, rsp); -- local_irq_restore(flags); - } - - static inline uint8_t * diff --git a/debian/patches-rt/add_migrate_disable.patch b/debian/patches-rt/add_migrate_disable.patch index 83322d576..aa8a59ebc 100644 --- a/debian/patches-rt/add_migrate_disable.patch +++ b/debian/patches-rt/add_migrate_disable.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Sat, 27 May 2017 19:02:06 +0200 Subject: kernel/sched/core: add migrate_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz --- include/linux/preempt.h | 23 ++++++++ @@ -80,7 +80,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4 * boot command line: --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -1030,7 +1030,15 @@ void set_cpus_allowed_common(struct task +@@ -1007,7 +1007,15 @@ void set_cpus_allowed_common(struct task p->nr_cpus_allowed = cpumask_weight(new_mask); } @@ -97,7 +97,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4 { struct rq *rq = task_rq(p); bool queued, running; -@@ -1059,6 +1067,20 @@ void do_set_cpus_allowed(struct task_str +@@ -1036,6 +1044,20 @@ void do_set_cpus_allowed(struct task_str set_curr_task(rq, p); } @@ -118,7 +118,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4 /* * Change a given task's CPU affinity. Migrate the thread to a * proper CPU and schedule it away if the CPU it's executing on -@@ -1117,9 +1139,16 @@ static int __set_cpus_allowed_ptr(struct +@@ -1094,9 +1116,16 @@ static int __set_cpus_allowed_ptr(struct } /* Can the task run on the task's current CPU? 
If so, we're done */ @@ -136,7 +136,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4 dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); if (task_running(rq, p) || p->state == TASK_WAKING) { struct migration_arg arg = { p, dest_cpu }; -@@ -7076,3 +7105,100 @@ const u32 sched_prio_to_wmult[40] = { +@@ -7053,3 +7082,100 @@ const u32 sched_prio_to_wmult[40] = { }; #undef CREATE_TRACE_POINTS @@ -239,7 +239,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4 +#endif --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c -@@ -979,6 +979,10 @@ void proc_sched_show_task(struct task_st +@@ -978,6 +978,10 @@ void proc_sched_show_task(struct task_st P(dl.runtime); P(dl.deadline); } diff --git a/debian/patches-rt/apparmor-use-a-locallock-instead-preempt_disable.patch b/debian/patches-rt/apparmor-use-a-locallock-instead-preempt_disable.patch index a3b3955c9..3f2315344 100644 --- a/debian/patches-rt/apparmor-use-a-locallock-instead-preempt_disable.patch +++ b/debian/patches-rt/apparmor-use-a-locallock-instead-preempt_disable.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 11 Oct 2017 17:43:49 +0200 Subject: apparmor: use a locallock instead preempt_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz get_buffers() disables preemption which acts as a lock for the per-CPU variable. Since we can't disable preemption here on RT, a local_lock is diff --git a/debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch b/debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch index 4f435daed..e7f424fac 100644 --- a/debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch +++ b/debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch @@ -1,7 +1,7 @@ From: Anders Roxell Date: Thu, 14 May 2015 17:52:17 +0200 Subject: arch/arm64: Add lazy preempt support -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz arm64 is missing support for PREEMPT_RT. The main feature which is lacking is support for lazy preemption. The arch-specific entry code, @@ -21,14 +21,14 @@ Signed-off-by: Anders Roxell --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig -@@ -125,6 +125,7 @@ config ARM64 +@@ -140,6 +140,7 @@ config ARM64 select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_LAZY select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RCU_TABLE_FREE - select HAVE_STACKPROTECTOR + select HAVE_RSEQ --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -43,6 +43,7 @@ struct thread_info { @@ -78,7 +78,7 @@ Signed-off-by: Anders Roxell DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S -@@ -633,11 +633,16 @@ ENDPROC(el1_sync) +@@ -623,11 +623,16 @@ ENDPROC(el1_sync) #ifdef CONFIG_PREEMPT ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count @@ -98,7 +98,7 @@ Signed-off-by: Anders Roxell #endif #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_on -@@ -651,6 +656,7 @@ ENDPROC(el1_irq) +@@ -641,6 +646,7 @@ ENDPROC(el1_irq) 1: bl preempt_schedule_irq // irq en/disable is done inside ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? 
@@ -108,7 +108,7 @@ Signed-off-by: Anders Roxell --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c -@@ -923,7 +923,7 @@ asmlinkage void do_notify_resume(struct +@@ -926,7 +926,7 @@ asmlinkage void do_notify_resume(struct /* Check valid user FS if needed */ addr_limit_user_check(); diff --git a/debian/patches-rt/arm-convert-boot-lock-to-raw.patch b/debian/patches-rt/arm-convert-boot-lock-to-raw.patch index 1a07273f5..d03e2bc95 100644 --- a/debian/patches-rt/arm-convert-boot-lock-to-raw.patch +++ b/debian/patches-rt/arm-convert-boot-lock-to-raw.patch @@ -1,7 +1,7 @@ From: Frank Rowand Date: Mon, 19 Sep 2011 14:51:14 -0700 Subject: arm: Convert arm boot_lock to raw -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The arm boot_lock is used by the secondary processor startup code. The locking task is the idle thread, which has idle->sched_class == &idle_sched_class. diff --git a/debian/patches-rt/arm-disable-NEON-in-kernel-mode.patch b/debian/patches-rt/arm-disable-NEON-in-kernel-mode.patch index ac5f9b13d..028db0ac0 100644 --- a/debian/patches-rt/arm-disable-NEON-in-kernel-mode.patch +++ b/debian/patches-rt/arm-disable-NEON-in-kernel-mode.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 1 Dec 2017 10:42:03 +0100 Subject: [PATCH] arm*: disable NEON in kernel mode -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz NEON in kernel mode is used by the crypto algorithms and raid6 code. While the raid6 code looks okay, the crypto algorithms do not: NEON @@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig -@@ -2169,7 +2169,7 @@ config NEON +@@ -2160,7 +2160,7 @@ config NEON config KERNEL_MODE_NEON bool "Support for NEON in kernel mode" diff --git a/debian/patches-rt/arm-enable-highmem-for-rt.patch b/debian/patches-rt/arm-enable-highmem-for-rt.patch index 08c92082c..ef676cc50 100644 --- a/debian/patches-rt/arm-enable-highmem-for-rt.patch +++ b/debian/patches-rt/arm-enable-highmem-for-rt.patch @@ -1,7 +1,7 @@ Subject: arm: Enable highmem for rt From: Thomas Gleixner Date: Wed, 13 Feb 2013 11:03:11 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz fixup highmem for ARM. diff --git a/debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch b/debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch index cab3f087d..e49c39432 100644 --- a/debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch +++ b/debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Mon, 11 Mar 2013 21:37:27 +0100 Subject: arm/highmem: Flush tlb on unmap -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The tlb should be flushed on unmap and thus make the mapping entry invalid. 
 This is only done in the non-debug case which does not look
diff --git a/debian/patches-rt/arm-include-definition-for-cpumask_t.patch b/debian/patches-rt/arm-include-definition-for-cpumask_t.patch
index 80dd10157..fbabe2932 100644
--- a/debian/patches-rt/arm-include-definition-for-cpumask_t.patch
+++ b/debian/patches-rt/arm-include-definition-for-cpumask_t.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior
 Date: Thu, 22 Dec 2016 17:28:33 +0100
 Subject: [PATCH] arm: include definition for cpumask_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
 
 This definition gets pulled in by other files. With the (later) split of
 RCU and spinlock.h it won't compile anymore.
diff --git a/debian/patches-rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch b/debian/patches-rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch
index d77f412db..fc48dfda2 100644
--- a/debian/patches-rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch
+++ b/debian/patches-rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch
@@ -1,7 +1,7 @@
 From: Yang Shi
 Date: Thu, 10 Nov 2016 16:17:55 -0800
 Subject: [PATCH] arm: kprobe: replace patch_lock to raw lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
 
 When running kprobe on -rt kernel, the below bug is caught:
diff --git a/debian/patches-rt/arm-preempt-lazy-support.patch b/debian/patches-rt/arm-preempt-lazy-support.patch
index a9ef2e96f..65ac419ed 100644
--- a/debian/patches-rt/arm-preempt-lazy-support.patch
+++ b/debian/patches-rt/arm-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
 Subject: arm: Add support for lazy preemption
 From: Thomas Gleixner
 Date: Wed, 31 Oct 2012 12:04:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
 
 Implement the arm pieces for lazy preempt.
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner
 
 --- a/arch/arm/Kconfig
 +++ b/arch/arm/Kconfig
-@@ -89,6 +89,7 @@ config ARM
+@@ -90,6 +90,7 @@ config ARM
  	select HAVE_PERF_EVENTS
  	select HAVE_PERF_REGS
  	select HAVE_PERF_USER_STACK_DUMP
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner
 #endif /* __ASM_ARM_THREAD_INFO_H */
 --- a/arch/arm/kernel/asm-offsets.c
 +++ b/arch/arm/kernel/asm-offsets.c
-@@ -67,6 +67,7 @@ int main(void)
+@@ -56,6 +56,7 @@ int main(void)
  	BLANK();
  	DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
  	DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
diff --git a/debian/patches-rt/arm-unwind-use_raw_lock.patch b/debian/patches-rt/arm-unwind-use_raw_lock.patch
index e9d50fb08..d7083acbd 100644
--- a/debian/patches-rt/arm-unwind-use_raw_lock.patch
+++ b/debian/patches-rt/arm-unwind-use_raw_lock.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior
 Date: Fri, 20 Sep 2013 14:31:54 +0200
 Subject: arm/unwind: use a raw_spin_lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
 
 Mostly unwind is done with irqs enabled however SLUB may call it with
 irqs disabled while creating a new SLUB cache.
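
A common thread in the arm patches above (the kprobe patch_lock, the unwinder lock) is swapping spinlock_t for raw_spinlock_t. The distinction they rely on, sketched with an invented lock name (the locking calls themselves are the stock kernel API, not code from either patch):

    /* Illustrative only -- "demo_lock" is not from either patch. */
    static DEFINE_RAW_SPINLOCK(demo_lock);

    static void demo_called_with_irqs_off(void)
    {
        unsigned long flags;

        /*
         * raw_spin_lock_irqsave() never sleeps, so it is safe even on
         * PREEMPT_RT in paths that already run with IRQs disabled (the
         * unwinder called from SLUB, kprobe patching). A spinlock_t is
         * backed by an rtmutex on -RT and could schedule here.
         */
        raw_spin_lock_irqsave(&demo_lock, flags);
        /* short, bounded critical section */
        raw_spin_unlock_irqrestore(&demo_lock, flags);
    }
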
diff --git a/debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch b/debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch index fdbc8d419..7a1a2d726 100644 --- a/debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch +++ b/debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 09:13:42 +0200 Subject: [PATCH] arm64: KVM: compute_layout before altenates are applied -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz compute_layout() is invoked as part of an alternative fixup under stop_machine() and needs a sleeping lock as part of get_random_long(). diff --git a/debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch b/debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch index 18eb2306d..c31e2718c 100644 --- a/debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch +++ b/debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch @@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 25 Jul 2018 14:02:38 +0200 Subject: [PATCH] arm64: fpsimd: use preemp_disable in addition to local_bh_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz In v4.16-RT I noticed a number of warnings from task_fpsimd_load(). The code disables BH and expects that it is not preemptible. On -RT the @@ -33,10 +33,10 @@ Signed-off-by: Sebastian Andrzej Siewior + return sve_state; +} + - static void change_cpacr(u64 val, u64 mask) - { - u64 cpacr = read_sysreg(CPACR_EL1); -@@ -566,6 +576,7 @@ int sve_set_vector_length(struct task_st + /* + * TIF_SVE controls whether a task can use SVE without trapping while + * in userspace, and also the way a task's FPSIMD/SVE state is stored +@@ -547,6 +557,7 @@ int sve_set_vector_length(struct task_st * non-SVE thread. 
*/ if (task == current) { @@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior local_bh_disable(); fpsimd_save(); -@@ -576,8 +587,10 @@ int sve_set_vector_length(struct task_st +@@ -557,8 +568,10 @@ int sve_set_vector_length(struct task_st if (test_and_clear_tsk_thread_flag(task, TIF_SVE)) sve_to_fpsimd(task); @@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Force reallocation of task SVE state to the correct size -@@ -832,6 +845,7 @@ asmlinkage void do_sve_acc(unsigned int +@@ -813,6 +826,7 @@ asmlinkage void do_sve_acc(unsigned int sve_alloc(current); @@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior local_bh_disable(); fpsimd_save(); -@@ -845,6 +859,7 @@ asmlinkage void do_sve_acc(unsigned int +@@ -826,6 +840,7 @@ asmlinkage void do_sve_acc(unsigned int WARN_ON(1); /* SVE access shouldn't have trapped */ local_bh_enable(); @@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior } /* -@@ -911,10 +926,12 @@ void fpsimd_thread_switch(struct task_st +@@ -892,10 +907,12 @@ void fpsimd_thread_switch(struct task_st void fpsimd_flush_thread(void) { int vl, supported_vl; @@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior local_bh_disable(); memset(¤t->thread.uw.fpsimd_state, 0, -@@ -923,7 +940,7 @@ void fpsimd_flush_thread(void) +@@ -904,7 +921,7 @@ void fpsimd_flush_thread(void) if (system_supports_sve()) { clear_thread_flag(TIF_SVE); @@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Reset the task vector length as required. -@@ -959,6 +976,8 @@ void fpsimd_flush_thread(void) +@@ -940,6 +957,8 @@ void fpsimd_flush_thread(void) set_thread_flag(TIF_FOREIGN_FPSTATE); local_bh_enable(); @@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior } /* -@@ -970,9 +989,11 @@ void fpsimd_preserve_current_state(void) +@@ -951,9 +970,11 @@ void fpsimd_preserve_current_state(void) if (!system_supports_fpsimd()) return; @@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior } /* -@@ -1030,6 +1051,7 @@ void fpsimd_restore_current_state(void) +@@ -1011,6 +1032,7 @@ void fpsimd_restore_current_state(void) if (!system_supports_fpsimd()) return; @@ -123,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior local_bh_disable(); if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { -@@ -1038,6 +1060,7 @@ void fpsimd_restore_current_state(void) +@@ -1019,6 +1041,7 @@ void fpsimd_restore_current_state(void) } local_bh_enable(); @@ -131,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior } /* -@@ -1050,6 +1073,7 @@ void fpsimd_update_current_state(struct +@@ -1031,6 +1054,7 @@ void fpsimd_update_current_state(struct if (!system_supports_fpsimd()) return; @@ -139,7 +139,7 @@ Signed-off-by: Sebastian Andrzej Siewior local_bh_disable(); current->thread.uw.fpsimd_state = *state; -@@ -1062,6 +1086,7 @@ void fpsimd_update_current_state(struct +@@ -1043,6 +1067,7 @@ void fpsimd_update_current_state(struct clear_thread_flag(TIF_FOREIGN_FPSTATE); local_bh_enable(); @@ -147,7 +147,7 @@ Signed-off-by: Sebastian Andrzej Siewior } /* -@@ -1107,6 +1132,7 @@ void kernel_neon_begin(void) +@@ -1088,6 +1113,7 @@ void kernel_neon_begin(void) BUG_ON(!may_use_simd()); @@ -155,7 +155,7 @@ Signed-off-by: Sebastian Andrzej Siewior local_bh_disable(); __this_cpu_write(kernel_neon_busy, true); -@@ -1120,6 +1146,7 @@ void kernel_neon_begin(void) +@@ -1101,6 +1127,7 @@ void kernel_neon_begin(void) preempt_disable(); local_bh_enable(); diff --git a/debian/patches-rt/at91_dont_enable_disable_clock.patch b/debian/patches-rt/at91_dont_enable_disable_clock.patch index 
b2167c8d0..8a365fb01 100644 --- a/debian/patches-rt/at91_dont_enable_disable_clock.patch +++ b/debian/patches-rt/at91_dont_enable_disable_clock.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 09 Mar 2016 10:51:06 +0100 Subject: arm: at91: do not disable/enable clocks in a row -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Currently the driver will disable the clock and enable it one line later if it is switching from periodic mode into one shot. diff --git a/debian/patches-rt/block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/debian/patches-rt/block-blk-mq-move-blk_queue_usage_counter_release-in.patch index 52fc4239c..abd040dad 100644 --- a/debian/patches-rt/block-blk-mq-move-blk_queue_usage_counter_release-in.patch +++ b/debian/patches-rt/block-blk-mq-move-blk_queue_usage_counter_release-in.patch @@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior Date: Tue, 13 Mar 2018 13:49:16 +0100 Subject: [PATCH] block: blk-mq: move blk_queue_usage_counter_release() into process context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz | BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 | in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6 @@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/block/blk-core.c +++ b/block/blk-core.c -@@ -965,12 +965,21 @@ void blk_queue_exit(struct request_queue +@@ -969,12 +969,21 @@ void blk_queue_exit(struct request_queue percpu_ref_put(&q->q_usage_counter); } @@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior } static void blk_rq_timed_out_timer(struct timer_list *t) -@@ -1064,6 +1073,7 @@ struct request_queue *blk_alloc_queue_no +@@ -1067,6 +1076,7 @@ struct request_queue *blk_alloc_queue_no queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); init_waitqueue_head(&q->mq_freeze_wq); @@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Init percpu_ref in atomic mode so that it's faster to shutdown. -@@ -3948,6 +3958,8 @@ int __init blk_dev_init(void) +@@ -3957,6 +3967,8 @@ int __init blk_dev_init(void) if (!kblockd_workqueue) panic("Failed to create kblockd\n"); @@ -99,10 +99,10 @@ Signed-off-by: Sebastian Andrzej Siewior #include #include +#include - #include - #include -@@ -651,6 +652,7 @@ struct request_queue { + struct module; + struct scsi_ioctl_command; +@@ -649,6 +650,7 @@ struct request_queue { #endif struct rcu_head rcu_head; wait_queue_head_t mq_freeze_wq; diff --git a/debian/patches-rt/block-mq-don-t-complete-requests-via-IPI.patch b/debian/patches-rt/block-mq-don-t-complete-requests-via-IPI.patch index 3cf363281..e7b5779eb 100644 --- a/debian/patches-rt/block-mq-don-t-complete-requests-via-IPI.patch +++ b/debian/patches-rt/block-mq-don-t-complete-requests-via-IPI.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 29 Jan 2015 15:10:08 +0100 Subject: block/mq: don't complete requests via IPI -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The IPI runs in hardirq context and there are sleeping locks. This patch moves the completion into a workqueue. 
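
The pattern described above, deferring a completion that arrives in hardirq context into schedulable process context, can be sketched roughly as follows (the demo_* names are invented; this is not the blk-mq code the patch actually touches, just the general workqueue idiom):

    #include <linux/workqueue.h>

    struct demo_req {
        struct work_struct work;
        /* ... request state ... */
    };

    static void demo_complete_work(struct work_struct *work)
    {
        struct demo_req *rq = container_of(work, struct demo_req, work);

        /* runs in process context: sleeping locks are fine here */
        (void)rq;
    }

    /* called from hardirq context instead of raising an IPI */
    static void demo_complete_from_irq(struct demo_req *rq)
    {
        INIT_WORK(&rq->work, demo_complete_work);
        schedule_work(&rq->work);
    }
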
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior INIT_LIST_HEAD(&rq->timeout_list); rq->timeout = 0; -@@ -545,12 +548,24 @@ void blk_mq_end_request(struct request * +@@ -547,12 +550,24 @@ void blk_mq_end_request(struct request * } EXPORT_SYMBOL(blk_mq_end_request); @@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior static void __blk_mq_complete_request(struct request *rq) { -@@ -573,10 +588,18 @@ static void __blk_mq_complete_request(st +@@ -575,10 +590,18 @@ static void __blk_mq_complete_request(st shared = cpus_share_cache(cpu, ctx->cpu); if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { @@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior } --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h -@@ -247,7 +247,7 @@ static inline u16 blk_mq_unique_tag_to_t +@@ -249,7 +249,7 @@ static inline u16 blk_mq_unique_tag_to_t return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; } @@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior void blk_mq_end_request(struct request *rq, blk_status_t error); --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h -@@ -151,6 +151,9 @@ enum mq_rq_state { +@@ -149,6 +149,9 @@ enum mq_rq_state { */ struct request { struct request_queue *q; diff --git a/debian/patches-rt/block-mq-drop-preempt-disable.patch b/debian/patches-rt/block-mq-drop-preempt-disable.patch index 48097f5bf..9024521ba 100644 --- a/debian/patches-rt/block-mq-drop-preempt-disable.patch +++ b/debian/patches-rt/block-mq-drop-preempt-disable.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Tue, 14 Jul 2015 14:26:34 +0200 Subject: block/mq: do not invoke preempt_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz preempt_disable() and get_cpu() don't play well together with the sleeping locks it tries to allocate later. @@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/block/blk-mq.c +++ b/block/blk-mq.c -@@ -568,7 +568,7 @@ static void __blk_mq_complete_request(st +@@ -570,7 +570,7 @@ static void __blk_mq_complete_request(st return; } @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) shared = cpus_share_cache(cpu, ctx->cpu); -@@ -580,7 +580,7 @@ static void __blk_mq_complete_request(st +@@ -582,7 +582,7 @@ static void __blk_mq_complete_request(st } else { rq->q->softirq_done_fn(rq); } @@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior } static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) -@@ -1324,14 +1324,14 @@ static void __blk_mq_delay_run_hw_queue( +@@ -1360,14 +1360,14 @@ static void __blk_mq_delay_run_hw_queue( return; if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { diff --git a/debian/patches-rt/block-mq-use-cpu_light.patch b/debian/patches-rt/block-mq-use-cpu_light.patch index 6f2c4dc0c..a6a866f5e 100644 --- a/debian/patches-rt/block-mq-use-cpu_light.patch +++ b/debian/patches-rt/block-mq-use-cpu_light.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 9 Apr 2014 10:37:23 +0200 Subject: block: mq: use cpu_light() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz there is a might sleep splat because get_cpu() disables preemption and later we grab a lock. As a workaround for this we use get_cpu_light(). 
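
get_cpu_light() is an -RT-tree helper with no mainline counterpart. Roughly (a from-memory sketch, not a verbatim quote of the -RT headers), it pins the task to its CPU with migrate_disable() instead of disabling preemption, so sleeping locks taken afterwards remain legal:

    /* Rough sketch of the -RT helper pair; on non-RT configs it is
     * expected to fall back to the ordinary get_cpu()/put_cpu(). */
    #ifdef CONFIG_PREEMPT_RT_FULL
    # define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
    # define put_cpu_light()	migrate_enable()
    #else
    # define get_cpu_light()	get_cpu()
    # define put_cpu_light()	put_cpu()
    #endif
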
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/block/blk-mq.h +++ b/block/blk-mq.h -@@ -112,12 +112,12 @@ static inline struct blk_mq_ctx *__blk_m +@@ -113,12 +113,12 @@ static inline struct blk_mq_ctx *__blk_m */ static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) { diff --git a/debian/patches-rt/block-use-cpu-chill.patch b/debian/patches-rt/block-use-cpu-chill.patch index 092a0bdae..fbbffaa16 100644 --- a/debian/patches-rt/block-use-cpu-chill.patch +++ b/debian/patches-rt/block-use-cpu-chill.patch @@ -1,7 +1,7 @@ Subject: block: Use cpu_chill() for retry loops From: Thomas Gleixner Date: Thu, 20 Dec 2012 18:28:26 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Retry loops on RT might loop forever when the modifying side was preempted. Steven also observed a live lock when there was a diff --git a/debian/patches-rt/cgroup-tracing-Move-taking-of-spin-lock-out-of-trace.patch b/debian/patches-rt/cgroup-tracing-Move-taking-of-spin-lock-out-of-trace.patch deleted file mode 100644 index b94d6fafa..000000000 --- a/debian/patches-rt/cgroup-tracing-Move-taking-of-spin-lock-out-of-trace.patch +++ /dev/null @@ -1,271 +0,0 @@ -From: "Steven Rostedt (VMware)" -Date: Mon, 9 Jul 2018 17:48:54 -0400 -Subject: [PATCH] cgroup/tracing: Move taking of spin lock out of trace event - handlers -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -[ Upstream commit e4f8d81c738db6d3ffdabfb8329aa2feaa310699 ] - -It is unwise to take spin locks from the handlers of trace events. -Mainly, because they can introduce lockups, because it introduces locks -in places that are normally not tested. Worse yet, because trace events -are tucked away in the include/trace/events/ directory, locks that are -taken there are forgotten about. - -As a general rule, I tell people never to take any locks in a trace -event handler. - -Several cgroup trace event handlers call cgroup_path() which eventually -takes the kernfs_rename_lock spinlock. This injects the spinlock in the -code without people realizing it. It also can cause issues for the -PREEMPT_RT patch, as the spinlock becomes a mutex, and the trace event -handlers are called with preemption disabled. - -By moving the calculation of the cgroup_path() out of the trace event -handlers and into a macro (surrounded by a -trace_cgroup_##type##_enabled()), then we could place the cgroup_path -into a string, and pass that to the trace event. Not only does this -remove the taking of the spinlock out of the trace event handler, but -it also means that the cgroup_path() only needs to be called once (it -is currently called twice, once to get the length to reserver the -buffer for, and once again to get the path itself. Now it only needs to -be done once. 
- -Reported-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) -Signed-off-by: Tejun Heo -Signed-off-by: Sebastian Andrzej Siewior ---- - include/trace/events/cgroup.h | 47 +++++++++++++++++++--------------------- - kernel/cgroup/cgroup-internal.h | 26 ++++++++++++++++++++++ - kernel/cgroup/cgroup-v1.c | 4 +-- - kernel/cgroup/cgroup.c | 12 +++++----- - 4 files changed, 58 insertions(+), 31 deletions(-) - ---- a/include/trace/events/cgroup.h -+++ b/include/trace/events/cgroup.h -@@ -53,24 +53,22 @@ DEFINE_EVENT(cgroup_root, cgroup_remount - - DECLARE_EVENT_CLASS(cgroup, - -- TP_PROTO(struct cgroup *cgrp), -+ TP_PROTO(struct cgroup *cgrp, const char *path), - -- TP_ARGS(cgrp), -+ TP_ARGS(cgrp, path), - - TP_STRUCT__entry( - __field( int, root ) - __field( int, id ) - __field( int, level ) -- __dynamic_array(char, path, -- cgroup_path(cgrp, NULL, 0) + 1) -+ __string( path, path ) - ), - - TP_fast_assign( - __entry->root = cgrp->root->hierarchy_id; - __entry->id = cgrp->id; - __entry->level = cgrp->level; -- cgroup_path(cgrp, __get_dynamic_array(path), -- __get_dynamic_array_len(path)); -+ __assign_str(path, path); - ), - - TP_printk("root=%d id=%d level=%d path=%s", -@@ -79,45 +77,45 @@ DECLARE_EVENT_CLASS(cgroup, - - DEFINE_EVENT(cgroup, cgroup_mkdir, - -- TP_PROTO(struct cgroup *cgroup), -+ TP_PROTO(struct cgroup *cgrp, const char *path), - -- TP_ARGS(cgroup) -+ TP_ARGS(cgrp, path) - ); - - DEFINE_EVENT(cgroup, cgroup_rmdir, - -- TP_PROTO(struct cgroup *cgroup), -+ TP_PROTO(struct cgroup *cgrp, const char *path), - -- TP_ARGS(cgroup) -+ TP_ARGS(cgrp, path) - ); - - DEFINE_EVENT(cgroup, cgroup_release, - -- TP_PROTO(struct cgroup *cgroup), -+ TP_PROTO(struct cgroup *cgrp, const char *path), - -- TP_ARGS(cgroup) -+ TP_ARGS(cgrp, path) - ); - - DEFINE_EVENT(cgroup, cgroup_rename, - -- TP_PROTO(struct cgroup *cgroup), -+ TP_PROTO(struct cgroup *cgrp, const char *path), - -- TP_ARGS(cgroup) -+ TP_ARGS(cgrp, path) - ); - - DECLARE_EVENT_CLASS(cgroup_migrate, - -- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup), -+ TP_PROTO(struct cgroup *dst_cgrp, const char *path, -+ struct task_struct *task, bool threadgroup), - -- TP_ARGS(dst_cgrp, task, threadgroup), -+ TP_ARGS(dst_cgrp, path, task, threadgroup), - - TP_STRUCT__entry( - __field( int, dst_root ) - __field( int, dst_id ) - __field( int, dst_level ) -- __dynamic_array(char, dst_path, -- cgroup_path(dst_cgrp, NULL, 0) + 1) - __field( int, pid ) -+ __string( dst_path, path ) - __string( comm, task->comm ) - ), - -@@ -125,8 +123,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate, - __entry->dst_root = dst_cgrp->root->hierarchy_id; - __entry->dst_id = dst_cgrp->id; - __entry->dst_level = dst_cgrp->level; -- cgroup_path(dst_cgrp, __get_dynamic_array(dst_path), -- __get_dynamic_array_len(dst_path)); -+ __assign_str(dst_path, path); - __entry->pid = task->pid; - __assign_str(comm, task->comm); - ), -@@ -138,16 +135,18 @@ DECLARE_EVENT_CLASS(cgroup_migrate, - - DEFINE_EVENT(cgroup_migrate, cgroup_attach_task, - -- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup), -+ TP_PROTO(struct cgroup *dst_cgrp, const char *path, -+ struct task_struct *task, bool threadgroup), - -- TP_ARGS(dst_cgrp, task, threadgroup) -+ TP_ARGS(dst_cgrp, path, task, threadgroup) - ); - - DEFINE_EVENT(cgroup_migrate, cgroup_transfer_tasks, - -- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup), -+ TP_PROTO(struct cgroup *dst_cgrp, const char *path, -+ struct task_struct *task, bool 
threadgroup), - -- TP_ARGS(dst_cgrp, task, threadgroup) -+ TP_ARGS(dst_cgrp, path, task, threadgroup) - ); - - #endif /* _TRACE_CGROUP_H */ ---- a/kernel/cgroup/cgroup-internal.h -+++ b/kernel/cgroup/cgroup-internal.h -@@ -8,6 +8,32 @@ - #include - #include - -+#define TRACE_CGROUP_PATH_LEN 1024 -+extern spinlock_t trace_cgroup_path_lock; -+extern char trace_cgroup_path[TRACE_CGROUP_PATH_LEN]; -+ -+/* -+ * cgroup_path() takes a spin lock. It is good practice not to take -+ * spin locks within trace point handlers, as they are mostly hidden -+ * from normal view. As cgroup_path() can take the kernfs_rename_lock -+ * spin lock, it is best to not call that function from the trace event -+ * handler. -+ * -+ * Note: trace_cgroup_##type##_enabled() is a static branch that will only -+ * be set when the trace event is enabled. -+ */ -+#define TRACE_CGROUP_PATH(type, cgrp, ...) \ -+ do { \ -+ if (trace_cgroup_##type##_enabled()) { \ -+ spin_lock(&trace_cgroup_path_lock); \ -+ cgroup_path(cgrp, trace_cgroup_path, \ -+ TRACE_CGROUP_PATH_LEN); \ -+ trace_cgroup_##type(cgrp, trace_cgroup_path, \ -+ ##__VA_ARGS__); \ -+ spin_unlock(&trace_cgroup_path_lock); \ -+ } \ -+ } while (0) -+ - /* - * A cgroup can be associated with multiple css_sets as different tasks may - * belong to different cgroups on different hierarchies. In the other ---- a/kernel/cgroup/cgroup-v1.c -+++ b/kernel/cgroup/cgroup-v1.c -@@ -135,7 +135,7 @@ int cgroup_transfer_tasks(struct cgroup - if (task) { - ret = cgroup_migrate(task, false, &mgctx); - if (!ret) -- trace_cgroup_transfer_tasks(to, task, false); -+ TRACE_CGROUP_PATH(transfer_tasks, to, task, false); - put_task_struct(task); - } - } while (task && !ret); -@@ -865,7 +865,7 @@ static int cgroup1_rename(struct kernfs_ - - ret = kernfs_rename(kn, new_parent, new_name_str); - if (!ret) -- trace_cgroup_rename(cgrp); -+ TRACE_CGROUP_PATH(rename, cgrp); - - mutex_unlock(&cgroup_mutex); - ---- a/kernel/cgroup/cgroup.c -+++ b/kernel/cgroup/cgroup.c -@@ -83,6 +83,9 @@ EXPORT_SYMBOL_GPL(cgroup_mutex); - EXPORT_SYMBOL_GPL(css_set_lock); - #endif - -+DEFINE_SPINLOCK(trace_cgroup_path_lock); -+char trace_cgroup_path[TRACE_CGROUP_PATH_LEN]; -+ - /* - * Protects cgroup_idr and css_idr so that IDs can be released without - * grabbing cgroup_mutex. 
-@@ -2638,7 +2641,7 @@ int cgroup_attach_task(struct cgroup *ds - cgroup_migrate_finish(&mgctx); - - if (!ret) -- trace_cgroup_attach_task(dst_cgrp, leader, threadgroup); -+ TRACE_CGROUP_PATH(attach_task, dst_cgrp, leader, threadgroup); - - return ret; - } -@@ -4634,7 +4637,7 @@ static void css_release_work_fn(struct w - struct cgroup *tcgrp; - - /* cgroup release path */ -- trace_cgroup_release(cgrp); -+ TRACE_CGROUP_PATH(release, cgrp); - - if (cgroup_on_dfl(cgrp)) - cgroup_rstat_flush(cgrp); -@@ -4977,7 +4980,7 @@ int cgroup_mkdir(struct kernfs_node *par - if (ret) - goto out_destroy; - -- trace_cgroup_mkdir(cgrp); -+ TRACE_CGROUP_PATH(mkdir, cgrp); - - /* let's create and online css's */ - kernfs_activate(kn); -@@ -5165,9 +5168,8 @@ int cgroup_rmdir(struct kernfs_node *kn) - return 0; - - ret = cgroup_destroy_locked(cgrp); -- - if (!ret) -- trace_cgroup_rmdir(cgrp); -+ TRACE_CGROUP_PATH(rmdir, cgrp); - - cgroup_kn_unlock(kn); - return ret; diff --git a/debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch b/debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch index 4b5b61e78..4d41a369a 100644 --- a/debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch +++ b/debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Tue, 3 Jul 2018 18:19:48 +0200 Subject: [PATCH] cgroup: use irqsave in cgroup_rstat_flush_locked() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock either with spin_lock_irq() or spin_lock_irqsave(). diff --git a/debian/patches-rt/cgroups-use-simple-wait-in-css_release.patch b/debian/patches-rt/cgroups-use-simple-wait-in-css_release.patch index 2486ee8ee..259f79e55 100644 --- a/debian/patches-rt/cgroups-use-simple-wait-in-css_release.patch +++ b/debian/patches-rt/cgroups-use-simple-wait-in-css_release.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 13 Feb 2015 15:52:24 +0100 Subject: cgroups: use simple wait in css_release() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz To avoid: |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 @@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c -@@ -4611,10 +4611,10 @@ static void css_free_rwork_fn(struct wor +@@ -4620,10 +4620,10 @@ static void css_free_rwork_fn(struct wor } } @@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior struct cgroup_subsys *ss = css->ss; struct cgroup *cgrp = css->cgroup; -@@ -4674,8 +4674,8 @@ static void css_release(struct percpu_re +@@ -4683,8 +4683,8 @@ static void css_release(struct percpu_re struct cgroup_subsys_state *css = container_of(ref, struct cgroup_subsys_state, refcnt); @@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior } static void init_and_link_css(struct cgroup_subsys_state *css, -@@ -5397,6 +5397,7 @@ static int __init cgroup_wq_init(void) +@@ -5406,6 +5406,7 @@ static int __init cgroup_wq_init(void) */ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); BUG_ON(!cgroup_destroy_wq); diff --git a/debian/patches-rt/clocksource-tclib-allow-higher-clockrates.patch 
b/debian/patches-rt/clocksource-tclib-allow-higher-clockrates.patch index f15e3b1e2..fa14b4525 100644 --- a/debian/patches-rt/clocksource-tclib-allow-higher-clockrates.patch +++ b/debian/patches-rt/clocksource-tclib-allow-higher-clockrates.patch @@ -1,7 +1,7 @@ From: Benedikt Spranger Date: Mon, 8 Mar 2010 18:57:04 +0100 Subject: clocksource: TCLIB: Allow higher clock rates for clock events -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz As default the TCLIB uses the 32KiHz base clock rate for clock events. Add a compile time selection to allow higher clock resulution. diff --git a/debian/patches-rt/completion-use-simple-wait-queues.patch b/debian/patches-rt/completion-use-simple-wait-queues.patch index 42b2ff85f..1f7982316 100644 --- a/debian/patches-rt/completion-use-simple-wait-queues.patch +++ b/debian/patches-rt/completion-use-simple-wait-queues.patch @@ -1,7 +1,7 @@ Subject: completion: Use simple wait queues From: Thomas Gleixner Date: Fri, 11 Jan 2013 11:23:51 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Completions have no long lasting callbacks and therefor do not need the complex waitqueue variant. Use simple waitqueues which reduces the @@ -9,30 +9,46 @@ contention on the waitqueue lock. Signed-off-by: Thomas Gleixner --- - drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 2 - + arch/powerpc/platforms/ps3/device-init.c | 4 +- + drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 4 +- drivers/usb/gadget/function/f_fs.c | 2 - drivers/usb/gadget/legacy/inode.c | 4 +- include/linux/completion.h | 8 ++-- include/linux/suspend.h | 6 +++ - include/linux/swait.h | 1 + include/linux/swait.h | 2 + kernel/power/hibernate.c | 7 ++++ kernel/power/suspend.c | 4 ++ kernel/sched/completion.c | 34 ++++++++++---------- kernel/sched/core.c | 10 ++++- - kernel/sched/swait.c | 19 +++++++++++ - 11 files changed, 70 insertions(+), 27 deletions(-) + kernel/sched/swait.c | 21 +++++++++++- + 12 files changed, 75 insertions(+), 31 deletions(-) +--- a/arch/powerpc/platforms/ps3/device-init.c ++++ b/arch/powerpc/platforms/ps3/device-init.c +@@ -752,8 +752,8 @@ static int ps3_notification_read_write(s + } + pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op); + +- res = wait_event_interruptible(dev->done.wait, +- dev->done.done || kthread_should_stop()); ++ res = swait_event_interruptible_exclusive(dev->done.wait, ++ dev->done.done || kthread_should_stop()); + if (kthread_should_stop()) + res = -EINTR; + if (res) { --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c -@@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ez +@@ -697,8 +697,8 @@ static void ezusb_req_ctx_wait(struct ez while (!ctx->done.done && msecs--) udelay(1000); } else { - wait_event_interruptible(ctx->done.wait, -+ swait_event_interruptible(ctx->done.wait, - ctx->done.done); +- ctx->done.done); ++ swait_event_interruptible_exclusive(ctx->done.wait, ++ ctx->done.done); } break; + default: --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1633,7 +1633,7 @@ static void ffs_data_put(struct ffs_data @@ -51,7 +67,7 @@ Signed-off-by: Thomas Gleixner if (likely (value == 0)) { - value = wait_event_interruptible (done.wait, 
done.done); -+ value = swait_event_interruptible (done.wait, done.done); ++ value = swait_event_interruptible_exclusive(done.wait, done.done); if (value != 0) { spin_lock_irq (&epdata->dev->lock); if (likely (epdata->ep != NULL)) { @@ -60,7 +76,7 @@ Signed-off-by: Thomas Gleixner spin_unlock_irq (&epdata->dev->lock); - wait_event (done.wait, done.done); -+ swait_event (done.wait, done.done); ++ swait_event_exclusive(done.wait, done.done); if (epdata->status == -ECONNRESET) epdata->status = -EINTR; } else { @@ -119,17 +135,19 @@ Signed-off-by: Thomas Gleixner extern suspend_state_t mem_sleep_default; --- a/include/linux/swait.h +++ b/include/linux/swait.h -@@ -160,6 +160,7 @@ static inline bool swq_has_sleeper(struc - extern void swake_up(struct swait_queue_head *q); +@@ -160,7 +160,9 @@ static inline bool swq_has_sleeper(struc + extern void swake_up_one(struct swait_queue_head *q); extern void swake_up_all(struct swait_queue_head *q); extern void swake_up_locked(struct swait_queue_head *q); +extern void swake_up_all_locked(struct swait_queue_head *q); - extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); - extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); ++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); + extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state); + extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); + --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c -@@ -679,6 +679,10 @@ static int load_image_and_restore(void) +@@ -681,6 +681,10 @@ static int load_image_and_restore(void) return error; } @@ -140,7 +158,7 @@ Signed-off-by: Thomas Gleixner /** * hibernate - Carry out system hibernation, including saving the image. */ -@@ -692,6 +696,8 @@ int hibernate(void) +@@ -694,6 +698,8 @@ int hibernate(void) return -EPERM; } @@ -149,7 +167,7 @@ Signed-off-by: Thomas Gleixner lock_system_sleep(); /* The snapshot device should not be opened while we're running */ if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { -@@ -770,6 +776,7 @@ int hibernate(void) +@@ -772,6 +778,7 @@ int hibernate(void) atomic_inc(&snapshot_device_available); Unlock: unlock_system_sleep(); @@ -159,7 +177,7 @@ Signed-off-by: Thomas Gleixner return error; --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c -@@ -594,6 +594,8 @@ static int enter_state(suspend_state_t s +@@ -600,6 +600,8 @@ static int enter_state(suspend_state_t s return error; } @@ -168,7 +186,7 @@ Signed-off-by: Thomas Gleixner /** * pm_suspend - Externally visible function for suspending the system. * @state: System sleep state to enter. 
-@@ -608,6 +610,7 @@ int pm_suspend(suspend_state_t state)
+@@ -614,6 +616,7 @@ int pm_suspend(suspend_state_t state)
  	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
  		return -EINVAL;
 
@@ -176,7 +194,7 @@ Signed-off-by: Thomas Gleixner
  	pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
  	error = enter_state(state);
  	if (error) {
-@@ -617,6 +620,7 @@ int pm_suspend(suspend_state_t state)
+@@ -623,6 +626,7 @@ int pm_suspend(suspend_state_t state)
  		suspend_stats.success++;
  	}
  	pr_info("suspend exit\n");
@@ -282,7 +300,7 @@ Signed-off-by: Thomas Gleixner
 EXPORT_SYMBOL(completion_done);
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -7124,7 +7124,10 @@ void migrate_disable(void)
+@@ -7102,7 +7102,10 @@ void migrate_disable(void)
  		return;
  	}
 #ifdef CONFIG_SCHED_DEBUG
 -	WARN_ON_ONCE(p->migrate_disable_atomic);
 +	do {
 +		WARN_ON_ONCE(p->migrate_disable_atomic);
 +	} while (0);
 #endif
 
  	if (p->migrate_disable) {
-@@ -7154,7 +7157,10 @@ void migrate_enable(void)
+@@ -7132,7 +7135,10 @@ void migrate_enable(void)
  	}
 #ifdef CONFIG_SCHED_DEBUG
 -	WARN_ON_ONCE(p->migrate_disable_atomic);
 +	do {
 +		WARN_ON_ONCE(p->migrate_disable_atomic);
 +	} while (0);
 #endif
@@ -331,6 +349,15 @@ Signed-off-by: Thomas Gleixner
 +}
 +EXPORT_SYMBOL(swake_up_all_locked);
 +
- void swake_up(struct swait_queue_head *q)
+ void swake_up_one(struct swait_queue_head *q)
  {
  	unsigned long flags;
+@@ -69,7 +88,7 @@ void swake_up_all(struct swait_queue_hea
+ }
+ EXPORT_SYMBOL(swake_up_all);
+
+-static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
++void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
+ {
+ 	wait->task = current;
+ 	if (list_empty(&wait->task_list))
diff --git a/debian/patches-rt/cond-resched-lock-rt-tweak.patch b/debian/patches-rt/cond-resched-lock-rt-tweak.patch
index ff9af3288..b9bf3153d 100644
--- a/debian/patches-rt/cond-resched-lock-rt-tweak.patch
+++ b/debian/patches-rt/cond-resched-lock-rt-tweak.patch
@@ -1,7 +1,7 @@
 Subject: sched: Use the proper LOCK_OFFSET for cond_resched()
 From: Thomas Gleixner
 Date: Sun, 17 Jul 2011 22:51:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
 
 RT does not increment preempt count when a 'sleeping' spinlock is
 locked. Update PREEMPT_LOCK_OFFSET for that case.
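
The completion rework above replaces the regular waitqueue inside struct completion with a simple waitqueue. The resulting wait/wake pattern, reduced to a sketch with invented demo_* names (a real completion additionally protects its done count with the queue's lock), uses the same swait calls the hunks show:

    #include <linux/swait.h>

    static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);
    static bool demo_done;

    static int demo_wait(void)
    {
        /* sleeps until demo_done is set; one exclusive waiter wakes at a time */
        return swait_event_interruptible_exclusive(demo_wq, demo_done);
    }

    static void demo_complete(void)
    {
        demo_done = true;	/* a real completion updates this under the queue lock */
        swake_up_one(&demo_wq);
    }
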
diff --git a/debian/patches-rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch b/debian/patches-rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch index a44e8c90d..231eb7dfe 100644 --- a/debian/patches-rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch +++ b/debian/patches-rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch @@ -2,7 +2,7 @@ From: Mike Galbraith Date: Sun, 16 Oct 2016 05:11:54 +0200 Subject: [PATCH] connector/cn_proc: Protect send_msg() with a local lock on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931 |in_atomic(): 1, irqs_disabled(): 0, pid: 31807, name: sleep diff --git a/debian/patches-rt/cpu-hotplug--Implement-CPU-pinning.patch b/debian/patches-rt/cpu-hotplug--Implement-CPU-pinning.patch index d9a70b7f4..e4c28e027 100644 --- a/debian/patches-rt/cpu-hotplug--Implement-CPU-pinning.patch +++ b/debian/patches-rt/cpu-hotplug--Implement-CPU-pinning.patch @@ -1,7 +1,7 @@ Subject: cpu/hotplug: Implement CPU pinning From: Thomas Gleixner Date: Wed, 19 Jul 2017 17:31:20 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Signed-off-by: Thomas Gleixner --- @@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) static struct lockdep_map cpuhp_state_up_map = STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map); -@@ -288,7 +293,30 @@ static int cpu_hotplug_disabled; +@@ -285,7 +290,30 @@ static int cpu_hotplug_disabled; */ void pin_current_cpu(void) { @@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner } /** -@@ -296,6 +324,13 @@ void pin_current_cpu(void) +@@ -293,6 +321,13 @@ void pin_current_cpu(void) */ void unpin_current_cpu(void) { @@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner } DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock); -@@ -854,6 +889,7 @@ static int take_cpu_down(void *_param) +@@ -846,6 +881,7 @@ static int take_cpu_down(void *_param) static int takedown_cpu(unsigned int cpu) { @@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int err; -@@ -866,11 +902,14 @@ static int takedown_cpu(unsigned int cpu +@@ -858,11 +894,14 @@ static int takedown_cpu(unsigned int cpu */ irq_lock_sparse(); @@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner /* CPU refused to die */ irq_unlock_sparse(); /* Unpark the hotplug thread so we can rollback there */ -@@ -889,6 +928,7 @@ static int takedown_cpu(unsigned int cpu +@@ -881,6 +920,7 @@ static int takedown_cpu(unsigned int cpu wait_for_ap_thread(st, false); BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); diff --git a/debian/patches-rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/debian/patches-rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch index 81ee462d5..5106c2042 100644 --- a/debian/patches-rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch +++ b/debian/patches-rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 9 Apr 2015 15:23:01 +0200 Subject: cpufreq: drop K8's driver from beeing selected -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Ralf posted a picture of a backtrace from diff --git a/debian/patches-rt/cpumask-disable-offstack-on-rt.patch b/debian/patches-rt/cpumask-disable-offstack-on-rt.patch index d4f17b4b8..b3f8ec59d 100644 --- a/debian/patches-rt/cpumask-disable-offstack-on-rt.patch +++ b/debian/patches-rt/cpumask-disable-offstack-on-rt.patch @@ -1,7 +1,7 @@ Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT From: Thomas Gleixner Date: Wed, 14 Dec 2011 01:03:49 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz There are "valid" GFP_ATOMIC allocations such as @@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -940,7 +940,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT +@@ -938,7 +938,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT config MAXSMP bool "Enable Maximum number of SMP Processors and NUMA Nodes" depends on X86_64 && SMP && DEBUG_KERNEL @@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner If unsure, say N. --- a/lib/Kconfig +++ b/lib/Kconfig -@@ -434,6 +434,7 @@ config CHECK_SIGNATURE +@@ -441,6 +441,7 @@ config CHECK_SIGNATURE config CPUMASK_OFFSTACK bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS diff --git a/debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch index b12fca43e..c40e55f4f 100644 --- a/debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch +++ b/debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch @@ -1,7 +1,7 @@ From: Mike Galbraith Date: Sun, 8 Jan 2017 09:32:25 +0100 Subject: [PATCH] cpuset: Convert callback_lock to raw_spinlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The two commits below add up to a cpuset might_sleep() splat for RT: diff --git a/debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch b/debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch index d656e65fa..0cc3f44b2 100644 --- a/debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch +++ b/debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 21 Feb 2014 17:24:04 +0100 Subject: crypto: Reduce preempt disabled regions, more algos -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Don Estabrook reported | kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100() diff --git a/debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch b/debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch index d13b4530b..60e7090f2 100644 --- a/debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch +++ b/debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch @@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 18:52:00 +0200 Subject: [PATCH] crypto: cryptd - add a lock instead preempt_disable/local_bh_disable -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz cryptd has a per-CPU lock which protected with local_bh_disable() and preempt_disable(). diff --git a/debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch b/debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch index ad646c3af..593528e48 100644 --- a/debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch +++ b/debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch @@ -4,7 +4,7 @@ Subject: [PATCH] crypto: limit more FPU-enabled sections MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Those crypto drivers use SSE/AVX/… for their crypto work and in order to do so in kernel they need to enable the "FPU" in kernel mode which diff --git a/debian/patches-rt/crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch b/debian/patches-rt/crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch index 3afc6c0bf..cba549360 100644 --- a/debian/patches-rt/crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch +++ b/debian/patches-rt/crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch @@ -2,7 +2,7 @@ From: Mike Galbraith Date: Wed, 11 Jul 2018 17:14:47 +0200 Subject: [PATCH] crypto: scompress - serialize RT percpu scratch buffer access with a local lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz | BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:974 | in_atomic(): 1, irqs_disabled(): 0, pid: 1401, name: cryptomgr_test diff --git a/debian/patches-rt/debugobjects-rt.patch b/debian/patches-rt/debugobjects-rt.patch index ace4838f3..271bb24ea 100644 --- a/debian/patches-rt/debugobjects-rt.patch +++ b/debian/patches-rt/debugobjects-rt.patch @@ -1,7 +1,7 @@ Subject: debugobjects: Make RT aware From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:41:35 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Avoid filling the pool / allocating memory with irqs off(). 
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner --- a/lib/debugobjects.c +++ b/lib/debugobjects.c -@@ -374,7 +374,10 @@ static void +@@ -377,7 +377,10 @@ static void struct debug_obj *obj; unsigned long flags; diff --git a/debian/patches-rt/dm-rq-remove-BUG_ON-irqs_disabled-check.patch b/debian/patches-rt/dm-rq-remove-BUG_ON-irqs_disabled-check.patch index 31c96a291..35515c0a6 100644 --- a/debian/patches-rt/dm-rq-remove-BUG_ON-irqs_disabled-check.patch +++ b/debian/patches-rt/dm-rq-remove-BUG_ON-irqs_disabled-check.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Tue, 27 Mar 2018 16:24:15 +0200 Subject: [PATCH] dm rq: remove BUG_ON(!irqs_disabled) check -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz In commit 052189a2ec95 ("dm: remove superfluous irq disablement in dm_request_fn") the spin_lock_irq() was replaced with spin_lock() + a diff --git a/debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch index 33ca88e55..bc698a9d7 100644 --- a/debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch +++ b/debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch @@ -2,7 +2,7 @@ From: Mike Galbraith Date: Thu, 31 Mar 2016 04:08:28 +0200 Subject: [PATCH] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz They're nondeterministic, and lead to ___might_sleep() splats in -rt. OTOH, they're a lot less wasteful than an rtmutex per page. diff --git a/debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch b/debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch index f19a88acc..eb4f82253 100644 --- a/debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch +++ b/debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch @@ -1,7 +1,7 @@ Subject: tty/serial/omap: Make the locking RT aware From: Thomas Gleixner Date: Thu, 28 Jul 2011 13:32:57 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The lock is a sleeping lock and local_irq_save() is not the optimisation we are looking for. Redo it to make it work on -RT and diff --git a/debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch b/debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch index b98c1cec2..b3d0f33a1 100644 --- a/debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch +++ b/debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch @@ -1,7 +1,7 @@ Subject: tty/serial/pl011: Make the locking work on RT From: Thomas Gleixner Date: Tue, 08 Jan 2013 21:36:51 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The lock is a sleeping lock and local_irq_save() is not the optimisation we are looking for. Redo it to make it work on -RT and non-RT. 
diff --git a/debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch index 4004dbc33..906a6236e 100644 --- a/debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch +++ b/debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch @@ -2,7 +2,7 @@ From: Mike Galbraith Date: Thu, 20 Oct 2016 11:15:22 +0200 Subject: [PATCH] drivers/zram: Don't disable preemption in zcomp_stream_get/put() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz In v4.7, the driver switched to percpu compression streams, disabling preemption via get/put_cpu_ptr(). Use a per-zcomp_strm lock here. We diff --git a/debian/patches-rt/drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch b/debian/patches-rt/drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch index b3d3072b3..c92684e0f 100644 --- a/debian/patches-rt/drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch +++ b/debian/patches-rt/drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch @@ -2,7 +2,7 @@ From: Mike Galbraith Date: Wed, 23 Aug 2017 11:57:29 +0200 Subject: [PATCH] drivers/zram: fix zcomp_stream_get() smp_processor_id() use in preemptible code -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Use get_local_ptr() instead this_cpu_ptr() to avoid a warning regarding smp_processor_id() in preemptible code. diff --git a/debian/patches-rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch b/debian/patches-rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch index 86636667b..647d61471 100644 --- a/debian/patches-rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch +++ b/debian/patches-rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch @@ -1,7 +1,7 @@ Subject: drm,i915: Use local_lock/unlock_irq() in intel_pipe_update_start/end() From: Mike Galbraith Date: Sat, 27 Feb 2016 09:01:42 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz [ 8.014039] BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:918 @@ -57,8 +57,8 @@ Cc: Sebastian Andrzej Siewior Cc: linux-rt-users Signed-off-by: Thomas Gleixner --- - drivers/gpu/drm/i915/intel_sprite.c | 11 +++++++---- - 1 file changed, 7 insertions(+), 4 deletions(-) + drivers/gpu/drm/i915/intel_sprite.c | 13 ++++++++----- + 1 file changed, 8 insertions(+), 5 deletions(-) --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner #include "intel_drv.h" #include "intel_frontbuffer.h" #include -@@ -74,6 +75,8 @@ int intel_usecs_to_scanlines(const struc +@@ -60,6 +61,8 @@ int intel_usecs_to_scanlines(const struc #define VBLANK_EVASION_TIME_US 100 #endif @@ -80,15 +80,15 @@ Signed-off-by: Thomas Gleixner * intel_pipe_update_start() - start update of a set of display registers * @new_crtc_state: the new crtc state @@ -107,7 +110,7 @@ void intel_pipe_update_start(const struc - VBLANK_EVASION_TIME_US); - max = vblank_start - 1; + if 
(intel_psr_wait_for_idle(new_crtc_state)) + DRM_ERROR("PSR idle timed out, atomic update may fail\n"); - local_irq_disable(); + local_lock_irq(pipe_update_lock); - if (min <= 0 || max <= 0) - return; -@@ -137,11 +140,11 @@ void intel_pipe_update_start(const struc + crtc->debug.min_vbl = min; + crtc->debug.max_vbl = max; +@@ -131,11 +134,11 @@ void intel_pipe_update_start(const struc break; } @@ -102,7 +102,16 @@ Signed-off-by: Thomas Gleixner } finish_wait(wq, &wait); -@@ -206,7 +209,7 @@ void intel_pipe_update_end(struct intel_ +@@ -168,7 +171,7 @@ void intel_pipe_update_start(const struc + return; + + irq_disable: +- local_irq_disable(); ++ local_lock_irq(pipe_update_lock); + } + + /** +@@ -204,7 +207,7 @@ void intel_pipe_update_end(struct intel_ new_crtc_state->base.event = NULL; } diff --git a/debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch b/debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch index 3ee483efe..d782a432d 100644 --- a/debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch +++ b/debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch @@ -1,7 +1,7 @@ Subject: drm,radeon,i915: Use preempt_disable/enable_rt() where recommended From: Mike Galbraith Date: Sat, 27 Feb 2016 08:09:11 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz DRM folks identified the spots, so use them. @@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c -@@ -1009,6 +1009,7 @@ static bool i915_get_crtc_scanoutpos(str +@@ -1025,6 +1025,7 @@ static bool i915_get_crtc_scanoutpos(str spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ @@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner /* Get optional system timestamp before query. */ if (stime) -@@ -1060,6 +1061,7 @@ static bool i915_get_crtc_scanoutpos(str +@@ -1076,6 +1077,7 @@ static bool i915_get_crtc_scanoutpos(str *etime = ktime_get(); /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ diff --git a/debian/patches-rt/efi-Allow-efi-runtime.patch b/debian/patches-rt/efi-Allow-efi-runtime.patch index 0ef8d4e1c..3e568f7d2 100644 --- a/debian/patches-rt/efi-Allow-efi-runtime.patch +++ b/debian/patches-rt/efi-Allow-efi-runtime.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 15:06:10 +0200 Subject: [PATCH] efi: Allow efi=runtime -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz In case the option "efi=noruntime" is default at built-time, the user could overwrite its sate by `efi=runtime' and allow it again. 
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c -@@ -110,6 +110,9 @@ static int __init parse_efi_cmdline(char +@@ -113,6 +113,9 @@ static int __init parse_efi_cmdline(char if (parse_option_str(str, "noruntime")) disable_runtime = true; diff --git a/debian/patches-rt/efi-Disable-runtime-services-on-RT.patch b/debian/patches-rt/efi-Disable-runtime-services-on-RT.patch index df75857a8..4e051386a 100644 --- a/debian/patches-rt/efi-Disable-runtime-services-on-RT.patch +++ b/debian/patches-rt/efi-Disable-runtime-services-on-RT.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 15:03:16 +0200 Subject: [PATCH] efi: Disable runtime services on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Based on measurements the EFI functions get_variable / get_next_variable take up to 2us which looks okay. @@ -29,9 +29,9 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c -@@ -84,7 +84,7 @@ struct mm_struct efi_mm = { - .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), - }; +@@ -87,7 +87,7 @@ struct mm_struct efi_mm = { + + struct workqueue_struct *efi_rts_wq; -static bool disable_runtime; +static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT_BASE); diff --git a/debian/patches-rt/epoll-use-get-cpu-light.patch b/debian/patches-rt/epoll-use-get-cpu-light.patch index 2f3111471..8b933529c 100644 --- a/debian/patches-rt/epoll-use-get-cpu-light.patch +++ b/debian/patches-rt/epoll-use-get-cpu-light.patch @@ -1,7 +1,7 @@ Subject: fs/epoll: Do not disable preemption on RT From: Thomas Gleixner Date: Fri, 08 Jul 2011 16:35:35 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz ep_call_nested() takes a sleeping lock so we can't disable preemption. 
The light version is enough since ep_call_nested() doesn't mind being @@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner --- a/fs/eventpoll.c +++ b/fs/eventpoll.c -@@ -563,12 +563,12 @@ static int ep_poll_wakeup_proc(void *pri +@@ -571,12 +571,12 @@ static int ep_poll_wakeup_proc(void *pri static void ep_poll_safewake(wait_queue_head_t *wq) { diff --git a/debian/patches-rt/fs-aio-simple-simple-work.patch b/debian/patches-rt/fs-aio-simple-simple-work.patch index c5d7c2f06..a10be67aa 100644 --- a/debian/patches-rt/fs-aio-simple-simple-work.patch +++ b/debian/patches-rt/fs-aio-simple-simple-work.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Mon, 16 Feb 2015 18:49:10 +0100 Subject: fs/aio: simple simple work -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768 |in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2 @@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/fs/aio.c +++ b/fs/aio.c -@@ -40,6 +40,7 @@ +@@ -42,6 +42,7 @@ #include #include #include @@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior #include #include -@@ -118,6 +119,7 @@ struct kioctx { +@@ -120,6 +121,7 @@ struct kioctx { long nr_pages; struct rcu_work free_rwork; /* see free_ioctx() */ @@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * signals when all in-flight requests are done -@@ -256,6 +258,7 @@ static int __init aio_setup(void) +@@ -254,6 +256,7 @@ static int __init aio_setup(void) .mount = aio_mount, .kill_sb = kill_anon_super, }; @@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior aio_mnt = kern_mount(&aio_fs); if (IS_ERR(aio_mnt)) panic("Failed to create aio fs mount."); -@@ -597,9 +600,9 @@ static void free_ioctx_reqs(struct percp +@@ -595,9 +598,9 @@ static void free_ioctx_reqs(struct percp * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - * now it's safe to cancel any that need to be. */ @@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior struct aio_kiocb *req; spin_lock_irq(&ctx->ctx_lock); -@@ -617,6 +620,14 @@ static void free_ioctx_users(struct perc +@@ -615,6 +618,14 @@ static void free_ioctx_users(struct perc percpu_ref_put(&ctx->reqs); } diff --git a/debian/patches-rt/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch b/debian/patches-rt/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch index ed1dee255..bea5d2153 100644 --- a/debian/patches-rt/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch +++ b/debian/patches-rt/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 13 Sep 2017 12:32:34 +0200 Subject: [PATCH] fs/dcache: bring back explicit INIT_HLIST_BL_HEAD init -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Commit 3d375d78593c ("mm: update callers to use HASH_ZERO flag") removed INIT_HLIST_BL_HEAD and uses the ZERO flag instead for the init. 
However @@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -3115,6 +3115,8 @@ static int __init set_dhash_entries(char +@@ -3062,6 +3062,8 @@ static int __init set_dhash_entries(char static void __init dcache_init_early(void) { @@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* If hashes are distributed across NUMA nodes, defer * hash allocation until vmalloc space is available. */ -@@ -3131,11 +3133,16 @@ static void __init dcache_init_early(voi +@@ -3078,11 +3080,16 @@ static void __init dcache_init_early(voi NULL, 0, 0); @@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * A constructor could be added for stable state like the lists, * but it is probably not worth it because of the cache nature -@@ -3159,6 +3166,10 @@ static void __init dcache_init(void) +@@ -3106,6 +3113,10 @@ static void __init dcache_init(void) NULL, 0, 0); diff --git a/debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch index deae6de4f..d08e7ab3c 100644 --- a/debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch +++ b/debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 20 Oct 2017 11:29:53 +0200 Subject: [PATCH] fs/dcache: disable preemption on i_dir_seq's write side -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz i_dir_seq is an opencoded seqcounter. Based on the code it looks like we could have two writers in parallel despite the fact that the d_lock is @@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -2430,9 +2430,10 @@ EXPORT_SYMBOL(d_rehash); +@@ -2404,9 +2404,10 @@ EXPORT_SYMBOL(d_rehash); static inline unsigned start_dir_add(struct inode *dir) { @@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior return n; cpu_relax(); } -@@ -2440,7 +2441,8 @@ static inline unsigned start_dir_add(str +@@ -2414,7 +2415,8 @@ static inline unsigned start_dir_add(str static inline void end_dir_add(struct inode *dir, unsigned n) { @@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior } static void d_wait_lookup(struct dentry *dentry) -@@ -2473,7 +2475,7 @@ struct dentry *d_alloc_parallel(struct d +@@ -2447,7 +2449,7 @@ struct dentry *d_alloc_parallel(struct d retry: rcu_read_lock(); @@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior r_seq = read_seqbegin(&rename_lock); dentry = __d_lookup_rcu(parent, name, &d_seq); if (unlikely(dentry)) { -@@ -2501,7 +2503,7 @@ struct dentry *d_alloc_parallel(struct d +@@ -2475,7 +2477,7 @@ struct dentry *d_alloc_parallel(struct d } hlist_bl_lock(b); @@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/include/linux/fs.h +++ b/include/linux/fs.h -@@ -657,7 +657,7 @@ struct inode { +@@ -669,7 +669,7 @@ struct inode { struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; diff --git a/debian/patches-rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/debian/patches-rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch index 853bd7ef4..85f39d84c 100644 --- a/debian/patches-rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch +++ b/debian/patches-rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch @@ -1,7 +1,7 @@ Subject: fs: dcache: Use cpu_chill() in trylock loops From: Thomas Gleixner Date: Wed, 
07 Mar 2012 21:00:34 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Retry loops on RT might loop forever when the modifying side was preempted. Use cpu_chill() instead of cpu_relax() to let the system @@ -23,8 +23,8 @@ Signed-off-by: Thomas Gleixner +#include #include "autofs_i.h" - static unsigned long now; -@@ -148,7 +149,7 @@ static struct dentry *get_next_positive_ + /* Check if a dentry can be expired */ +@@ -153,7 +154,7 @@ static struct dentry *get_next_positive_ parent = p->d_parent; if (!spin_trylock(&parent->d_lock)) { spin_unlock(&p->d_lock); @@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner #include #include #include -@@ -353,8 +354,11 @@ int __mnt_want_write(struct vfsmount *m) +@@ -327,8 +328,11 @@ int __mnt_want_write(struct vfsmount *m) * incremented count after it has set MNT_WRITE_HOLD. */ smp_mb(); diff --git a/debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch index 0ba18fd53..76c8f476c 100644 --- a/debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch +++ b/debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 14 Sep 2016 14:35:49 +0200 Subject: [PATCH] fs/dcache: use swait_queue instead of waitqueue -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz __d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock() which disables preemption. As a workaround convert it to swait. 
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -2447,21 +2447,24 @@ static inline void end_dir_add(struct in +@@ -2421,21 +2421,24 @@ static inline void end_dir_add(struct in static void d_wait_lookup(struct dentry *dentry) { @@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior + + INIT_LIST_HEAD(&__wait.task_list); + do { -+ prepare_to_swait(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE); ++ prepare_to_swait_exclusive(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE); + spin_unlock(&dentry->d_lock); + schedule(); + spin_lock(&dentry->d_lock); @@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior { unsigned int hash = name->hash; struct hlist_bl_head *b = in_lookup_hash(parent, hash); -@@ -2576,7 +2579,7 @@ void __d_lookup_done(struct dentry *dent +@@ -2550,7 +2553,7 @@ void __d_lookup_done(struct dentry *dent hlist_bl_lock(b); dentry->d_flags &= ~DCACHE_PAR_LOOKUP; __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); @@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior INIT_HLIST_NODE(&dentry->d_u.d_alias); --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c -@@ -1196,7 +1196,7 @@ static int fuse_direntplus_link(struct f +@@ -1203,7 +1203,7 @@ static int fuse_direntplus_link(struct f struct inode *dir = d_inode(parent); struct fuse_conn *fc; struct inode *inode; @@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* --- a/fs/namei.c +++ b/fs/namei.c -@@ -1604,7 +1604,7 @@ static struct dentry *__lookup_slow(cons +@@ -1645,7 +1645,7 @@ static struct dentry *__lookup_slow(cons { struct dentry *dentry, *old; struct inode *inode = dir->d_inode; @@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* Don't go there if it's already dead */ if (unlikely(IS_DEADDIR(inode))) -@@ -3121,7 +3121,7 @@ static int lookup_open(struct nameidata +@@ -3135,7 +3135,7 @@ static int lookup_open(struct nameidata struct dentry *dentry; int error, create_error = 0; umode_t mode = op->mode; @@ -121,9 +121,9 @@ Signed-off-by: Sebastian Andrzej Siewior struct dentry *dentry; struct dentry *alias; struct inode *dir = d_inode(parent); -@@ -1454,7 +1454,7 @@ int nfs_atomic_open(struct inode *dir, s +@@ -1459,7 +1459,7 @@ int nfs_atomic_open(struct inode *dir, s struct file *file, unsigned open_flags, - umode_t mode, int *opened) + umode_t mode) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); @@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior spin_lock(&dentry->d_lock); --- a/fs/proc/base.c +++ b/fs/proc/base.c -@@ -1864,7 +1864,7 @@ bool proc_fill_cache(struct file *file, +@@ -1876,7 +1876,7 @@ bool proc_fill_cache(struct file *file, child = d_hash_and_lookup(dir, &qname); if (!child) { @@ -183,7 +183,7 @@ Signed-off-by: Sebastian Andrzej Siewior }; struct list_head d_child; /* child of parent list */ struct list_head d_subdirs; /* our children */ -@@ -238,7 +238,7 @@ extern struct dentry * d_alloc(struct de +@@ -236,7 +236,7 @@ extern struct dentry * d_alloc(struct de extern struct dentry * d_alloc_anon(struct super_block *); extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, @@ -194,7 +194,7 @@ Signed-off-by: Sebastian Andrzej Siewior extern struct dentry * d_exact_alias(struct dentry *, struct inode *); --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h -@@ -1536,7 +1536,7 @@ struct nfs_unlinkdata { +@@ -1549,7 +1549,7 @@ struct nfs_unlinkdata { struct nfs_removeargs args; struct nfs_removeres 
res; struct dentry *dentry; diff --git a/debian/patches-rt/fs-jbd-replace-bh_state-lock.patch b/debian/patches-rt/fs-jbd-replace-bh_state-lock.patch index c6a40b2a5..e011fcee2 100644 --- a/debian/patches-rt/fs-jbd-replace-bh_state-lock.patch +++ b/debian/patches-rt/fs-jbd-replace-bh_state-lock.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Fri, 18 Mar 2011 10:11:25 +0100 Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz bit_spin_locks break under RT. diff --git a/debian/patches-rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/debian/patches-rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch index 3ba4f5f59..7c6b0a340 100644 --- a/debian/patches-rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch +++ b/debian/patches-rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 15 Sep 2016 10:51:27 +0200 Subject: [PATCH] fs/nfs: turn rmdir_sem into a semaphore -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The RW semaphore had a reader side which used the _non_owner version because it most likely took the reader lock in one thread and released it @@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c -@@ -1775,7 +1775,11 @@ int nfs_rmdir(struct inode *dir, struct +@@ -1786,7 +1786,11 @@ int nfs_rmdir(struct inode *dir, struct trace_nfs_rmdir_enter(dir, dentry); if (d_really_is_positive(dentry)) { @@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); /* Ensure the VFS deletes this inode */ switch (error) { -@@ -1785,7 +1789,11 @@ int nfs_rmdir(struct inode *dir, struct +@@ -1796,7 +1800,11 @@ int nfs_rmdir(struct inode *dir, struct case -ENOENT: nfs_dentry_handle_enoent(dentry); } diff --git a/debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch b/debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch index 7ac15e4c5..2e76306f8 100644 --- a/debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch +++ b/debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Fri, 18 Mar 2011 09:18:52 +0100 Subject: buffer_head: Replace bh_uptodate_lock for -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Wrap the bit_spin_lock calls into a separate inline and add the RT replacements with a real spinlock. 
@@ -11,9 +11,8 @@ Signed-off-by: Thomas Gleixner fs/buffer.c | 21 +++++++-------------- fs/ext4/page-io.c | 6 ++---- fs/ntfs/aops.c | 10 +++------- - fs/xfs/xfs_aops.c | 6 ++---- include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++ - 5 files changed, 48 insertions(+), 29 deletions(-) + 4 files changed, 46 insertions(+), 25 deletions(-) --- a/fs/buffer.c +++ b/fs/buffer.c @@ -76,7 +75,7 @@ Signed-off-by: Thomas Gleixner } EXPORT_SYMBOL(end_buffer_async_write); -@@ -3349,6 +3341,7 @@ struct buffer_head *alloc_buffer_head(gf +@@ -3360,6 +3352,7 @@ struct buffer_head *alloc_buffer_head(gf struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); if (ret) { INIT_LIST_HEAD(&ret->b_assoc_buffers); @@ -139,28 +138,6 @@ Signed-off-by: Thomas Gleixner } /** ---- a/fs/xfs/xfs_aops.c -+++ b/fs/xfs/xfs_aops.c -@@ -108,8 +108,7 @@ xfs_finish_page_writeback( - ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE); - ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0); - -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &head->b_state); -+ flags = bh_uptodate_lock_irqsave(head); - do { - if (off >= bvec->bv_offset && - off < bvec->bv_offset + bvec->bv_len) { -@@ -131,8 +130,7 @@ xfs_finish_page_writeback( - } - off += bh->b_size; - } while ((bh = bh->b_this_page) != head); -- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(head, flags); - - if (!busy) - end_page_writeback(bvec->bv_page); --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -76,8 +76,42 @@ struct buffer_head { diff --git a/debian/patches-rt/fscache-initialize-cookie-hash-table-raw-spinlocks.patch b/debian/patches-rt/fscache-initialize-cookie-hash-table-raw-spinlocks.patch index 926cb415c..6ea61a9d3 100644 --- a/debian/patches-rt/fscache-initialize-cookie-hash-table-raw-spinlocks.patch +++ b/debian/patches-rt/fscache-initialize-cookie-hash-table-raw-spinlocks.patch @@ -1,7 +1,7 @@ From: Clark Williams Date: Tue, 3 Jul 2018 13:34:30 -0500 Subject: [PATCH] fscache: initialize cookie hash table raw spinlocks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The fscache cookie mechanism uses a hash table of hlist_bl_head structures. 
The PREEMPT_RT patchset adds a raw spinlock to this structure and so on PREEMPT_RT @@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c -@@ -973,3 +973,11 @@ int __fscache_check_consistency(struct f +@@ -962,3 +962,11 @@ int __fscache_check_consistency(struct f return -ESTALE; } EXPORT_SYMBOL(__fscache_check_consistency); @@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior +} --- a/fs/fscache/main.c +++ b/fs/fscache/main.c -@@ -151,6 +151,7 @@ static int __init fscache_init(void) +@@ -149,6 +149,7 @@ static int __init fscache_init(void) ret = -ENOMEM; goto error_cookie_jar; } diff --git a/debian/patches-rt/ftrace-Fix-trace-header-alignment.patch b/debian/patches-rt/ftrace-Fix-trace-header-alignment.patch index 7b7b34fb1..3bf8140e1 100644 --- a/debian/patches-rt/ftrace-Fix-trace-header-alignment.patch +++ b/debian/patches-rt/ftrace-Fix-trace-header-alignment.patch @@ -1,7 +1,7 @@ From: Mike Galbraith Date: Sun, 16 Oct 2016 05:08:30 +0200 Subject: [PATCH] ftrace: Fix trace header alignment -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Line up helper arrows to the right column. @@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -3347,17 +3347,17 @@ get_total_entries(struct trace_buffer *b +@@ -3348,17 +3348,17 @@ get_total_entries(struct trace_buffer *b static void print_lat_help_header(struct seq_file *m) { diff --git a/debian/patches-rt/ftrace-migrate-disable-tracing.patch b/debian/patches-rt/ftrace-migrate-disable-tracing.patch index 26d63f49c..7b5f25743 100644 --- a/debian/patches-rt/ftrace-migrate-disable-tracing.patch +++ b/debian/patches-rt/ftrace-migrate-disable-tracing.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:56:42 +0200 Subject: trace: Add migrate-disabled counter to tracing output -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Signed-off-by: Thomas Gleixner --- @@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner #define TRACE_EVENT_TYPE_MAX \ --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -2145,6 +2145,8 @@ tracing_generic_entry_update(struct trac +@@ -2146,6 +2146,8 @@ tracing_generic_entry_update(struct trac ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | (test_preempt_need_resched() ? 
TRACE_FLAG_PREEMPT_RESCHED : 0); @@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); -@@ -3348,9 +3350,10 @@ static void print_lat_help_header(struct +@@ -3349,9 +3351,10 @@ static void print_lat_help_header(struct "# | / _----=> need-resched \n" "# || / _---=> hardirq/softirq \n" "# ||| / _--=> preempt-depth \n" @@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner static void print_event_info(struct trace_buffer *buf, struct seq_file *m) --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c -@@ -187,6 +187,8 @@ static int trace_define_common_fields(vo +@@ -188,6 +188,8 @@ static int trace_define_common_fields(vo __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); __common_field(int, pid); @@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner } --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c -@@ -493,6 +493,11 @@ int trace_print_lat_fmt(struct trace_seq +@@ -494,6 +494,11 @@ int trace_print_lat_fmt(struct trace_seq else trace_seq_putc(s, '.'); diff --git a/debian/patches-rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/debian/patches-rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch index c682c2820..ff40aaf70 100644 --- a/debian/patches-rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch +++ b/debian/patches-rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Fri, 1 Mar 2013 11:17:42 +0100 Subject: futex: Ensure lock/unlock symmetry versus pi_lock and hash bucket lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz In exit_pi_state_list() we have the following locking construct: diff --git a/debian/patches-rt/futex-requeue-pi-fix.patch b/debian/patches-rt/futex-requeue-pi-fix.patch index 25e3e472a..aad565be3 100644 --- a/debian/patches-rt/futex-requeue-pi-fix.patch +++ b/debian/patches-rt/futex-requeue-pi-fix.patch @@ -1,7 +1,7 @@ From: Steven Rostedt Date: Tue, 14 Jul 2015 14:26:34 +0200 Subject: futex: Fix bug on when a requeued RT task times out -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Requeue with timeout causes a bug with PREEMPT_RT_FULL. diff --git a/debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch b/debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch index 6f6ab69af..f138965fe 100644 --- a/debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch +++ b/debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Wed, 8 Mar 2017 14:23:35 +0100 Subject: [PATCH] futex: workaround migrate_disable/enable in different context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz migrate_disable()/migrate_enable() takes a different path in atomic() vs !atomic() context. 
These little hacks ensure that we don't underflow / overflow diff --git a/debian/patches-rt/genirq-disable-irqpoll-on-rt.patch b/debian/patches-rt/genirq-disable-irqpoll-on-rt.patch index 857321c57..a97878adc 100644 --- a/debian/patches-rt/genirq-disable-irqpoll-on-rt.patch +++ b/debian/patches-rt/genirq-disable-irqpoll-on-rt.patch @@ -1,7 +1,7 @@ From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:57 -0500 Subject: genirq: Disable irqpoll on -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Creates long latencies for no value diff --git a/debian/patches-rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/debian/patches-rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch index 7db9d9d03..07090937b 100644 --- a/debian/patches-rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch +++ b/debian/patches-rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 21 Aug 2013 17:48:46 +0200 Subject: genirq: Do not invoke the affinity callback via a workqueue on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Joe Korty reported, that __irq_set_affinity_locked() schedules a workqueue while holding a rawlock which results in a might_sleep() diff --git a/debian/patches-rt/genirq-force-threading.patch b/debian/patches-rt/genirq-force-threading.patch index 778334a4e..0033726a7 100644 --- a/debian/patches-rt/genirq-force-threading.patch +++ b/debian/patches-rt/genirq-force-threading.patch @@ -1,7 +1,7 @@ Subject: genirq: Force interrupt thread on RT From: Thomas Gleixner Date: Sun, 03 Apr 2011 11:57:29 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Force threaded_irqs and optimize the code (force_irqthreads) in regard to this. diff --git a/debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch b/debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch index 842ae2167..62d51af56 100644 --- a/debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch +++ b/debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch @@ -1,7 +1,7 @@ From: Josh Cartwright Date: Thu, 11 Feb 2016 11:54:00 -0600 Subject: genirq: update irq_set_irqchip_state documentation -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz On -rt kernels, the use of migrate_disable()/migrate_enable() is sufficient to guarantee a task isn't moved to another CPU. Update the @@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -2261,7 +2261,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state) +@@ -2270,7 +2270,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state) * This call sets the internal irqchip state of an interrupt, * depending on the value of @which. 
* diff --git a/debian/patches-rt/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/debian/patches-rt/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch index e2d3eefb0..f4bc66bab 100644 --- a/debian/patches-rt/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch +++ b/debian/patches-rt/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 4 Aug 2017 18:31:00 +0200 Subject: [PATCH] hotplug: duct-tape RT-rwlock usage for non-RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz This type is only available on -RT. We need to craft something for non-RT. Since the only migrate_disable() user is -RT only, there is no @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \ __RWLOCK_RT_INITIALIZER(cpuhp_pin_lock); #endif -@@ -293,6 +293,7 @@ static int cpu_hotplug_disabled; +@@ -290,6 +290,7 @@ static int cpu_hotplug_disabled; */ void pin_current_cpu(void) { @@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior struct rt_rw_lock *cpuhp_pin; unsigned int cpu; int ret; -@@ -317,6 +318,7 @@ void pin_current_cpu(void) +@@ -314,6 +315,7 @@ void pin_current_cpu(void) goto again; } current->pinned_on_cpu = cpu; @@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior } /** -@@ -324,6 +326,7 @@ void pin_current_cpu(void) +@@ -321,6 +323,7 @@ void pin_current_cpu(void) */ void unpin_current_cpu(void) { @@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock); if (WARN_ON(current->pinned_on_cpu != smp_processor_id())) -@@ -331,6 +334,7 @@ void unpin_current_cpu(void) +@@ -328,6 +331,7 @@ void unpin_current_cpu(void) current->pinned_on_cpu = -1; __read_rt_unlock(cpuhp_pin); @@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior } DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock); -@@ -889,7 +893,9 @@ static int take_cpu_down(void *_param) +@@ -881,7 +885,9 @@ static int take_cpu_down(void *_param) static int takedown_cpu(unsigned int cpu) { @@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int err; -@@ -902,14 +908,18 @@ static int takedown_cpu(unsigned int cpu +@@ -894,14 +900,18 @@ static int takedown_cpu(unsigned int cpu */ irq_lock_sparse(); @@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* CPU refused to die */ irq_unlock_sparse(); /* Unpark the hotplug thread so we can rollback there */ -@@ -928,7 +938,9 @@ static int takedown_cpu(unsigned int cpu +@@ -920,7 +930,9 @@ static int takedown_cpu(unsigned int cpu wait_for_ap_thread(st, false); BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); diff --git a/debian/patches-rt/hotplug-light-get-online-cpus.patch b/debian/patches-rt/hotplug-light-get-online-cpus.patch index 945f71d91..bdc296702 100644 --- a/debian/patches-rt/hotplug-light-get-online-cpus.patch +++ b/debian/patches-rt/hotplug-light-get-online-cpus.patch @@ -1,7 +1,7 @@ Subject: hotplug: Lightweight get online cpus From: Thomas Gleixner Date: Wed, 15 Jun 2011 12:36:06 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz get_online_cpus() is a heavy weight function which involves a global mutex. 
migrate_disable() wants a simpler construct which prevents only @@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner --- a/include/linux/cpu.h +++ b/include/linux/cpu.h -@@ -110,6 +110,8 @@ extern void cpu_hotplug_disable(void); +@@ -111,6 +111,8 @@ extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); void clear_tasks_mm_cpumask(int cpu); int cpu_down(unsigned int cpu); @@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner #else /* CONFIG_HOTPLUG_CPU */ -@@ -120,6 +122,9 @@ static inline void cpus_read_unlock(void +@@ -122,6 +124,9 @@ static inline int cpus_read_trylock(voi static inline void lockdep_assert_cpus_held(void) { } static inline void cpu_hotplug_disable(void) { } static inline void cpu_hotplug_enable(void) { } @@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner /* Wrappers which go away once all code is converted */ --- a/kernel/cpu.c +++ b/kernel/cpu.c -@@ -283,6 +283,21 @@ static int cpu_hotplug_disabled; +@@ -280,6 +280,21 @@ static int cpu_hotplug_disabled; #ifdef CONFIG_HOTPLUG_CPU @@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner void cpus_read_lock(void) --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7219,6 +7219,7 @@ void migrate_disable(void) +@@ -7197,6 +7197,7 @@ void migrate_disable(void) } preempt_disable(); @@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner migrate_disable_update_cpus_allowed(p); p->migrate_disable = 1; -@@ -7284,12 +7285,15 @@ void migrate_enable(void) +@@ -7262,12 +7263,15 @@ void migrate_enable(void) arg.task = p; arg.dest_cpu = dest_cpu; diff --git a/debian/patches-rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch b/debian/patches-rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch index c6b5f0459..b2b070361 100644 --- a/debian/patches-rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch +++ b/debian/patches-rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch @@ -1,7 +1,7 @@ From: Yang Shi Date: Mon, 16 Sep 2013 14:09:19 -0700 Subject: hrtimer: Move schedule_work call to helper thread -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz When run ltp leapsec_timer test, the following call trace is caught: diff --git a/debian/patches-rt/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/debian/patches-rt/hrtimer-by-timers-by-default-into-the-softirq-context.patch index 529711c6c..1e33aac12 100644 --- a/debian/patches-rt/hrtimer-by-timers-by-default-into-the-softirq-context.patch +++ b/debian/patches-rt/hrtimer-by-timers-by-default-into-the-softirq-context.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 3 Jul 2009 08:44:31 -0500 Subject: hrtimer: by timers by default into the softirq context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz We can't have hrtimers callbacks running in hardirq context on RT. Therefore the timers are deferred to the softirq context by default. 
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c -@@ -2176,7 +2176,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc +@@ -2245,7 +2245,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc apic->vcpu = vcpu; hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, @@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior timer->function = perf_mux_hrtimer_handler; } -@@ -9166,7 +9166,7 @@ static void perf_swevent_init_hrtimer(st +@@ -9173,7 +9173,7 @@ static void perf_swevent_init_hrtimer(st if (!is_sampling_event(event)) return; @@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -321,7 +321,7 @@ static void hrtick_rq_init(struct rq *rq +@@ -315,7 +315,7 @@ static void hrtick_rq_init(struct rq *rq rq->hrtick_csd.info = rq; #endif @@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior #else /* CONFIG_SCHED_HRTICK */ --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c -@@ -1053,7 +1053,7 @@ void init_dl_task_timer(struct sched_dl_ +@@ -1054,7 +1054,7 @@ void init_dl_task_timer(struct sched_dl_ { struct hrtimer *timer = &dl_se->dl_timer; @@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -5182,9 +5182,9 @@ void init_cfs_bandwidth(struct cfs_bandw +@@ -4878,9 +4878,9 @@ void init_cfs_bandwidth(struct cfs_bandw cfs_b->period = ns_to_ktime(default_cfs_period()); INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); @@ -113,11 +113,11 @@ Signed-off-by: Sebastian Andrzej Siewior - hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); cfs_b->slack_timer.function = sched_cfs_slack_timer; + cfs_b->distribute_running = 0; } - --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c -@@ -43,8 +43,8 @@ void init_rt_bandwidth(struct rt_bandwid +@@ -45,8 +45,8 @@ void init_rt_bandwidth(struct rt_bandwid raw_spin_lock_init(&rt_b->rt_runtime_lock); @@ -160,7 +160,7 @@ Signed-off-by: Sebastian Andrzej Siewior memset(timer, 0, sizeof(struct hrtimer)); cpu_base = raw_cpu_ptr(&hrtimer_bases); -@@ -1682,6 +1691,14 @@ static void __hrtimer_init_sleeper(struc +@@ -1681,6 +1690,14 @@ static void __hrtimer_init_sleeper(struc enum hrtimer_mode mode, struct task_struct *task) { @@ -199,7 +199,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* Get the next period (per-CPU) */ --- a/kernel/watchdog.c +++ b/kernel/watchdog.c -@@ -463,7 +463,7 @@ static void watchdog_enable(unsigned int +@@ -483,7 +483,7 @@ static void watchdog_enable(unsigned int * Start the timer first to prevent the NMI watchdog triggering * before the timer has a chance to fire. 
*/ diff --git a/debian/patches-rt/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/debian/patches-rt/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch index ec0e300ec..3554cc4d1 100644 --- a/debian/patches-rt/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch +++ b/debian/patches-rt/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Tue, 3 Jul 2018 11:25:41 +0200 Subject: [PATCH v2] hrtimer: consolidate hrtimer_init() + hrtimer_init_sleeper() calls -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz hrtimer_init_sleeper() calls require a prior initialisation of the hrtimer object with hrtimer_init(). Lets make the initialisation of @@ -27,7 +27,7 @@ Signed-off-by: Anna-Maria Gleixner --- a/block/blk-mq.c +++ b/block/blk-mq.c -@@ -2982,10 +2982,9 @@ static bool blk_mq_poll_hybrid_sleep(str +@@ -3115,10 +3115,9 @@ static bool blk_mq_poll_hybrid_sleep(str kt = nsecs; mode = HRTIMER_MODE_REL; @@ -41,15 +41,15 @@ Signed-off-by: Anna-Maria Gleixner break; --- a/drivers/staging/android/vsoc.c +++ b/drivers/staging/android/vsoc.c -@@ -438,12 +438,10 @@ static int handle_vsoc_cond_wait(struct - - if (!timespec_valid(&ts)) +@@ -437,12 +437,10 @@ static int handle_vsoc_cond_wait(struct return -EINVAL; + wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec); + - hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC, - HRTIMER_MODE_ABS); + hrtimer_init_sleeper_on_stack(to, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS, current); - hrtimer_set_expires_range_ns(&to->timer, timespec_to_ktime(ts), + hrtimer_set_expires_range_ns(&to->timer, wake_time, current->timer_slack_ns); - - hrtimer_init_sleeper(to, current); @@ -159,7 +159,7 @@ Signed-off-by: Anna-Maria Gleixner } --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c -@@ -1649,13 +1649,44 @@ static enum hrtimer_restart hrtimer_wake +@@ -1648,13 +1648,44 @@ static enum hrtimer_restart hrtimer_wake return HRTIMER_NORESTART; } @@ -205,7 +205,7 @@ Signed-off-by: Anna-Maria Gleixner int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts) { switch(restart->nanosleep.type) { -@@ -1679,8 +1710,6 @@ static int __sched do_nanosleep(struct h +@@ -1678,8 +1709,6 @@ static int __sched do_nanosleep(struct h { struct restart_block *restart; @@ -214,7 +214,7 @@ Signed-off-by: Anna-Maria Gleixner do { set_current_state(TASK_INTERRUPTIBLE); hrtimer_start_expires(&t->timer, mode); -@@ -1717,10 +1746,9 @@ static long __sched hrtimer_nanosleep_re +@@ -1716,10 +1745,9 @@ static long __sched hrtimer_nanosleep_re struct hrtimer_sleeper t; int ret; @@ -227,7 +227,7 @@ Signed-off-by: Anna-Maria Gleixner ret = do_nanosleep(&t, HRTIMER_MODE_ABS); destroy_hrtimer_on_stack(&t.timer); return ret; -@@ -1738,7 +1766,7 @@ long hrtimer_nanosleep(const struct time +@@ -1737,7 +1765,7 @@ long hrtimer_nanosleep(const struct time if (dl_task(current) || rt_task(current)) slack = 0; @@ -236,7 +236,7 @@ Signed-off-by: Anna-Maria Gleixner hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack); ret = do_nanosleep(&t, mode); if (ret != -ERESTART_RESTARTBLOCK) -@@ -1937,11 +1965,9 @@ schedule_hrtimeout_range_clock(ktime_t * +@@ -1936,11 +1964,9 @@ schedule_hrtimeout_range_clock(ktime_t * return -EINTR; } @@ -251,7 +251,7 @@ Signed-off-by: Anna-Maria Gleixner if (likely(t.task)) --- a/net/core/pktgen.c +++ b/net/core/pktgen.c -@@ 
-2162,7 +2162,8 @@ static void spin(struct pktgen_dev *pkt_ +@@ -2160,7 +2160,8 @@ static void spin(struct pktgen_dev *pkt_ s64 remaining; struct hrtimer_sleeper t; @@ -261,7 +261,7 @@ Signed-off-by: Anna-Maria Gleixner hrtimer_set_expires(&t.timer, spin_until); remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); -@@ -2177,7 +2178,6 @@ static void spin(struct pktgen_dev *pkt_ +@@ -2175,7 +2176,6 @@ static void spin(struct pktgen_dev *pkt_ } while (ktime_compare(end_time, spin_until) < 0); } else { /* see do_nanosleep */ diff --git a/debian/patches-rt/hrtimers-prepare-full-preemption.patch b/debian/patches-rt/hrtimers-prepare-full-preemption.patch index 558a7c987..c85ad3be4 100644 --- a/debian/patches-rt/hrtimers-prepare-full-preemption.patch +++ b/debian/patches-rt/hrtimers-prepare-full-preemption.patch @@ -1,7 +1,7 @@ From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:34 -0500 Subject: hrtimers: Prepare full preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Make cancellation of a running callback in softirq context safe against preemption. @@ -152,7 +152,7 @@ Signed-off-by: Thomas Gleixner } #ifdef CONFIG_HIGH_RES_TIMERS -@@ -1847,6 +1875,9 @@ int hrtimers_prepare_cpu(unsigned int cp +@@ -1846,6 +1874,9 @@ int hrtimers_prepare_cpu(unsigned int cp cpu_base->softirq_next_timer = NULL; cpu_base->expires_next = KTIME_MAX; cpu_base->softirq_expires_next = KTIME_MAX; @@ -164,7 +164,7 @@ Signed-off-by: Thomas Gleixner --- a/kernel/time/itimer.c +++ b/kernel/time/itimer.c -@@ -214,6 +214,7 @@ int do_setitimer(int which, struct itime +@@ -215,6 +215,7 @@ int do_setitimer(int which, struct itime /* We are sharing ->siglock with it_real_fn() */ if (hrtimer_try_to_cancel(timer) < 0) { spin_unlock_irq(&tsk->sighand->siglock); @@ -174,7 +174,7 @@ Signed-off-by: Thomas Gleixner expires = timeval_to_ktime(value->it_value); --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c -@@ -469,7 +469,7 @@ static struct k_itimer * alloc_posix_tim +@@ -466,7 +466,7 @@ static struct k_itimer * alloc_posix_tim static void k_itimer_rcu_free(struct rcu_head *head) { @@ -183,7 +183,7 @@ Signed-off-by: Thomas Gleixner kmem_cache_free(posix_timers_cache, tmr); } -@@ -486,7 +486,7 @@ static void release_posix_timer(struct k +@@ -483,7 +483,7 @@ static void release_posix_timer(struct k } put_pid(tmr->it_pid); sigqueue_free(tmr->sigq); @@ -192,7 +192,7 @@ Signed-off-by: Thomas Gleixner } static int common_timer_create(struct k_itimer *new_timer) -@@ -825,6 +825,22 @@ static void common_hrtimer_arm(struct k_ +@@ -824,6 +824,22 @@ static void common_hrtimer_arm(struct k_ hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } @@ -215,7 +215,7 @@ Signed-off-by: Thomas Gleixner static int common_hrtimer_try_to_cancel(struct k_itimer *timr) { return hrtimer_try_to_cancel(&timr->it.real.timer); -@@ -889,6 +905,7 @@ static int do_timer_settime(timer_t time +@@ -888,6 +904,7 @@ static int do_timer_settime(timer_t time if (!timr) return -EINVAL; @@ -223,7 +223,7 @@ Signed-off-by: Thomas Gleixner kc = timr->kclock; if (WARN_ON_ONCE(!kc || !kc->timer_set)) error = -EINVAL; -@@ -897,9 +914,12 @@ static int do_timer_settime(timer_t time +@@ -896,9 +913,12 @@ static int do_timer_settime(timer_t time unlock_timer(timr, flag); if (error == TIMER_RETRY) { @@ -236,7 +236,7 @@ Signed-off-by: Thomas Gleixner return error; } -@@ -981,10 +1001,15 @@ SYSCALL_DEFINE1(timer_delete, 
timer_t, t +@@ -980,10 +1000,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t if (!timer) return -EINVAL; @@ -252,7 +252,7 @@ Signed-off-by: Thomas Gleixner spin_lock(¤t->sighand->siglock); list_del(&timer->list); -@@ -1010,8 +1035,18 @@ static void itimer_delete(struct k_itime +@@ -1009,8 +1034,18 @@ static void itimer_delete(struct k_itime retry_delete: spin_lock_irqsave(&timer->it_lock, flags); diff --git a/debian/patches-rt/iommu-amd-drop-irqs_disabled-warn_on.patch b/debian/patches-rt/iommu-amd-drop-irqs_disabled-warn_on.patch deleted file mode 100644 index 07a3549d3..000000000 --- a/debian/patches-rt/iommu-amd-drop-irqs_disabled-warn_on.patch +++ /dev/null @@ -1,53 +0,0 @@ -From: Anna-Maria Gleixner -Subject: [PATCH] iommu/amd: Remove redundant WARN_ON() -Date: Fri, 20 Jul 2018 10:45:45 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -The WARN_ON() was introduced in commit 272e4f99e966 ("iommu/amd: WARN -when __[attach|detach]_device are called with irqs enabled") to ensure -that the domain->lock is taken in proper irqs disabled context. This -is required, because the domain->lock is taken as well in irq -context. - -The proper context check by the WARN_ON() is redundant, because it is -already covered by LOCKDEP. When working with locks and changing -context, a run with LOCKDEP is required anyway and would detect the -wrong lock context. - -Furthermore all callers for those functions are within the same file -and all callers acquire another lock which already disables interrupts. - -Signed-off-by: Anna-Maria Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/iommu/amd_iommu.c | 12 ------------ - 1 file changed, 12 deletions(-) - ---- a/drivers/iommu/amd_iommu.c -+++ b/drivers/iommu/amd_iommu.c -@@ -1944,12 +1944,6 @@ static int __attach_device(struct iommu_ - { - int ret; - -- /* -- * Must be called with IRQs disabled. Warn here to detect early -- * when its not. -- */ -- WARN_ON(!irqs_disabled()); -- - /* lock domain */ - spin_lock(&domain->lock); - -@@ -2115,12 +2109,6 @@ static void __detach_device(struct iommu - { - struct protection_domain *domain; - -- /* -- * Must be called with IRQs disabled. Warn here to detect early -- * when its not. 
-- */ -- WARN_ON(!irqs_disabled()); -- - domain = dev_data->domain; - - spin_lock(&domain->lock); diff --git a/debian/patches-rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/debian/patches-rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch index 750907c84..12f55d4aa 100644 --- a/debian/patches-rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch +++ b/debian/patches-rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch @@ -1,7 +1,7 @@ Subject: genirq: Allow disabling of softirq processing in irq thread context From: Thomas Gleixner Date: Tue, 31 Jan 2012 13:01:27 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The processing of softirqs in irq thread context is a performance gain for the non-rt workloads of a system, but it's counterproductive for @@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -960,7 +960,15 @@ irq_forced_thread_fn(struct irq_desc *de +@@ -967,7 +967,15 @@ irq_forced_thread_fn(struct irq_desc *de local_bh_disable(); ret = action->thread_fn(action->irq, action->dev_id); irq_finalize_oneshot(desc, action); @@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner return ret; } -@@ -1470,6 +1478,9 @@ static int +@@ -1476,6 +1484,9 @@ static int irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } diff --git a/debian/patches-rt/irqchip-gic-v3-its-Make-its_lock-a-raw_spin_lock_t.patch b/debian/patches-rt/irqchip-gic-v3-its-Make-its_lock-a-raw_spin_lock_t.patch deleted file mode 100644 index 32a17c83d..000000000 --- a/debian/patches-rt/irqchip-gic-v3-its-Make-its_lock-a-raw_spin_lock_t.patch +++ /dev/null @@ -1,93 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 18 Jul 2018 17:42:04 +0200 -Subject: [PATCH] irqchip/gic-v3-its: Make its_lock a raw_spin_lock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -[ Upstream commit a8db74564b0c634667e1722264bde303d296f566 ] - -The its_lock lock is held while a new device is added to the list and -during setup while the CPU is booted. Even on -RT the CPU-bootup is -performed with disabled interrupts. - -Make its_lock a raw_spin_lock_t. 
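On -RT a plain spinlock_t becomes a sleeping lock, which cannot be taken in the
interrupts-disabled CPU bring-up path described above, hence the raw conversion.
A minimal sketch of the pattern, with illustrative names rather than the actual
GIC code:

#include <linux/spinlock.h>
#include <linux/list.h>

static LIST_HEAD(demo_nodes);           /* stand-in for its_nodes */
static DEFINE_RAW_SPINLOCK(demo_lock);  /* was: DEFINE_SPINLOCK(demo_lock) */

/* May run during CPU bring-up with interrupts already disabled. */
static void demo_init_collections(void)
{
	struct list_head *pos;

	raw_spin_lock(&demo_lock);      /* was: spin_lock(&demo_lock) */
	list_for_each(pos, &demo_nodes)
		;                       /* short, bounded walk */
	raw_spin_unlock(&demo_lock);    /* was: spin_unlock(&demo_lock) */
}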
- -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Marc Zyngier ---- - drivers/irqchip/irq-gic-v3-its.c | 18 +++++++++--------- - 1 file changed, 9 insertions(+), 9 deletions(-) - ---- a/drivers/irqchip/irq-gic-v3-its.c -+++ b/drivers/irqchip/irq-gic-v3-its.c -@@ -160,7 +160,7 @@ static struct { - } vpe_proxy; - - static LIST_HEAD(its_nodes); --static DEFINE_SPINLOCK(its_lock); -+static DEFINE_RAW_SPINLOCK(its_lock); - static struct rdists *gic_rdists; - static struct irq_domain *its_parent; - -@@ -1997,12 +1997,12 @@ static void its_cpu_init_collections(voi - { - struct its_node *its; - -- spin_lock(&its_lock); -+ raw_spin_lock(&its_lock); - - list_for_each_entry(its, &its_nodes, entry) - its_cpu_init_collection(its); - -- spin_unlock(&its_lock); -+ raw_spin_unlock(&its_lock); - } - - static struct its_device *its_find_device(struct its_node *its, u32 dev_id) -@@ -3070,7 +3070,7 @@ static int its_save_disable(void) - struct its_node *its; - int err = 0; - -- spin_lock(&its_lock); -+ raw_spin_lock(&its_lock); - list_for_each_entry(its, &its_nodes, entry) { - void __iomem *base; - -@@ -3102,7 +3102,7 @@ static int its_save_disable(void) - writel_relaxed(its->ctlr_save, base + GITS_CTLR); - } - } -- spin_unlock(&its_lock); -+ raw_spin_unlock(&its_lock); - - return err; - } -@@ -3112,7 +3112,7 @@ static void its_restore_enable(void) - struct its_node *its; - int ret; - -- spin_lock(&its_lock); -+ raw_spin_lock(&its_lock); - list_for_each_entry(its, &its_nodes, entry) { - void __iomem *base; - int i; -@@ -3164,7 +3164,7 @@ static void its_restore_enable(void) - GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) - its_cpu_init_collection(its); - } -- spin_unlock(&its_lock); -+ raw_spin_unlock(&its_lock); - } - - static struct syscore_ops its_syscore_ops = { -@@ -3398,9 +3398,9 @@ static int __init its_probe_one(struct r - if (err) - goto out_free_tables; - -- spin_lock(&its_lock); -+ raw_spin_lock(&its_lock); - list_add(&its->entry, &its_nodes); -- spin_unlock(&its_lock); -+ raw_spin_unlock(&its_lock); - - return 0; - diff --git a/debian/patches-rt/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch b/debian/patches-rt/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch index 42f790702..372cc232d 100644 --- a/debian/patches-rt/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch +++ b/debian/patches-rt/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch @@ -2,7 +2,7 @@ From: Marc Zyngier Date: Fri, 27 Jul 2018 13:38:54 +0100 Subject: [PATCH] irqchip/gic-v3-its: Move pending table allocation to init time -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Signed-off-by: Marc Zyngier Signed-off-by: Sebastian Andrzej Siewior @@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c -@@ -171,6 +171,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock); +@@ -173,6 +173,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock); static DEFINE_IDA(its_vpeid_ida); #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) @@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) -@@ -1555,7 +1556,7 @@ static void its_free_prop_table(struct p +@@ -1622,7 +1623,7 @@ static void its_free_prop_table(struct p get_order(LPI_PROPBASE_SZ)); } @@ -30,7 
+30,7 @@ Signed-off-by: Sebastian Andrzej Siewior { phys_addr_t paddr; -@@ -1877,30 +1878,47 @@ static void its_free_pending_table(struc +@@ -1945,30 +1946,47 @@ static void its_free_pending_table(struc get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); } @@ -93,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* set PROPBASE */ val = (page_to_phys(gic_rdists->prop_page) | GICR_PROPBASER_InnerShareable | -@@ -1952,6 +1970,10 @@ static void its_cpu_init_lpis(void) +@@ -2020,6 +2038,10 @@ static void its_cpu_init_lpis(void) /* Make sure the GIC has seen the above */ dsb(sy); @@ -104,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior } static void its_cpu_init_collection(struct its_node *its) -@@ -3427,16 +3449,6 @@ static int redist_disable_lpis(void) +@@ -3498,16 +3520,6 @@ static int redist_disable_lpis(void) u64 timeout = USEC_PER_SEC; u64 val; @@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (!gic_rdists_supports_plpis()) { pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); return -ENXIO; -@@ -3446,7 +3458,18 @@ static int redist_disable_lpis(void) +@@ -3517,7 +3529,18 @@ static int redist_disable_lpis(void) if (!(val & GICR_CTLR_ENABLE_LPIS)) return 0; @@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior smp_processor_id()); add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); -@@ -3702,7 +3725,8 @@ int __init its_init(struct fwnode_handle +@@ -3773,7 +3796,8 @@ int __init its_init(struct fwnode_handle } gic_rdists = rdists; @@ -153,11 +153,11 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h -@@ -574,6 +574,7 @@ struct rdists { +@@ -585,6 +585,7 @@ struct rdists { void __iomem *rd_base; struct page *pend_page; phys_addr_t phys_base; + bool lpi_enabled; } __percpu *rdist; struct page *prop_page; - int id_bits; + u64 flags; diff --git a/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch b/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch index bc5432f46..aac536dc6 100644 --- a/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch +++ b/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch @@ -1,7 +1,7 @@ Subject: irqwork: push most work into softirq context From: Sebastian Andrzej Siewior Date: Tue, 23 Jun 2015 15:32:51 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Initially we defered all irqwork into softirq because we didn't want the latency spikes if perf or another user was busy and delayed the RT task. 
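Work items that must still run from the hard interrupt even on -RT opt in
through the new IRQ_WORK_HARD_IRQ flag, mirroring what this patch does for
rcu_iw and the nohz kick work. A minimal sketch with illustrative names:

#include <linux/irq_work.h>

static void demo_handler(struct irq_work *work)
{
	/* Runs from the hard irq path even on -RT; keep it tiny. */
}

static struct irq_work demo_work;

static void demo_setup(void)
{
	init_irq_work(&demo_work, demo_handler);
	demo_work.flags = IRQ_WORK_HARD_IRQ;  /* opt out of softirq deferral */
	irq_work_queue(&demo_work);
}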
@@ -22,16 +22,14 @@ Mike Galbraith, hard and soft variant] Signed-off-by: Sebastian Andrzej Siewior --- - include/linux/irq_work.h | 8 ++++++ - kernel/irq_work.c | 59 ++++++++++++++++++++++++++++++++-------- - kernel/rcu/tree.c | 1 + - kernel/sched/topology.c | 1 + - kernel/time/tick-sched.c | 1 + - kernel/time/timer.c | 2 ++ - 6 files changed, 60 insertions(+), 12 deletions(-) + include/linux/irq_work.h | 8 ++++++ + kernel/irq_work.c | 60 ++++++++++++++++++++++++++++++++++++----------- + kernel/rcu/tree.c | 1 + kernel/sched/topology.c | 1 + kernel/time/tick-sched.c | 1 + kernel/time/timer.c | 2 + + 6 files changed, 60 insertions(+), 13 deletions(-) -diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h -index b11fcdfd0770..c1afbba27902 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -18,6 +18,8 @@ @@ -43,7 +41,7 @@ index b11fcdfd0770..c1afbba27902 100644 #define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) -@@ -52,4 +54,10 @@ static inline bool irq_work_needs_cpu(void) { return false; } +@@ -52,4 +54,10 @@ static inline bool irq_work_needs_cpu(vo static inline void irq_work_run(void) { } #endif @@ -54,8 +52,6 @@ index b11fcdfd0770..c1afbba27902 100644 +#endif + #endif /* _LINUX_IRQ_WORK_H */ -diff --git a/kernel/irq_work.c b/kernel/irq_work.c -index 6b7cdf17ccf8..e765a79ef48b 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -17,6 +17,7 @@ @@ -75,7 +71,7 @@ index 6b7cdf17ccf8..e765a79ef48b 100644 /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(cpu)); -@@ -76,7 +79,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) +@@ -76,7 +79,12 @@ bool irq_work_queue_on(struct irq_work * if (!irq_work_claim(work)) return false; @@ -89,7 +85,7 @@ index 6b7cdf17ccf8..e765a79ef48b 100644 arch_send_call_function_single_ipi(cpu); #else /* #ifdef CONFIG_SMP */ -@@ -89,6 +97,9 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) +@@ -89,6 +97,9 @@ bool irq_work_queue_on(struct irq_work * /* Enqueue the irq work @work on the current CPU */ bool irq_work_queue(struct irq_work *work) { @@ -99,7 +95,7 @@ index 6b7cdf17ccf8..e765a79ef48b 100644 /* Only queue if not already pending */ if (!irq_work_claim(work)) return false; -@@ -96,13 +107,15 @@ bool irq_work_queue(struct irq_work *work) +@@ -96,13 +107,15 @@ bool irq_work_queue(struct irq_work *wor /* Queue the entry and raise the IPI if needed. */ preempt_disable(); @@ -134,7 +130,7 @@ index 6b7cdf17ccf8..e765a79ef48b 100644 /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); -@@ -135,7 +147,12 @@ static void irq_work_run_list(struct llist_head *list) +@@ -135,8 +147,12 @@ static void irq_work_run_list(struct lli struct llist_node *llnode; unsigned long flags; @@ -143,11 +139,12 @@ index 6b7cdf17ccf8..e765a79ef48b 100644 + * nort: On RT IRQ-work may run in SOFTIRQ context. 
+ */ BUG_ON(!irqs_disabled()); +- +#endif - if (llist_empty(list)) return; -@@ -168,7 +185,16 @@ static void irq_work_run_list(struct llist_head *list) + +@@ -168,7 +184,16 @@ static void irq_work_run_list(struct lli void irq_work_run(void) { irq_work_run_list(this_cpu_ptr(&raised_list)); @@ -165,7 +162,7 @@ index 6b7cdf17ccf8..e765a79ef48b 100644 } EXPORT_SYMBOL_GPL(irq_work_run); -@@ -178,8 +204,17 @@ void irq_work_tick(void) +@@ -178,8 +203,17 @@ void irq_work_tick(void) if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) irq_work_run_list(raised); @@ -183,23 +180,19 @@ index 6b7cdf17ccf8..e765a79ef48b 100644 /* * Synchronize against the irq_work @entry, ensures the entry is not -diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c -index aa7cade1b9f3..131fe93756c4 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c -@@ -1259,6 +1259,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) - !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum && +@@ -1296,6 +1296,7 @@ static int rcu_implicit_dynticks_qs(stru + !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && (rnp->ffmask & rdp->grpmask)) { init_irq_work(&rdp->rcu_iw, rcu_iw_handler); + rdp->rcu_iw.flags = IRQ_WORK_HARD_IRQ; rdp->rcu_iw_pending = true; - rdp->rcu_iw_gpnum = rnp->gpnum; + rdp->rcu_iw_gp_seq = rnp->gp_seq; irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); -diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c -index 56a0fed30c0a..dc7fd09d66fa 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c -@@ -279,6 +279,7 @@ static int init_rootdomain(struct root_domain *rd) +@@ -279,6 +279,7 @@ static int init_rootdomain(struct root_d rd->rto_cpu = -1; raw_spin_lock_init(&rd->rto_lock); init_irq_work(&rd->rto_push_work, rto_push_irq_work_func); @@ -207,11 +200,9 @@ index 56a0fed30c0a..dc7fd09d66fa 100644 #endif init_dl_bw(&rd->dl_bw); -diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index 5b33e2f5c0ed..2fd4a37ffdc2 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c -@@ -227,6 +227,7 @@ static void nohz_full_kick_func(struct irq_work *work) +@@ -232,6 +232,7 @@ static void nohz_full_kick_func(struct i static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { .func = nohz_full_kick_func, @@ -219,11 +210,9 @@ index 5b33e2f5c0ed..2fd4a37ffdc2 100644 }; /* -diff --git a/kernel/time/timer.c b/kernel/time/timer.c -index 786f8c014e7e..6c996ba08e0a 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c -@@ -1692,6 +1692,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) +@@ -1733,6 +1733,8 @@ static __latent_entropy void run_timer_s { struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); @@ -232,6 +221,3 @@ index 786f8c014e7e..6c996ba08e0a 100644 __run_timers(base); if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); --- -2.19.0 - diff --git a/debian/patches-rt/jump-label-rt.patch b/debian/patches-rt/jump-label-rt.patch index 6ca56a367..dcd1f9400 100644 --- a/debian/patches-rt/jump-label-rt.patch +++ b/debian/patches-rt/jump-label-rt.patch @@ -1,7 +1,7 @@ Subject: jump-label: disable if stop_machine() is used From: Thomas Gleixner Date: Wed, 08 Jul 2015 17:14:48 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Some architectures are using stop_machine() while switching the opcode which leads to latency spikes. 
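For context, the facility being disabled is the static-key fast path: flipping
a key rewrites kernel text, and architectures without a safe single-instruction
patching primitive do the rewrite under stop_machine(). A minimal sketch of the
API, with a hypothetical key:

#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(demo_key);

static void demo_fast_path(void)
{
	/* Compiles to a NOP until the key is switched on. */
	if (static_branch_unlikely(&demo_key))
		pr_info("slow path enabled\n");
}

static void demo_control_path(void)
{
	/* Rewrites the NOP to a jump; may use stop_machine() on some arches. */
	static_branch_enable(&demo_key);
}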
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig -@@ -50,7 +50,7 @@ config ARM +@@ -51,7 +51,7 @@ config ARM select HARDIRQS_SW_RESEND select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 diff --git a/debian/patches-rt/kconfig-disable-a-few-options-rt.patch b/debian/patches-rt/kconfig-disable-a-few-options-rt.patch index 1612a0da5..8acb5cee2 100644 --- a/debian/patches-rt/kconfig-disable-a-few-options-rt.patch +++ b/debian/patches-rt/kconfig-disable-a-few-options-rt.patch @@ -1,7 +1,7 @@ Subject: kconfig: Disable config options which are not RT compatible From: Thomas Gleixner Date: Sun, 24 Jul 2011 12:11:43 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Disable stuff which is known to have issues on RT @@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner --- a/arch/Kconfig +++ b/arch/Kconfig -@@ -20,6 +20,7 @@ config OPROFILE +@@ -28,6 +28,7 @@ config OPROFILE tristate "OProfile system profiling" depends on PROFILING depends on HAVE_OPROFILE @@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner help --- a/mm/Kconfig +++ b/mm/Kconfig -@@ -378,7 +378,7 @@ config NOMMU_INITIAL_TRIM_EXCESS +@@ -377,7 +377,7 @@ config NOMMU_INITIAL_TRIM_EXCESS config TRANSPARENT_HUGEPAGE bool "Transparent Hugepage Support" diff --git a/debian/patches-rt/kconfig-preempt-rt-full.patch b/debian/patches-rt/kconfig-preempt-rt-full.patch index c7f6cbfc6..cb43f766f 100644 --- a/debian/patches-rt/kconfig-preempt-rt-full.patch +++ b/debian/patches-rt/kconfig-preempt-rt-full.patch @@ -1,7 +1,7 @@ Subject: kconfig: Add PREEMPT_RT_FULL From: Thomas Gleixner Date: Wed, 29 Jun 2011 14:58:57 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Introduce the final symbol for PREEMPT_RT_FULL. @@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner + "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt -@@ -67,6 +67,14 @@ config PREEMPT_RTB +@@ -69,6 +69,14 @@ config PREEMPT_RTB enables changes which are preliminary for the full preemptible RT kernel. 
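Once the symbol exists, code elsewhere in the series can key build-time
decisions off it. A minimal sketch of the usual pattern, with a hypothetical
function:

#include <linux/kernel.h>
#include <linux/printk.h>

static void demo_report_preempt_model(void)
{
	/* IS_ENABLED() folds to a compile-time 0/1 for the config symbol. */
	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
		pr_info("preempt: fully preemptible RT kernel\n");
	else
		pr_info("preempt: not a full-RT preemption model\n");
}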
diff --git a/debian/patches-rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/debian/patches-rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch index 01cafe6ae..0ff557ec8 100644 --- a/debian/patches-rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch +++ b/debian/patches-rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 19 May 2016 17:45:27 +0200 Subject: [PATCH] kernel/printk: Don't try to print from IRQ/NMI region -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz On -RT we try to acquire sleeping locks which might lead to warnings from lockdep or a warn_on() from spin_try_lock() (which is a rtmutex on @@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c -@@ -1771,6 +1771,11 @@ static void call_console_drivers(const c +@@ -1777,6 +1777,11 @@ static void call_console_drivers(const c if (!console_drivers) return; @@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior migrate_disable(); for_each_console(con) { if (exclusive_console && con != exclusive_console) -@@ -2528,6 +2533,11 @@ void console_unblank(void) +@@ -2535,6 +2540,11 @@ void console_unblank(void) { struct console *c; diff --git a/debian/patches-rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/debian/patches-rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch index 340087384..f19e427c2 100644 --- a/debian/patches-rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch +++ b/debian/patches-rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch @@ -4,7 +4,7 @@ Subject: [PATCH] kernel: sched: Provide a pointer to the valid CPU mask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz In commit 4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed() wrapper") the tsk_nr_cpus_allowed() wrapper was removed. There was not @@ -57,29 +57,30 @@ Cc: Ingo Molnar Cc: Rafael J. 
Wysocki Signed-off-by: Sebastian Andrzej Siewior --- - arch/ia64/kernel/mca.c | 2 - - arch/mips/include/asm/switch_to.h | 4 +- - arch/mips/kernel/mips-mt-fpaff.c | 2 - - arch/mips/kernel/traps.c | 6 ++-- - arch/powerpc/platforms/cell/spufs/sched.c | 2 - - drivers/infiniband/hw/hfi1/affinity.c | 6 ++-- - drivers/infiniband/hw/hfi1/sdma.c | 3 -- - drivers/infiniband/hw/qib/qib_file_ops.c | 7 ++--- - fs/proc/array.c | 4 +- - include/linux/sched.h | 5 ++- - init/init_task.c | 3 +- - kernel/cgroup/cpuset.c | 2 - - kernel/fork.c | 2 + - kernel/sched/core.c | 40 ++++++++++++++--------------- - kernel/sched/cpudeadline.c | 4 +- - kernel/sched/cpupri.c | 4 +- - kernel/sched/deadline.c | 6 ++-- - kernel/sched/fair.c | 32 +++++++++++------------ - kernel/sched/rt.c | 4 +- - kernel/trace/trace_hwlat.c | 2 - - lib/smp_processor_id.c | 2 - - samples/trace_events/trace-events-sample.c | 2 - - 22 files changed, 73 insertions(+), 71 deletions(-) + arch/ia64/kernel/mca.c | 2 - + arch/mips/include/asm/switch_to.h | 4 +- + arch/mips/kernel/mips-mt-fpaff.c | 2 - + arch/mips/kernel/traps.c | 6 ++-- + arch/powerpc/platforms/cell/spufs/sched.c | 2 - + arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c | 2 - + drivers/infiniband/hw/hfi1/affinity.c | 6 ++-- + drivers/infiniband/hw/hfi1/sdma.c | 3 -- + drivers/infiniband/hw/qib/qib_file_ops.c | 7 ++-- + fs/proc/array.c | 4 +- + include/linux/sched.h | 5 ++- + init/init_task.c | 3 +- + kernel/cgroup/cpuset.c | 2 - + kernel/fork.c | 2 + + kernel/sched/core.c | 40 ++++++++++++++-------------- + kernel/sched/cpudeadline.c | 4 +- + kernel/sched/cpupri.c | 4 +- + kernel/sched/deadline.c | 6 ++-- + kernel/sched/fair.c | 32 +++++++++++----------- + kernel/sched/rt.c | 4 +- + kernel/trace/trace_hwlat.c | 2 - + lib/smp_processor_id.c | 2 - + samples/trace_events/trace-events-sample.c | 2 - + 23 files changed, 74 insertions(+), 72 deletions(-) --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -125,7 +126,7 @@ Signed-off-by: Sebastian Andrzej Siewior out_unlock: --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c -@@ -1176,12 +1176,12 @@ static void mt_ase_fp_affinity(void) +@@ -1174,12 +1174,12 @@ static void mt_ase_fp_affinity(void) * restricted the allowed set to exclude any CPUs with FPUs, * we'll skip the procedure. */ @@ -152,9 +153,20 @@ Signed-off-by: Sebastian Andrzej Siewior /* Save the current cpu id for spu interrupt routing. */ ctx->last_ran = raw_smp_processor_id(); +--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c ++++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c +@@ -1435,7 +1435,7 @@ static int pseudo_lock_dev_mmap(struct f + * may be scheduled elsewhere and invalidate entries in the + * pseudo-locked region. 
+ */ +- if (!cpumask_subset(¤t->cpus_allowed, &plr->d->cpu_mask)) { ++ if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { + mutex_unlock(&rdtgroup_mutex); + return -EINVAL; + } --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c -@@ -1019,7 +1019,7 @@ int hfi1_get_proc_affinity(int node) +@@ -1037,7 +1037,7 @@ int hfi1_get_proc_affinity(int node) struct hfi1_affinity_node *entry; cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask; const struct cpumask *node_mask, @@ -163,7 +175,7 @@ Signed-off-by: Sebastian Andrzej Siewior struct hfi1_affinity_node_list *affinity = &node_affinity; struct cpu_mask_set *set = &affinity->proc; -@@ -1027,7 +1027,7 @@ int hfi1_get_proc_affinity(int node) +@@ -1045,7 +1045,7 @@ int hfi1_get_proc_affinity(int node) * check whether process/context affinity has already * been set */ @@ -172,7 +184,7 @@ Signed-off-by: Sebastian Andrzej Siewior hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", current->pid, current->comm, cpumask_pr_args(proc_mask)); -@@ -1038,7 +1038,7 @@ int hfi1_get_proc_affinity(int node) +@@ -1056,7 +1056,7 @@ int hfi1_get_proc_affinity(int node) cpu = cpumask_first(proc_mask); cpumask_set_cpu(cpu, &set->used); goto done; @@ -248,7 +260,7 @@ Signed-off-by: Sebastian Andrzej Siewior #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; -@@ -1397,7 +1398,7 @@ extern struct pid *cad_pid; +@@ -1389,7 +1390,7 @@ extern struct pid *cad_pid; #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ @@ -259,7 +271,7 @@ Signed-off-by: Sebastian Andrzej Siewior #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ --- a/init/init_task.c +++ b/init/init_task.c -@@ -64,7 +64,8 @@ struct task_struct init_task +@@ -71,7 +71,8 @@ struct task_struct init_task .static_prio = MAX_PRIO - 20, .normal_prio = MAX_PRIO - 20, .policy = SCHED_NORMAL, @@ -293,7 +305,7 @@ Signed-off-by: Sebastian Andrzej Siewior * One for us, one for whoever does the "release_task()" (usually --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -900,7 +900,7 @@ static inline bool is_per_cpu_kthread(st +@@ -877,7 +877,7 @@ static inline bool is_per_cpu_kthread(st */ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) { @@ -302,7 +314,7 @@ Signed-off-by: Sebastian Andrzej Siewior return false; if (is_per_cpu_kthread(p)) -@@ -995,7 +995,7 @@ static int migration_cpu_stop(void *data +@@ -972,7 +972,7 @@ static int migration_cpu_stop(void *data local_irq_disable(); /* * We need to explicitly wake pending tasks before running @@ -311,7 +323,7 @@ Signed-off-by: Sebastian Andrzej Siewior * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 
*/ sched_ttwu_pending(); -@@ -1026,7 +1026,7 @@ static int migration_cpu_stop(void *data +@@ -1003,7 +1003,7 @@ static int migration_cpu_stop(void *data */ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) { @@ -320,7 +332,7 @@ Signed-off-by: Sebastian Andrzej Siewior p->nr_cpus_allowed = cpumask_weight(new_mask); } -@@ -1096,7 +1096,7 @@ static int __set_cpus_allowed_ptr(struct +@@ -1073,7 +1073,7 @@ static int __set_cpus_allowed_ptr(struct goto out; } @@ -329,7 +341,7 @@ Signed-off-by: Sebastian Andrzej Siewior goto out; if (!cpumask_intersects(new_mask, cpu_valid_mask)) { -@@ -1258,10 +1258,10 @@ static int migrate_swap_stop(void *data) +@@ -1236,10 +1236,10 @@ static int migrate_swap_stop(void *data) if (task_cpu(arg->src_task) != arg->src_cpu) goto unlock; @@ -342,7 +354,7 @@ Signed-off-by: Sebastian Andrzej Siewior goto unlock; __migrate_swap_task(arg->src_task, arg->dst_cpu); -@@ -1302,10 +1302,10 @@ int migrate_swap(struct task_struct *cur +@@ -1281,10 +1281,10 @@ int migrate_swap(struct task_struct *cur if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) goto out; @@ -355,7 +367,7 @@ Signed-off-by: Sebastian Andrzej Siewior goto out; trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); -@@ -1449,7 +1449,7 @@ void kick_process(struct task_struct *p) +@@ -1429,7 +1429,7 @@ void kick_process(struct task_struct *p) EXPORT_SYMBOL_GPL(kick_process); /* @@ -364,7 +376,7 @@ Signed-off-by: Sebastian Andrzej Siewior * * A few notes on cpu_active vs cpu_online: * -@@ -1489,14 +1489,14 @@ static int select_fallback_rq(int cpu, s +@@ -1469,14 +1469,14 @@ static int select_fallback_rq(int cpu, s for_each_cpu(dest_cpu, nodemask) { if (!cpu_active(dest_cpu)) continue; @@ -381,7 +393,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (!is_cpu_allowed(p, dest_cpu)) continue; -@@ -1540,7 +1540,7 @@ static int select_fallback_rq(int cpu, s +@@ -1520,7 +1520,7 @@ static int select_fallback_rq(int cpu, s } /* @@ -390,7 +402,7 @@ Signed-off-by: Sebastian Andrzej Siewior */ static inline int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) -@@ -1550,11 +1550,11 @@ int select_task_rq(struct task_struct *p +@@ -1530,11 +1530,11 @@ int select_task_rq(struct task_struct *p if (p->nr_cpus_allowed > 1) cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); else @@ -404,7 +416,7 @@ Signed-off-by: Sebastian Andrzej Siewior * CPU. * * Since this is common to all placement strategies, this lives here. -@@ -2426,7 +2426,7 @@ void wake_up_new_task(struct task_struct +@@ -2401,7 +2401,7 @@ void wake_up_new_task(struct task_struct #ifdef CONFIG_SMP /* * Fork balancing, do it here and not earlier because: @@ -413,7 +425,7 @@ Signed-off-by: Sebastian Andrzej Siewior * - any previously selected CPU might disappear through hotplug * * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, -@@ -4297,7 +4297,7 @@ static int __sched_setscheduler(struct t +@@ -4274,7 +4274,7 @@ static int __sched_setscheduler(struct t * the entire root_domain to become SCHED_DEADLINE. We * will also fail if there's no bandwidth available. 
*/ @@ -422,7 +434,7 @@ Signed-off-by: Sebastian Andrzej Siewior rq->rd->dl_bw.bw == 0) { task_rq_unlock(rq, p, &rf); return -EPERM; -@@ -4896,7 +4896,7 @@ long sched_getaffinity(pid_t pid, struct +@@ -4873,7 +4873,7 @@ long sched_getaffinity(pid_t pid, struct goto out_unlock; raw_spin_lock_irqsave(&p->pi_lock, flags); @@ -431,7 +443,7 @@ Signed-off-by: Sebastian Andrzej Siewior raw_spin_unlock_irqrestore(&p->pi_lock, flags); out_unlock: -@@ -5476,7 +5476,7 @@ int task_can_attach(struct task_struct * +@@ -5453,7 +5453,7 @@ int task_can_attach(struct task_struct * * allowed nodes is unnecessary. Thus, cpusets are not * applicable for such threads. This prevents checking for * success of set_cpus_allowed_ptr() on all attached tasks @@ -440,7 +452,7 @@ Signed-off-by: Sebastian Andrzej Siewior */ if (p->flags & PF_NO_SETAFFINITY) { ret = -EINVAL; -@@ -5503,7 +5503,7 @@ int migrate_task_to(struct task_struct * +@@ -5480,7 +5480,7 @@ int migrate_task_to(struct task_struct * if (curr_cpu == target_cpu) return 0; @@ -449,7 +461,7 @@ Signed-off-by: Sebastian Andrzej Siewior return -EINVAL; /* TODO: This is not properly updating schedstats */ -@@ -5641,7 +5641,7 @@ static void migrate_tasks(struct rq *dea +@@ -5618,7 +5618,7 @@ static void migrate_tasks(struct rq *dea put_prev_task(rq, next); /* @@ -495,7 +507,7 @@ Signed-off-by: Sebastian Andrzej Siewior * We have to ensure that we have at least one bit --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c -@@ -538,7 +538,7 @@ static struct rq *dl_task_offline_migrat +@@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migrat * If we cannot preempt any rq, fall back to pick any * online CPU: */ @@ -504,7 +516,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (cpu >= nr_cpu_ids) { /* * Failed to find any suitable CPU. -@@ -1820,7 +1820,7 @@ static void set_curr_task_dl(struct rq * +@@ -1824,7 +1824,7 @@ static void set_curr_task_dl(struct rq * static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && @@ -513,7 +525,7 @@ Signed-off-by: Sebastian Andrzej Siewior return 1; return 0; } -@@ -1970,7 +1970,7 @@ static struct rq *find_lock_later_rq(str +@@ -1974,7 +1974,7 @@ static struct rq *find_lock_later_rq(str /* Retry if something changed. */ if (double_lock_balance(rq, later_rq)) { if (unlikely(task_rq(task) != rq || @@ -524,16 +536,16 @@ Signed-off-by: Sebastian Andrzej Siewior !task_on_rq_queued(task))) { --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -1614,7 +1614,7 @@ static void task_numa_compare(struct tas +@@ -1631,7 +1631,7 @@ static void task_numa_compare(struct tas + * be incurred if the tasks were swapped. 
*/ - if (cur) { - /* Skip this swap candidate if cannot move to the source CPU: */ -- if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) -+ if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) - goto unlock; + /* Skip this swap candidate if cannot move to the source cpu */ +- if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) ++ if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) + goto unlock; - /* -@@ -1724,7 +1724,7 @@ static void task_numa_find_cpu(struct ta + /* +@@ -1728,7 +1728,7 @@ static void task_numa_find_cpu(struct ta for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { /* Skip this CPU if the source task cannot migrate */ @@ -542,7 +554,7 @@ Signed-off-by: Sebastian Andrzej Siewior continue; env->dst_cpu = cpu; -@@ -6016,7 +6016,7 @@ find_idlest_group(struct sched_domain *s +@@ -5711,7 +5711,7 @@ find_idlest_group(struct sched_domain *s /* Skip over this group if it has no CPUs allowed */ if (!cpumask_intersects(sched_group_span(group), @@ -551,7 +563,7 @@ Signed-off-by: Sebastian Andrzej Siewior continue; local_group = cpumask_test_cpu(this_cpu, -@@ -6148,7 +6148,7 @@ find_idlest_group_cpu(struct sched_group +@@ -5843,7 +5843,7 @@ find_idlest_group_cpu(struct sched_group return cpumask_first(sched_group_span(group)); /* Traverse only the allowed CPUs */ @@ -560,7 +572,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (available_idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); -@@ -6188,7 +6188,7 @@ static inline int find_idlest_cpu(struct +@@ -5883,7 +5883,7 @@ static inline int find_idlest_cpu(struct { int new_cpu = cpu; @@ -569,7 +581,7 @@ Signed-off-by: Sebastian Andrzej Siewior return prev_cpu; /* -@@ -6304,7 +6304,7 @@ static int select_idle_core(struct task_ +@@ -5999,7 +5999,7 @@ static int select_idle_core(struct task_ if (!test_idle_cores(target, false)) return -1; @@ -578,7 +590,7 @@ Signed-off-by: Sebastian Andrzej Siewior for_each_cpu_wrap(core, cpus, target) { bool idle = true; -@@ -6338,7 +6338,7 @@ static int select_idle_smt(struct task_s +@@ -6033,7 +6033,7 @@ static int select_idle_smt(struct task_s return -1; for_each_cpu(cpu, cpu_smt_mask(target)) { @@ -587,7 +599,7 @@ Signed-off-by: Sebastian Andrzej Siewior continue; if (available_idle_cpu(cpu)) return cpu; -@@ -6401,7 +6401,7 @@ static int select_idle_cpu(struct task_s +@@ -6096,7 +6096,7 @@ static int select_idle_cpu(struct task_s for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { if (!--nr) return -1; @@ -596,7 +608,7 @@ Signed-off-by: Sebastian Andrzej Siewior continue; if (available_idle_cpu(cpu)) break; -@@ -6438,7 +6438,7 @@ static int select_idle_sibling(struct ta +@@ -6133,7 +6133,7 @@ static int select_idle_sibling(struct ta recent_used_cpu != target && cpus_share_cache(recent_used_cpu, target) && available_idle_cpu(recent_used_cpu) && @@ -605,7 +617,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Replace recent_used_cpu with prev as it is a potential * candidate for the next wake: -@@ -6622,7 +6622,7 @@ select_task_rq_fair(struct task_struct * +@@ -6317,7 +6317,7 @@ select_task_rq_fair(struct task_struct * if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) @@ -614,7 +626,7 @@ Signed-off-by: Sebastian Andrzej Siewior } rcu_read_lock(); -@@ -7358,14 +7358,14 @@ int can_migrate_task(struct task_struct +@@ -7056,14 +7056,14 @@ int can_migrate_task(struct task_struct /* * We do not migrate tasks that are: * 1) throttled_lb_pair, or @@ -631,7 +643,7 @@ Signed-off-by: Sebastian Andrzej 
Siewior int cpu; schedstat_inc(p->se.statistics.nr_failed_migrations_affine); -@@ -7385,7 +7385,7 @@ int can_migrate_task(struct task_struct +@@ -7083,7 +7083,7 @@ int can_migrate_task(struct task_struct /* Prevent to re-select dst_cpu via env's CPUs: */ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { @@ -640,7 +652,7 @@ Signed-off-by: Sebastian Andrzej Siewior env->flags |= LBF_DST_PINNED; env->new_dst_cpu = cpu; break; -@@ -7982,7 +7982,7 @@ check_cpu_capacity(struct rq *rq, struct +@@ -7704,7 +7704,7 @@ check_cpu_capacity(struct rq *rq, struct /* * Group imbalance indicates (and tries to solve) the problem where balancing @@ -649,7 +661,7 @@ Signed-off-by: Sebastian Andrzej Siewior * * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. -@@ -8597,7 +8597,7 @@ static struct sched_group *find_busiest_ +@@ -8319,7 +8319,7 @@ static struct sched_group *find_busiest_ /* * If the busiest group is imbalanced the below checks don't * work because they assume all things are equal, which typically @@ -658,7 +670,7 @@ Signed-off-by: Sebastian Andrzej Siewior */ if (busiest->group_type == group_imbalanced) goto force_balance; -@@ -8993,7 +8993,7 @@ static int load_balance(int this_cpu, st +@@ -8715,7 +8715,7 @@ static int load_balance(int this_cpu, st * if the curr task on busiest CPU can't be * moved to this_cpu: */ @@ -669,7 +681,7 @@ Signed-off-by: Sebastian Andrzej Siewior env.flags |= LBF_ALL_PINNED; --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c -@@ -1601,7 +1601,7 @@ static void put_prev_task_rt(struct rq * +@@ -1611,7 +1611,7 @@ static void put_prev_task_rt(struct rq * static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && @@ -678,7 +690,7 @@ Signed-off-by: Sebastian Andrzej Siewior return 1; return 0; -@@ -1738,7 +1738,7 @@ static struct rq *find_lock_lowest_rq(st +@@ -1748,7 +1748,7 @@ static struct rq *find_lock_lowest_rq(st * Also make sure that it wasn't scheduled on its rq. */ if (unlikely(task_rq(task) != rq || @@ -689,7 +701,7 @@ Signed-off-by: Sebastian Andrzej Siewior !task_on_rq_queued(task))) { --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c -@@ -279,7 +279,7 @@ static void move_to_next_cpu(void) +@@ -277,7 +277,7 @@ static void move_to_next_cpu(void) * of this thread, than stop migrating for the duration * of the current test. 
*/ diff --git a/debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch index b09b5c872..aa6d59398 100644 --- a/debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch +++ b/debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch @@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior Date: Mon, 21 Nov 2016 19:31:08 +0100 Subject: [PATCH] kernel/sched: move stack + kprobe clean up to __put_task_struct() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz There is no need to free the stack before the task struct (except for reasons mentioned in commit 68f24b08ee89 ("sched/core: Free the stack early if @@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior security_task_free(tsk); --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2764,15 +2764,6 @@ static struct rq *finish_task_switch(str +@@ -2740,15 +2740,6 @@ static struct rq *finish_task_switch(str if (prev->sched_class->task_dead) prev->sched_class->task_dead(prev); diff --git a/debian/patches-rt/kgb-serial-hackaround.patch b/debian/patches-rt/kgb-serial-hackaround.patch index cef946181..9cbd75b73 100644 --- a/debian/patches-rt/kgb-serial-hackaround.patch +++ b/debian/patches-rt/kgb-serial-hackaround.patch @@ -1,7 +1,7 @@ From: Jason Wessel Date: Thu, 28 Jul 2011 12:42:23 -0500 Subject: kgdb/serial: Short term workaround -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz On 07/27/2011 04:37 PM, Thomas Gleixner wrote: > - KGDB (not yet disabled) is reportedly unusable on -rt right now due @@ -34,7 +34,7 @@ Jason. #include #include #include -@@ -3223,6 +3224,8 @@ void serial8250_console_write(struct uar +@@ -3241,6 +3242,8 @@ void serial8250_console_write(struct uar if (port->sysrq || oops_in_progress) locked = 0; diff --git a/debian/patches-rt/kthread-convert-worker-lock-to-raw-spinlock.patch b/debian/patches-rt/kthread-convert-worker-lock-to-raw-spinlock.patch new file mode 100644 index 000000000..b0410ed97 --- /dev/null +++ b/debian/patches-rt/kthread-convert-worker-lock-to-raw-spinlock.patch @@ -0,0 +1,195 @@ +From: Julia Cartwright +Date: Fri, 28 Sep 2018 21:03:51 +0000 +Subject: [PATCH] kthread: convert worker lock to raw spinlock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz + +In order to enable the queuing of kthread work items from hardirq +context even when PREEMPT_RT_FULL is enabled, convert the worker +spin_lock to a raw_spin_lock. + +This is only acceptable to do because the work performed under the lock +is well-bounded and minimal. 
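The payoff is exactly the hardirq-context queueing described above; a minimal
sketch of a caller that becomes legal on PREEMPT_RT_FULL, with illustrative
names:

#include <linux/kthread.h>
#include <linux/interrupt.h>

static struct kthread_worker *demo_worker;  /* created at probe time */
static struct kthread_work demo_work;       /* kthread_init_work()ed at probe */

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	/*
	 * worker->lock is now a raw_spinlock_t, so queueing from hard
	 * irq context no longer takes a sleeping lock on -RT.
	 */
	kthread_queue_work(demo_worker, &demo_work);
	return IRQ_HANDLED;
}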
+ +Cc: Sebastian Andrzej Siewior +Cc: Guenter Roeck +Reported-and-tested-by: Steffen Trumtrar +Reported-by: Tim Sander +Signed-off-by: Julia Cartwright +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/kthread.h | 2 +- + kernel/kthread.c | 42 +++++++++++++++++++++--------------------- + 2 files changed, 22 insertions(+), 22 deletions(-) + +--- a/include/linux/kthread.h ++++ b/include/linux/kthread.h +@@ -85,7 +85,7 @@ enum { + + struct kthread_worker { + unsigned int flags; +- spinlock_t lock; ++ raw_spinlock_t lock; + struct list_head work_list; + struct list_head delayed_work_list; + struct task_struct *task; +--- a/kernel/kthread.c ++++ b/kernel/kthread.c +@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthrea + struct lock_class_key *key) + { + memset(worker, 0, sizeof(struct kthread_worker)); +- spin_lock_init(&worker->lock); ++ raw_spin_lock_init(&worker->lock); + lockdep_set_class_and_name(&worker->lock, key, name); + INIT_LIST_HEAD(&worker->work_list); + INIT_LIST_HEAD(&worker->delayed_work_list); +@@ -641,21 +641,21 @@ int kthread_worker_fn(void *worker_ptr) + + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); +- spin_lock_irq(&worker->lock); ++ raw_spin_lock_irq(&worker->lock); + worker->task = NULL; +- spin_unlock_irq(&worker->lock); ++ raw_spin_unlock_irq(&worker->lock); + return 0; + } + + work = NULL; +- spin_lock_irq(&worker->lock); ++ raw_spin_lock_irq(&worker->lock); + if (!list_empty(&worker->work_list)) { + work = list_first_entry(&worker->work_list, + struct kthread_work, node); + list_del_init(&work->node); + } + worker->current_work = work; +- spin_unlock_irq(&worker->lock); ++ raw_spin_unlock_irq(&worker->lock); + + if (work) { + __set_current_state(TASK_RUNNING); +@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_w + bool ret = false; + unsigned long flags; + +- spin_lock_irqsave(&worker->lock, flags); ++ raw_spin_lock_irqsave(&worker->lock, flags); + if (!queuing_blocked(worker, work)) { + kthread_insert_work(worker, work, &worker->work_list); + ret = true; + } +- spin_unlock_irqrestore(&worker->lock, flags); ++ raw_spin_unlock_irqrestore(&worker->lock, flags); + return ret; + } + EXPORT_SYMBOL_GPL(kthread_queue_work); +@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struc + if (WARN_ON_ONCE(!worker)) + return; + +- spin_lock(&worker->lock); ++ raw_spin_lock(&worker->lock); + /* Work must not be used with >1 worker, see kthread_queue_work(). */ + WARN_ON_ONCE(work->worker != worker); + +@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struc + list_del_init(&work->node); + kthread_insert_work(worker, work, &worker->work_list); + +- spin_unlock(&worker->lock); ++ raw_spin_unlock(&worker->lock); + } + EXPORT_SYMBOL(kthread_delayed_work_timer_fn); + +@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct k + unsigned long flags; + bool ret = false; + +- spin_lock_irqsave(&worker->lock, flags); ++ raw_spin_lock_irqsave(&worker->lock, flags); + + if (!queuing_blocked(worker, work)) { + __kthread_queue_delayed_work(worker, dwork, delay); + ret = true; + } + +- spin_unlock_irqrestore(&worker->lock, flags); ++ raw_spin_unlock_irqrestore(&worker->lock, flags); + return ret; + } + EXPORT_SYMBOL_GPL(kthread_queue_delayed_work); +@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_w + if (!worker) + return; + +- spin_lock_irq(&worker->lock); ++ raw_spin_lock_irq(&worker->lock); + /* Work must not be used with >1 worker, see kthread_queue_work(). 
*/ + WARN_ON_ONCE(work->worker != worker); + +@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_w + else + noop = true; + +- spin_unlock_irq(&worker->lock); ++ raw_spin_unlock_irq(&worker->lock); + + if (!noop) + wait_for_completion(&fwork.done); +@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct + * any queuing is blocked by setting the canceling counter. + */ + work->canceling++; +- spin_unlock_irqrestore(&worker->lock, *flags); ++ raw_spin_unlock_irqrestore(&worker->lock, *flags); + del_timer_sync(&dwork->timer); +- spin_lock_irqsave(&worker->lock, *flags); ++ raw_spin_lock_irqsave(&worker->lock, *flags); + work->canceling--; + } + +@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kth + unsigned long flags; + int ret = false; + +- spin_lock_irqsave(&worker->lock, flags); ++ raw_spin_lock_irqsave(&worker->lock, flags); + + /* Do not bother with canceling when never queued. */ + if (!work->worker) +@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kth + fast_queue: + __kthread_queue_delayed_work(worker, dwork, delay); + out: +- spin_unlock_irqrestore(&worker->lock, flags); ++ raw_spin_unlock_irqrestore(&worker->lock, flags); + return ret; + } + EXPORT_SYMBOL_GPL(kthread_mod_delayed_work); +@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(s + if (!worker) + goto out; + +- spin_lock_irqsave(&worker->lock, flags); ++ raw_spin_lock_irqsave(&worker->lock, flags); + /* Work must not be used with >1 worker, see kthread_queue_work(). */ + WARN_ON_ONCE(work->worker != worker); + +@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(s + * In the meantime, block any queuing by setting the canceling counter. + */ + work->canceling++; +- spin_unlock_irqrestore(&worker->lock, flags); ++ raw_spin_unlock_irqrestore(&worker->lock, flags); + kthread_flush_work(work); +- spin_lock_irqsave(&worker->lock, flags); ++ raw_spin_lock_irqsave(&worker->lock, flags); + work->canceling--; + + out_fast: +- spin_unlock_irqrestore(&worker->lock, flags); ++ raw_spin_unlock_irqrestore(&worker->lock, flags); + out: + return ret; + } diff --git a/debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch b/debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch index 422e8951f..d0efcf898 100644 --- a/debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch +++ b/debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 23 Jan 2014 14:45:59 +0100 Subject: leds: trigger: disable CPU trigger on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz as it triggers: |CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141 @@ -20,17 +20,16 @@ as it triggers: Signed-off-by: Sebastian Andrzej Siewior --- - drivers/leds/trigger/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) + drivers/leds/trigger/Kconfig | 1 + + 1 file changed, 1 insertion(+) --- a/drivers/leds/trigger/Kconfig +++ b/drivers/leds/trigger/Kconfig -@@ -69,7 +69,7 @@ config LEDS_TRIGGER_BACKLIGHT +@@ -63,6 +63,7 @@ config LEDS_TRIGGER_BACKLIGHT config LEDS_TRIGGER_CPU bool "LED CPU Trigger" -- depends on LEDS_TRIGGERS -+ depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE ++ depends on !PREEMPT_RT_BASE help This allows LEDs to be controlled by active CPUs. 
This shows the active CPUs across an array of LEDs so you can see which diff --git a/debian/patches-rt/libata-remove-ata_sff_data_xfer_noirq.patch b/debian/patches-rt/libata-remove-ata_sff_data_xfer_noirq.patch deleted file mode 100644 index 045d9bc85..000000000 --- a/debian/patches-rt/libata-remove-ata_sff_data_xfer_noirq.patch +++ /dev/null @@ -1,197 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 19 Apr 2018 12:55:14 +0200 -Subject: [PATCH] libata: remove ata_sff_data_xfer_noirq() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -ata_sff_data_xfer_noirq() is invoked via the ->sff_data_xfer hook. The -latter is invoked by ata_pio_sector(), atapi_send_cdb() and -__atapi_pio_bytes() which in turn is invoked by ata_sff_hsm_move(). -The latter function requires that the "ap->lock" lock is held which -needs to be taken with disabled interrupts. - -There is no need have to have ata_sff_data_xfer_noirq() which invokes -ata_sff_data_xfer32() with disabled interrupts because at this point the -interrupts are already disabled. -Remove the function and its references to it and replace all callers -with ata_sff_data_xfer32(). - -Signed-off-by: Sebastian Andrzej Siewior ---- - Documentation/driver-api/libata.rst | 3 +-- - drivers/ata/libata-sff.c | 30 ------------------------------ - drivers/ata/pata_cmd640.c | 2 +- - drivers/ata/pata_icside.c | 2 +- - drivers/ata/pata_imx.c | 2 +- - drivers/ata/pata_legacy.c | 6 +++--- - drivers/ata/pata_palmld.c | 2 +- - drivers/ata/pata_pcmcia.c | 2 +- - drivers/ata/pata_platform.c | 2 +- - drivers/ata/pata_via.c | 2 +- - include/linux/libata.h | 2 -- - 11 files changed, 11 insertions(+), 44 deletions(-) - ---- a/Documentation/driver-api/libata.rst -+++ b/Documentation/driver-api/libata.rst -@@ -118,8 +118,7 @@ PIO data read/write - All bmdma-style drivers must implement this hook. This is the low-level - operation that actually copies the data bytes during a PIO data - transfer. Typically the driver will choose one of --:c:func:`ata_sff_data_xfer_noirq`, :c:func:`ata_sff_data_xfer`, or --:c:func:`ata_sff_data_xfer32`. -+:c:func:`ata_sff_data_xfer`, or :c:func:`ata_sff_data_xfer32`. - - ATA command execute - ~~~~~~~~~~~~~~~~~~~ ---- a/drivers/ata/libata-sff.c -+++ b/drivers/ata/libata-sff.c -@@ -658,36 +658,6 @@ unsigned int ata_sff_data_xfer32(struct - EXPORT_SYMBOL_GPL(ata_sff_data_xfer32); - - /** -- * ata_sff_data_xfer_noirq - Transfer data by PIO -- * @qc: queued command -- * @buf: data buffer -- * @buflen: buffer length -- * @rw: read/write -- * -- * Transfer data from/to the device data register by PIO. Do the -- * transfer with interrupts disabled. -- * -- * LOCKING: -- * Inherited from caller. -- * -- * RETURNS: -- * Bytes consumed. -- */ --unsigned int ata_sff_data_xfer_noirq(struct ata_queued_cmd *qc, unsigned char *buf, -- unsigned int buflen, int rw) --{ -- unsigned long flags; -- unsigned int consumed; -- -- local_irq_save(flags); -- consumed = ata_sff_data_xfer32(qc, buf, buflen, rw); -- local_irq_restore(flags); -- -- return consumed; --} --EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq); -- --/** - * ata_pio_sector - Transfer a sector of data. 
- * @qc: Command on going - * ---- a/drivers/ata/pata_cmd640.c -+++ b/drivers/ata/pata_cmd640.c -@@ -178,7 +178,7 @@ static struct scsi_host_template cmd640_ - static struct ata_port_operations cmd640_port_ops = { - .inherits = &ata_sff_port_ops, - /* In theory xfer_noirq is not needed once we kill the prefetcher */ -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .sff_irq_check = cmd640_sff_irq_check, - .qc_issue = cmd640_qc_issue, - .cable_detect = ata_cable_40wire, ---- a/drivers/ata/pata_icside.c -+++ b/drivers/ata/pata_icside.c -@@ -324,7 +324,7 @@ static struct ata_port_operations pata_i - .inherits = &ata_bmdma_port_ops, - /* no need to build any PRD tables for DMA */ - .qc_prep = ata_noop_qc_prep, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .bmdma_setup = pata_icside_bmdma_setup, - .bmdma_start = pata_icside_bmdma_start, - .bmdma_stop = pata_icside_bmdma_stop, ---- a/drivers/ata/pata_imx.c -+++ b/drivers/ata/pata_imx.c -@@ -102,7 +102,7 @@ static struct scsi_host_template pata_im - - static struct ata_port_operations pata_imx_port_ops = { - .inherits = &ata_sff_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .cable_detect = ata_cable_unknown, - .set_piomode = pata_imx_set_piomode, - }; ---- a/drivers/ata/pata_legacy.c -+++ b/drivers/ata/pata_legacy.c -@@ -246,12 +246,12 @@ static const struct ata_port_operations - - static struct ata_port_operations simple_port_ops = { - .inherits = &legacy_base_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - }; - - static struct ata_port_operations legacy_port_ops = { - .inherits = &legacy_base_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .set_mode = legacy_set_mode, - }; - -@@ -341,7 +341,7 @@ static unsigned int pdc_data_xfer_vlb(st - } - local_irq_restore(flags); - } else -- buflen = ata_sff_data_xfer_noirq(qc, buf, buflen, rw); -+ buflen = ata_sff_data_xfer32(qc, buf, buflen, rw); - - return buflen; - } ---- a/drivers/ata/pata_palmld.c -+++ b/drivers/ata/pata_palmld.c -@@ -44,7 +44,7 @@ static struct scsi_host_template palmld_ - - static struct ata_port_operations palmld_port_ops = { - .inherits = &ata_sff_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .cable_detect = ata_cable_40wire, - }; - ---- a/drivers/ata/pata_pcmcia.c -+++ b/drivers/ata/pata_pcmcia.c -@@ -151,7 +151,7 @@ static struct scsi_host_template pcmcia_ - - static struct ata_port_operations pcmcia_port_ops = { - .inherits = &ata_sff_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .cable_detect = ata_cable_40wire, - .set_mode = pcmcia_set_mode, - }; ---- a/drivers/ata/pata_platform.c -+++ b/drivers/ata/pata_platform.c -@@ -49,7 +49,7 @@ static struct scsi_host_template pata_pl - - static struct ata_port_operations pata_platform_port_ops = { - .inherits = &ata_sff_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .cable_detect = ata_cable_unknown, - .set_mode = pata_platform_set_mode, - }; ---- a/drivers/ata/pata_via.c -+++ b/drivers/ata/pata_via.c -@@ -471,7 +471,7 @@ static struct ata_port_operations via_po - - static struct ata_port_operations via_port_ops_noirq = { - .inherits = &via_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - }; - - /** ---- 
a/include/linux/libata.h -+++ b/include/linux/libata.h -@@ -1858,8 +1858,6 @@ extern unsigned int ata_sff_data_xfer(st - unsigned char *buf, unsigned int buflen, int rw); - extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, - unsigned char *buf, unsigned int buflen, int rw); --extern unsigned int ata_sff_data_xfer_noirq(struct ata_queued_cmd *qc, -- unsigned char *buf, unsigned int buflen, int rw); - extern void ata_sff_irq_on(struct ata_port *ap); - extern void ata_sff_irq_clear(struct ata_port *ap); - extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, diff --git a/debian/patches-rt/list_bl-fixup-bogus-lockdep-warning.patch b/debian/patches-rt/list_bl-fixup-bogus-lockdep-warning.patch index 658afe790..0a3af9b4f 100644 --- a/debian/patches-rt/list_bl-fixup-bogus-lockdep-warning.patch +++ b/debian/patches-rt/list_bl-fixup-bogus-lockdep-warning.patch @@ -1,7 +1,7 @@ From: Josh Cartwright Date: Thu, 31 Mar 2016 00:04:25 -0500 Subject: [PATCH] list_bl: fixup bogus lockdep warning -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz At first glance, the use of 'static inline' seems appropriate for INIT_HLIST_BL_HEAD(). diff --git a/debian/patches-rt/list_bl.h-make-list-head-locking-RT-safe.patch b/debian/patches-rt/list_bl.h-make-list-head-locking-RT-safe.patch index 53f2853e8..42edcbdcd 100644 --- a/debian/patches-rt/list_bl.h-make-list-head-locking-RT-safe.patch +++ b/debian/patches-rt/list_bl.h-make-list-head-locking-RT-safe.patch @@ -1,7 +1,7 @@ From: Paul Gortmaker Date: Fri, 21 Jun 2013 15:07:25 -0400 Subject: list_bl: Make list head locking RT safe -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz As per changes in include/linux/jbd_common.h for avoiding the bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal diff --git a/debian/patches-rt/locallock-provide-get-put-_locked_ptr-variants.patch b/debian/patches-rt/locallock-provide-get-put-_locked_ptr-variants.patch index be67e78d3..7176b6d12 100644 --- a/debian/patches-rt/locallock-provide-get-put-_locked_ptr-variants.patch +++ b/debian/patches-rt/locallock-provide-get-put-_locked_ptr-variants.patch @@ -1,7 +1,7 @@ From: Julia Cartwright Date: Mon, 7 May 2018 08:58:56 -0500 Subject: [PATCH] locallock: provide {get,put}_locked_ptr() variants -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Provide a set of locallocked accessors for pointers to per-CPU data; this is useful for dynamically-allocated per-CPU regions, for example. 
diff --git a/debian/patches-rt/localversion.patch b/debian/patches-rt/localversion.patch index cf758ace9..0f75e79b7 100644 --- a/debian/patches-rt/localversion.patch +++ b/debian/patches-rt/localversion.patch @@ -1,7 +1,7 @@ Subject: Add localversion for -RT release From: Thomas Gleixner Date: Fri, 08 Jul 2011 20:25:16 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Signed-off-by: Thomas Gleixner --- @@ -11,4 +11,4 @@ Signed-off-by: Thomas Gleixner --- /dev/null +++ b/localversion-rt @@ -0,0 +1 @@ -+-rt5 ++-rt1 diff --git a/debian/patches-rt/lockdep-disable-self-test.patch b/debian/patches-rt/lockdep-disable-self-test.patch index e0578ff09..4ee90cb92 100644 --- a/debian/patches-rt/lockdep-disable-self-test.patch +++ b/debian/patches-rt/lockdep-disable-self-test.patch @@ -4,7 +4,7 @@ Subject: [PATCH] lockdep: disable self-test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The self-test wasn't always 100% accurate for RT. We disabled a few tests which failed because they had a different semantic for RT. Some @@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug -@@ -1201,7 +1201,7 @@ config DEBUG_ATOMIC_SLEEP +@@ -1207,7 +1207,7 @@ config DEBUG_ATOMIC_SLEEP config DEBUG_LOCKING_API_SELFTESTS bool "Locking API boot-time self-tests" diff --git a/debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch b/debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch index fb87b82b3..5fbb177e7 100644 --- a/debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch +++ b/debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch @@ -1,7 +1,7 @@ Subject: lockdep: Make it RT aware From: Thomas Gleixner Date: Sun, 17 Jul 2011 18:51:23 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz teach lockdep that we don't really do softirqs on -RT. @@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h -@@ -32,14 +32,6 @@ do { \ +@@ -43,14 +43,6 @@ do { \ do { \ current->hardirq_context--; \ } while (0) @@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner #else # define trace_hardirqs_on() do { } while (0) # define trace_hardirqs_off() do { } while (0) -@@ -54,6 +46,21 @@ do { \ +@@ -63,6 +55,21 @@ do { \ # define lockdep_softirq_enter() do { } while (0) # define lockdep_softirq_exit() do { } while (0) #endif @@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner defined(CONFIG_PREEMPT_TRACER) --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c -@@ -3847,6 +3847,7 @@ static void check_flags(unsigned long fl +@@ -3823,6 +3823,7 @@ static void check_flags(unsigned long fl } } @@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner /* * We dont accurately track softirq state in e.g. 
* hardirq contexts (such as on 4KSTACKS), so only -@@ -3861,6 +3862,7 @@ static void check_flags(unsigned long fl +@@ -3837,6 +3838,7 @@ static void check_flags(unsigned long fl DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); } } diff --git a/debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch index 79cd40e47..906ba991e 100644 --- a/debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch +++ b/debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch @@ -1,7 +1,7 @@ From: Josh Cartwright Date: Wed, 28 Jan 2015 13:08:45 -0600 Subject: lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz "lockdep: Selftest: Only do hardirq context test for raw spinlock" disabled the execution of certain tests with PREEMPT_RT_FULL, but did diff --git a/debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch index e4bdfe7d8..9e6a75328 100644 --- a/debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch +++ b/debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch @@ -1,7 +1,7 @@ Subject: lockdep: selftest: Only do hardirq context test for raw spinlock From: Yong Zhang Date: Mon, 16 Apr 2012 15:01:56 +0800 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz From: Yong Zhang diff --git a/debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch b/debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch index 534ccfb25..61531fc26 100644 --- a/debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch +++ b/debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch @@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 4 Aug 2017 17:40:42 +0200 Subject: [PATCH 1/2] locking: don't check for __LINUX_SPINLOCK_TYPES_H on -RT archs -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Upstream uses arch_spinlock_t within spinlock_t and requests that spinlock_types.h header file is included first. @@ -62,9 +62,9 @@ Signed-off-by: Sebastian Andrzej Siewior -# error "please don't include this file directly" -#endif - - #include + #include + #include - #define TICKET_SHIFT 16 --- a/arch/hexagon/include/asm/spinlock_types.h +++ b/arch/hexagon/include/asm/spinlock_types.h @@ -21,10 +21,6 @@ diff --git a/debian/patches-rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/debian/patches-rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch index 8a9c4b9fd..201693ef6 100644 --- a/debian/patches-rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch +++ b/debian/patches-rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch @@ -1,7 +1,7 @@ From: "Wolfgang M. 
Reimer" Date: Tue, 21 Jul 2015 16:20:07 +0200 Subject: locking: locktorture: Do NOT include rwlock.h directly -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Including rwlock.h directly will cause kernel builds to fail if CONFIG_PREEMPT_RT_FULL is defined. The correct header file @@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c -@@ -26,7 +26,6 @@ +@@ -29,7 +29,6 @@ #include #include #include diff --git a/debian/patches-rt/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch b/debian/patches-rt/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch index d82a845a4..7cb89f52c 100644 --- a/debian/patches-rt/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch +++ b/debian/patches-rt/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch @@ -1,7 +1,7 @@ From: Mikulas Patocka Date: Mon, 13 Nov 2017 12:56:53 -0500 Subject: [PATCH] locking/rt-mutex: fix deadlock in device mapper / block-IO -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz When some block device driver creates a bio and submits it to another block device driver, the bio is added to current->bio_list (in order to diff --git a/debian/patches-rt/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch b/debian/patches-rt/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch index b7213f293..0c19c97cf 100644 --- a/debian/patches-rt/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch +++ b/debian/patches-rt/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch @@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 16 Nov 2017 16:48:48 +0100 Subject: [PATCH] locking/rtmutex: re-init the wait_lock in rt_mutex_init_proxy_locked() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz We could provide a key-class for the lockdep (and fixup all callers) or move the init to all callers (like it was) in order to avoid lockdep diff --git a/debian/patches-rt/md-disable-bcache.patch b/debian/patches-rt/md-disable-bcache.patch index 0f20c4217..ac87cfaa0 100644 --- a/debian/patches-rt/md-disable-bcache.patch +++ b/debian/patches-rt/md-disable-bcache.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 29 Aug 2013 11:48:57 +0200 Subject: md: disable bcache -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz It uses anon semaphores |drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’: @@ -27,6 +27,6 @@ Signed-off-by: Sebastian Andrzej Siewior config BCACHE tristate "Block device as cache" + depends on !PREEMPT_RT_FULL - ---help--- + select CRC64 + help Allows a block device to be used as cache for other devices; uses - a btree for indexing and the layout is optimized for SSDs. 
diff --git a/debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch b/debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch index 532a13acd..ff7a1f5b1 100644 --- a/debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch +++ b/debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Tue, 6 Apr 2010 16:51:31 +0200 Subject: md: raid5: Make raid5_percpu handling RT aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz __raid_run_ops() disables preemption with get_cpu() around the access to the raid5_percpu variables. That causes scheduling while atomic @@ -42,7 +42,7 @@ Tested-by: Udo van den Heuvel } static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) -@@ -6793,6 +6795,7 @@ static int raid456_cpu_up_prepare(unsign +@@ -6799,6 +6801,7 @@ static int raid456_cpu_up_prepare(unsign __func__, cpu); return -ENOMEM; } @@ -50,7 +50,7 @@ Tested-by: Udo van den Heuvel return 0; } -@@ -6803,7 +6806,6 @@ static int raid5_alloc_percpu(struct r5c +@@ -6809,7 +6812,6 @@ static int raid5_alloc_percpu(struct r5c conf->percpu = alloc_percpu(struct raid5_percpu); if (!conf->percpu) return -ENOMEM; diff --git a/debian/patches-rt/mips-disable-highmem-on-rt.patch b/debian/patches-rt/mips-disable-highmem-on-rt.patch index 740f90fd9..829d0e283 100644 --- a/debian/patches-rt/mips-disable-highmem-on-rt.patch +++ b/debian/patches-rt/mips-disable-highmem-on-rt.patch @@ -1,7 +1,7 @@ Subject: mips: Disable highmem on RT From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:10:12 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The current highmem handling on -RT is not compatible and needs fixups. @@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig -@@ -2519,7 +2519,7 @@ config MIPS_CRC_SUPPORT +@@ -2514,7 +2514,7 @@ config MIPS_CRC_SUPPORT # config HIGHMEM bool "High Memory Support" diff --git a/debian/patches-rt/mm-convert-swap-to-percpu-locked.patch b/debian/patches-rt/mm-convert-swap-to-percpu-locked.patch index 67b96c248..852e40a88 100644 --- a/debian/patches-rt/mm-convert-swap-to-percpu-locked.patch +++ b/debian/patches-rt/mm-convert-swap-to-percpu-locked.patch @@ -1,7 +1,7 @@ From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:51 -0500 Subject: mm/swap: Convert to percpu locked -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Replace global locks (get_cpu + local_irq_save) with "local_locks()". Currently there is one of for "rotate" and one for "swap". 
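The conversion itself is mechanical; a minimal sketch of the "rotate" side (the lock name follows the patch, the per-CPU variable is illustrative):

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
static DEFINE_PER_CPU(unsigned long, rotated);	/* illustrative counter */

static void note_rotation(void)
{
	unsigned long flags;

	/* was: local_irq_save(flags); */
	local_lock_irqsave(rotate_lock, flags);
	__this_cpu_inc(rotated);
	/* was: local_irq_restore(flags); */
	local_unlock_irqrestore(rotate_lock, flags);
}

On mainline, local_lock_irqsave() compiles down to the old local_irq_save(); on -RT it takes a per-CPU rtmutex, keeping the section preemptible.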
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner } --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -7137,8 +7137,9 @@ void __init free_area_init(unsigned long +@@ -7184,8 +7184,9 @@ void __init free_area_init(unsigned long static int page_alloc_cpu_dead(unsigned int cpu) { diff --git a/debian/patches-rt/mm-disable-sloub-rt.patch b/debian/patches-rt/mm-disable-sloub-rt.patch index 809ea7ba4..2eb49ffaa 100644 --- a/debian/patches-rt/mm-disable-sloub-rt.patch +++ b/debian/patches-rt/mm-disable-sloub-rt.patch @@ -1,7 +1,7 @@ From: Ingo Molnar Date: Fri, 3 Jul 2009 08:44:03 -0500 Subject: mm: Allow only slub on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Disable SLAB and SLOB on -RT. Only SLUB is adopted to -RT needs. @@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner --- a/init/Kconfig +++ b/init/Kconfig -@@ -1591,6 +1591,7 @@ choice +@@ -1628,6 +1628,7 @@ choice config SLAB bool "SLAB" @@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner select HAVE_HARDENED_USERCOPY_ALLOCATOR help The regular slab allocator that is established and known to work -@@ -1611,6 +1612,7 @@ config SLUB +@@ -1648,6 +1649,7 @@ config SLUB config SLOB depends on EXPERT bool "SLOB (Simple Allocator)" diff --git a/debian/patches-rt/mm-enable-slub.patch b/debian/patches-rt/mm-enable-slub.patch index 908ad73f5..f68ccf281 100644 --- a/debian/patches-rt/mm-enable-slub.patch +++ b/debian/patches-rt/mm-enable-slub.patch @@ -1,7 +1,7 @@ Subject: mm: Enable SLUB for RT From: Thomas Gleixner Date: Thu, 25 Oct 2012 10:32:35 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Avoid the memory allocation in IRQ section @@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/mm/slub.c +++ b/mm/slub.c -@@ -3679,6 +3679,11 @@ static void list_slab_objects(struct kme +@@ -3677,6 +3677,11 @@ static void list_slab_objects(struct kme const char *text) { #ifdef CONFIG_SLUB_DEBUG @@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior void *addr = page_address(page); void *p; unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects), -@@ -3700,6 +3705,7 @@ static void list_slab_objects(struct kme +@@ -3698,6 +3703,7 @@ static void list_slab_objects(struct kme slab_unlock(page); kfree(map); #endif diff --git a/debian/patches-rt/mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch b/debian/patches-rt/mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch new file mode 100644 index 000000000..57d470860 --- /dev/null +++ b/debian/patches-rt/mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch @@ -0,0 +1,92 @@ +From: Clark Williams +Date: Tue, 18 Sep 2018 10:29:31 -0500 +Subject: [PATCH] mm/kasan: make quarantine_lock a raw_spinlock_t +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz + +The static lock quarantine_lock is used in quarantine.c to protect the +quarantine queue datastructures. It is taken inside quarantine queue +manipulation routines (quarantine_put(), quarantine_reduce() and +quarantine_remove_cache()), with IRQs disabled. +This is not a problem on a stock kernel but is problematic on an RT +kernel where spin locks are sleeping spinlocks, which can sleep and can +not be acquired with disabled interrupts. + +Convert the quarantine_lock to a raw spinlock_t. 
The usage of +quarantine_lock is confined to quarantine.c and the work performed while +the lock is held is limited. + +Signed-off-by: Clark Williams +Signed-off-by: Sebastian Andrzej Siewior +--- + mm/kasan/quarantine.c | 18 +++++++++--------- + 1 file changed, 9 insertions(+), 9 deletions(-) + +--- a/mm/kasan/quarantine.c ++++ b/mm/kasan/quarantine.c +@@ -103,7 +103,7 @@ static int quarantine_head; + static int quarantine_tail; + /* Total size of all objects in global_quarantine across all batches. */ + static unsigned long quarantine_size; +-static DEFINE_SPINLOCK(quarantine_lock); ++static DEFINE_RAW_SPINLOCK(quarantine_lock); + DEFINE_STATIC_SRCU(remove_cache_srcu); + + /* Maximum size of the global queue. */ +@@ -190,7 +190,7 @@ void quarantine_put(struct kasan_free_me + if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) { + qlist_move_all(q, &temp); + +- spin_lock(&quarantine_lock); ++ raw_spin_lock(&quarantine_lock); + WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes); + qlist_move_all(&temp, &global_quarantine[quarantine_tail]); + if (global_quarantine[quarantine_tail].bytes >= +@@ -203,7 +203,7 @@ void quarantine_put(struct kasan_free_me + if (new_tail != quarantine_head) + quarantine_tail = new_tail; + } +- spin_unlock(&quarantine_lock); ++ raw_spin_unlock(&quarantine_lock); + } + + local_irq_restore(flags); +@@ -230,7 +230,7 @@ void quarantine_reduce(void) + * expected case). + */ + srcu_idx = srcu_read_lock(&remove_cache_srcu); +- spin_lock_irqsave(&quarantine_lock, flags); ++ raw_spin_lock_irqsave(&quarantine_lock, flags); + + /* + * Update quarantine size in case of hotplug. Allocate a fraction of +@@ -254,7 +254,7 @@ void quarantine_reduce(void) + quarantine_head = 0; + } + +- spin_unlock_irqrestore(&quarantine_lock, flags); ++ raw_spin_unlock_irqrestore(&quarantine_lock, flags); + + qlist_free_all(&to_free, NULL); + srcu_read_unlock(&remove_cache_srcu, srcu_idx); +@@ -310,17 +310,17 @@ void quarantine_remove_cache(struct kmem + */ + on_each_cpu(per_cpu_remove_cache, cache, 1); + +- spin_lock_irqsave(&quarantine_lock, flags); ++ raw_spin_lock_irqsave(&quarantine_lock, flags); + for (i = 0; i < QUARANTINE_BATCHES; i++) { + if (qlist_empty(&global_quarantine[i])) + continue; + qlist_move_cache(&global_quarantine[i], &to_free, cache); + /* Scanning whole quarantine can take a while. */ +- spin_unlock_irqrestore(&quarantine_lock, flags); ++ raw_spin_unlock_irqrestore(&quarantine_lock, flags); + cond_resched(); +- spin_lock_irqsave(&quarantine_lock, flags); ++ raw_spin_lock_irqsave(&quarantine_lock, flags); + } +- spin_unlock_irqrestore(&quarantine_lock, flags); ++ raw_spin_unlock_irqrestore(&quarantine_lock, flags); + + qlist_free_all(&to_free, cache); + diff --git a/debian/patches-rt/mm-make-vmstat-rt-aware.patch b/debian/patches-rt/mm-make-vmstat-rt-aware.patch index 1d62b36b9..09be43ca8 100644 --- a/debian/patches-rt/mm-make-vmstat-rt-aware.patch +++ b/debian/patches-rt/mm-make-vmstat-rt-aware.patch @@ -1,7 +1,7 @@ From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:13 -0500 Subject: mm/vmstat: Protect per cpu variables with preempt disable on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Disable preemption on -RT for the vmstat code. On vanila the code runs in IRQ-off regions while on -RT it is not. 
"preempt_disable" ensures that the diff --git a/debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch index 475addcf1..5635fd934 100644 --- a/debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch +++ b/debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch @@ -1,7 +1,7 @@ From: Yang Shi Subject: mm/memcontrol: Don't call schedule_work_on in preemption disabled context Date: Wed, 30 Oct 2013 11:48:33 -0700 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The following trace is triggered when running ltp oom test cases: @@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/mm/memcontrol.c +++ b/mm/memcontrol.c -@@ -1783,7 +1783,7 @@ static void drain_all_stock(struct mem_c +@@ -2036,7 +2036,7 @@ static void drain_all_stock(struct mem_c * as well as workers from this path always operate on the local * per-cpu data. CPU up doesn't touch memcg_stock at all. */ @@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); struct mem_cgroup *memcg; -@@ -1803,7 +1803,7 @@ static void drain_all_stock(struct mem_c +@@ -2056,7 +2056,7 @@ static void drain_all_stock(struct mem_c } css_put(&memcg->css); } diff --git a/debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch b/debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch index b9d050677..be899a5f9 100644 --- a/debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch +++ b/debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Subject: mm/memcontrol: Replace local_irq_disable with local locks Date: Wed, 28 Jan 2015 17:14:16 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz There are a few local_irq_disable() which then take sleeping locks. This patch converts them local locks. 
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* Whether legacy memory+swap accounting is active */ static bool do_memsw_account(void) { -@@ -4558,12 +4561,12 @@ static int mem_cgroup_move_account(struc +@@ -4843,12 +4846,12 @@ static int mem_cgroup_move_account(struc ret = 0; @@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior out_unlock: unlock_page(page); out: -@@ -5637,10 +5640,10 @@ void mem_cgroup_commit_charge(struct pag +@@ -5967,10 +5970,10 @@ void mem_cgroup_commit_charge(struct pag commit_charge(page, memcg, lrucare); @@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (do_memsw_account() && PageSwapCache(page)) { swp_entry_t entry = { .val = page_private(page) }; -@@ -5709,7 +5712,7 @@ static void uncharge_batch(const struct +@@ -6039,7 +6042,7 @@ static void uncharge_batch(const struct memcg_oom_recover(ug->memcg); } @@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon); __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file); __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); -@@ -5717,7 +5720,7 @@ static void uncharge_batch(const struct +@@ -6047,7 +6050,7 @@ static void uncharge_batch(const struct __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages); memcg_check_events(ug->memcg, ug->dummy_page); @@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (!mem_cgroup_is_root(ug->memcg)) css_put_many(&ug->memcg->css, nr_pages); -@@ -5880,10 +5883,10 @@ void mem_cgroup_migrate(struct page *old +@@ -6210,10 +6213,10 @@ void mem_cgroup_migrate(struct page *old commit_charge(newpage, memcg, false); @@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior } DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); -@@ -6075,6 +6078,7 @@ void mem_cgroup_swapout(struct page *pag +@@ -6405,6 +6408,7 @@ void mem_cgroup_swapout(struct page *pag struct mem_cgroup *memcg, *swap_memcg; unsigned int nr_entries; unsigned short oldid; @@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); -@@ -6120,13 +6124,17 @@ void mem_cgroup_swapout(struct page *pag +@@ -6450,13 +6454,17 @@ void mem_cgroup_swapout(struct page *pag * important here to have the interrupts disabled because it is the * only synchronisation we have for updating the per-CPU variables. */ diff --git a/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch index ddbca964a..33ef8ff81 100644 --- a/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch +++ b/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch @@ -1,7 +1,7 @@ From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:37 -0500 Subject: mm: page_alloc: rt-friendly per-cpu pages -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz rt-friendly per-cpu pages: convert the irqs-off per-cpu locking method into a preemptible, explicit-per-cpu-locks method. 
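In the -rt series this "explicit-per-cpu-locks method" is wrapped in a pair of compatibility macros so the allocator hot paths read identically on both kernels; schematically (a sketch, with names as carried in the series):

#include <linux/locallock.h>

static DEFINE_LOCAL_IRQ_LOCK(pa_lock);

#ifdef CONFIG_PREEMPT_RT_BASE
# define cpu_lock_irqsave(cpu, flags)		\
	local_lock_irqsave_on(pa_lock, flags, cpu)
# define cpu_unlock_irqrestore(cpu, flags)	\
	local_unlock_irqrestore_on(pa_lock, flags, cpu)
#else
# define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
# define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
#endif

The _on() variants let one CPU drain another CPU's per-CPU page lists by taking that CPU's lock remotely, something the plain irqs-off scheme cannot express.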
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -61,6 +61,7 @@ +@@ -60,6 +60,7 @@ #include #include #include @@ -213,7 +213,7 @@ Signed-off-by: Thomas Gleixner return NULL; } -@@ -8011,7 +8034,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -8062,7 +8085,7 @@ void zone_pcp_reset(struct zone *zone) struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ @@ -222,7 +222,7 @@ Signed-off-by: Thomas Gleixner if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); -@@ -8020,7 +8043,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -8071,7 +8094,7 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } diff --git a/debian/patches-rt/mm-perform-lru_add_drain_all-remotely.patch b/debian/patches-rt/mm-perform-lru_add_drain_all-remotely.patch index 5c86d2dd6..aad8d9686 100644 --- a/debian/patches-rt/mm-perform-lru_add_drain_all-remotely.patch +++ b/debian/patches-rt/mm-perform-lru_add_drain_all-remotely.patch @@ -1,7 +1,7 @@ From: Luiz Capitulino Date: Fri, 27 May 2016 15:03:28 +0200 Subject: [PATCH] mm: perform lru_add_drain_all() remotely -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run on all CPUs that have non-empty LRU pagevecs and then waiting for diff --git a/debian/patches-rt/mm-protect-activate-switch-mm.patch b/debian/patches-rt/mm-protect-activate-switch-mm.patch index 06724d9cc..8f5aa4ac1 100644 --- a/debian/patches-rt/mm-protect-activate-switch-mm.patch +++ b/debian/patches-rt/mm-protect-activate-switch-mm.patch @@ -1,7 +1,7 @@ From: Yong Zhang Date: Tue, 15 May 2012 13:53:56 +0800 Subject: mm: Protect activate_mm() by preempt_[disable&enable]_rt() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz User preempt_*_rt instead of local_irq_*_rt or otherwise there will be warning on ARM like below: diff --git a/debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch b/debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch index ff117af10..9b8e5e123 100644 --- a/debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch +++ b/debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch @@ -1,7 +1,7 @@ Subject: mm, rt: kmap_atomic scheduling From: Peter Zijlstra Date: Thu, 28 Jul 2011 10:43:51 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz In fact, with migrate_disable() existing one could play games with kmap_atomic. 
You could save/restore the kmap_atomic slots on context @@ -248,7 +248,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; -@@ -1193,6 +1194,12 @@ struct task_struct { +@@ -1205,6 +1206,12 @@ struct task_struct { int softirq_nestcnt; unsigned int softirqs_raised; #endif diff --git a/debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch b/debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch index 3f5e3d371..b6e3744d1 100644 --- a/debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch +++ b/debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Fri, 3 Jul 2009 08:44:34 -0500 Subject: mm/scatterlist: Do not disable irqs on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz For -RT it is enough to keep pagefault disabled (which is currently handled by kmap_atomic()). diff --git a/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch b/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch index 5e2e83923..828ed486f 100644 --- a/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch +++ b/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch @@ -1,7 +1,7 @@ Subject: mm/vmalloc: Another preempt disable region which sucks From: Thomas Gleixner Date: Tue, 12 Jul 2011 11:39:36 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Avoid the preempt disable version of get_cpu_var(). The inner-lock should provide enough serialisation. diff --git a/debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch b/debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch index 8205440c5..33b4f8c30 100644 --- a/debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch +++ b/debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch @@ -1,7 +1,7 @@ From: Mike Galbraith Date: Tue, 22 Mar 2016 11:16:09 +0100 Subject: [PATCH] mm/zsmalloc: copy with get_cpu_var() and locking -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz get_cpu_var() disables preemption and triggers a might_sleep() splat later. This is replaced with get_locked_var(). 
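The replacement pattern, using the names from the patch with a pared-down mapping_area (the real struct has more fields):

#include <linux/locallock.h>
#include <linux/percpu.h>

struct mapping_area {
	char *vm_addr;		/* pared down for illustration */
};

static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock);
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static void *map_area_begin(void)
{
	struct mapping_area *area;

	/* was: area = &get_cpu_var(zs_map_area);  (preempt_disable()) */
	area = &get_locked_var(zs_map_area_lock, zs_map_area);
	return area->vm_addr;
}

static void map_area_end(void)
{
	/* was: put_cpu_var(zs_map_area); */
	put_locked_var(zs_map_area_lock, zs_map_area);
}

Because the lock, not disabled preemption, now serializes access to the per-CPU area, the copy inside can sleep on -RT without triggering the might_sleep() splat.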
@@ -177,7 +177,7 @@ Signed-off-by: Sebastian Andrzej Siewior } static void reset_page(struct page *page) -@@ -1350,7 +1418,7 @@ void *zs_map_object(struct zs_pool *pool +@@ -1337,7 +1405,7 @@ void *zs_map_object(struct zs_pool *pool class = pool->size_class[class_idx]; off = (class->size * obj_idx) & ~PAGE_MASK; @@ -186,7 +186,7 @@ Signed-off-by: Sebastian Andrzej Siewior area->vm_mm = mm; if (off + class->size <= PAGE_SIZE) { /* this object is contained entirely within a page */ -@@ -1404,7 +1472,7 @@ void zs_unmap_object(struct zs_pool *poo +@@ -1391,7 +1459,7 @@ void zs_unmap_object(struct zs_pool *poo __zs_unmap_object(area, pages, off, class->size); } diff --git a/debian/patches-rt/mutex-no-spin-on-rt.patch b/debian/patches-rt/mutex-no-spin-on-rt.patch index b9442841a..553aebf14 100644 --- a/debian/patches-rt/mutex-no-spin-on-rt.patch +++ b/debian/patches-rt/mutex-no-spin-on-rt.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:51:45 +0200 Subject: locking: Disable spin on owner for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Drop spin on owner for mutex / rwsem. We are most likely not using it but… diff --git a/debian/patches-rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/debian/patches-rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch index 15dd860a7..af1e752e4 100644 --- a/debian/patches-rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch +++ b/debian/patches-rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch @@ -2,7 +2,7 @@ From: Steven Rostedt Date: Tue, 6 Dec 2016 17:50:30 -0500 Subject: [PATCH] net: Have __napi_schedule_irqoff() disable interrupts on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz A customer hit a crash where the napi sd->poll_list became corrupted. 
The customer had the bnx2x driver, which does a @@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h -@@ -411,7 +411,19 @@ typedef enum rx_handler_result rx_handle +@@ -422,7 +422,19 @@ typedef enum rx_handler_result rx_handle typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); void __napi_schedule(struct napi_struct *n); @@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior { --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -5470,6 +5470,7 @@ bool napi_schedule_prep(struct napi_stru +@@ -5926,6 +5926,7 @@ bool napi_schedule_prep(struct napi_stru } EXPORT_SYMBOL(napi_schedule_prep); @@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior /** * __napi_schedule_irqoff - schedule for receive * @n: entry to schedule -@@ -5481,6 +5482,7 @@ void __napi_schedule_irqoff(struct napi_ +@@ -5937,6 +5938,7 @@ void __napi_schedule_irqoff(struct napi_ ____napi_schedule(this_cpu_ptr(&softnet_data), n); } EXPORT_SYMBOL(__napi_schedule_irqoff); diff --git a/debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch index b6f43a9a6..1f64b42c1 100644 --- a/debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch +++ b/debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 14 Sep 2016 17:36:35 +0200 Subject: [PATCH] net/Qdisc: use a seqlock instead seqcount -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The seqcount disables preemption on -RT while it is held which can't remove. Also we don't want the reader to spin for ages if the writer is @@ -66,14 +66,14 @@ Signed-off-by: Sebastian Andrzej Siewior @@ -60,13 +61,13 @@ int gen_new_estimator(struct gnet_stats_ struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, - spinlock_t *stats_lock, + spinlock_t *lock, - seqcount_t *running, struct nlattr *opt); + net_seqlock_t *running, struct nlattr *opt); void gen_kill_estimator(struct net_rate_estimator __rcu **ptr); int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **ptr, - spinlock_t *stats_lock, + spinlock_t *lock, - seqcount_t *running, struct nlattr *opt); + net_seqlock_t *running, struct nlattr *opt); bool gen_estimator_active(struct net_rate_estimator __rcu **ptr); @@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior #include #include #include -@@ -94,7 +95,7 @@ struct Qdisc { +@@ -97,7 +98,7 @@ struct Qdisc { struct sk_buff_head gso_skb ____cacheline_aligned_in_smp; struct qdisc_skb_head q; struct gnet_stats_basic_packed bstats; @@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior struct gnet_stats_queue qstats; unsigned long state; struct Qdisc *next_sched; -@@ -115,7 +116,11 @@ static inline bool qdisc_is_running(stru +@@ -118,7 +119,11 @@ static inline bool qdisc_is_running(stru { if (qdisc->flags & TCQ_F_NOLOCK) return spin_is_locked(&qdisc->seqlock); @@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior } static inline bool qdisc_run_begin(struct Qdisc *qdisc) -@@ -126,17 +131,27 @@ static inline bool qdisc_run_begin(struc +@@ -129,17 +134,27 @@ static inline bool qdisc_run_begin(struc } else if (qdisc_is_running(qdisc)) { return false; } @@ -156,7 +156,7 @@ 
Signed-off-by: Sebastian Andrzej Siewior if (qdisc->flags & TCQ_F_NOLOCK) spin_unlock(&qdisc->seqlock); } -@@ -412,7 +427,7 @@ static inline spinlock_t *qdisc_root_sle +@@ -458,7 +473,7 @@ static inline spinlock_t *qdisc_root_sle return qdisc_lock(root); } @@ -179,21 +179,21 @@ Signed-off-by: Sebastian Andrzej Siewior @@ -129,7 +129,7 @@ int gen_new_estimator(struct gnet_stats_ struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, - spinlock_t *stats_lock, + spinlock_t *lock, - seqcount_t *running, + net_seqlock_t *running, struct nlattr *opt) { struct gnet_estimator *parm = nla_data(opt); -@@ -222,7 +222,7 @@ int gen_replace_estimator(struct gnet_st +@@ -227,7 +227,7 @@ int gen_replace_estimator(struct gnet_st struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, - spinlock_t *stats_lock, + spinlock_t *lock, - seqcount_t *running, struct nlattr *opt) + net_seqlock_t *running, struct nlattr *opt) { return gen_new_estimator(bstats, cpu_bstats, rate_est, - stats_lock, running, opt); + lock, running, opt); --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -142,7 +142,7 @@ static void @@ -229,7 +229,7 @@ Signed-off-by: Sebastian Andrzej Siewior struct gnet_stats_basic_packed *b) --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c -@@ -1159,7 +1159,7 @@ static struct Qdisc *qdisc_create(struct +@@ -1166,7 +1166,7 @@ static struct Qdisc *qdisc_create(struct rcu_assign_pointer(sch->stab, stab); } if (tca[TCA_RATE]) { diff --git a/debian/patches-rt/net-add-a-lock-around-icmp_sk.patch b/debian/patches-rt/net-add-a-lock-around-icmp_sk.patch index b96ff98a1..100ff672e 100644 --- a/debian/patches-rt/net-add-a-lock-around-icmp_sk.patch +++ b/debian/patches-rt/net-add-a-lock-around-icmp_sk.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 31 Aug 2016 17:54:09 +0200 Subject: [PATCH] net: add a lock around icmp_sk() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz It looks like the this_cpu_ptr() access in icmp_sk() is protected with local_bh_disable(). To avoid missing serialization in -RT I am adding diff --git a/debian/patches-rt/net-add-back-the-missing-serialization-in-ip_send_un.patch b/debian/patches-rt/net-add-back-the-missing-serialization-in-ip_send_un.patch index ebcb75262..c10807370 100644 --- a/debian/patches-rt/net-add-back-the-missing-serialization-in-ip_send_un.patch +++ b/debian/patches-rt/net-add-back-the-missing-serialization-in-ip_send_un.patch @@ -5,7 +5,7 @@ Subject: [PATCH] net: add back the missing serialization in MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Some time ago Sami Pietikäinen reported a crash on -RT in ip_send_unicast_reply() which was later fixed by Nicholas Mc Guire @@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior #include #include -@@ -632,6 +633,7 @@ void tcp_v4_send_check(struct sock *sk, +@@ -633,6 +634,7 @@ void tcp_v4_send_check(struct sock *sk, } EXPORT_SYMBOL(tcp_v4_send_check); @@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * This routine will send an RST to the other tcp. 
* -@@ -766,6 +768,7 @@ static void tcp_v4_send_reset(const stru +@@ -767,6 +769,7 @@ static void tcp_v4_send_reset(const stru arg.tos = ip_hdr(skb)->tos; arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); local_bh_disable(); @@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk); if (sk) ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? -@@ -778,6 +781,7 @@ static void tcp_v4_send_reset(const stru +@@ -779,6 +782,7 @@ static void tcp_v4_send_reset(const stru ctl_sk->sk_mark = 0; __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); @@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior local_bh_enable(); #ifdef CONFIG_TCP_MD5SIG -@@ -858,6 +862,7 @@ static void tcp_v4_send_ack(const struct +@@ -859,6 +863,7 @@ static void tcp_v4_send_ack(const struct arg.tos = tos; arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL); local_bh_disable(); @@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk); if (sk) ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? -@@ -869,6 +874,7 @@ static void tcp_v4_send_ack(const struct +@@ -870,6 +875,7 @@ static void tcp_v4_send_ack(const struct ctl_sk->sk_mark = 0; __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); diff --git a/debian/patches-rt/net-another-local-irq-disable-alloc-atomic-headache.patch b/debian/patches-rt/net-another-local-irq-disable-alloc-atomic-headache.patch index b9894b3b6..ff6f971d3 100644 --- a/debian/patches-rt/net-another-local-irq-disable-alloc-atomic-headache.patch +++ b/debian/patches-rt/net-another-local-irq-disable-alloc-atomic-headache.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Wed, 26 Sep 2012 16:21:08 +0200 Subject: net: Another local_irq_disable/kmalloc headache -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Replace it by a local lock. Though that's pretty inefficient :( diff --git a/debian/patches-rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch b/debian/patches-rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch index 461ec58a1..a75c59239 100644 --- a/debian/patches-rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch +++ b/debian/patches-rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch @@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 15 Jan 2016 16:33:34 +0100 Subject: net/core: protect users of napi_alloc_cache against reentrance -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz On -RT the code running in BH can not be moved to another CPU so CPU local variable remain local. 
However the code can be preempted diff --git a/debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch b/debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch index 2c4ed9061..341a110fa 100644 --- a/debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch +++ b/debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 16 Jun 2017 19:03:16 +0200 Subject: [PATCH] net/core: use local_bh_disable() in netif_rx_ni() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz In 2004 netif_rx_ni() gained a preempt_disable() section around netif_rx() and its do_softirq() + testing for it. The do_softirq() part @@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -4252,11 +4252,9 @@ int netif_rx_ni(struct sk_buff *skb) +@@ -4507,11 +4507,9 @@ int netif_rx_ni(struct sk_buff *skb) trace_netif_rx_ni_entry(skb); diff --git a/debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch index ce390323b..f6ddb1fff 100644 --- a/debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch +++ b/debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 30 Mar 2016 13:36:29 +0200 Subject: [PATCH] net: dev: always take qdisc's busylock in __dev_xmit_skb() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The root-lock is dropped before dev_hard_start_xmit() is invoked and after setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away @@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -3242,7 +3242,11 @@ static inline int __dev_xmit_skb(struct +@@ -3446,7 +3446,11 @@ static inline int __dev_xmit_skb(struct * This permits qdisc->running owner to get the lock more * often and dequeue packets faster. */ diff --git a/debian/patches-rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/debian/patches-rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch index 319577d73..d75f3a7a6 100644 --- a/debian/patches-rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch +++ b/debian/patches-rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch @@ -1,7 +1,7 @@ Subject: net: netfilter: Serialize xt_write_recseq sections on RT From: Thomas Gleixner Date: Sun, 28 Oct 2012 11:18:08 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The netfilter code relies only on the implicit semantics of local_bh_disable() for serializing wt_write_recseq sections. 
RT breaks diff --git a/debian/patches-rt/net-make-devnet_rename_seq-a-mutex.patch b/debian/patches-rt/net-make-devnet_rename_seq-a-mutex.patch index 367ea1d68..77b33a552 100644 --- a/debian/patches-rt/net-make-devnet_rename_seq-a-mutex.patch +++ b/debian/patches-rt/net-make-devnet_rename_seq-a-mutex.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 20 Mar 2013 18:06:20 +0100 Subject: net: Add a mutex around devnet_rename_seq -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz On RT write_seqcount_begin() disables preemption and device_rename() allocates memory with GFP_KERNEL and grabs later the sysfs_mutex @@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -197,6 +197,7 @@ static unsigned int napi_gen_id = NR_CPU +@@ -195,6 +195,7 @@ static unsigned int napi_gen_id = NR_CPU static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); static seqcount_t devnet_rename_seq; @@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner static inline void dev_base_seq_inc(struct net *net) { -@@ -922,7 +923,8 @@ int netdev_get_name(struct net *net, cha +@@ -920,7 +921,8 @@ int netdev_get_name(struct net *net, cha strcpy(name, dev->name); rcu_read_unlock(); if (read_seqcount_retry(&devnet_rename_seq, seq)) { @@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner goto retry; } -@@ -1185,20 +1187,17 @@ int dev_change_name(struct net_device *d +@@ -1183,20 +1185,17 @@ int dev_change_name(struct net_device *d if (dev->flags & IFF_UP) return -EBUSY; @@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner if (oldname[0] && !strchr(oldname, '%')) netdev_info(dev, "renamed from %s\n", oldname); -@@ -1211,11 +1210,12 @@ int dev_change_name(struct net_device *d +@@ -1209,11 +1208,12 @@ int dev_change_name(struct net_device *d if (ret) { memcpy(dev->name, oldname, IFNAMSIZ); dev->name_assign_type = old_assign_type; @@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner netdev_adjacent_rename_links(dev, oldname); -@@ -1236,7 +1236,8 @@ int dev_change_name(struct net_device *d +@@ -1234,7 +1234,8 @@ int dev_change_name(struct net_device *d /* err >= 0 after dev_alloc_name() or stores the first errno */ if (err >= 0) { err = ret; @@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner memcpy(dev->name, oldname, IFNAMSIZ); memcpy(oldname, newname, IFNAMSIZ); dev->name_assign_type = old_assign_type; -@@ -1249,6 +1250,11 @@ int dev_change_name(struct net_device *d +@@ -1247,6 +1248,11 @@ int dev_change_name(struct net_device *d } return err; diff --git a/debian/patches-rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/debian/patches-rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch index 5c246015d..3c9e3edfe 100644 --- a/debian/patches-rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch +++ b/debian/patches-rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 13 Jan 2016 15:55:02 +0100 Subject: net: move xmit_recursion to per-task variable on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz A softirq on -RT can be preempted. 
That means one task is in __dev_queue_xmit(), gets preempted and another task may enter @@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h -@@ -573,7 +573,11 @@ struct netdev_queue { +@@ -587,7 +587,11 @@ struct netdev_queue { * write-mostly part */ spinlock_t _xmit_lock ____cacheline_aligned_in_smp; @@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Time (in jiffies) of last Tx */ -@@ -2554,14 +2558,53 @@ void netdev_freemem(struct net_device *d +@@ -2605,14 +2609,53 @@ void netdev_freemem(struct net_device *d void synchronize_net(void); int init_dummy_netdev(struct net_device *dev); @@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); -@@ -3639,10 +3682,48 @@ static inline u32 netif_msg_init(int deb +@@ -3788,10 +3831,48 @@ static inline u32 netif_msg_init(int deb return (1 << debug_value) - 1; } @@ -142,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior } static inline bool __netif_tx_acquire(struct netdev_queue *txq) -@@ -3659,32 +3740,32 @@ static inline void __netif_tx_release(st +@@ -3808,32 +3889,32 @@ static inline void __netif_tx_release(st static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); @@ -182,7 +182,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1196,6 +1196,9 @@ struct task_struct { +@@ -1208,6 +1208,9 @@ struct task_struct { #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; #endif @@ -194,7 +194,7 @@ Signed-off-by: Sebastian Andrzej Siewior struct task_struct *oom_reaper_list; --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -3314,8 +3314,10 @@ static void skb_update_prio(struct sk_bu +@@ -3518,8 +3518,10 @@ static void skb_update_prio(struct sk_bu #define skb_update_prio(skb) #endif @@ -205,7 +205,7 @@ Signed-off-by: Sebastian Andrzej Siewior /** * dev_loopback_xmit - loop back @skb -@@ -3555,9 +3557,12 @@ static int __dev_queue_xmit(struct sk_bu +@@ -3810,9 +3812,12 @@ static int __dev_queue_xmit(struct sk_bu if (dev->flags & IFF_UP) { int cpu = smp_processor_id(); /* ok because BHs are off */ @@ -220,7 +220,7 @@ Signed-off-by: Sebastian Andrzej Siewior goto recursion_alert; skb = validate_xmit_skb(skb, dev, &again); -@@ -3567,9 +3572,9 @@ static int __dev_queue_xmit(struct sk_bu +@@ -3822,9 +3827,9 @@ static int __dev_queue_xmit(struct sk_bu HARD_TX_LOCK(dev, txq, cpu); if (!netif_xmit_stopped(txq)) { @@ -232,7 +232,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (dev_xmit_complete(rc)) { HARD_TX_UNLOCK(dev, txq); goto out; -@@ -7858,7 +7863,7 @@ static void netdev_init_one_queue(struct +@@ -8359,7 +8364,7 @@ static void netdev_init_one_queue(struct /* Initialize queue lock */ spin_lock_init(&queue->_xmit_lock); netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); @@ -243,7 +243,7 @@ Signed-off-by: Sebastian Andrzej Siewior #ifdef CONFIG_BQL --- a/net/core/filter.c +++ b/net/core/filter.c -@@ -1983,7 +1983,7 @@ static inline int __bpf_tx_skb(struct ne +@@ -2000,7 +2000,7 @@ static inline int __bpf_tx_skb(struct ne { int ret; @@ -252,7 +252,7 @@ Signed-off-by: Sebastian Andrzej Siewior net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); kfree_skb(skb); return -ENETDOWN; -@@ -1991,9 +1991,9 @@ static inline int 
__bpf_tx_skb(struct ne +@@ -2008,9 +2008,9 @@ static inline int __bpf_tx_skb(struct ne skb->dev = dev; diff --git a/debian/patches-rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/debian/patches-rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch index 8f67c0e13..f8362a477 100644 --- a/debian/patches-rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch +++ b/debian/patches-rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch @@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 20 Jan 2016 15:39:05 +0100 Subject: net: provide a way to delegate processing a softirq to ksoftirqd -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz If the NET_RX uses up all of his budget it moves the following NAPI invocations into the `ksoftirqd`. On -RT it does not do so. Instead it @@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior void raise_softirq_irqoff(unsigned int nr) --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -5875,7 +5875,7 @@ static __latent_entropy void net_rx_acti +@@ -6353,7 +6353,7 @@ static __latent_entropy void net_rx_acti list_splice_tail(&repoll, &list); list_splice(&list, &sd->poll_list); if (!list_empty(&sd->poll_list)) diff --git a/debian/patches-rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/debian/patches-rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch index 5abbd0a73..cce22305b 100644 --- a/debian/patches-rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch +++ b/debian/patches-rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch @@ -1,7 +1,7 @@ From: Marc Kleine-Budde Date: Wed, 5 Mar 2014 00:49:47 +0100 Subject: net: sched: Use msleep() instead of yield() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz On PREEMPT_RT enabled systems the interrupt handler run as threads at prio 50 (by default). If a high priority userspace process tries to shut down a busy diff --git a/debian/patches-rt/net-use-cpu-chill.patch b/debian/patches-rt/net-use-cpu-chill.patch index 088ed22b2..fb676b89d 100644 --- a/debian/patches-rt/net-use-cpu-chill.patch +++ b/debian/patches-rt/net-use-cpu-chill.patch @@ -1,7 +1,7 @@ Subject: net: Use cpu_chill() instead of cpu_relax() From: Thomas Gleixner Date: Wed, 07 Mar 2012 21:10:04 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Retry loops on RT might loop forever when the modifying side was preempted. Use cpu_chill() instead of cpu_relax() to let the system @@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner #include #include #include -@@ -666,7 +667,7 @@ static void prb_retire_rx_blk_timer_expi +@@ -667,7 +668,7 @@ static void prb_retire_rx_blk_timer_expi if (BLOCK_NUM_PKTS(pbd)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ @@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner } } -@@ -928,7 +929,7 @@ static void prb_retire_current_block(str +@@ -929,7 +930,7 @@ static void prb_retire_current_block(str if (!(status & TP_STATUS_BLK_TMO)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... 
*/ @@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner #include "rds_single_path.h" #include "ib_mr.h" -@@ -210,7 +211,7 @@ static inline void wait_clean_list_grace +@@ -222,7 +223,7 @@ static inline void wait_clean_list_grace for_each_online_cpu(cpu) { flag = &per_cpu(clean_list_grace, cpu); while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) diff --git a/debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch b/debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch index 810a66a4a..65164b2ba 100644 --- a/debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch +++ b/debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Sat, 27 May 2017 19:02:06 +0200 Subject: net/core: disable NET_RX_BUSY_POLL -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz sk_busy_loop() does preempt_disable() followed by a few operations which can take sleeping locks and may get long. diff --git a/debian/patches-rt/ntfs-avoid-disabling-interrupts-during-kmap_atomic.patch b/debian/patches-rt/ntfs-avoid-disabling-interrupts-during-kmap_atomic.patch deleted file mode 100644 index 889ae343e..000000000 --- a/debian/patches-rt/ntfs-avoid-disabling-interrupts-during-kmap_atomic.patch +++ /dev/null @@ -1,52 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Tue, 10 Apr 2018 17:54:32 +0200 -Subject: [PATCH] ntfs: don't disable interrupts during kmap_atomic() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -ntfs_end_buffer_async_read() disables interrupts around kmap_atomic(). This is -a leftover from the old kmap_atomic() implementation which relied on fixed -mapping slots, so the caller had to make sure that the same slot could not be -reused from an interrupting context. - -kmap_atomic() was changed to dynamic slots long ago and commit 1ec9c5ddc17a -("include/linux/highmem.h: remove the second argument of k[un]map_atomic()") -removed the slot assignements, but the callers were not checked for now -redundant interrupt disabling. - -Remove the conditional interrupt disable. - -Signed-off-by: Sebastian Andrzej Siewior ---- - fs/ntfs/aops.c | 4 ---- - 1 file changed, 4 deletions(-) - ---- a/fs/ntfs/aops.c -+++ b/fs/ntfs/aops.c -@@ -93,13 +93,11 @@ static void ntfs_end_buffer_async_read(s - ofs = 0; - if (file_ofs < init_size) - ofs = init_size - file_ofs; -- local_irq_save(flags); - kaddr = kmap_atomic(page); - memset(kaddr + bh_offset(bh) + ofs, 0, - bh->b_size - ofs); - flush_dcache_page(page); - kunmap_atomic(kaddr); -- local_irq_restore(flags); - } - } else { - clear_buffer_uptodate(bh); -@@ -146,13 +144,11 @@ static void ntfs_end_buffer_async_read(s - recs = PAGE_SIZE / rec_size; - /* Should have been verified before we got here... 
*/ - BUG_ON(!recs); -- local_irq_save(flags); - kaddr = kmap_atomic(page); - for (i = 0; i < recs; i++) - post_read_mst_fixup((NTFS_RECORD*)(kaddr + - i * rec_size), rec_size); - kunmap_atomic(kaddr); -- local_irq_restore(flags); - flush_dcache_page(page); - if (likely(page_uptodate && !PageError(page))) - SetPageUptodate(page); diff --git a/debian/patches-rt/of-allocate-free-phandle-cache-outside-of-the-devtre.patch b/debian/patches-rt/of-allocate-free-phandle-cache-outside-of-the-devtre.patch index 19cc858d5..64ed1a25b 100644 --- a/debian/patches-rt/of-allocate-free-phandle-cache-outside-of-the-devtre.patch +++ b/debian/patches-rt/of-allocate-free-phandle-cache-outside-of-the-devtre.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 31 Aug 2018 14:16:30 +0200 Subject: [PATCH] of: allocate / free phandle cache outside of the devtree_lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The phandle cache code allocates memory while holding devtree_lock which is a raw_spinlock_t. Memory allocation (and free()) is not possible on @@ -13,19 +13,19 @@ Cc: Frank Rowand Cc: devicetree@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior --- - drivers/of/base.c | 22 ++++++++++++++-------- + drivers/of/base.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) --- a/drivers/of/base.c +++ b/drivers/of/base.c -@@ -108,46 +108,52 @@ void of_populate_phandle_cache(void) +@@ -130,46 +130,52 @@ void of_populate_phandle_cache(void) u32 cache_entries; struct device_node *np; u32 phandles = 0; + struct device_node **shadow; raw_spin_lock_irqsave(&devtree_lock, flags); -- + - kfree(phandle_cache); + shadow = phandle_cache; phandle_cache = NULL; @@ -33,11 +33,12 @@ Signed-off-by: Sebastian Andrzej Siewior for_each_of_allnodes(np) if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) phandles++; - + raw_spin_unlock_irqrestore(&devtree_lock, flags); -+ ++ kfree(shadow); + if (!phandles) - goto out; +- goto out; ++ return; cache_entries = roundup_pow_of_two(phandles); phandle_cache_mask = cache_entries - 1; @@ -46,11 +47,10 @@ Signed-off-by: Sebastian Andrzej Siewior - GFP_ATOMIC); - if (!phandle_cache) - goto out; -+ kfree(shadow); + shadow = kcalloc(cache_entries, sizeof(*phandle_cache), GFP_KERNEL); -+ + if (!shadow) + return; ++ + raw_spin_lock_irqsave(&devtree_lock, flags); + phandle_cache = shadow; diff --git a/debian/patches-rt/oleg-signal-rt-fix.patch b/debian/patches-rt/oleg-signal-rt-fix.patch index 38ae98bd4..8b5b24907 100644 --- a/debian/patches-rt/oleg-signal-rt-fix.patch +++ b/debian/patches-rt/oleg-signal-rt-fix.patch @@ -1,7 +1,7 @@ From: Oleg Nesterov Date: Tue, 14 Jul 2015 14:26:34 +0200 Subject: signal/x86: Delay calling signals in atomic -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz On x86_64 we must disable preemption before we enable interrupts for stack faults, int3 and debugging, because the current task is using @@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner #endif --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -875,6 +875,10 @@ struct task_struct { +@@ -881,6 +881,10 @@ struct task_struct { /* Restored if set_restore_sigmask() was used: */ sigset_t saved_sigmask; struct sigpending pending; @@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner 
unsigned int sas_ss_flags; --- a/kernel/signal.c +++ b/kernel/signal.c -@@ -1185,8 +1185,8 @@ int do_send_sig_info(int sig, struct sig +@@ -1226,8 +1226,8 @@ int do_send_sig_info(int sig, struct sig * We don't want to have recursive SIGSEGV's etc, for example, * that is why we also clear SIGNAL_UNKILLABLE. */ @@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner { unsigned long int flags; int ret, blocked, ignored; -@@ -1215,6 +1215,39 @@ force_sig_info(int sig, struct siginfo * +@@ -1256,6 +1256,39 @@ force_sig_info(int sig, struct siginfo * return ret; } diff --git a/debian/patches-rt/panic-disable-random-on-rt.patch b/debian/patches-rt/panic-disable-random-on-rt.patch index 8c42306aa..a37a3348d 100644 --- a/debian/patches-rt/panic-disable-random-on-rt.patch +++ b/debian/patches-rt/panic-disable-random-on-rt.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Tue, 14 Jul 2015 14:26:34 +0200 Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Disable on -RT. If this is invoked from irq-context we will have problems to acquire the sleeping lock. diff --git a/debian/patches-rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/debian/patches-rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch index 12ab16ae9..0d3608ab0 100644 --- a/debian/patches-rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch +++ b/debian/patches-rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch @@ -1,7 +1,7 @@ Subject: rcu: Make ksoftirqd do RCU quiescent states From: "Paul E. McKenney" Date: Wed, 5 Oct 2011 11:45:18 -0700 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable to network-based denial-of-service attacks. This patch therefore @@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c -@@ -243,7 +243,19 @@ void rcu_sched_qs(void) +@@ -244,7 +244,19 @@ void rcu_sched_qs(void) this_cpu_ptr(&rcu_sched_data), true); } @@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner #include #include #include "../time/tick-internal.h" -@@ -1336,7 +1337,7 @@ static void rcu_prepare_kthreads(int cpu +@@ -1407,7 +1408,7 @@ static void rcu_prepare_kthreads(int cpu #endif /* #else #ifdef CONFIG_RCU_BOOST */ @@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner /* * Check to see if any future RCU-related work will need to be done -@@ -1352,7 +1353,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex +@@ -1423,7 +1424,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex *nextevt = KTIME_MAX; return rcu_cpu_has_callbacks(NULL); } @@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner /* * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up * after it. -@@ -1448,6 +1451,8 @@ static bool __maybe_unused rcu_try_advan +@@ -1520,6 +1523,8 @@ static bool __maybe_unused rcu_try_advan return cbs_ready; } @@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner /* * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready * to invoke. If the CPU has callbacks, try to advance them. 
Tell the -@@ -1490,6 +1495,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex +@@ -1562,6 +1567,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex *nextevt = basemono + dj * TICK_NSEC; return 0; } diff --git a/debian/patches-rt/pci-switchtec-Don-t-use-completion-s-wait-queue.patch b/debian/patches-rt/pci-switchtec-Don-t-use-completion-s-wait-queue.patch index 0ba1936e3..06f698c1f 100644 --- a/debian/patches-rt/pci-switchtec-Don-t-use-completion-s-wait-queue.patch +++ b/debian/patches-rt/pci-switchtec-Don-t-use-completion-s-wait-queue.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 4 Oct 2017 10:24:23 +0200 Subject: [PATCH] pci/switchtec: Don't use completion's wait queue -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The poll callback is using completion's wait_queue_head_t member and puts it in poll_wait() so the poll() caller gets a wakeup after command @@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c -@@ -41,10 +41,11 @@ struct switchtec_user { +@@ -43,10 +43,11 @@ struct switchtec_user { enum mrpc_state state; @@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior u32 cmd; u32 status; u32 return_code; -@@ -66,7 +67,7 @@ static struct switchtec_user *stuser_cre +@@ -68,7 +69,7 @@ static struct switchtec_user *stuser_cre stuser->stdev = stdev; kref_init(&stuser->kref); INIT_LIST_HEAD(&stuser->list); @@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior stuser->event_cnt = atomic_read(&stdev->event_cnt); dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); -@@ -149,7 +150,7 @@ static int mrpc_queue_cmd(struct switcht +@@ -151,7 +152,7 @@ static int mrpc_queue_cmd(struct switcht kref_get(&stuser->kref); stuser->read_len = sizeof(stuser->data); stuser_set_state(stuser, MRPC_QUEUED); @@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior list_add_tail(&stuser->list, &stdev->mrpc_queue); mrpc_cmd_submit(stdev); -@@ -186,7 +187,8 @@ static void mrpc_complete_cmd(struct swi +@@ -188,7 +189,8 @@ static void mrpc_complete_cmd(struct swi stuser->read_len); out: @@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior list_del_init(&stuser->list); stuser_put(stuser); stdev->mrpc_busy = 0; -@@ -456,10 +458,11 @@ static ssize_t switchtec_dev_read(struct +@@ -458,10 +460,11 @@ static ssize_t switchtec_dev_read(struct mutex_unlock(&stdev->mrpc_mutex); if (filp->f_flags & O_NONBLOCK) { @@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (rc < 0) return rc; } -@@ -507,7 +510,7 @@ static __poll_t switchtec_dev_poll(struc +@@ -509,7 +512,7 @@ static __poll_t switchtec_dev_poll(struc struct switchtec_dev *stdev = stuser->stdev; __poll_t ret = 0; @@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior poll_wait(filp, &stdev->event_wq, wait); if (lock_mutex_and_test_alive(stdev)) -@@ -515,7 +518,7 @@ static __poll_t switchtec_dev_poll(struc +@@ -517,7 +520,7 @@ static __poll_t switchtec_dev_poll(struc mutex_unlock(&stdev->mrpc_mutex); @@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior ret |= EPOLLIN | EPOLLRDNORM; if (stuser->event_cnt != atomic_read(&stdev->event_cnt)) -@@ -1034,7 +1037,8 @@ static void stdev_kill(struct switchtec_ +@@ -1038,7 +1041,8 @@ static void stdev_kill(struct switchtec_ /* Wake up and kill any users waiting on an MRPC request */ list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) { diff --git 
a/debian/patches-rt/percpu-include-irqflags.h-for-raw_local_irq_save.patch b/debian/patches-rt/percpu-include-irqflags.h-for-raw_local_irq_save.patch new file mode 100644 index 000000000..5916bd712 --- /dev/null +++ b/debian/patches-rt/percpu-include-irqflags.h-for-raw_local_irq_save.patch @@ -0,0 +1,27 @@ +From: Sebastian Andrzej Siewior +Date: Thu, 11 Oct 2018 16:39:59 +0200 +Subject: [PATCH] percpu: include irqflags.h for raw_local_irq_save() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz + +The header percpu.h header file is using raw_local_irq_save() but does +not include irqflags.h for its definition. It compiles because the +header file is included via an other header file. +On -RT the build fails because raw_local_irq_save() is not defined. + +Include irqflags.h in percpu.h. + +Signed-off-by: Sebastian Andrzej Siewior +--- + include/asm-generic/percpu.h | 1 + + 1 file changed, 1 insertion(+) + +--- a/include/asm-generic/percpu.h ++++ b/include/asm-generic/percpu.h +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + + #ifdef CONFIG_SMP + diff --git a/debian/patches-rt/peter_zijlstra-frob-rcu.patch b/debian/patches-rt/peter_zijlstra-frob-rcu.patch index cdfbc4247..919d0c433 100644 --- a/debian/patches-rt/peter_zijlstra-frob-rcu.patch +++ b/debian/patches-rt/peter_zijlstra-frob-rcu.patch @@ -1,7 +1,7 @@ Subject: rcu: Frob softirq test From: Peter Zijlstra Date: Sat Aug 13 00:23:17 CEST 2011 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz With RT_FULL we get the below wreckage: @@ -156,7 +156,7 @@ Signed-off-by: Peter Zijlstra --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h -@@ -512,7 +512,7 @@ void rcu_read_unlock_special(struct task +@@ -524,7 +524,7 @@ static void rcu_read_unlock_special(stru } /* Hardware IRQ handlers cannot block, complain if they get here. */ diff --git a/debian/patches-rt/peterz-percpu-rwsem-rt.patch b/debian/patches-rt/peterz-percpu-rwsem-rt.patch index 92d5b2fa7..5d3161626 100644 --- a/debian/patches-rt/peterz-percpu-rwsem-rt.patch +++ b/debian/patches-rt/peterz-percpu-rwsem-rt.patch @@ -1,7 +1,7 @@ Subject: locking/percpu-rwsem: Remove preempt_disable variants From: Peter Zijlstra Date: Wed Nov 23 16:29:32 CET 2016 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Effective revert commit: @@ -19,7 +19,7 @@ Signed-off-by: Peter Zijlstra (Intel) --- a/fs/locks.c +++ b/fs/locks.c -@@ -945,7 +945,7 @@ static int flock_lock_inode(struct inode +@@ -936,7 +936,7 @@ static int flock_lock_inode(struct inode return -ENOMEM; } @@ -28,7 +28,7 @@ Signed-off-by: Peter Zijlstra (Intel) spin_lock(&ctx->flc_lock); if (request->fl_flags & FL_ACCESS) goto find_conflict; -@@ -986,7 +986,7 @@ static int flock_lock_inode(struct inode +@@ -977,7 +977,7 @@ static int flock_lock_inode(struct inode out: spin_unlock(&ctx->flc_lock); @@ -37,7 +37,7 @@ Signed-off-by: Peter Zijlstra (Intel) if (new_fl) locks_free_lock(new_fl); locks_dispose_list(&dispose); -@@ -1023,7 +1023,7 @@ static int posix_lock_inode(struct inode +@@ -1015,7 +1015,7 @@ static int posix_lock_inode(struct inode new_fl2 = locks_alloc_lock(); } @@ -46,7 +46,7 @@ Signed-off-by: Peter Zijlstra (Intel) spin_lock(&ctx->flc_lock); /* * New lock request. 
Walk all POSIX locks and look for conflicts. If -@@ -1195,7 +1195,7 @@ static int posix_lock_inode(struct inode +@@ -1187,7 +1187,7 @@ static int posix_lock_inode(struct inode } out: spin_unlock(&ctx->flc_lock); @@ -55,7 +55,7 @@ Signed-off-by: Peter Zijlstra (Intel) /* * Free any unused locks. */ -@@ -1470,7 +1470,7 @@ int __break_lease(struct inode *inode, u +@@ -1462,7 +1462,7 @@ int __break_lease(struct inode *inode, u return error; } @@ -64,7 +64,7 @@ Signed-off-by: Peter Zijlstra (Intel) spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); -@@ -1522,13 +1522,13 @@ int __break_lease(struct inode *inode, u +@@ -1514,13 +1514,13 @@ int __break_lease(struct inode *inode, u locks_insert_block(fl, new_fl); trace_break_lease_block(inode, new_fl); spin_unlock(&ctx->flc_lock); @@ -80,7 +80,7 @@ Signed-off-by: Peter Zijlstra (Intel) spin_lock(&ctx->flc_lock); trace_break_lease_unblock(inode, new_fl); locks_delete_block(new_fl); -@@ -1545,7 +1545,7 @@ int __break_lease(struct inode *inode, u +@@ -1537,7 +1537,7 @@ int __break_lease(struct inode *inode, u } out: spin_unlock(&ctx->flc_lock); @@ -89,7 +89,7 @@ Signed-off-by: Peter Zijlstra (Intel) locks_dispose_list(&dispose); locks_free_lock(new_fl); return error; -@@ -1617,7 +1617,7 @@ int fcntl_getlease(struct file *filp) +@@ -1609,7 +1609,7 @@ int fcntl_getlease(struct file *filp) ctx = smp_load_acquire(&inode->i_flctx); if (ctx && !list_empty_careful(&ctx->flc_lease)) { @@ -98,7 +98,7 @@ Signed-off-by: Peter Zijlstra (Intel) spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); list_for_each_entry(fl, &ctx->flc_lease, fl_list) { -@@ -1627,7 +1627,7 @@ int fcntl_getlease(struct file *filp) +@@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp) break; } spin_unlock(&ctx->flc_lock); @@ -107,7 +107,7 @@ Signed-off-by: Peter Zijlstra (Intel) locks_dispose_list(&dispose); } -@@ -1702,7 +1702,7 @@ generic_add_lease(struct file *filp, lon +@@ -1693,7 +1693,7 @@ generic_add_lease(struct file *filp, lon return -EINVAL; } @@ -116,7 +116,7 @@ Signed-off-by: Peter Zijlstra (Intel) spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); error = check_conflicting_open(dentry, arg, lease->fl_flags); -@@ -1773,7 +1773,7 @@ generic_add_lease(struct file *filp, lon +@@ -1764,7 +1764,7 @@ generic_add_lease(struct file *filp, lon lease->fl_lmops->lm_setup(lease, priv); out: spin_unlock(&ctx->flc_lock); @@ -125,7 +125,7 @@ Signed-off-by: Peter Zijlstra (Intel) locks_dispose_list(&dispose); if (is_deleg) inode_unlock(inode); -@@ -1796,7 +1796,7 @@ static int generic_delete_lease(struct f +@@ -1787,7 +1787,7 @@ static int generic_delete_lease(struct f return error; } @@ -134,7 +134,7 @@ Signed-off-by: Peter Zijlstra (Intel) spin_lock(&ctx->flc_lock); list_for_each_entry(fl, &ctx->flc_lease, fl_list) { if (fl->fl_file == filp && -@@ -1809,7 +1809,7 @@ static int generic_delete_lease(struct f +@@ -1800,7 +1800,7 @@ static int generic_delete_lease(struct f if (victim) error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); spin_unlock(&ctx->flc_lock); @@ -143,7 +143,7 @@ Signed-off-by: Peter Zijlstra (Intel) locks_dispose_list(&dispose); return error; } -@@ -2533,13 +2533,13 @@ locks_remove_lease(struct file *filp, st +@@ -2531,13 +2531,13 @@ locks_remove_lease(struct file *filp, st if (list_empty(&ctx->flc_lease)) return; diff --git a/debian/patches-rt/pid.h-include-atomic.h.patch b/debian/patches-rt/pid.h-include-atomic.h.patch index 83c25da79..473616808 100644 --- a/debian/patches-rt/pid.h-include-atomic.h.patch +++ 
b/debian/patches-rt/pid.h-include-atomic.h.patch @@ -1,7 +1,7 @@ From: Grygorii Strashko Date: Tue, 21 Jul 2015 19:43:56 +0300 Subject: pid.h: include atomic.h -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz This patch fixes build error: CC kernel/pid_namespace.o diff --git a/debian/patches-rt/posix-timers-thread-posix-cpu-timers-on-rt.patch b/debian/patches-rt/posix-timers-thread-posix-cpu-timers-on-rt.patch index b35530498..e8208af19 100644 --- a/debian/patches-rt/posix-timers-thread-posix-cpu-timers-on-rt.patch +++ b/debian/patches-rt/posix-timers-thread-posix-cpu-timers-on-rt.patch @@ -1,7 +1,7 @@ From: John Stultz Date: Fri, 3 Jul 2009 08:29:58 -0500 Subject: posix-timers: Thread posix-cpu-timers on -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz posix-cpu-timer code takes non -rt safe locks in hard irq context. Move it to a thread. @@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -827,6 +827,9 @@ struct task_struct { +@@ -832,6 +832,9 @@ struct task_struct { #ifdef CONFIG_POSIX_TIMERS struct task_cputime cputime_expires; struct list_head cpu_timers[3]; @@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner /* Process credentials: */ --- a/init/init_task.c +++ b/init/init_task.c -@@ -43,6 +43,12 @@ static struct sighand_struct init_sighan +@@ -50,6 +50,12 @@ static struct sighand_struct init_sighan .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh), }; @@ -45,17 +45,17 @@ Signed-off-by: Thomas Gleixner /* * Set up the first task table, touch at your own risk!. Base=0, * limit=0x1fffff (=2MB) -@@ -112,6 +118,7 @@ struct task_struct init_task +@@ -119,6 +125,7 @@ struct task_struct init_task INIT_CPU_TIMERS(init_task) .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock), .timer_slack_ns = 50000, /* 50 usec default slack */ + INIT_TIMER_LIST - .pids = { - [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), - [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), + .thread_pid = &init_struct_pid, + .thread_group = LIST_HEAD_INIT(init_task.thread_group), + .thread_node = LIST_HEAD_INIT(init_signals.thread_head), --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -1563,6 +1563,9 @@ static void rt_mutex_init_task(struct ta +@@ -1575,6 +1575,9 @@ static void rt_mutex_init_task(struct ta */ static void posix_cpu_timers_init(struct task_struct *tsk) { diff --git a/debian/patches-rt/power-disable-highmem-on-rt.patch b/debian/patches-rt/power-disable-highmem-on-rt.patch index e0c073adf..4e738f062 100644 --- a/debian/patches-rt/power-disable-highmem-on-rt.patch +++ b/debian/patches-rt/power-disable-highmem-on-rt.patch @@ -1,7 +1,7 @@ Subject: powerpc: Disable highmem on RT From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:08:34 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The current highmem handling on -RT is not compatible and needs fixups. 
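Illustrative sketch of the underlying conflict, assuming the usual kmap_atomic() contract (the helper below is hypothetical, not from the patch):

	/*
	 * kmap_atomic() enters a preempt-disabled section for the whole
	 * lifetime of the mapping, so nothing in between may sleep --
	 * which collides with RT's sleeping spinlocks.
	 */
	static void copy_to_high_page(struct page *page, const void *src)
	{
		void *kaddr = kmap_atomic(page);	/* implies preempt_disable() */

		memcpy(kaddr, src, PAGE_SIZE);		/* must not sleep in here */
		kunmap_atomic(kaddr);			/* implies preempt_enable() */
	}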
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig -@@ -395,7 +395,7 @@ menu "Kernel options" +@@ -398,7 +398,7 @@ menu "Kernel options" config HIGHMEM bool "High memory support" @@ -20,4 +20,4 @@ Signed-off-by: Thomas Gleixner + depends on PPC32 && !PREEMPT_RT_FULL source kernel/Kconfig.hz - source kernel/Kconfig.preempt + diff --git a/debian/patches-rt/power-use-generic-rwsem-on-rt.patch b/debian/patches-rt/power-use-generic-rwsem-on-rt.patch index 6238f917b..6de5be9a2 100644 --- a/debian/patches-rt/power-use-generic-rwsem-on-rt.patch +++ b/debian/patches-rt/power-use-generic-rwsem-on-rt.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Tue, 14 Jul 2015 14:26:34 +0200 Subject: powerpc: Use generic rwsem on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Use generic code which uses rtmutex diff --git a/debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch index d9cc7e358..97e276b33 100644 --- a/debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch +++ b/debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch @@ -1,7 +1,7 @@ From: Bogdan Purcareata Date: Fri, 24 Apr 2015 15:53:13 +0000 Subject: powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT_FULL -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz While converting the openpic emulation code to use a raw_spinlock_t enables guests to run on RT, there's still a performance issue. For interrupts sent in diff --git a/debian/patches-rt/powerpc-preempt-lazy-support.patch b/debian/patches-rt/powerpc-preempt-lazy-support.patch index e825ba274..846ff9764 100644 --- a/debian/patches-rt/powerpc-preempt-lazy-support.patch +++ b/debian/patches-rt/powerpc-preempt-lazy-support.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Thu, 1 Nov 2012 10:14:11 +0100 Subject: powerpc: Add support for lazy preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Implement the powerpc pieces for lazy preempt. 
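The flag and counter semantics implemented by the assembly hunks below can be sketched in C as follows; the helper name and the explicit thread_info layout are illustrative, while the logic mirrors the patch:

	/*
	 * A task is rescheduled immediately when TIF_NEED_RESCHED is set,
	 * while a TIF_NEED_RESCHED_LAZY request is honoured only outside
	 * lazy-preempt-disabled sections.
	 */
	static inline bool arch_should_resched(struct thread_info *ti)
	{
		if (ti->preempt_count)			/* hard preemption disabled */
			return false;
		if (ti->flags & _TIF_NEED_RESCHED)	/* always honoured */
			return true;
		if (!(ti->flags & _TIF_NEED_RESCHED_LAZY))
			return false;
		return ti->preempt_lazy_count == 0;	/* no lazy section active */
	}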
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig -@@ -215,6 +215,7 @@ config PPC +@@ -216,6 +216,7 @@ config PPC select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP @@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h -@@ -36,6 +36,8 @@ struct thread_info { +@@ -37,6 +37,8 @@ struct thread_info { int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, <0 => BUG */ @@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner unsigned long local_flags; /* private flags for thread */ #ifdef CONFIG_LIVEPATCH unsigned long *livepatch_sp; -@@ -80,7 +82,7 @@ extern int arch_dup_task_struct(struct t +@@ -81,7 +83,7 @@ extern int arch_dup_task_struct(struct t #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_FSCHECK 3 /* Check FS is USER_DS on return */ @@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ #define TIF_PATCH_PENDING 6 /* pending live patching update */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ -@@ -99,6 +101,7 @@ extern int arch_dup_task_struct(struct t +@@ -100,6 +102,7 @@ extern int arch_dup_task_struct(struct t #define TIF_ELF2ABI 18 /* function descriptors must die! */ #endif #define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */ @@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1< #define _TIF_FSCHECK (1< --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S -@@ -873,7 +873,14 @@ user_exc_return: /* r10 contains MSR_KE +@@ -885,7 +885,14 @@ user_exc_return: /* r10 contains MSR_KE cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ bne restore andi. r8,r8,_TIF_NEED_RESCHED @@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner lwz r3,_MSR(r1) andi. r0,r3,MSR_EE /* interrupts off? */ beq restore /* don't schedule if so */ -@@ -884,11 +891,11 @@ user_exc_return: /* r10 contains MSR_KE +@@ -896,11 +903,11 @@ user_exc_return: /* r10 contains MSR_KE */ bl trace_hardirqs_off #endif @@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner #ifdef CONFIG_TRACE_IRQFLAGS /* And now, to properly rebalance the above, we tell lockdep they * are being turned back on, which will happen when we return -@@ -1211,7 +1218,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE +@@ -1223,7 +1230,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ do_work: /* r10 contains MSR_KERNEL here */ @@ -122,7 +122,7 @@ Signed-off-by: Thomas Gleixner beq do_user_signal do_resched: /* r10 contains MSR_KERNEL here */ -@@ -1232,7 +1239,7 @@ do_resched: /* r10 contains MSR_KERNEL +@@ -1244,7 +1251,7 @@ do_resched: /* r10 contains MSR_KERNEL MTMSRD(r10) /* disable interrupts */ CURRENT_THREAD_INFO(r9, r1) lwz r9,TI_FLAGS(r9) @@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner beq restore_user --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S -@@ -168,7 +168,7 @@ system_call: /* label this so stack tr +@@ -171,7 +171,7 @@ system_call: /* label this so stack tr * based on caller's run-mode / personality. 
*/ ld r11,SYS_CALL_TABLE@toc(2) @@ -142,7 +142,7 @@ Signed-off-by: Thomas Gleixner beq 15f addi r11,r11,8 /* use 32-bit syscall entries */ clrldi r3,r3,32 -@@ -707,7 +707,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG +@@ -763,7 +763,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG bl restore_math b restore #endif @@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner beq 2f bl restore_interrupts SCHEDULE_USER -@@ -769,10 +769,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG +@@ -825,10 +825,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG #ifdef CONFIG_PREEMPT /* Check if we need to preempt */ @@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner cmpwi cr0,r8,0 bne restore ld r0,SOFTE(r1) -@@ -789,7 +797,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG +@@ -845,7 +853,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG /* Re-test flags and eventually loop */ CURRENT_THREAD_INFO(r9, r1) ld r4,TI_FLAGS(r9) diff --git a/debian/patches-rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch b/debian/patches-rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch deleted file mode 100644 index 60125dfa2..000000000 --- a/debian/patches-rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch +++ /dev/null @@ -1,32 +0,0 @@ -From: Paul Gortmaker -Date: Sun, 31 May 2015 14:44:42 -0400 -Subject: powerpc: ps3/device-init.c - adapt to completions using swait vs wait -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -To fix: - - cc1: warnings being treated as errors - arch/powerpc/platforms/ps3/device-init.c: In function 'ps3_notification_read_write': - arch/powerpc/platforms/ps3/device-init.c:755:2: error: passing argument 1 of 'prepare_to_wait_event' from incompatible pointer type - arch/powerpc/platforms/ps3/device-init.c:755:2: error: passing argument 1 of 'abort_exclusive_wait' from incompatible pointer type - arch/powerpc/platforms/ps3/device-init.c:755:2: error: passing argument 1 of 'finish_wait' from incompatible pointer type - arch/powerpc/platforms/ps3/device-init.o] Error 1 - make[3]: *** Waiting for unfinished jobs.... - -Signed-off-by: Paul Gortmaker -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/powerpc/platforms/ps3/device-init.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/powerpc/platforms/ps3/device-init.c -+++ b/arch/powerpc/platforms/ps3/device-init.c -@@ -752,7 +752,7 @@ static int ps3_notification_read_write(s - } - pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op); - -- res = wait_event_interruptible(dev->done.wait, -+ res = swait_event_interruptible(dev->done.wait, - dev->done.done || kthread_should_stop()); - if (kthread_should_stop()) - res = -EINTR; diff --git a/debian/patches-rt/preempt-lazy-support.patch b/debian/patches-rt/preempt-lazy-support.patch index 080e17013..3e6609f48 100644 --- a/debian/patches-rt/preempt-lazy-support.patch +++ b/debian/patches-rt/preempt-lazy-support.patch @@ -1,7 +1,7 @@ Subject: sched: Add support for lazy preemption From: Thomas Gleixner Date: Fri, 26 Oct 2012 18:50:54 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz It has become an obsession to mitigate the determinism vs. throughput loss of RT. 
Looking at the mainline semantics of preemption points @@ -141,7 +141,7 @@ Signed-off-by: Thomas Gleixner --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1723,6 +1723,44 @@ static inline int test_tsk_need_resched( +@@ -1715,6 +1715,44 @@ static inline int test_tsk_need_resched( return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } @@ -234,7 +234,7 @@ Signed-off-by: Thomas Gleixner default PREEMPT_NONE --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -498,6 +498,48 @@ void resched_curr(struct rq *rq) +@@ -492,6 +492,48 @@ void resched_curr(struct rq *rq) trace_sched_wake_idle_without_ipi(cpu); } @@ -283,7 +283,7 @@ Signed-off-by: Thomas Gleixner void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); -@@ -2428,6 +2470,9 @@ int sched_fork(unsigned long clone_flags +@@ -2404,6 +2446,9 @@ int sched_fork(unsigned long clone_flags p->on_cpu = 0; #endif init_task_preempt_count(p); @@ -293,7 +293,7 @@ Signed-off-by: Thomas Gleixner #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); RB_CLEAR_NODE(&p->pushable_dl_tasks); -@@ -3493,6 +3538,7 @@ static void __sched notrace __schedule(b +@@ -3471,6 +3516,7 @@ static void __sched notrace __schedule(b next = pick_next_task(rq, prev, &rf); clear_tsk_need_resched(prev); @@ -301,7 +301,7 @@ Signed-off-by: Thomas Gleixner clear_preempt_need_resched(); if (likely(prev != next)) { -@@ -3673,6 +3719,30 @@ static void __sched notrace preempt_sche +@@ -3651,6 +3697,30 @@ static void __sched notrace preempt_sche } while (need_resched()); } @@ -332,7 +332,7 @@ Signed-off-by: Thomas Gleixner #ifdef CONFIG_PREEMPT /* * this is the entry point to schedule() from in-kernel preemption -@@ -3687,7 +3757,8 @@ asmlinkage __visible void __sched notrac +@@ -3665,7 +3735,8 @@ asmlinkage __visible void __sched notrac */ if (likely(!preemptible())) return; @@ -342,7 +342,7 @@ Signed-off-by: Thomas Gleixner preempt_schedule_common(); } NOKPROBE_SYMBOL(preempt_schedule); -@@ -3714,6 +3785,9 @@ asmlinkage __visible void __sched notrac +@@ -3692,6 +3763,9 @@ asmlinkage __visible void __sched notrac if (likely(!preemptible())) return; @@ -352,7 +352,7 @@ Signed-off-by: Thomas Gleixner do { /* * Because the function tracer can trace preempt_count_sub() -@@ -5482,7 +5556,9 @@ void init_idle(struct task_struct *idle, +@@ -5460,7 +5534,9 @@ void init_idle(struct task_struct *idle, /* Set the preempt count _outside_ the spinlocks! 
*/ init_idle_preempt_count(idle, cpu); @@ -363,7 +363,7 @@ Signed-off-by: Thomas Gleixner /* * The idle tasks have their own, simple scheduling class: */ -@@ -7198,6 +7274,7 @@ void migrate_disable(void) +@@ -7176,6 +7252,7 @@ void migrate_disable(void) } preempt_disable(); @@ -371,7 +371,7 @@ Signed-off-by: Thomas Gleixner pin_current_cpu(); migrate_disable_update_cpus_allowed(p); -@@ -7265,6 +7342,7 @@ void migrate_enable(void) +@@ -7243,6 +7320,7 @@ void migrate_enable(void) arg.dest_cpu = dest_cpu; unpin_current_cpu(); @@ -379,7 +379,7 @@ Signed-off-by: Thomas Gleixner preempt_enable(); stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); tlb_migrate_finish(p->mm); -@@ -7273,6 +7351,7 @@ void migrate_enable(void) +@@ -7251,6 +7329,7 @@ void migrate_enable(void) } } unpin_current_cpu(); @@ -389,7 +389,7 @@ Signed-off-by: Thomas Gleixner EXPORT_SYMBOL(migrate_enable); --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4336,7 +4336,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq +@@ -4018,7 +4018,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { @@ -398,7 +398,7 @@ Signed-off-by: Thomas Gleixner /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. -@@ -4360,7 +4360,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq +@@ -4042,7 +4042,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq return; if (delta > ideal_runtime) @@ -407,7 +407,7 @@ Signed-off-by: Thomas Gleixner } static void -@@ -4502,7 +4502,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc +@@ -4184,7 +4184,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc * validating it and just reschedule. */ if (queued) { @@ -416,7 +416,7 @@ Signed-off-by: Thomas Gleixner return; } /* -@@ -4686,7 +4686,7 @@ static void __account_cfs_rq_runtime(str +@@ -4368,7 +4368,7 @@ static void __account_cfs_rq_runtime(str * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) @@ -425,7 +425,7 @@ Signed-off-by: Thomas Gleixner } static __always_inline -@@ -5340,7 +5340,7 @@ static void hrtick_start_fair(struct rq +@@ -5037,7 +5037,7 @@ static void hrtick_start_fair(struct rq if (delta < 0) { if (rq->curr == p) @@ -434,7 +434,7 @@ Signed-off-by: Thomas Gleixner return; } hrtick_start(rq, delta); -@@ -6881,7 +6881,7 @@ static void check_preempt_wakeup(struct +@@ -6578,7 +6578,7 @@ static void check_preempt_wakeup(struct return; preempt: @@ -443,7 +443,7 @@ Signed-off-by: Thomas Gleixner /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved -@@ -9967,7 +9967,7 @@ static void task_fork_fair(struct task_s +@@ -9689,7 +9689,7 @@ static void task_fork_fair(struct task_s * 'current' within the tree based on its new key value. 
*/ swap(curr->vruntime, se->vruntime); @@ -452,7 +452,7 @@ Signed-off-by: Thomas Gleixner } se->vruntime -= cfs_rq->min_vruntime; -@@ -9991,7 +9991,7 @@ prio_changed_fair(struct rq *rq, struct +@@ -9713,7 +9713,7 @@ prio_changed_fair(struct rq *rq, struct */ if (rq->curr == p) { if (p->prio > oldprio) @@ -475,7 +475,7 @@ Signed-off-by: Thomas Gleixner /* --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h -@@ -1613,6 +1613,15 @@ extern void reweight_task(struct task_st +@@ -1641,6 +1641,15 @@ extern void reweight_task(struct task_st extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); @@ -493,7 +493,7 @@ Signed-off-by: Thomas Gleixner --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -2133,6 +2133,7 @@ tracing_generic_entry_update(struct trac +@@ -2134,6 +2134,7 @@ tracing_generic_entry_update(struct trac struct task_struct *tsk = current; entry->preempt_count = pc & 0xff; @@ -501,7 +501,7 @@ Signed-off-by: Thomas Gleixner entry->pid = (tsk) ? tsk->pid : 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT -@@ -2143,7 +2144,8 @@ tracing_generic_entry_update(struct trac +@@ -2144,7 +2145,8 @@ tracing_generic_entry_update(struct trac ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | @@ -511,7 +511,7 @@ Signed-off-by: Thomas Gleixner (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0; -@@ -3345,15 +3347,17 @@ get_total_entries(struct trace_buffer *b +@@ -3346,15 +3348,17 @@ get_total_entries(struct trace_buffer *b static void print_lat_help_header(struct seq_file *m) { @@ -538,7 +538,7 @@ Signed-off-by: Thomas Gleixner } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) -@@ -3389,15 +3393,17 @@ static void print_func_help_header_irq(s +@@ -3390,15 +3394,17 @@ static void print_func_help_header_irq(s tgid ? tgid_space : space); seq_printf(m, "# %s / _----=> need-resched\n", tgid ? tgid_space : space); @@ -581,7 +581,7 @@ Signed-off-by: Thomas Gleixner #define TRACE_BUF_SIZE 1024 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c -@@ -447,6 +447,7 @@ int trace_print_lat_fmt(struct trace_seq +@@ -448,6 +448,7 @@ int trace_print_lat_fmt(struct trace_seq { char hardsoft_irq; char need_resched; @@ -589,7 +589,7 @@ Signed-off-by: Thomas Gleixner char irqs_off; int hardirq; int softirq; -@@ -477,6 +478,9 @@ int trace_print_lat_fmt(struct trace_seq +@@ -478,6 +479,9 @@ int trace_print_lat_fmt(struct trace_seq break; } @@ -599,7 +599,7 @@ Signed-off-by: Thomas Gleixner hardsoft_irq = (nmi && hardirq) ? 'Z' : nmi ? 'z' : -@@ -485,14 +489,20 @@ int trace_print_lat_fmt(struct trace_seq +@@ -486,14 +490,20 @@ int trace_print_lat_fmt(struct trace_seq softirq ? 's' : '.' ; diff --git a/debian/patches-rt/preempt-nort-rt-variants.patch b/debian/patches-rt/preempt-nort-rt-variants.patch index e1f9bc37f..f5eb99645 100644 --- a/debian/patches-rt/preempt-nort-rt-variants.patch +++ b/debian/patches-rt/preempt-nort-rt-variants.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Fri, 24 Jul 2009 12:38:56 +0200 Subject: preempt: Provide preempt_*_(no)rt variants -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz RT needs a few preempt_disable/enable points which are not necessary otherwise. Implement variants to avoid #ifdeffery. 
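The variants compile to the real preempt_disable()/preempt_enable() on one configuration and to a plain barrier() on the other; the resulting definitions are essentially the following (a sketch -- exact placement in include/linux/preempt.h may differ):

	#ifdef CONFIG_PREEMPT_RT_FULL
	# define preempt_disable_rt()		preempt_disable()
	# define preempt_enable_rt()		preempt_enable()
	# define preempt_disable_nort()	barrier()
	# define preempt_enable_nort()		barrier()
	#else
	# define preempt_disable_rt()		barrier()
	# define preempt_enable_rt()		barrier()
	# define preempt_disable_nort()	preempt_disable()
	# define preempt_enable_nort()		preempt_enable()
	#endif

Callers use the _rt() variants for sections that are only needed on RT and the _nort() variants for sections that RT makes redundant, keeping the #ifdefs out of the call sites.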
diff --git a/debian/patches-rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/debian/patches-rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch index ec4369f53..8bf0b67cf 100644 --- a/debian/patches-rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch +++ b/debian/patches-rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch @@ -1,7 +1,7 @@ Subject: printk: Add "force_early_printk" boot param to help with debugging From: Peter Zijlstra Date: Fri, 02 Sep 2011 14:41:29 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Gives me an option to screw printk and actually see what the machine says. @@ -16,7 +16,7 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c -@@ -434,6 +434,13 @@ asmlinkage void early_printk(const char +@@ -435,6 +435,13 @@ asmlinkage void early_printk(const char */ static bool __read_mostly printk_killswitch; diff --git a/debian/patches-rt/printk-kill.patch b/debian/patches-rt/printk-kill.patch index 7199c5489..e4c84e783 100644 --- a/debian/patches-rt/printk-kill.patch +++ b/debian/patches-rt/printk-kill.patch @@ -1,7 +1,7 @@ Subject: printk: Add a printk kill switch From: Ingo Molnar Date: Fri, 22 Jul 2011 17:58:40 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Add a prinkt-kill-switch. This is used from (NMI) watchdog to ensure that it does not dead-lock with the early printk code. @@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner #ifdef CONFIG_PRINTK_NMI --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c -@@ -404,6 +404,58 @@ DEFINE_RAW_SPINLOCK(logbuf_lock); +@@ -405,6 +405,58 @@ DEFINE_RAW_SPINLOCK(logbuf_lock); printk_safe_exit_irqrestore(flags); \ } while (0) @@ -88,7 +88,7 @@ Signed-off-by: Thomas Gleixner #ifdef CONFIG_PRINTK DECLARE_WAIT_QUEUE_HEAD(log_wait); /* the next printk record to read by syslog(READ) or /proc/kmsg */ -@@ -1886,6 +1938,13 @@ asmlinkage int vprintk_emit(int facility +@@ -1892,6 +1944,13 @@ asmlinkage int vprintk_emit(int facility bool in_sched = false; unsigned long flags; @@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner if (level == LOGLEVEL_SCHED) { level = LOGLEVEL_DEFAULT; in_sched = true; -@@ -2026,26 +2085,6 @@ static bool suppress_message_printing(in +@@ -2032,26 +2091,6 @@ static bool suppress_message_printing(in #endif /* CONFIG_PRINTK */ diff --git a/debian/patches-rt/printk-rt-aware.patch b/debian/patches-rt/printk-rt-aware.patch index bf4458afe..4e77e7af7 100644 --- a/debian/patches-rt/printk-rt-aware.patch +++ b/debian/patches-rt/printk-rt-aware.patch @@ -1,7 +1,7 @@ Subject: printk: Make rt aware From: Thomas Gleixner Date: Wed, 19 Sep 2012 14:50:37 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Drop the lock before calling the console driver and do not disable interrupts while printing to a serial console. 
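In plain C, the console-write path the hunks below create looks roughly like this simplified rendition (the enable/suspend checks of the real function are folded down to a single test):

	static void call_console_drivers(const char *text, size_t len)
	{
		struct console *con;

		if (!console_drivers)
			return;

		migrate_disable();	/* stay on one CPU, yet remain preemptible */
		for_each_console(con)
			if ((con->flags & CON_ENABLED) && con->write)
				con->write(con, text, len);
		migrate_enable();
	}

The caller re-enables interrupts first (printk_safe_exit_irqrestore() before the call), so a slow serial console no longer runs with interrupts disabled.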
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c -@@ -1606,6 +1606,7 @@ SYSCALL_DEFINE3(syslog, int, type, char +@@ -1612,6 +1612,7 @@ SYSCALL_DEFINE3(syslog, int, type, char return do_syslog(type, buf, len, SYSLOG_FROM_READER); } @@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner /* * Special console_lock variants that help to reduce the risk of soft-lockups. * They allow to pass console_lock to another printk() call using a busy wait. -@@ -1746,6 +1747,15 @@ static int console_trylock_spinning(void +@@ -1752,6 +1753,15 @@ static int console_trylock_spinning(void return 1; } @@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner /* * Call the console drivers, asking them to write out * log_buf[start] to log_buf[end - 1]. -@@ -1761,6 +1771,7 @@ static void call_console_drivers(const c +@@ -1767,6 +1777,7 @@ static void call_console_drivers(const c if (!console_drivers) return; @@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner for_each_console(con) { if (exclusive_console && con != exclusive_console) continue; -@@ -1776,6 +1787,7 @@ static void call_console_drivers(const c +@@ -1782,6 +1793,7 @@ static void call_console_drivers(const c else con->write(con, text, len); } @@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner } int printk_delay_msec __read_mostly; -@@ -1967,20 +1979,30 @@ asmlinkage int vprintk_emit(int facility +@@ -1973,20 +1985,30 @@ asmlinkage int vprintk_emit(int facility /* If called from the scheduler, we can not call up(). */ if (!in_sched) { @@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner } wake_up_klogd(); -@@ -2432,6 +2454,10 @@ void console_unlock(void) +@@ -2439,6 +2461,10 @@ void console_unlock(void) console_seq++; raw_spin_unlock(&logbuf_lock); @@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner /* * While actively printing out messages, if another printk() * were to occur on another CPU, it may wait for this one to -@@ -2450,6 +2476,7 @@ void console_unlock(void) +@@ -2457,6 +2483,7 @@ void console_unlock(void) } printk_safe_exit_irqrestore(flags); diff --git a/debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch index 43359a829..c773063f5 100644 --- a/debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch +++ b/debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 29 Aug 2013 18:21:04 +0200 Subject: ptrace: fix ptrace vs tasklist_lock race -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz As explained by Alexander Fyodorov : @@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior #define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ (task->flags & PF_FROZEN) == 0 && \ (task->state & TASK_NOLOAD) == 0) -@@ -1707,6 +1703,51 @@ static inline int test_tsk_need_resched( +@@ -1699,6 +1695,51 @@ static inline int test_tsk_need_resched( return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } @@ -116,9 +116,9 @@ Signed-off-by: Sebastian Andrzej Siewior spin_unlock_irq(&task->sighand->siglock); --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -1367,6 +1367,18 @@ int migrate_swap(struct task_struct *cur - return ret; +@@ -1348,6 +1348,18 @@ int migrate_swap(struct task_struct *cur } + #endif /* CONFIG_NUMA_BALANCING */ +static bool check_task_state(struct task_struct *p, long 
match_state) +{ @@ -135,7 +135,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * wait_task_inactive - wait for a thread to unschedule. * -@@ -1411,7 +1423,7 @@ unsigned long wait_task_inactive(struct +@@ -1392,7 +1404,7 @@ unsigned long wait_task_inactive(struct * is actually now running somewhere else! */ while (task_running(rq, p)) { @@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior return 0; cpu_relax(); } -@@ -1426,7 +1438,8 @@ unsigned long wait_task_inactive(struct +@@ -1407,7 +1419,8 @@ unsigned long wait_task_inactive(struct running = task_running(rq, p); queued = task_on_rq_queued(p); ncsw = 0; diff --git a/debian/patches-rt/radix-tree-use-local-locks.patch b/debian/patches-rt/radix-tree-use-local-locks.patch index 685246ff7..9c9deb410 100644 --- a/debian/patches-rt/radix-tree-use-local-locks.patch +++ b/debian/patches-rt/radix-tree-use-local-locks.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 25 Jan 2017 16:34:27 +0100 Subject: [PATCH] radix-tree: use local locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The preload functionality uses per-CPU variables and preempt-disable to ensure that it does not switch CPUs during its usage. This patch adds @@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/include/linux/idr.h +++ b/include/linux/idr.h -@@ -158,10 +158,7 @@ static inline bool idr_is_empty(const st +@@ -169,10 +169,7 @@ static inline bool idr_is_empty(const st * Each idr_preload() should be matched with an invocation of this * function. See idr_preload() for details. */ @@ -152,10 +152,10 @@ Signed-off-by: Sebastian Andrzej Siewior +} +EXPORT_SYMBOL(idr_preload_end); + - /** - * ida_pre_get - reserve resources for ida allocation - * @ida: ida handle -@@ -2122,7 +2136,7 @@ int ida_pre_get(struct ida *ida, gfp_t g + int ida_pre_get(struct ida *ida, gfp_t gfp) + { + /* +@@ -2114,7 +2128,7 @@ int ida_pre_get(struct ida *ida, gfp_t g * to return to the ida_pre_get() step. */ if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE)) diff --git a/debian/patches-rt/random-Remove-preempt-disabled-region.patch b/debian/patches-rt/random-Remove-preempt-disabled-region.patch deleted file mode 100644 index 02d880c12..000000000 --- a/debian/patches-rt/random-Remove-preempt-disabled-region.patch +++ /dev/null @@ -1,48 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:30 -0500 -Subject: [PATCH] random: Remove preempt disabled region -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -No need to keep preemption disabled across the whole function. - -mix_pool_bytes() uses a spin_lock() to protect the pool and there are -other places like write_pool() whhich invoke mix_pool_bytes() without -disabling preemption. -credit_entropy_bits() is invoked from other places like -add_hwgenerator_randomness() without disabling preemption. - -Before commit 95b709b6be49 ("random: drop trickle mode") the function -used __this_cpu_inc_return() which would require disabled preemption. -The preempt_disable() section was added in commit 43d5d3018c37 ("[PATCH] -random driver preempt robustness", history tree). It was claimed that -the code relied on "vt_ioctl() being called under BKL". 
- -Cc: "Theodore Ts'o" -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner -[bigeasy: enhance the commit message] -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/char/random.c | 4 ---- - 1 file changed, 4 deletions(-) - ---- a/drivers/char/random.c -+++ b/drivers/char/random.c -@@ -1122,8 +1122,6 @@ static void add_timer_randomness(struct - } sample; - long delta, delta2, delta3; - -- preempt_disable(); -- - sample.jiffies = jiffies; - sample.cycles = random_get_entropy(); - sample.num = num; -@@ -1161,8 +1159,6 @@ static void add_timer_randomness(struct - * and limit entropy entimate to 12 bits. - */ - credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); -- -- preempt_enable(); - } - - void add_input_randomness(unsigned int type, unsigned int code, diff --git a/debian/patches-rt/random-avoid-preempt_disable-ed-section.patch b/debian/patches-rt/random-avoid-preempt_disable-ed-section.patch index 91762b3bc..75832b6b0 100644 --- a/debian/patches-rt/random-avoid-preempt_disable-ed-section.patch +++ b/debian/patches-rt/random-avoid-preempt_disable-ed-section.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 12 May 2017 15:46:17 +0200 Subject: [PATCH] random: avoid preempt_disable()ed section -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz extract_crng() will use sleeping locks while in a preempt_disable() section due to get_cpu_var(). @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior #include #include -@@ -2193,6 +2194,7 @@ static rwlock_t batched_entropy_reset_lo +@@ -2223,6 +2224,7 @@ static rwlock_t batched_entropy_reset_lo * at any point prior. */ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); @@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior u64 get_random_u64(void) { u64 ret; -@@ -2213,7 +2215,7 @@ u64 get_random_u64(void) +@@ -2243,7 +2245,7 @@ u64 get_random_u64(void) warn_unseeded_randomness(&previous); use_lock = READ_ONCE(crng_init) < 2; @@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (use_lock) read_lock_irqsave(&batched_entropy_reset_lock, flags); if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { -@@ -2223,12 +2225,13 @@ u64 get_random_u64(void) +@@ -2253,12 +2255,13 @@ u64 get_random_u64(void) ret = batch->entropy_u64[batch->position++]; if (use_lock) read_unlock_irqrestore(&batched_entropy_reset_lock, flags); @@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior u32 get_random_u32(void) { u32 ret; -@@ -2243,7 +2246,7 @@ u32 get_random_u32(void) +@@ -2273,7 +2276,7 @@ u32 get_random_u32(void) warn_unseeded_randomness(&previous); use_lock = READ_ONCE(crng_init) < 2; @@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (use_lock) read_lock_irqsave(&batched_entropy_reset_lock, flags); if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { -@@ -2253,7 +2256,7 @@ u32 get_random_u32(void) +@@ -2283,7 +2286,7 @@ u32 get_random_u32(void) ret = batch->entropy_u32[batch->position++]; if (use_lock) read_unlock_irqrestore(&batched_entropy_reset_lock, flags); diff --git a/debian/patches-rt/random-make-it-work-on-rt.patch b/debian/patches-rt/random-make-it-work-on-rt.patch index 98f37b15f..865bb35ae 100644 --- a/debian/patches-rt/random-make-it-work-on-rt.patch +++ b/debian/patches-rt/random-make-it-work-on-rt.patch @@ -1,7 +1,7 @@ Subject: random: Make it work on rt From: Thomas Gleixner Date: Tue, 21 Aug 2012 20:38:50 +0200 -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Delegate the random insertion to the forced threaded interrupt handler. Store the return IP of the hard interrupt handler in the irq @@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner --- a/drivers/char/random.c +++ b/drivers/char/random.c -@@ -1215,28 +1215,27 @@ static __u32 get_reg(struct fast_pool *f +@@ -1229,28 +1229,27 @@ static __u32 get_reg(struct fast_pool *f return *ptr; } @@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner static int hv_ce_set_next_event(unsigned long delta, --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c -@@ -972,6 +972,8 @@ static void vmbus_isr(void) +@@ -991,6 +991,8 @@ static void vmbus_isr(void) void *page_addr = hv_cpu->synic_event_page; struct hv_message *msg; union hv_synic_event_flags *event; @@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner bool handled = false; if (unlikely(page_addr == NULL)) -@@ -1015,7 +1017,7 @@ static void vmbus_isr(void) +@@ -1034,7 +1036,7 @@ static void vmbus_isr(void) tasklet_schedule(&hv_cpu->msg_dpc); } @@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner + add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip); } - + /* --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -70,6 +70,7 @@ struct irq_desc { @@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner note_interrupt(desc, retval); --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -1065,6 +1065,12 @@ static int irq_thread(void *data) +@@ -1072,6 +1072,12 @@ static int irq_thread(void *data) if (action_ret == IRQ_WAKE_THREAD) irq_wake_secondary(desc, action); diff --git a/debian/patches-rt/rcu-Eliminate-softirq-processing-from-rcutree.patch b/debian/patches-rt/rcu-Eliminate-softirq-processing-from-rcutree.patch index 4dfac63ca..ac93f6ff9 100644 --- a/debian/patches-rt/rcu-Eliminate-softirq-processing-from-rcutree.patch +++ b/debian/patches-rt/rcu-Eliminate-softirq-processing-from-rcutree.patch @@ -1,7 +1,7 @@ From: "Paul E. McKenney" Date: Mon, 4 Nov 2013 13:21:10 -0800 Subject: rcu: Eliminate softirq processing from rcutree -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Running RCU out of softirq is a problem for some workloads that would like to manage RCU core processing independently of other softirq work, @@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c -@@ -58,6 +58,13 @@ +@@ -61,6 +61,13 @@ #include #include #include @@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior #include "tree.h" #include "rcu.h" -@@ -2819,18 +2826,17 @@ static void +@@ -2879,18 +2886,17 @@ static void /* * Do RCU core processing for the current CPU. */ @@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Schedule RCU callback invocation. If the specified type of RCU * does not support RCU priority boosting, just do a direct call, -@@ -2842,18 +2848,105 @@ static void invoke_rcu_callbacks(struct +@@ -2902,18 +2908,105 @@ static void invoke_rcu_callbacks(struct { if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) return; @@ -171,7 +171,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Handle any core-RCU processing required by a call_rcu() invocation. 
-@@ -4122,7 +4215,6 @@ void __init rcu_init(void) +@@ -4179,7 +4272,6 @@ void __init rcu_init(void) if (dump_tree) rcu_dump_rcu_node_tree(&rcu_sched_state); __rcu_init_preempt(); @@ -181,7 +181,7 @@ Signed-off-by: Sebastian Andrzej Siewior * We don't need protection against CPU-hotplug here because --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h -@@ -432,12 +432,10 @@ extern struct rcu_state rcu_preempt_stat +@@ -423,12 +423,10 @@ extern struct rcu_state rcu_preempt_stat int rcu_dynticks_snap(struct rcu_dynticks *rdtp); @@ -194,8 +194,8 @@ Signed-off-by: Sebastian Andrzej Siewior #ifndef RCU_TREE_NONCORE -@@ -457,8 +455,8 @@ void call_rcu(struct rcu_head *head, rcu - static void __init __rcu_init_preempt(void); +@@ -451,8 +449,8 @@ static void dump_blkd_tasks(struct rcu_s + int ncheck); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); -static void invoke_rcu_callbacks_kthread(void); @@ -249,7 +249,7 @@ Signed-off-by: Sebastian Andrzej Siewior #ifdef CONFIG_RCU_NOCB_CPU static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */ -@@ -956,18 +930,21 @@ void exit_rcu(void) +@@ -1027,18 +1001,21 @@ dump_blkd_tasks(struct rcu_state *rsp, s #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ @@ -279,7 +279,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Carry out RCU priority boosting on the task indicated by ->exp_tasks * or ->boost_tasks, advancing the pointer to the next task in the -@@ -1106,23 +1083,6 @@ static void rcu_initiate_boost(struct rc +@@ -1177,23 +1154,6 @@ static void rcu_initiate_boost(struct rc } /* @@ -303,7 +303,7 @@ Signed-off-by: Sebastian Andrzej Siewior * Is the current CPU running the RCU-callbacks kthread? * Caller must have preemption disabled. */ -@@ -1176,67 +1136,6 @@ static int rcu_spawn_one_boost_kthread(s +@@ -1247,67 +1207,6 @@ static int rcu_spawn_one_boost_kthread(s return 0; } @@ -371,7 +371,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Set the per-rcu_node kthread's affinity to cover all CPUs that are * served by the rcu_node in question. The CPU hotplug lock is still -@@ -1267,26 +1166,12 @@ static void rcu_boost_kthread_setaffinit +@@ -1338,26 +1237,12 @@ static void rcu_boost_kthread_setaffinit free_cpumask_var(cm); } @@ -398,7 +398,7 @@ Signed-off-by: Sebastian Andrzej Siewior rcu_for_each_leaf_node(rcu_state_p, rnp) (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); } -@@ -1309,11 +1194,6 @@ static void rcu_initiate_boost(struct rc +@@ -1380,11 +1265,6 @@ static void rcu_initiate_boost(struct rc raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } diff --git a/debian/patches-rt/rcu-Use-cpus_read_lock-while-looking-at-cpu_online_m.patch b/debian/patches-rt/rcu-Use-cpus_read_lock-while-looking-at-cpu_online_m.patch deleted file mode 100644 index 7e41f3fe9..000000000 --- a/debian/patches-rt/rcu-Use-cpus_read_lock-while-looking-at-cpu_online_m.patch +++ /dev/null @@ -1,61 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Mon, 10 Sep 2018 14:58:37 +0200 -Subject: [PATCH] rcu: Use cpus_read_lock() while looking at cpu_online_mask -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -It was possible that sync_rcu_exp_select_cpus() enqueued something on -CPU0 while CPU0 was offline. Such a work item wouldn't be processed -until CPU0 gets back online. 
This problem was addressed in commit -fcc6354365015 ("rcu: Make expedited GPs handle CPU 0 being offline"). I -don't think the issue fully addressed. - -Assume grplo = 0 and grphi = 7 and sync_rcu_exp_select_cpus() is invoked -on CPU1. The preempt_disable() section on CPU1 won't ensure that CPU0 -remains online between looking at cpu_online_mask and invoking -queue_work_on() on CPU1. - -Use cpus_read_lock() to ensure that `cpu' is not going down between -looking at cpu_online_mask at invoking queue_work_on() and waiting for -its completion. It is added around the loop + flush_work() which is -similar to work_on_cpu_safe() (and we can have multiple jobs running on -NUMA systems). - -Fixes: fcc6354365015 ("rcu: Make expedited GPs handle CPU 0 being - offline") -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/rcu/tree_exp.h | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/kernel/rcu/tree_exp.h -+++ b/kernel/rcu/tree_exp.h -@@ -479,6 +479,7 @@ static void sync_rcu_exp_select_cpus(str - sync_exp_reset_tree(rsp); - trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select")); - -+ cpus_read_lock(); - /* Schedule work for each leaf rcu_node structure. */ - rcu_for_each_leaf_node(rsp, rnp) { - rnp->exp_need_flush = false; -@@ -493,13 +494,11 @@ static void sync_rcu_exp_select_cpus(str - continue; - } - INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); -- preempt_disable(); - cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask); - /* If all offline, queue the work on an unbound CPU. */ - if (unlikely(cpu > rnp->grphi)) - cpu = WORK_CPU_UNBOUND; - queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); -- preempt_enable(); - rnp->exp_need_flush = true; - } - -@@ -507,6 +506,7 @@ static void sync_rcu_exp_select_cpus(str - rcu_for_each_leaf_node(rsp, rnp) - if (rnp->exp_need_flush) - flush_work(&rnp->rew.rew_work); -+ cpus_read_unlock(); - } - - static void synchronize_sched_expedited_wait(struct rcu_state *rsp) diff --git a/debian/patches-rt/rcu-disable-rcu-fast-no-hz-on-rt.patch b/debian/patches-rt/rcu-disable-rcu-fast-no-hz-on-rt.patch index e639a2b0b..28c7bb597 100644 --- a/debian/patches-rt/rcu-disable-rcu-fast-no-hz-on-rt.patch +++ b/debian/patches-rt/rcu-disable-rcu-fast-no-hz-on-rt.patch @@ -1,7 +1,7 @@ Subject: rcu: Disable RCU_FAST_NO_HZ on RT From: Thomas Gleixner Date: Sun, 28 Oct 2012 13:26:09 +0000 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz This uses a timer_list timer from the irq disabled guts of the idle code. Disable it for now to prevent wreckage. 
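The rcu-Use-cpus_read_lock-while-looking-at-cpu_online_m.patch dropped above (presumably superseded by the 4.19 rebase) describes a classic check-then-use race: a CPU can look online in cpu_online_mask yet go offline before queue_work_on() runs. A minimal userspace sketch of the same pattern, with a pthread rwlock standing in for the hotplug lock; every name below is illustrative, not kernel API:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 8

    /* stands in for cpus_read_lock()/cpus_write_lock() */
    static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
    static bool cpu_online[NR_CPUS] = { true, true, true, true };

    static int next_online_cpu(int from)
    {
            for (int cpu = from; cpu < NR_CPUS; cpu++)
                    if (cpu_online[cpu])
                            return cpu;
            return -1;                      /* all offline */
    }

    static void queue_work_on_cpu(int cpu)
    {
            printf("work queued on cpu %d\n", cpu);
    }

    void select_and_queue(int grplo)
    {
            /* Hold the read side across the whole check+queue window,
             * which is what cpus_read_lock() buys in the kernel. */
            pthread_rwlock_rdlock(&hotplug_lock);
            int cpu = next_online_cpu(grplo);
            queue_work_on_cpu(cpu >= 0 ? cpu : 0); /* 0 ~ WORK_CPU_UNBOUND */
            pthread_rwlock_unlock(&hotplug_lock);
    }

Without the read lock, an offlining writer can flip cpu_online[] between the mask check and the queueing, which is exactly the window the dropped patch closed.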
diff --git a/debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch index 883a0c92c..3d6e68055 100644 --- a/debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch +++ b/debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch @@ -1,7 +1,7 @@ From: Julia Cartwright Date: Wed, 12 Oct 2016 11:21:14 -0500 Subject: [PATCH] rcu: enable rcu_normal_after_boot by default for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The forcing of an expedited grace period is an expensive and very RT-application unfriendly operation, as it forcibly preempts all running diff --git a/debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch b/debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch index 673193257..5e0aadb0f 100644 --- a/debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch +++ b/debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 21 Mar 2014 20:19:05 +0100 Subject: rcu: make RCU_BOOST default on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Since it is no longer invoked from the softirq people run into OOM more often if the priority of the RCU thread is too low. Making boosting diff --git a/debian/patches-rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/debian/patches-rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch index 6dff2f248..2937f8c3c 100644 --- a/debian/patches-rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch +++ b/debian/patches-rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch @@ -1,7 +1,7 @@ Subject: rcu: Merge RCU-bh into RCU-preempt Date: Wed, 5 Oct 2011 11:59:38 -0700 From: Thomas Gleixner -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The Linux kernel has long RCU-bh read-side critical sections that intolerably increase scheduling latency under mainline's RCU-bh rules, @@ -27,12 +27,12 @@ Signed-off-by: Thomas Gleixner --- include/linux/rcupdate.h | 19 +++++++++++++++++++ include/linux/rcutree.h | 8 ++++++++ - kernel/rcu/rcu.h | 14 +++++++++++--- + kernel/rcu/rcu.h | 11 +++++++++-- kernel/rcu/rcutorture.c | 7 +++++++ - kernel/rcu/tree.c | 24 ++++++++++++++++++++++++ + kernel/rcu/tree.c | 22 ++++++++++++++++++++++ kernel/rcu/tree.h | 2 ++ kernel/rcu/update.c | 2 ++ - 7 files changed, 73 insertions(+), 3 deletions(-) + 7 files changed, 69 insertions(+), 2 deletions(-) --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); void synchronize_sched(void); void rcu_barrier_tasks(void); -@@ -264,7 +268,14 @@ extern struct lockdep_map rcu_sched_lock +@@ -263,7 +267,14 @@ extern struct lockdep_map rcu_sched_lock extern struct lockdep_map rcu_callback_map; int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); @@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner int rcu_read_lock_sched_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -@@ -665,10 +676,14 @@ static inline void rcu_read_unlock(void) +@@ -663,10 +674,14 
@@ static inline void rcu_read_unlock(void) static inline void rcu_read_lock_bh(void) { local_bh_disable(); @@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner } /* -@@ -678,10 +693,14 @@ static inline void rcu_read_lock_bh(void +@@ -676,10 +691,14 @@ static inline void rcu_read_lock_bh(void */ static inline void rcu_read_unlock_bh(void) { @@ -121,33 +121,29 @@ Signed-off-by: Thomas Gleixner unsigned long get_state_synchronize_rcu(void); --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h -@@ -478,20 +478,28 @@ static inline void show_rcu_gp_kthreads( - extern unsigned long rcutorture_testseq; - extern unsigned long rcutorture_vernum; - unsigned long rcu_batches_started(void); --unsigned long rcu_batches_started_bh(void); - unsigned long rcu_batches_started_sched(void); - unsigned long rcu_batches_completed(void); --unsigned long rcu_batches_completed_bh(void); - unsigned long rcu_batches_completed_sched(void); +@@ -528,7 +528,6 @@ static inline void show_rcu_gp_kthreads( + static inline int rcu_get_gp_kthreads_prio(void) { return 0; } + #else /* #ifdef CONFIG_TINY_RCU */ + unsigned long rcu_get_gp_seq(void); +-unsigned long rcu_bh_get_gp_seq(void); + unsigned long rcu_sched_get_gp_seq(void); unsigned long rcu_exp_batches_completed(void); unsigned long rcu_exp_batches_completed_sched(void); - unsigned long srcu_batches_completed(struct srcu_struct *sp); +@@ -536,10 +535,18 @@ unsigned long srcu_batches_completed(str void show_rcu_gp_kthreads(void); + int rcu_get_gp_kthreads_prio(void); void rcu_force_quiescent_state(void); -void rcu_bh_force_quiescent_state(void); void rcu_sched_force_quiescent_state(void); extern struct workqueue_struct *rcu_gp_wq; extern struct workqueue_struct *rcu_par_gp_wq; + -+#ifndef CONFIG_PREEMPT_RT_FULL -+void rcu_bh_force_quiescent_state(void); -+unsigned long rcu_batches_started_bh(void); -+unsigned long rcu_batches_completed_bh(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define rcu_bh_get_gp_seq rcu_get_gp_seq ++#define rcu_bh_force_quiescent_state rcu_force_quiescent_state +#else -+# define rcu_bh_force_quiescent_state rcu_force_quiescent_state -+# define rcu_batches_completed_bh rcu_batches_completed -+# define rcu_batches_started_bh rcu_batches_completed ++unsigned long rcu_bh_get_gp_seq(void); ++void rcu_bh_force_quiescent_state(void); +#endif + #endif /* #else #ifdef CONFIG_TINY_RCU */ @@ -155,7 +151,7 @@ Signed-off-by: Thomas Gleixner #ifdef CONFIG_RCU_NOCB_CPU --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c -@@ -413,6 +413,7 @@ static struct rcu_torture_ops rcu_ops = +@@ -434,6 +434,7 @@ static struct rcu_torture_ops rcu_ops = .name = "rcu" }; @@ -163,7 +159,7 @@ Signed-off-by: Thomas Gleixner /* * Definitions for rcu_bh torture testing. */ -@@ -452,6 +453,12 @@ static struct rcu_torture_ops rcu_bh_ops +@@ -475,6 +476,12 @@ static struct rcu_torture_ops rcu_bh_ops .name = "rcu_bh" }; @@ -178,7 +174,7 @@ Signed-off-by: Thomas Gleixner * The names includes "busted", and they really means it! 
--- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c -@@ -243,6 +243,7 @@ void rcu_sched_qs(void) +@@ -244,6 +244,7 @@ void rcu_sched_qs(void) this_cpu_ptr(&rcu_sched_data), true); } @@ -186,7 +182,7 @@ Signed-off-by: Thomas Gleixner void rcu_bh_qs(void) { RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!"); -@@ -253,6 +254,7 @@ void rcu_bh_qs(void) +@@ -254,6 +255,7 @@ void rcu_bh_qs(void) __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); } } @@ -194,37 +190,23 @@ Signed-off-by: Thomas Gleixner /* * Steal a bit from the bottom of ->dynticks for idle entry/exit -@@ -549,11 +551,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc - /* - * Return the number of RCU BH batches started thus far for debug & stats. - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - unsigned long rcu_batches_started_bh(void) - { - return rcu_bh_state.gpnum; +@@ -568,6 +570,7 @@ unsigned long rcu_sched_get_gp_seq(void) } - EXPORT_SYMBOL_GPL(rcu_batches_started_bh); -+#endif - - /* - * Return the number of RCU batches completed thus far for debug & stats. -@@ -573,6 +577,7 @@ unsigned long rcu_batches_completed_sche - } - EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); + EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); +#ifndef CONFIG_PREEMPT_RT_FULL /* - * Return the number of RCU BH batches completed thus far for debug & stats. + * Return the number of RCU-bh GPs completed thus far for debug & stats. */ -@@ -581,6 +586,7 @@ unsigned long rcu_batches_completed_bh(v - return rcu_bh_state.completed; +@@ -576,6 +579,7 @@ unsigned long rcu_bh_get_gp_seq(void) + return READ_ONCE(rcu_bh_state.gp_seq); } - EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); + EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); +#endif /* * Return the number of RCU expedited batches completed thus far for -@@ -604,6 +610,7 @@ unsigned long rcu_exp_batches_completed_ +@@ -599,6 +603,7 @@ unsigned long rcu_exp_batches_completed_ } EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); @@ -232,7 +214,7 @@ Signed-off-by: Thomas Gleixner /* * Force a quiescent state. */ -@@ -622,6 +629,13 @@ void rcu_bh_force_quiescent_state(void) +@@ -617,6 +622,13 @@ void rcu_bh_force_quiescent_state(void) } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); @@ -246,7 +228,7 @@ Signed-off-by: Thomas Gleixner /* * Force a quiescent state for RCU-sched. */ -@@ -672,9 +686,11 @@ void rcutorture_get_gp_data(enum rcutort +@@ -674,9 +686,11 @@ void rcutorture_get_gp_data(enum rcutort case RCU_FLAVOR: rsp = rcu_state_p; break; @@ -258,7 +240,7 @@ Signed-off-by: Thomas Gleixner case RCU_SCHED_FLAVOR: rsp = &rcu_sched_state; break; -@@ -2986,6 +3002,7 @@ void call_rcu_sched(struct rcu_head *hea +@@ -3040,6 +3054,7 @@ void call_rcu_sched(struct rcu_head *hea } EXPORT_SYMBOL_GPL(call_rcu_sched); @@ -266,7 +248,7 @@ Signed-off-by: Thomas Gleixner /** * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. * @head: structure to be used for queueing the RCU updates. -@@ -3013,6 +3030,7 @@ void call_rcu_bh(struct rcu_head *head, +@@ -3067,6 +3082,7 @@ void call_rcu_bh(struct rcu_head *head, __call_rcu(head, func, &rcu_bh_state, -1, 0); } EXPORT_SYMBOL_GPL(call_rcu_bh); @@ -274,7 +256,7 @@ Signed-off-by: Thomas Gleixner /* * Queue an RCU callback for lazy invocation after a grace period. -@@ -3098,6 +3116,7 @@ void synchronize_sched(void) +@@ -3152,6 +3168,7 @@ void synchronize_sched(void) } EXPORT_SYMBOL_GPL(synchronize_sched); @@ -282,7 +264,7 @@ Signed-off-by: Thomas Gleixner /** * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. 
* -@@ -3124,6 +3143,7 @@ void synchronize_rcu_bh(void) +@@ -3178,6 +3195,7 @@ void synchronize_rcu_bh(void) wait_rcu_gp(call_rcu_bh); } EXPORT_SYMBOL_GPL(synchronize_rcu_bh); @@ -290,7 +272,7 @@ Signed-off-by: Thomas Gleixner /** * get_state_synchronize_rcu - Snapshot current RCU state -@@ -3457,6 +3477,7 @@ static void _rcu_barrier(struct rcu_stat +@@ -3485,6 +3503,7 @@ static void _rcu_barrier(struct rcu_stat mutex_unlock(&rsp->barrier_mutex); } @@ -298,7 +280,7 @@ Signed-off-by: Thomas Gleixner /** * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. */ -@@ -3465,6 +3486,7 @@ void rcu_barrier_bh(void) +@@ -3493,6 +3512,7 @@ void rcu_barrier_bh(void) _rcu_barrier(&rcu_bh_state); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); @@ -306,7 +288,7 @@ Signed-off-by: Thomas Gleixner /** * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. -@@ -4081,7 +4103,9 @@ void __init rcu_init(void) +@@ -4140,7 +4160,9 @@ void __init rcu_init(void) rcu_bootup_announce(); rcu_init_geometry(); @@ -318,7 +300,7 @@ Signed-off-by: Thomas Gleixner rcu_dump_rcu_node_tree(&rcu_sched_state); --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h -@@ -422,7 +422,9 @@ extern struct list_head rcu_struct_flavo +@@ -413,7 +413,9 @@ extern struct list_head rcu_struct_flavo */ extern struct rcu_state rcu_sched_state; diff --git a/debian/patches-rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/debian/patches-rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch index ff3caa4ca..ae97ec730 100644 --- a/debian/patches-rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch +++ b/debian/patches-rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch @@ -1,7 +1,7 @@ Subject: ARM: Initialize split page table locks for vector page From: Frank Rowand Date: Sat, 1 Oct 2011 18:58:13 -0700 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if PREEMPT_RT_FULL=y because vectors_user_mapping() creates a diff --git a/debian/patches-rt/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/debian/patches-rt/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch index e824529f3..cac8c291e 100644 --- a/debian/patches-rt/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch +++ b/debian/patches-rt/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch @@ -1,7 +1,7 @@ From: Daniel Bristot de Oliveira Date: Mon, 26 Jun 2017 17:07:15 +0200 Subject: rt: Increase/decrease the nr of migratory tasks when enabling/disabling migration -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz There is a problem in the migrate_disable()/enable() implementation regarding the number of migratory tasks in the rt/dl RQs. 
The problem @@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7155,6 +7155,47 @@ const u32 sched_prio_to_wmult[40] = { +@@ -7133,6 +7133,47 @@ const u32 sched_prio_to_wmult[40] = { #if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) @@ -129,7 +129,7 @@ Signed-off-by: Sebastian Andrzej Siewior void migrate_disable(void) { struct task_struct *p = current; -@@ -7178,10 +7219,9 @@ void migrate_disable(void) +@@ -7156,10 +7197,9 @@ void migrate_disable(void) } preempt_disable(); @@ -142,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior preempt_enable(); } -@@ -7213,9 +7253,8 @@ void migrate_enable(void) +@@ -7191,9 +7231,8 @@ void migrate_enable(void) preempt_disable(); diff --git a/debian/patches-rt/rt-introduce-cpu-chill.patch b/debian/patches-rt/rt-introduce-cpu-chill.patch index fb9d3dbe3..4b38ae62e 100644 --- a/debian/patches-rt/rt-introduce-cpu-chill.patch +++ b/debian/patches-rt/rt-introduce-cpu-chill.patch @@ -1,7 +1,7 @@ Subject: rt: Introduce cpu_chill() From: Thomas Gleixner Date: Wed, 07 Mar 2012 20:51:03 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Retry loops on RT might loop forever when the modifying side was preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill() @@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior #endif /* defined(_LINUX_DELAY_H) */ --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c -@@ -1895,6 +1895,27 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct +@@ -1894,6 +1894,27 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct } #endif diff --git a/debian/patches-rt/rt-local-irq-lock.patch b/debian/patches-rt/rt-local-irq-lock.patch index 3b341c697..0a5e3e709 100644 --- a/debian/patches-rt/rt-local-irq-lock.patch +++ b/debian/patches-rt/rt-local-irq-lock.patch @@ -1,7 +1,7 @@ Subject: rt: Add local irq locks From: Thomas Gleixner Date: Mon, 20 Jun 2011 09:03:47 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Introduce locallock. For !RT this maps to preempt_disable()/ local_irq_disable() so there is not much that changes. For RT this will diff --git a/debian/patches-rt/rt-preempt-base-config.patch b/debian/patches-rt/rt-preempt-base-config.patch index 387a3336c..9197abebc 100644 --- a/debian/patches-rt/rt-preempt-base-config.patch +++ b/debian/patches-rt/rt-preempt-base-config.patch @@ -1,7 +1,7 @@ Subject: rt: Provide PREEMPT_RT_BASE config switch From: Thomas Gleixner Date: Fri, 17 Jun 2011 12:39:57 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Introduce PREEMPT_RT_BASE which enables parts of PREEMPT_RT_FULL. Forces interrupt threading and enables some of the RT @@ -9,8 +9,8 @@ substitutions for testing. 
Signed-off-by: Thomas Gleixner --- - kernel/Kconfig.preempt | 19 +++++++++++++++++-- - 1 file changed, 17 insertions(+), 2 deletions(-) + kernel/Kconfig.preempt | 21 ++++++++++++++++++--- + 1 file changed, 18 insertions(+), 3 deletions(-) --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -25,19 +25,20 @@ Signed-off-by: Thomas Gleixner choice prompt "Preemption Model" -@@ -33,9 +40,9 @@ config PREEMPT_VOLUNTARY +@@ -34,10 +41,10 @@ config PREEMPT_VOLUNTARY Select this if you are building a kernel for a desktop system. -config PREEMPT +config PREEMPT__LL bool "Preemptible Kernel (Low-Latency Desktop)" + depends on !ARCH_NO_PREEMPT - select PREEMPT_COUNT + select PREEMPT select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK help This option reduces the latency of the kernel by making -@@ -52,6 +59,14 @@ config PREEMPT +@@ -54,7 +61,15 @@ config PREEMPT embedded system with latency requirements in the milliseconds range. @@ -52,3 +53,6 @@ Signed-off-by: Thomas Gleixner endchoice config PREEMPT_COUNT +- bool +\ No newline at end of file ++ bool diff --git a/debian/patches-rt/rt-serial-warn-fix.patch b/debian/patches-rt/rt-serial-warn-fix.patch index 55060473b..70f6483e2 100644 --- a/debian/patches-rt/rt-serial-warn-fix.patch +++ b/debian/patches-rt/rt-serial-warn-fix.patch @@ -1,7 +1,7 @@ Subject: rt: Improve the serial console PASS_LIMIT From: Ingo Molnar Date: Wed Dec 14 13:05:54 CET 2011 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Beyond the warning: diff --git a/debian/patches-rt/rtmutex-Make-lock_killable-work.patch b/debian/patches-rt/rtmutex-Make-lock_killable-work.patch index 989afc41d..88783568b 100644 --- a/debian/patches-rt/rtmutex-Make-lock_killable-work.patch +++ b/debian/patches-rt/rtmutex-Make-lock_killable-work.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Sat, 1 Apr 2017 12:50:59 +0200 Subject: [PATCH] rtmutex: Make lock_killable work -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Locking an rt mutex killable does not work because signal handling is restricted to TASK_INTERRUPTIBLE. diff --git a/debian/patches-rt/rtmutex-Provide-rt_mutex_slowlock_locked.patch b/debian/patches-rt/rtmutex-Provide-rt_mutex_slowlock_locked.patch index b5e5ccf82..b9d241f7b 100644 --- a/debian/patches-rt/rtmutex-Provide-rt_mutex_slowlock_locked.patch +++ b/debian/patches-rt/rtmutex-Provide-rt_mutex_slowlock_locked.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Thu, 12 Oct 2017 16:14:22 +0200 Subject: rtmutex: Provide rt_mutex_slowlock_locked() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz This is the inner-part of rt_mutex_slowlock(), required for rwsem-rt. 
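The rt_mutex_slowlock_locked() split called out just above follows a common factoring: keep the irq-safe wait_lock bracketing in a thin outer function and expose the inner blocking loop to callers that already hold wait_lock. A minimal sketch of that shape, with simplified types and the wait loop stubbed out (this is not the actual rtmutex code):

    static int rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
                                        struct hrtimer_sleeper *timeout,
                                        struct rt_mutex_waiter *waiter)
    {
            /* runs with lock->wait_lock held: enqueue the waiter, walk
             * the PI chain, loop in schedule() until the lock is ours */
            return 0;
    }

    static int rt_mutex_slowlock(struct rt_mutex *lock, int state,
                                 struct hrtimer_sleeper *timeout)
    {
            struct rt_mutex_waiter waiter;  /* on-stack, as usual */
            unsigned long flags;
            int ret;

            raw_spin_lock_irqsave(&lock->wait_lock, flags);
            ret = rt_mutex_slowlock_locked(lock, state, timeout, &waiter);
            raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
            return ret;
    }

The rwsem-rt implementation can then call the _locked() variant from inside its own wait_lock section instead of duplicating the blocking loop.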
diff --git a/debian/patches-rt/rtmutex-add-mutex-implementation-based-on-rtmutex.patch b/debian/patches-rt/rtmutex-add-mutex-implementation-based-on-rtmutex.patch index 3dcb6fbdb..64cd2db49 100644 --- a/debian/patches-rt/rtmutex-add-mutex-implementation-based-on-rtmutex.patch +++ b/debian/patches-rt/rtmutex-add-mutex-implementation-based-on-rtmutex.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:17:03 +0200 Subject: rtmutex: add mutex implementation based on rtmutex -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior diff --git a/debian/patches-rt/rtmutex-add-rwlock-implementation-based-on-rtmutex.patch b/debian/patches-rt/rtmutex-add-rwlock-implementation-based-on-rtmutex.patch index 63a13cd5c..ae5e49e51 100644 --- a/debian/patches-rt/rtmutex-add-rwlock-implementation-based-on-rtmutex.patch +++ b/debian/patches-rt/rtmutex-add-rwlock-implementation-based-on-rtmutex.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:18:06 +0200 Subject: rtmutex: add rwlock implementation based on rtmutex -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The implementation is bias-based, similar to the rwsem implementation. diff --git a/debian/patches-rt/rtmutex-add-rwsem-implementation-based-on-rtmutex.patch b/debian/patches-rt/rtmutex-add-rwsem-implementation-based-on-rtmutex.patch index fcff1ce99..be4c00899 100644 --- a/debian/patches-rt/rtmutex-add-rwsem-implementation-based-on-rtmutex.patch +++ b/debian/patches-rt/rtmutex-add-rwsem-implementation-based-on-rtmutex.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:28:34 +0200 Subject: rtmutex: add rwsem implementation based on rtmutex -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The RT specific R/W semaphore implementation restricts the number of readers to one because a writer cannot block on multiple readers and inherit its diff --git a/debian/patches-rt/rtmutex-add-sleeping-lock-implementation.patch b/debian/patches-rt/rtmutex-add-sleeping-lock-implementation.patch index 753147609..5206b49a8 100644 --- a/debian/patches-rt/rtmutex-add-sleeping-lock-implementation.patch +++ b/debian/patches-rt/rtmutex-add-sleeping-lock-implementation.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:11:19 +0200 Subject: rtmutex: add sleeping lock implementation -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/include/linux/kernel.h +++ b/include/linux/kernel.h -@@ -226,6 +226,9 @@ extern int _cond_resched(void); +@@ -259,6 +259,9 @@ extern int _cond_resched(void); */ # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) @@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior # define sched_annotate_sleep() (current->task_state_change = 0) #else static inline void 
___might_sleep(const char *file, int line, -@@ -233,6 +236,7 @@ extern int _cond_resched(void); +@@ -266,6 +269,7 @@ extern int _cond_resched(void); static inline void __might_sleep(const char *file, int line, int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) @@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * set_special_state() should be used for those states when the blocking task * can not use the regular condition based wait-loop. In that case we must -@@ -908,6 +915,7 @@ struct task_struct { +@@ -914,6 +921,7 @@ struct task_struct { raw_spinlock_t pi_lock; struct wake_q_node wake_q; @@ -1130,7 +1130,7 @@ Signed-off-by: Sebastian Andrzej Siewior # include "rtmutex-debug.h" --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -407,9 +407,15 @@ static bool set_nr_if_polling(struct tas +@@ -401,9 +401,15 @@ static bool set_nr_if_polling(struct tas #endif #endif @@ -1148,7 +1148,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Atomically grab the task, if ->wake_q is !nil already it means -@@ -431,24 +437,32 @@ void wake_q_add(struct wake_q_head *head +@@ -425,24 +431,32 @@ void wake_q_add(struct wake_q_head *head head->lastp = &node->next; } @@ -1175,8 +1175,8 @@ Signed-off-by: Sebastian Andrzej Siewior + else + task->wake_q.next = NULL; /* - * wake_up_process() implies a wmb() to pair with the queueing - * in wake_q_add() so as not to miss wakeups. + * wake_up_process() executes a full barrier, which pairs with + * the queueing in wake_q_add() so as not to miss wakeups. */ - wake_up_process(task); + if (sleeper) diff --git a/debian/patches-rt/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/debian/patches-rt/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch index 2e9679a5f..6014c25c0 100644 --- a/debian/patches-rt/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch +++ b/debian/patches-rt/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 12 Oct 2017 17:34:38 +0200 Subject: rtmutex: add ww_mutex addon for mutex-rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Signed-off-by: Sebastian Andrzej Siewior --- diff --git a/debian/patches-rt/rtmutex-annotate-sleeping-lock-context.patch b/debian/patches-rt/rtmutex-annotate-sleeping-lock-context.patch index 55783fb2e..58be81394 100644 --- a/debian/patches-rt/rtmutex-annotate-sleeping-lock-context.patch +++ b/debian/patches-rt/rtmutex-annotate-sleeping-lock-context.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 21 Sep 2017 14:25:13 +0200 Subject: [PATCH] rtmutex: annotate sleeping lock context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The RCU code complains on schedule() within a rcu_readlock() section. The valid scenario on -RT is if a sleeping is held. 
In order to suppress @@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior #endif #ifdef CONFIG_PREEMPT_RCU -@@ -1800,6 +1809,23 @@ static __always_inline bool need_resched +@@ -1792,6 +1801,23 @@ static __always_inline bool need_resched return unlikely(tif_need_resched()); } @@ -223,7 +223,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h -@@ -325,9 +325,13 @@ static void rcu_preempt_note_context_swi +@@ -337,9 +337,13 @@ static void rcu_preempt_note_context_swi struct task_struct *t = current; struct rcu_data *rdp; struct rcu_node *rnp; @@ -240,7 +240,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7324,4 +7324,49 @@ void migrate_enable(void) +@@ -7302,4 +7302,49 @@ void migrate_enable(void) preempt_enable(); } EXPORT_SYMBOL(migrate_enable); diff --git a/debian/patches-rt/rtmutex-avoid-include-hell.patch b/debian/patches-rt/rtmutex-avoid-include-hell.patch index 867fb3bf4..1aaa29d6a 100644 --- a/debian/patches-rt/rtmutex-avoid-include-hell.patch +++ b/debian/patches-rt/rtmutex-avoid-include-hell.patch @@ -1,7 +1,7 @@ Subject: rtmutex: Avoid include hell From: Thomas Gleixner Date: Wed, 29 Jun 2011 20:06:39 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Include only the required raw types. This avoids pulling in the complete spinlock header which in turn requires rtmutex.h at some point. diff --git a/debian/patches-rt/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch b/debian/patches-rt/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch index a5897533b..9318e47c6 100644 --- a/debian/patches-rt/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch +++ b/debian/patches-rt/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch @@ -2,7 +2,7 @@ From: Thomas Gleixner Date: Thu, 12 Oct 2017 16:36:39 +0200 Subject: rtmutex: export lockdep-less version of rt_mutex's lock, trylock and unlock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Required for lock implementation ontop of rtmutex. 
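The lockdep-less exports above exist for the layered locks that follow (mutex, rwlock and rwsem built on rtmutex): the base primitive must not self-annotate, or every acquisition would be recorded by lockdep twice. A sketch of the layering; __rt_mutex_lock_state() is meant to suggest the exported entry point, not a guaranteed signature:

    /* bare entry point: no lockdep, exported for lock implementations */
    int __rt_mutex_lock_state(struct rt_mutex *lock, int state);

    static inline void _mutex_lock(struct mutex *lock)
    {
            /* the outer lock owns the (single) lockdep annotation ... */
            mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
            /* ... and delegates the actual blocking to the bare rtmutex */
            __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE);
    }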
diff --git a/debian/patches-rt/rtmutex-futex-prepare-rt.patch b/debian/patches-rt/rtmutex-futex-prepare-rt.patch index 056ee7b4d..631669432 100644 --- a/debian/patches-rt/rtmutex-futex-prepare-rt.patch +++ b/debian/patches-rt/rtmutex-futex-prepare-rt.patch @@ -1,7 +1,7 @@ Subject: rtmutex: Handle the various new futex race conditions From: Thomas Gleixner Date: Fri, 10 Jun 2011 11:04:15 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz RT opens a few new interesting race conditions in the rtmutex/futex combo due to futex hash bucket lock being a 'sleeping' spinlock and diff --git a/debian/patches-rt/rtmutex-lock-killable.patch b/debian/patches-rt/rtmutex-lock-killable.patch index 8539f38b9..5317b3470 100644 --- a/debian/patches-rt/rtmutex-lock-killable.patch +++ b/debian/patches-rt/rtmutex-lock-killable.patch @@ -1,7 +1,7 @@ Subject: rtmutex: Add rtmutex_lock_killable() From: Thomas Gleixner Date: Thu, 09 Jun 2011 11:43:52 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Add "killable" type to rtmutex. We need this since rtmutex are used as "normal" mutexes which do use this type. diff --git a/debian/patches-rt/rtmutex-trylock-is-okay-on-RT.patch b/debian/patches-rt/rtmutex-trylock-is-okay-on-RT.patch index 2444a6880..7ddac8c04 100644 --- a/debian/patches-rt/rtmutex-trylock-is-okay-on-RT.patch +++ b/debian/patches-rt/rtmutex-trylock-is-okay-on-RT.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed 02 Dec 2015 11:34:07 +0100 Subject: rtmutex: trylock is okay on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz non-RT kernel could deadlock on rt_mutex_trylock() in softirq context. 
On -RT we don't run softirqs in IRQ context but in thread context so it is diff --git a/debian/patches-rt/rtmutex-wire-up-RT-s-locking.patch b/debian/patches-rt/rtmutex-wire-up-RT-s-locking.patch index 5fc242572..5b4adec1e 100644 --- a/debian/patches-rt/rtmutex-wire-up-RT-s-locking.patch +++ b/debian/patches-rt/rtmutex-wire-up-RT-s-locking.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:31:14 +0200 Subject: rtmutex: wire up RT's locking -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior @@ -86,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior */ --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h -@@ -279,7 +279,11 @@ static inline void do_raw_spin_unlock(ra +@@ -298,7 +298,11 @@ static inline void do_raw_spin_unlock(ra }) /* Include rwlock functions */ @@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: -@@ -290,6 +294,10 @@ static inline void do_raw_spin_unlock(ra +@@ -309,6 +313,10 @@ static inline void do_raw_spin_unlock(ra # include #endif @@ -110,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ -@@ -410,6 +418,8 @@ static __always_inline int spin_is_conte +@@ -429,6 +437,8 @@ static __always_inline int spin_is_conte #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) diff --git a/debian/patches-rt/rtmutex_dont_include_rcu.patch b/debian/patches-rt/rtmutex_dont_include_rcu.patch index 194dd279b..764a8384f 100644 --- a/debian/patches-rt/rtmutex_dont_include_rcu.patch +++ b/debian/patches-rt/rtmutex_dont_include_rcu.patch @@ -1,6 +1,6 @@ From: Sebastian Andrzej Siewior Subject: rbtree: don't include the rcu header -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The RCU header pulls in spinlock.h and fails due not yet defined types: @@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) -@@ -373,54 +374,6 @@ static inline void rcu_preempt_sleep_che +@@ -372,54 +373,6 @@ static inline void rcu_preempt_sleep_che }) /** diff --git a/debian/patches-rt/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch b/debian/patches-rt/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch index 3f3c5c2c7..617b934c3 100644 --- a/debian/patches-rt/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch +++ b/debian/patches-rt/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch @@ -2,7 +2,7 @@ From: Mike Galbraith Date: Sun, 19 Aug 2018 08:28:35 +0200 Subject: [PATCH] sched: Allow pinned user tasks to be awakened to the CPU they pinned -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Since commit 7af443ee16976 ("sched/core: Require cpu_active() in select_task_rq(), for user tasks") select_fallback_rq() will BUG() if @@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -926,7 +926,7 @@ static inline bool 
is_cpu_allowed(struct +@@ -903,7 +903,7 @@ static inline bool is_cpu_allowed(struct if (!cpumask_test_cpu(cpu, p->cpus_ptr)) return false; diff --git a/debian/patches-rt/sched-core-Remove-get_cpu-from-sched_fork.patch b/debian/patches-rt/sched-core-Remove-get_cpu-from-sched_fork.patch deleted file mode 100644 index fba3d93eb..000000000 --- a/debian/patches-rt/sched-core-Remove-get_cpu-from-sched_fork.patch +++ /dev/null @@ -1,88 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 6 Jul 2018 15:06:15 +0200 -Subject: [PATCH] sched/core: Remove get_cpu() from sched_fork() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz - -[ Upstream commit af0fffd9300b97d8875aa745bc78e2f6fdb3c1f0 ] - -get_cpu() disables preemption for the entire sched_fork() function. -This get_cpu() was introduced in commit: - - dd41f596cda0 ("sched: cfs core code") - -... which also invoked sched_balance_self() and this function -required preemption do be off. - -Today, sched_balance_self() seems to be moved to ->task_fork callback -which is invoked while the ->pi_lock is held. - -set_load_weight() could invoke reweight_task() which then via $callchain -might end up in smp_processor_id() but since `update_load' is false -this won't happen. - -I didn't find any this_cpu*() or similar usage during the initialisation -of the task_struct. - -The `cpu' value (from get_cpu()) is only used later in __set_task_cpu() -while the ->pi_lock lock is held. - -Based on this it is possible to remove get_cpu() and use -smp_processor_id() for the `cpu' variable without breaking anything. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Peter Zijlstra (Intel) -Cc: Linus Torvalds -Cc: Peter Zijlstra -Cc: Thomas Gleixner -Link: http://lkml.kernel.org/r/20180706130615.g2ex2kmfu5kcvlq6@linutronix.de -Signed-off-by: Ingo Molnar ---- - kernel/sched/core.c | 13 ++++--------- - 1 file changed, 4 insertions(+), 9 deletions(-) - ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -2317,7 +2317,6 @@ static inline void init_schedstats(void) - int sched_fork(unsigned long clone_flags, struct task_struct *p) - { - unsigned long flags; -- int cpu = get_cpu(); - - __sched_fork(clone_flags, p); - /* -@@ -2353,14 +2352,12 @@ int sched_fork(unsigned long clone_flags - p->sched_reset_on_fork = 0; - } - -- if (dl_prio(p->prio)) { -- put_cpu(); -+ if (dl_prio(p->prio)) - return -EAGAIN; -- } else if (rt_prio(p->prio)) { -+ else if (rt_prio(p->prio)) - p->sched_class = &rt_sched_class; -- } else { -+ else - p->sched_class = &fair_sched_class; -- } - - init_entity_runnable_average(&p->se); - -@@ -2376,7 +2373,7 @@ int sched_fork(unsigned long clone_flags - * We're setting the CPU for the first time, we don't migrate, - * so use __set_task_cpu(). 
- */ -- __set_task_cpu(p, cpu); -+ __set_task_cpu(p, smp_processor_id()); - if (p->sched_class->task_fork) - p->sched_class->task_fork(p); - raw_spin_unlock_irqrestore(&p->pi_lock, flags); -@@ -2393,8 +2390,6 @@ int sched_fork(unsigned long clone_flags - plist_node_init(&p->pushable_tasks, MAX_PRIO); - RB_CLEAR_NODE(&p->pushable_dl_tasks); - #endif -- -- put_cpu(); - return 0; - } - diff --git a/debian/patches-rt/sched-delay-put-task.patch b/debian/patches-rt/sched-delay-put-task.patch index 0ae790527..713206afb 100644 --- a/debian/patches-rt/sched-delay-put-task.patch +++ b/debian/patches-rt/sched-delay-put-task.patch @@ -1,7 +1,7 @@ Subject: sched: Move task_struct cleanup to RCU From: Thomas Gleixner Date: Tue, 31 May 2011 16:59:16 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz __put_task_struct() does quite some expensive work. We don't want to burden random tasks with that. @@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1173,6 +1173,9 @@ struct task_struct { +@@ -1185,6 +1185,9 @@ struct task_struct { unsigned int sequential_io; unsigned int sequential_io_avg; #endif diff --git a/debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch b/debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch index 8fff26331..57328dedb 100644 --- a/debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch +++ b/debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch @@ -1,7 +1,7 @@ Subject: sched: Disable CONFIG_RT_GROUP_SCHED on RT From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:03:52 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Carsten reported problems when running: @@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner --- a/init/Kconfig +++ b/init/Kconfig -@@ -741,6 +741,7 @@ config CFS_BANDWIDTH +@@ -776,6 +776,7 @@ config CFS_BANDWIDTH config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" depends on CGROUP_SCHED diff --git a/debian/patches-rt/sched-disable-ttwu-queue.patch b/debian/patches-rt/sched-disable-ttwu-queue.patch index 7da8c2a4e..5d80c78a1 100644 --- a/debian/patches-rt/sched-disable-ttwu-queue.patch +++ b/debian/patches-rt/sched-disable-ttwu-queue.patch @@ -1,7 +1,7 @@ Subject: sched: Disable TTWU_QUEUE on RT From: Thomas Gleixner Date: Tue, 13 Sep 2011 16:42:35 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The queued remote wakeup mechanism can introduce rather large latencies if the number of migrated tasks is high. Disable it for RT. 
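Of the scheduler patches just above, sched-delay-put-task is the clearest instance of the recurring RT move: defer heavy teardown out of atomic context. A condensed sketch of the pattern the patch applies; helper and field names are abbreviated from the patch's intent rather than quoted:

    /* RT only (CONFIG_PREEMPT_RT_BASE); mainline keeps freeing inline */
    static void delayed_put_task_struct(struct rcu_head *rhp)
    {
            struct task_struct *tsk =
                    container_of(rhp, struct task_struct, put_rcu);

            /* the expensive cleanup now runs from an RCU callback; on
             * RT those run in thread context, so sleeping locks are OK */
            __put_task_struct(tsk);
    }

    static inline void put_task_struct(struct task_struct *tsk)
    {
            if (atomic_dec_and_test(&tsk->usage))
                    call_rcu(&tsk->put_rcu, delayed_put_task_struct);
    }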
diff --git a/debian/patches-rt/sched-limit-nr-migrate.patch b/debian/patches-rt/sched-limit-nr-migrate.patch index d2e53bb97..8a629bb91 100644 --- a/debian/patches-rt/sched-limit-nr-migrate.patch +++ b/debian/patches-rt/sched-limit-nr-migrate.patch @@ -1,7 +1,7 @@ Subject: sched: Limit the number of task migrations per batch From: Thomas Gleixner Date: Mon, 06 Jun 2011 12:12:51 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Put an upper limit on the number of tasks which are migrated per batch to avoid large latencies. @@ -13,15 +13,15 @@ Signed-off-by: Thomas Gleixner --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -42,7 +42,11 @@ const_debug unsigned int sysctl_sched_fe +@@ -44,7 +44,11 @@ const_debug unsigned int sysctl_sched_fe * Number of tasks to iterate in a single balance run. * Limited because this is done with IRQs disabled. */ -+#ifndef CONFIG_PREEMPT_RT_FULL - const_debug unsigned int sysctl_sched_nr_migrate = 32; -+#else ++#ifdef CONFIG_PREEMPT_RT_FULL +const_debug unsigned int sysctl_sched_nr_migrate = 8; ++#else + const_debug unsigned int sysctl_sched_nr_migrate = 32; +#endif /* - * period over which we average the RT time consumption, measured + * period over which we measure -rt task CPU usage in us. diff --git a/debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch b/debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch index 73b9ac57b..732ac164f 100644 --- a/debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch +++ b/debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch @@ -1,7 +1,7 @@ Subject: sched: Do not account rcu_preempt_depth on RT in might_sleep() From: Thomas Gleixner Date: Tue, 07 Jun 2011 09:19:06 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz RT changes the rcu_preempt_depth semantics, so we cannot check for it in might_sleep(). @@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h -@@ -74,6 +74,11 @@ void synchronize_rcu(void); +@@ -73,6 +73,11 @@ void synchronize_rcu(void); * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. 
*/ #define rcu_preempt_depth() (current->rcu_read_lock_nesting) @@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner #else /* #ifdef CONFIG_PREEMPT_RCU */ -@@ -99,6 +104,8 @@ static inline int rcu_preempt_depth(void +@@ -98,6 +103,8 @@ static inline int rcu_preempt_depth(void return 0; } @@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner /* Internal to kernel */ --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -6181,7 +6181,7 @@ void __init sched_init(void) +@@ -6149,7 +6149,7 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { diff --git a/debian/patches-rt/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch b/debian/patches-rt/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch new file mode 100644 index 000000000..b8afa2eeb --- /dev/null +++ b/debian/patches-rt/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch @@ -0,0 +1,32 @@ +From: Sebastian Andrzej Siewior +Date: Tue, 9 Oct 2018 17:34:50 +0200 +Subject: [PATCH] sched/migrate_disable: Add export_symbol_gpl for + __migrate_disabled +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz + +Jonathan reported that lttng/modules can't use __migrate_disabled(). +This function is only used by sched/core itself and the tracing +infrastructure to report the migrate counter (lttng does probably the +same). Since the rework migrate_disable() it moved from sched.h to +preempt.h and is became an exported function instead of a "static +inline" due to the header recursion of preempt vs sched. + +Since the compiler inlines the function for sched/core usage, add a +EXPORT_SYMBOL_GPL to allow the module/LTTNG usage. + +Reported-by: Jonathan Rajott +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/sched/core.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1012,6 +1012,7 @@ int __migrate_disabled(struct task_struc + { + return p->migrate_disable; + } ++EXPORT_SYMBOL_GPL(__migrate_disabled); + #endif + + static void __do_set_cpus_allowed_tail(struct task_struct *p, diff --git a/debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch b/debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch index 0554eeee1..b5376c328 100644 --- a/debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch +++ b/debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch @@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 5 Jul 2018 14:44:51 +0200 Subject: [PATCH] sched/migrate_disable: fallback to preempt_disable() instead barrier() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz On SMP + !RT migrate_disable() is still around. It is not part of spin_lock() anymore so it has almost no users. 
However the futex code has a workaround for @@ -90,7 +90,7 @@ Signed-off-by: Sebastian Andrzej Siewior #endif --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -1053,7 +1053,7 @@ void set_cpus_allowed_common(struct task +@@ -1030,7 +1030,7 @@ void set_cpus_allowed_common(struct task p->nr_cpus_allowed = cpumask_weight(new_mask); } @@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior int __migrate_disabled(struct task_struct *p) { return p->migrate_disable; -@@ -1092,7 +1092,7 @@ static void __do_set_cpus_allowed_tail(s +@@ -1070,7 +1070,7 @@ static void __do_set_cpus_allowed_tail(s void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { @@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (__migrate_disabled(p)) { lockdep_assert_held(&p->pi_lock); -@@ -1165,7 +1165,7 @@ static int __set_cpus_allowed_ptr(struct +@@ -1143,7 +1143,7 @@ static int __set_cpus_allowed_ptr(struct if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) goto out; @@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (__migrate_disabled(p)) { p->migrate_disable_update = 1; goto out; -@@ -7180,7 +7180,7 @@ const u32 sched_prio_to_wmult[40] = { +@@ -7158,7 +7158,7 @@ const u32 sched_prio_to_wmult[40] = { #undef CREATE_TRACE_POINTS @@ -126,7 +126,7 @@ Signed-off-by: Sebastian Andrzej Siewior static inline void update_nr_migratory(struct task_struct *p, long delta) -@@ -7328,45 +7328,44 @@ EXPORT_SYMBOL(migrate_enable); +@@ -7306,45 +7306,44 @@ EXPORT_SYMBOL(migrate_enable); #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) void migrate_disable(void) { @@ -181,7 +181,7 @@ Signed-off-by: Sebastian Andrzej Siewior #endif --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c -@@ -979,7 +979,7 @@ void proc_sched_show_task(struct task_st +@@ -978,7 +978,7 @@ void proc_sched_show_task(struct task_st P(dl.runtime); P(dl.deadline); } diff --git a/debian/patches-rt/sched-mmdrop-delayed.patch b/debian/patches-rt/sched-mmdrop-delayed.patch index 368e95235..d4f8a04cf 100644 --- a/debian/patches-rt/sched-mmdrop-delayed.patch +++ b/debian/patches-rt/sched-mmdrop-delayed.patch @@ -1,7 +1,7 @@ Subject: sched: Move mmdrop to RCU on RT From: Thomas Gleixner Date: Mon, 06 Jun 2011 12:20:33 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Takes sleeping locks and calls into the memory allocator, so nothing we want to do in task switch and oder atomic contexts. @@ -24,15 +24,15 @@ Signed-off-by: Thomas Gleixner #include #include -@@ -483,6 +484,9 @@ struct mm_struct { - bool tlb_flush_batched; +@@ -482,6 +483,9 @@ struct mm_struct { + bool tlb_flush_batched; #endif - struct uprobes_state uprobes_state; + struct uprobes_state uprobes_state; +#ifdef CONFIG_PREEMPT_RT_BASE -+ struct rcu_head delayed_drop; ++ struct rcu_head delayed_drop; +#endif #ifdef CONFIG_HUGETLB_PAGE - atomic_long_t hugetlb_usage; + atomic_long_t hugetlb_usage; #endif --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner struct mm_struct *mm; --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2752,9 +2752,13 @@ static struct rq *finish_task_switch(str +@@ -2728,9 +2728,13 @@ static struct rq *finish_task_switch(str * provided by mmdrop(), * - a sync_core for SYNC_CORE. 
*/ @@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner } if (unlikely(prev_state == TASK_DEAD)) { if (prev->sched_class->task_dead) -@@ -5580,6 +5584,8 @@ void sched_setnuma(struct task_struct *p +@@ -5558,6 +5562,8 @@ void sched_setnuma(struct task_struct *p #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_HOTPLUG_CPU @@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner /* * Ensure that the idle task is using init_mm right before its CPU goes * offline. -@@ -5595,7 +5601,11 @@ void idle_task_exit(void) +@@ -5573,7 +5579,11 @@ void idle_task_exit(void) current->active_mm = &init_mm; finish_arch_post_lock_switch(); } @@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner } /* -@@ -5912,6 +5922,10 @@ int sched_cpu_dying(unsigned int cpu) +@@ -5882,6 +5892,10 @@ int sched_cpu_dying(unsigned int cpu) update_max_interval(); nohz_balance_exit_idle(rq); hrtick_clear(rq); diff --git a/debian/patches-rt/sched-rt-mutex-wakeup.patch b/debian/patches-rt/sched-rt-mutex-wakeup.patch index 7f82cf9b1..17aaa6e3a 100644 --- a/debian/patches-rt/sched-rt-mutex-wakeup.patch +++ b/debian/patches-rt/sched-rt-mutex-wakeup.patch @@ -1,7 +1,7 @@ Subject: sched: Add saved_state for tasks blocked on sleeping locks From: Thomas Gleixner Date: Sat, 25 Jun 2011 09:21:04 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Spinlocks are state preserving in !RT. RT changes the state when a task gets blocked on a lock. So we need to remember the state before @@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner /* * This begins the randomizable portion of task_struct. Only -@@ -1611,6 +1613,7 @@ extern struct task_struct *find_get_task +@@ -1603,6 +1605,7 @@ extern struct task_struct *find_get_task extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); @@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner #ifdef CONFIG_SMP --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2022,8 +2022,27 @@ try_to_wake_up(struct task_struct *p, un +@@ -1998,8 +1998,27 @@ try_to_wake_up(struct task_struct *p, un */ raw_spin_lock_irqsave(&p->pi_lock, flags); smp_mb__after_spinlock(); @@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner trace_sched_waking(p); -@@ -2187,6 +2206,18 @@ int wake_up_process(struct task_struct * +@@ -2163,6 +2182,18 @@ int wake_up_process(struct task_struct * } EXPORT_SYMBOL(wake_up_process); @@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner return try_to_wake_up(p, state, 0); --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h -@@ -1418,6 +1418,7 @@ static inline int task_on_rq_migrating(s +@@ -1446,6 +1446,7 @@ static inline int task_on_rq_migrating(s #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ #define WF_FORK 0x02 /* Child wakeup after fork */ #define WF_MIGRATED 0x4 /* Internal use, task got migrated */ diff --git a/debian/patches-rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/debian/patches-rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch index b0396b124..aefb92dbc 100644 --- a/debian/patches-rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch +++ b/debian/patches-rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch @@ -1,7 +1,7 @@ From: Steven Rostedt Date: Mon, 18 Mar 2013 15:12:49 -0400 Subject: sched/workqueue: Only wake up idle workers if not blocked on sleeping spin lock -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz In -rt, most spin_locks() turn into mutexes. One of these spin_lock conversions is performed on the workqueue gcwq->lock. When the idle @@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -3519,8 +3519,10 @@ static void __sched notrace __schedule(b +@@ -3497,8 +3497,10 @@ static void __sched notrace __schedule(b * If a worker went to sleep, notify and ask workqueue * whether it wants to wake up a task to maintain * concurrency. diff --git a/debian/patches-rt/scsi-fcoe-rt-aware.patch b/debian/patches-rt/scsi-fcoe-rt-aware.patch index 839bad417..3754769b5 100644 --- a/debian/patches-rt/scsi-fcoe-rt-aware.patch +++ b/debian/patches-rt/scsi-fcoe-rt-aware.patch @@ -1,7 +1,7 @@ Subject: scsi/fcoe: Make RT aware. From: Thomas Gleixner Date: Sat, 12 Nov 2011 14:00:48 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Do not disable preemption while taking sleeping locks. All user look safe for migrate_diable() only. diff --git a/debian/patches-rt/seqlock-prevent-rt-starvation.patch b/debian/patches-rt/seqlock-prevent-rt-starvation.patch index 855c6f803..d05a990a5 100644 --- a/debian/patches-rt/seqlock-prevent-rt-starvation.patch +++ b/debian/patches-rt/seqlock-prevent-rt-starvation.patch @@ -1,7 +1,7 @@ Subject: seqlock: Prevent rt starvation From: Thomas Gleixner Date: Wed, 22 Feb 2012 12:03:30 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz If a low prio writer gets preempted while holding the seqlock write locked, a high prio reader spins forever on RT. 
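The seqlock starvation fix just above leans on priority inheritance: rather than spinning while the sequence count is odd, an RT reader briefly acquires the writer's lock, boosting a preempted low-priority writer until it completes. Roughly, as a sketch of the mechanism rather than the exact patch body:

    static inline unsigned rt_read_seqbegin(seqlock_t *sl)
    {
            unsigned ret;

    repeat:
            ret = READ_ONCE(sl->seqcount.sequence);
            if (unlikely(ret & 1)) {
                    /* writer active: lock/unlock its lock, which is a
                     * PI rt_mutex on RT, so the writer gets boosted */
                    spin_lock(&sl->lock);
                    spin_unlock(&sl->lock);
                    goto repeat;
            }
            smp_rmb();
            return ret;
    }

On a non-RT kernel the same loop still behaves: taking the lock simply waits for the writer the old-fashioned way, and the odd count is only ever transient.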
diff --git a/debian/patches-rt/series b/debian/patches-rt/series index 41df8ad05..e86026944 100644 --- a/debian/patches-rt/series +++ b/debian/patches-rt/series @@ -6,31 +6,6 @@ # UPSTREAM merged ############################################################ -#misc -SCSI-libsas-remove-irq-save-in-sas_ata_qc_issue.patch -SCSI-qla2xxx-remove-irq-save-in-qla2x00_poll.patch -cgroup-tracing-Move-taking-of-spin-lock-out-of-trace.patch -sched-core-Remove-get_cpu-from-sched_fork.patch -random-Remove-preempt-disabled-region.patch -iommu-amd-drop-irqs_disabled-warn_on.patch -# atomic_dec_and_lock_irqsave / refcount_dec_and_lock_irqsave -0002-drivers-md-raid5-Use-irqsave-variant-of-atomic_dec_a.patch -0003-drivers-md-raid5-Do-not-disable-irq-on-release_inact.patch -0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch -0002-userns-use-refcount_t-for-reference-counting-instead.patch -0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch -0006-userns-Use-irqsave-variant-of-refcount_dec_and_lock.patch -libata-remove-ata_sff_data_xfer_noirq.patch -ntfs-avoid-disabling-interrupts-during-kmap_atomic.patch -# mm/workingset -0001-mm-workingset-remove-local_irq_disable-from-count_sh.patch -0002-mm-workingset-make-shadow_lru_isolate-use-locking-su.patch -0001-mm-list_lru-use-list_lru_walk_one-in-list_lru_walk_n.patch -0002-mm-list_lru-Move-locking-from-__list_lru_walk_one-to.patch -0003-mm-list_lru-Pass-struct-list_lru_node-as-an-argument.patch -0004-mm-list_lru-Introduce-list_lru_shrink_walk_irq.patch -irqchip-gic-v3-its-Make-its_lock-a-raw_spin_lock_t.patch - ############################################################ # POSTED by others ############################################################ @@ -45,6 +20,7 @@ irqchip-gic-v3-its-Make-its_lock-a-raw_spin_lock_t.patch 0006-ARM-configs-at91-unselect-PIT.patch irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch +kthread-convert-worker-lock-to-raw-spinlock.patch ############################################################ # POSTED @@ -56,7 +32,7 @@ arm-unwind-use_raw_lock.patch cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch fscache-initialize-cookie-hash-table-raw-spinlocks.patch Drivers-hv-vmbus-include-header-for-get_irq_regs.patch -rcu-Use-cpus_read_lock-while-looking-at-cpu_online_m.patch +percpu-include-irqflags.h-for-raw_local_irq_save.patch ############################################################ # Ready for posting @@ -73,6 +49,8 @@ efi-Allow-efi-runtime.patch x86-efi-drop-task_lock-from-efi_switch_mm.patch arm64-KVM-compute_layout-before-altenates-are-applie.patch of-allocate-free-phandle-cache-outside-of-the-devtre.patch +mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch +EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch ############################################################### # Stuff broken upstream and upstream wants something different @@ -80,6 +58,7 @@ of-allocate-free-phandle-cache-outside-of-the-devtre.patch NFSv4-replace-seqcount_t-with-a-seqlock_t.patch kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch add_migrate_disable.patch +sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch # Those two should vanish soon (not use PIT during bootup) at91_dont_enable_disable_clock.patch @@ -283,6 +262,7 @@ rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch # DRIVERS SERIAL drivers-tty-fix-omap-lock-crap.patch drivers-tty-pl011-irq-disable-madness.patch +tty-serial-pl011-warning-about-uninitialized.patch rt-serial-warn-fix.patch 
tty-serial-8250-don-t-take-the-trylock-during-oops.patch @@ -349,9 +329,6 @@ printk-rt-aware.patch kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch HACK-printk-drop-the-logbuf_lock-more-often.patch -# POWERC -powerpc-ps3-device-init.c-adapt-to-completions-using.patch - # ARM ARM-enable-irq-in-translation-section-permission-fau.patch genirq-update-irq_set_irqchip_state-documentation.patch @@ -416,6 +393,7 @@ drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch tpm_tis-fix-stall-after-iowrite-s.patch +watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch # I915 drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch diff --git a/debian/patches-rt/signal-revert-ptrace-preempt-magic.patch b/debian/patches-rt/signal-revert-ptrace-preempt-magic.patch index ca74e90c0..333ec9f08 100644 --- a/debian/patches-rt/signal-revert-ptrace-preempt-magic.patch +++ b/debian/patches-rt/signal-revert-ptrace-preempt-magic.patch @@ -1,7 +1,7 @@ Subject: signal: Revert ptrace preempt magic From: Thomas Gleixner Date: Wed, 21 Sep 2011 19:57:12 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more than a bandaid around the ptrace design trainwreck. It's not a @@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner --- a/kernel/signal.c +++ b/kernel/signal.c -@@ -2007,15 +2007,7 @@ static void ptrace_stop(int exit_code, i +@@ -2052,15 +2052,7 @@ static void ptrace_stop(int exit_code, i if (gstop_done && ptrace_reparented(current)) do_notify_parent_cldstop(current, false, why); diff --git a/debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch index 9285a4d98..c167e6ef3 100644 --- a/debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch +++ b/debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Fri, 3 Jul 2009 08:44:56 -0500 Subject: signals: Allow rt tasks to cache one sigqueue struct -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz To avoid allocation allow rt tasks to cache one sigqueue struct in task struct. @@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -889,6 +889,8 @@ struct task_struct { +@@ -895,6 +895,8 @@ struct task_struct { /* Signal handlers: */ struct signal_struct *signal; struct sighand_struct *sighand; @@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner static inline int valid_signal(unsigned long sig) --- a/kernel/exit.c +++ b/kernel/exit.c -@@ -159,7 +159,7 @@ static void __exit_signal(struct task_st +@@ -160,7 +160,7 @@ static void __exit_signal(struct task_st * Do this under ->siglock, we can race with another thread * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 
*/ @@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -1759,6 +1759,7 @@ static __latent_entropy struct task_stru +@@ -1802,6 +1802,7 @@ static __latent_entropy struct task_stru spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); @@ -68,8 +68,8 @@ Signed-off-by: Thomas Gleixner #include #include #include -@@ -362,13 +363,30 @@ static bool task_participate_group_stop( - return false; +@@ -388,13 +389,30 @@ void task_join_group_stop(struct task_st + } } +static inline struct sigqueue *get_task_cache(struct task_struct *t) @@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner { struct sigqueue *q = NULL; struct user_struct *user; -@@ -385,7 +403,10 @@ static struct sigqueue * +@@ -411,7 +429,10 @@ static struct sigqueue * if (override_rlimit || atomic_read(&user->sigpending) <= task_rlimit(t, RLIMIT_SIGPENDING)) { @@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner } else { print_dropped_signal(sig); } -@@ -402,6 +423,13 @@ static struct sigqueue * +@@ -428,6 +449,13 @@ static struct sigqueue * return q; } @@ -126,7 +126,7 @@ Signed-off-by: Thomas Gleixner static void __sigqueue_free(struct sigqueue *q) { if (q->flags & SIGQUEUE_PREALLOC) -@@ -411,6 +439,21 @@ static void __sigqueue_free(struct sigqu +@@ -437,6 +465,21 @@ static void __sigqueue_free(struct sigqu kmem_cache_free(sigqueue_cachep, q); } @@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner void flush_sigqueue(struct sigpending *queue) { struct sigqueue *q; -@@ -424,6 +467,21 @@ void flush_sigqueue(struct sigpending *q +@@ -450,6 +493,21 @@ void flush_sigqueue(struct sigpending *q } /* @@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner * Flush all pending signals for this kthread. */ void flush_signals(struct task_struct *t) -@@ -544,7 +602,7 @@ static void collect_signal(int sig, stru +@@ -572,7 +630,7 @@ static void collect_signal(int sig, stru (info->si_code == SI_TIMER) && (info->si_sys_private); @@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner } else { /* * Ok, it wasn't in the queue. This must be -@@ -581,6 +639,8 @@ int dequeue_signal(struct task_struct *t +@@ -609,6 +667,8 @@ int dequeue_signal(struct task_struct *t bool resched_timer = false; int signr; @@ -188,7 +188,7 @@ Signed-off-by: Thomas Gleixner /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ -@@ -1664,7 +1724,8 @@ EXPORT_SYMBOL(kill_pid); +@@ -1705,7 +1765,8 @@ EXPORT_SYMBOL(kill_pid); */ struct sigqueue *sigqueue_alloc(void) { diff --git a/debian/patches-rt/skbufhead-raw-lock.patch b/debian/patches-rt/skbufhead-raw-lock.patch index 54991d06c..1bdd58a8e 100644 --- a/debian/patches-rt/skbufhead-raw-lock.patch +++ b/debian/patches-rt/skbufhead-raw-lock.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Tue, 12 Jul 2011 15:38:34 +0200 Subject: net: Use skbufhead with raw lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Use the rps lock as rawlock so we can keep irq-off regions. It looks low latency. 
However we can't kfree() from this context therefore we defer this @@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h -@@ -2916,6 +2916,7 @@ struct softnet_data { +@@ -2967,6 +2967,7 @@ struct softnet_data { unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; @@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner }; struct sk_buff; -@@ -1666,6 +1667,12 @@ static inline void skb_queue_head_init(s +@@ -1675,6 +1676,12 @@ static inline void skb_queue_head_init(s __skb_queue_head_init(list); } @@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner { --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -219,14 +219,14 @@ static inline struct hlist_head *dev_ind +@@ -217,14 +217,14 @@ static inline struct hlist_head *dev_ind static inline void rps_lock(struct softnet_data *sd) { #ifdef CONFIG_RPS @@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner #endif } -@@ -4809,7 +4809,7 @@ static void flush_backlog(struct work_st +@@ -5239,7 +5239,7 @@ static void flush_backlog(struct work_st skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->input_pkt_queue); @@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner input_queue_head_incr(sd); } } -@@ -4819,11 +4819,14 @@ static void flush_backlog(struct work_st +@@ -5249,11 +5249,14 @@ static void flush_backlog(struct work_st skb_queue_walk_safe(&sd->process_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->process_queue); @@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner } static void flush_all_backlogs(void) -@@ -5371,7 +5374,9 @@ static int process_backlog(struct napi_s +@@ -5827,7 +5830,9 @@ static int process_backlog(struct napi_s while (again) { struct sk_buff *skb; @@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner rcu_read_lock(); __netif_receive_skb(skb); rcu_read_unlock(); -@@ -5379,9 +5384,9 @@ static int process_backlog(struct napi_s +@@ -5835,9 +5840,9 @@ static int process_backlog(struct napi_s if (++work >= quota) return work; @@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner rps_lock(sd); if (skb_queue_empty(&sd->input_pkt_queue)) { /* -@@ -5821,13 +5826,21 @@ static __latent_entropy void net_rx_acti +@@ -6299,13 +6304,21 @@ static __latent_entropy void net_rx_acti unsigned long time_limit = jiffies + usecs_to_jiffies(netdev_budget_usecs); int budget = netdev_budget; @@ -134,7 +134,7 @@ Signed-off-by: Thomas Gleixner for (;;) { struct napi_struct *n; -@@ -8790,10 +8803,13 @@ static int dev_cpu_dead(unsigned int old +@@ -9291,10 +9304,13 @@ static int dev_cpu_dead(unsigned int old netif_rx_ni(skb); input_queue_head_incr(oldsd); } @@ -149,7 +149,7 @@ Signed-off-by: Thomas Gleixner return 0; } -@@ -9099,8 +9115,9 @@ static int __init net_dev_init(void) +@@ -9603,8 +9619,9 @@ static int __init net_dev_init(void) INIT_WORK(flush, flush_backlog); diff --git a/debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch b/debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch index 4a96191ae..ea7cb7d8b 100644 --- a/debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch +++ b/debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 15 Apr 2015 19:00:47 +0200 Subject: slub: Disable SLUB_CPU_PARTIAL -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz |BUG: sleeping function called 
from invalid context at kernel/locking/rtmutex.c:915 |in_atomic(): 1, irqs_disabled(): 0, pid: 87, name: rcuop/7 @@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/init/Kconfig +++ b/init/Kconfig -@@ -1655,7 +1655,7 @@ config SLAB_FREELIST_HARDENED +@@ -1692,7 +1692,7 @@ config SLAB_FREELIST_HARDENED config SLUB_CPU_PARTIAL default y diff --git a/debian/patches-rt/slub-enable-irqs-for-no-wait.patch b/debian/patches-rt/slub-enable-irqs-for-no-wait.patch index 22ac8836e..f158dec87 100644 --- a/debian/patches-rt/slub-enable-irqs-for-no-wait.patch +++ b/debian/patches-rt/slub-enable-irqs-for-no-wait.patch @@ -1,7 +1,7 @@ Subject: slub: Enable irqs for __GFP_WAIT From: Thomas Gleixner Date: Wed, 09 Jan 2013 12:08:15 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz SYSTEM_RUNNING might be too late for enabling interrupts. Allocations with GFP_WAIT can happen before that. So use this as an indicator. @@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner --- a/mm/slub.c +++ b/mm/slub.c -@@ -1572,10 +1572,17 @@ static struct page *allocate_slab(struct +@@ -1570,10 +1570,17 @@ static struct page *allocate_slab(struct void *start, *p; int idx, order; bool shuffle; @@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner local_irq_enable(); flags |= s->allocflags; -@@ -1634,7 +1641,7 @@ static struct page *allocate_slab(struct +@@ -1632,7 +1639,7 @@ static struct page *allocate_slab(struct page->frozen = 1; out: diff --git a/debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch b/debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch index 2e79144f4..4327a6864 100644 --- a/debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch +++ b/debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch @@ -1,7 +1,7 @@ Subject: softirq: Disable softirq stacks for RT From: Thomas Gleixner Date: Mon, 18 Jul 2011 13:59:17 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Disable extra stacks for softirqs. We want to preempt softirqs and having them on special IRQ-stack does not make this easier. 
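The hunks that follow implement this by compiling each architecture's own-stack entry point out when RT is enabled. A tiny compilable model of the resulting behaviour (the names mirror the kernel's, the stack switch is only simulated; build with and without -DPREEMPT_RT_FULL to compare):

  #include <stdio.h>

  static void __do_softirq(void) { puts("running softirqs"); }

  #ifndef PREEMPT_RT_FULL
  /* non-RT: switch to a dedicated per-CPU IRQ stack first; the softirq
   * section is then not preemptible */
  static void do_softirq_own_stack(void)
  {
          puts("switching to IRQ stack");
          __do_softirq();
  }
  #else
  /* RT: stay on the current task stack so softirqs remain preemptible */
  static void do_softirq_own_stack(void)
  {
          __do_softirq();
  }
  #endif

  int main(void)
  {
          do_softirq_own_stack();
          return 0;
  }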
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c -@@ -745,6 +745,7 @@ void irq_ctx_init(void) +@@ -766,6 +766,7 @@ void irq_ctx_init(void) } } @@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner void do_softirq_own_stack(void) { struct thread_info *curtp, *irqtp; -@@ -762,6 +763,7 @@ void do_softirq_own_stack(void) +@@ -783,6 +784,7 @@ void do_softirq_own_stack(void) if (irqtp->flags) set_bits(irqtp->flags, &curtp->flags); } @@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner { --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S -@@ -41,6 +41,7 @@ +@@ -42,6 +42,7 @@ * We store the saved ksp_limit in the unused part * of the STACK_FRAME_OVERHEAD */ @@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner _GLOBAL(call_do_softirq) mflr r0 stw r0,4(r1) -@@ -57,6 +58,7 @@ +@@ -58,6 +59,7 @@ stw r10,THREAD+KSP_LIMIT(r2) mtlr r0 blr @@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S -@@ -31,6 +31,7 @@ +@@ -32,6 +32,7 @@ .text @@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner _GLOBAL(call_do_softirq) mflr r0 std r0,16(r1) -@@ -41,6 +42,7 @@ +@@ -42,6 +43,7 @@ ld r0,16(r1) mtlr r0 blr @@ -110,7 +110,7 @@ Signed-off-by: Thomas Gleixner void fixup_irqs(void) --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S -@@ -1038,6 +1038,7 @@ EXPORT_SYMBOL(native_load_gs_index) +@@ -1039,6 +1039,7 @@ EXPORT_SYMBOL(native_load_gs_index) jmp 2b .previous @@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner /* Call softirq on interrupt stack. Interrupts are off. */ ENTRY(do_softirq_own_stack) pushq %rbp -@@ -1048,6 +1049,7 @@ ENTRY(do_softirq_own_stack) +@@ -1049,6 +1050,7 @@ ENTRY(do_softirq_own_stack) leaveq ret ENDPROC(do_softirq_own_stack) diff --git a/debian/patches-rt/softirq-preempt-fix-3-re.patch b/debian/patches-rt/softirq-preempt-fix-3-re.patch index 389850f4f..ccf81862c 100644 --- a/debian/patches-rt/softirq-preempt-fix-3-re.patch +++ b/debian/patches-rt/softirq-preempt-fix-3-re.patch @@ -1,7 +1,7 @@ Subject: softirq: Check preemption after reenabling interrupts From: Thomas Gleixner Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET) -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz raise_softirq_irqoff() disables interrupts and wakes the softirq daemon, but after reenabling interrupts there is no preemption check, @@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner } --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -2505,6 +2505,7 @@ static void __netif_reschedule(struct Qd +@@ -2707,6 +2707,7 @@ static void __netif_reschedule(struct Qd sd->output_queue_tailp = &q->next_sched; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); @@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner } void __netif_schedule(struct Qdisc *q) -@@ -2567,6 +2568,7 @@ void __dev_kfree_skb_irq(struct sk_buff +@@ -2769,6 +2770,7 @@ void __dev_kfree_skb_irq(struct sk_buff __this_cpu_write(softnet_data.completion_queue, skb); raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); @@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner } EXPORT_SYMBOL(__dev_kfree_skb_irq); -@@ -3986,6 +3988,7 @@ static int enqueue_to_backlog(struct sk_ +@@ -4241,6 +4243,7 @@ static int enqueue_to_backlog(struct sk_ rps_unlock(sd); local_irq_restore(flags); @@ -136,7 +136,7 @@ Signed-off-by: 
Thomas Gleixner atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); -@@ -5333,12 +5336,14 @@ static void net_rps_action_and_irq_enabl +@@ -5789,12 +5792,14 @@ static void net_rps_action_and_irq_enabl sd->rps_ipi_list = NULL; local_irq_enable(); @@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner } static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) -@@ -5416,6 +5421,7 @@ void __napi_schedule(struct napi_struct +@@ -5872,6 +5877,7 @@ void __napi_schedule(struct napi_struct local_irq_save(flags); ____napi_schedule(this_cpu_ptr(&softnet_data), n); local_irq_restore(flags); @@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner } EXPORT_SYMBOL(__napi_schedule); -@@ -8772,6 +8778,7 @@ static int dev_cpu_dead(unsigned int old +@@ -9273,6 +9279,7 @@ static int dev_cpu_dead(unsigned int old raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); diff --git a/debian/patches-rt/softirq-split-locks.patch b/debian/patches-rt/softirq-split-locks.patch index 3bc79ab2c..45434b38f 100644 --- a/debian/patches-rt/softirq-split-locks.patch +++ b/debian/patches-rt/softirq-split-locks.patch @@ -1,7 +1,7 @@ From: Thomas Gleixner Date: Thu, 04 Oct 2012 14:20:47 +0100 Subject: softirq: Split softirq locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The 3.x RT series removed the split softirq implementation in favour of pushing softirq processing into the context of the thread which @@ -172,7 +172,7 @@ Signed-off-by: Thomas Gleixner (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1177,6 +1177,8 @@ struct task_struct { +@@ -1189,6 +1189,8 @@ struct task_struct { #endif #ifdef CONFIG_PREEMPT_RT_BASE struct rcu_head put_rcu; @@ -181,7 +181,7 @@ Signed-off-by: Thomas Gleixner #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; -@@ -1394,6 +1396,7 @@ extern struct pid *cad_pid; +@@ -1386,6 +1388,7 @@ extern struct pid *cad_pid; /* * Per process flags */ diff --git a/debian/patches-rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/debian/patches-rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch index c623ba452..6de3e645e 100644 --- a/debian/patches-rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch +++ b/debian/patches-rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 20 Jan 2016 16:34:17 +0100 Subject: softirq: split timer softirqs out of ksoftirqd -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The softirqd runs in -RT with SCHED_FIFO (prio 1) and deals mostly with timer wakeup which can not happen in hardirq context. 
The prio has been diff --git a/debian/patches-rt/spinlock-types-separate-raw.patch b/debian/patches-rt/spinlock-types-separate-raw.patch index 0fc4424cc..f3bf5cde6 100644 --- a/debian/patches-rt/spinlock-types-separate-raw.patch +++ b/debian/patches-rt/spinlock-types-separate-raw.patch @@ -1,7 +1,7 @@ Subject: spinlock: Split the lock types header From: Thomas Gleixner Date: Wed, 29 Jun 2011 19:34:01 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Split raw_spinlock into its own file and the remaining spinlock_t into its own non-RT header. The non-RT header will be replaced later by sleeping diff --git a/debian/patches-rt/squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch b/debian/patches-rt/squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch index e0c01e18c..ac0088a79 100644 --- a/debian/patches-rt/squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch +++ b/debian/patches-rt/squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch @@ -2,7 +2,7 @@ From: Julia Cartwright Date: Mon, 7 May 2018 08:58:57 -0500 Subject: [PATCH] squashfs: make use of local lock in multi_cpu decompressor -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Currently, the squashfs multi_cpu decompressor makes use of get_cpu_ptr()/put_cpu_ptr(), which unconditionally disable preemption diff --git a/debian/patches-rt/srcu-replace-local_irqsave-with-a-locallock.patch b/debian/patches-rt/srcu-replace-local_irqsave-with-a-locallock.patch index 4e3519209..c0cec203a 100644 --- a/debian/patches-rt/srcu-replace-local_irqsave-with-a-locallock.patch +++ b/debian/patches-rt/srcu-replace-local_irqsave-with-a-locallock.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Thu, 12 Oct 2017 18:37:12 +0200 Subject: [PATCH] srcu: replace local_irqsave() with a locallock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz There are two instances which disable interrupts in order to become a stable this_cpu_ptr() pointer. The restore part is coupled with @@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c -@@ -37,6 +37,7 @@ +@@ -39,6 +39,7 @@ #include #include #include @@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior #include "rcu.h" #include "rcu_segcblist.h" -@@ -752,6 +753,8 @@ static void srcu_flip(struct srcu_struct +@@ -758,6 +759,8 @@ static void srcu_flip(struct srcu_struct * negligible when amoritized over that time period, and the extra latency * of a needlessly non-expedited grace period is similarly negligible. */ @@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior static bool srcu_might_be_idle(struct srcu_struct *sp) { unsigned long curseq; -@@ -760,13 +763,13 @@ static bool srcu_might_be_idle(struct sr +@@ -766,13 +769,13 @@ static bool srcu_might_be_idle(struct sr unsigned long t; /* If the local srcu_data structure has callbacks, not idle. */ @@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * No local callbacks, so probabalistically probe global state. 
-@@ -844,7 +847,7 @@ void __call_srcu(struct srcu_struct *sp, +@@ -850,7 +853,7 @@ void __call_srcu(struct srcu_struct *sp, return; } rhp->func = func; @@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior sdp = this_cpu_ptr(sp->sda); spin_lock_rcu_node(sdp); rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); -@@ -860,7 +863,8 @@ void __call_srcu(struct srcu_struct *sp, +@@ -866,7 +869,8 @@ void __call_srcu(struct srcu_struct *sp, sdp->srcu_gp_seq_needed_exp = s; needexp = true; } diff --git a/debian/patches-rt/srcu-use-cpu_online-instead-custom-check.patch b/debian/patches-rt/srcu-use-cpu_online-instead-custom-check.patch index aaf78b324..6240e8da3 100644 --- a/debian/patches-rt/srcu-use-cpu_online-instead-custom-check.patch +++ b/debian/patches-rt/srcu-use-cpu_online-instead-custom-check.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 13 Sep 2017 14:43:41 +0200 Subject: [PATCH] srcu: use cpu_online() instead custom check -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The current check via srcu_online is slightly racy because after looking at srcu_online there could be an interrupt that interrupted us long @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c -@@ -36,6 +36,7 @@ +@@ -38,6 +38,7 @@ #include #include #include @@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior #include "rcu.h" #include "rcu_segcblist.h" -@@ -456,21 +457,6 @@ static void srcu_gp_start(struct srcu_st +@@ -459,21 +460,6 @@ static void srcu_gp_start(struct srcu_st } /* @@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior * Place the workqueue handler on the specified CPU if online, otherwise * just run it whereever. This is useful for placing workqueue handlers * that are to invoke the specified CPU's callbacks. -@@ -481,12 +467,12 @@ static bool srcu_queue_delayed_work_on(i +@@ -484,12 +470,12 @@ static bool srcu_queue_delayed_work_on(i { bool ret; @@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c -@@ -3732,8 +3732,6 @@ int rcutree_online_cpu(unsigned int cpu) +@@ -3767,8 +3767,6 @@ int rcutree_online_cpu(unsigned int cpu) rnp->ffmask |= rdp->grpmask; raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } @@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) return 0; /* Too early in boot for scheduler work. 
*/ sync_sched_exp_online_cleanup(cpu); -@@ -3761,8 +3759,6 @@ int rcutree_offline_cpu(unsigned int cpu +@@ -3796,8 +3794,6 @@ int rcutree_offline_cpu(unsigned int cpu } rcutree_affinity_setting(cpu, cpu); diff --git a/debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch index f071bf123..80d75ab6e 100644 --- a/debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch +++ b/debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch @@ -1,7 +1,7 @@ From: Mike Galbraith Date: Wed, 18 Feb 2015 16:05:28 +0100 Subject: sunrpc: Make svc_xprt_do_enqueue() use get_cpu_light() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915 |in_atomic(): 1, irqs_disabled(): 0, pid: 3194, name: rpc.nfsd diff --git a/debian/patches-rt/sysfs-realtime-entry.patch b/debian/patches-rt/sysfs-realtime-entry.patch index 12dac1a9d..44ffb84f3 100644 --- a/debian/patches-rt/sysfs-realtime-entry.patch +++ b/debian/patches-rt/sysfs-realtime-entry.patch @@ -1,7 +1,7 @@ Subject: sysfs: Add /sys/kernel/realtime entry From: Clark Williams Date: Sat Jul 30 21:55:53 2011 -0500 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Add a /sys/kernel entry to indicate that the kernel is a realtime kernel. diff --git a/debian/patches-rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/debian/patches-rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch index 83687c284..8db338a93 100644 --- a/debian/patches-rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch +++ b/debian/patches-rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch @@ -1,7 +1,7 @@ Subject: tasklet: Prevent tasklets from going into infinite spin in RT From: Ingo Molnar Date: Tue Nov 29 20:18:22 2011 -0500 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads, and spinlocks turn are mutexes. But this can cause issues with diff --git a/debian/patches-rt/thermal-Defer-thermal-wakups-to-threads.patch b/debian/patches-rt/thermal-Defer-thermal-wakups-to-threads.patch index a36a5a057..6c2673446 100644 --- a/debian/patches-rt/thermal-Defer-thermal-wakups-to-threads.patch +++ b/debian/patches-rt/thermal-Defer-thermal-wakups-to-threads.patch @@ -1,7 +1,7 @@ From: Daniel Wagner Date: Tue, 17 Feb 2015 09:37:44 +0100 Subject: thermal: Defer thermal wakups to threads -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz On RT the spin lock in pkg_temp_thermal_platfrom_thermal_notify will call schedule while we run in irq context. 
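The remedy, here as in the other driver patches of this kind, is to reduce the interrupt path to a wakeup and move the lock-taking work into a thread. A self-contained POSIX-threads sketch of that split (a stand-in for the driver, not its actual code):

  #include <pthread.h>
  #include <semaphore.h>
  #include <stdio.h>

  static sem_t kick;            /* sem_post() is safe from irq-like context */
  static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

  /* "irq context": must not sleep, so only wake the worker */
  static void thermal_interrupt(void)
  {
          sem_post(&kick);
  }

  /* worker thread: process context, sleeping locks are fine here */
  static void *thermal_worker(void *arg)
  {
          (void)arg;
          sem_wait(&kick);
          pthread_mutex_lock(&state_lock);  /* the lock that must not be
                                             * taken in irq context */
          puts("thermal notification handled in thread context");
          pthread_mutex_unlock(&state_lock);
          return NULL;
  }

  int main(void)
  {
          pthread_t t;

          sem_init(&kick, 0, 0);
          pthread_create(&t, NULL, thermal_worker, NULL);
          thermal_interrupt();          /* simulate the interrupt firing */
          pthread_join(t, NULL);
          return 0;
  }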
diff --git a/debian/patches-rt/time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch b/debian/patches-rt/time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch index 1c29d4328..c445ca97c 100644 --- a/debian/patches-rt/time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch +++ b/debian/patches-rt/time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Wed, 15 Nov 2017 17:29:51 +0100 Subject: [PATCH] time/hrtimer: avoid schedule_work() with interrupts disabled -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz The NOHZ code tries to schedule a workqueue with interrupts disabled. Since this does not work -RT I am switching it to swork instead. diff --git a/debian/patches-rt/timekeeping-split-jiffies-lock.patch b/debian/patches-rt/timekeeping-split-jiffies-lock.patch index b03927926..6719801c3 100644 --- a/debian/patches-rt/timekeeping-split-jiffies-lock.patch +++ b/debian/patches-rt/timekeeping-split-jiffies-lock.patch @@ -1,7 +1,7 @@ Subject: timekeeping: Split jiffies seqlock From: Thomas Gleixner Date: Thu, 14 Feb 2013 22:36:59 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so it can be taken in atomic context on RT. @@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c -@@ -2362,8 +2362,10 @@ EXPORT_SYMBOL(hardpps); +@@ -2415,8 +2415,10 @@ EXPORT_SYMBOL(hardpps); */ void xtime_update(unsigned long ticks) { diff --git a/debian/patches-rt/timers-prepare-for-full-preemption.patch b/debian/patches-rt/timers-prepare-for-full-preemption.patch index 4e4acc3f7..083e39848 100644 --- a/debian/patches-rt/timers-prepare-for-full-preemption.patch +++ b/debian/patches-rt/timers-prepare-for-full-preemption.patch @@ -1,7 +1,7 @@ From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:34 -0500 Subject: timers: Prepare for full preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz When softirqs can be preempted we need to make sure that cancelling the timer from the active thread can not deadlock vs. 
a running timer @@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner # define del_timer_sync(t) del_timer(t) --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -503,11 +503,14 @@ void resched_cpu(int cpu) +@@ -497,11 +497,14 @@ void resched_cpu(int cpu) */ int get_nohz_timer_target(void) { @@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner rcu_read_lock(); for_each_domain(cpu, sd) { -@@ -526,6 +529,8 @@ int get_nohz_timer_target(void) +@@ -520,6 +523,8 @@ int get_nohz_timer_target(void) cpu = housekeeping_any_cpu(HK_FLAG_TIMER); unlock: rcu_read_unlock(); @@ -92,8 +92,8 @@ Signed-off-by: Thomas Gleixner + return; + + base = get_timer_base(tf); -+ swait_event(base->wait_for_running_timer, -+ base->running_timer != timer); ++ swait_event_exclusive(base->wait_for_running_timer, ++ base->running_timer != timer); +} + +# define wakeup_timer_waiters(b) swake_up_all(&(b)->wait_for_running_timer) @@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner raw_spin_lock_irq(&base->lock); } } -@@ -1665,8 +1699,8 @@ static inline void __run_timers(struct t +@@ -1681,8 +1715,8 @@ static inline void __run_timers(struct t while (levels--) expire_timers(base, heads + levels); } @@ -155,7 +155,7 @@ Signed-off-by: Thomas Gleixner } /* -@@ -1924,6 +1958,9 @@ static void __init init_timer_cpu(int cp +@@ -1927,6 +1961,9 @@ static void __init init_timer_cpu(int cp base->cpu = cpu; raw_spin_lock_init(&base->lock); base->clk = jiffies; diff --git a/debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch b/debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch index 2044208db..5c5dcdccb 100644 --- a/debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch +++ b/debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch @@ -1,7 +1,7 @@ From: Haris Okanovic Date: Tue, 15 Aug 2017 15:13:08 -0500 Subject: [PATCH] tpm_tis: fix stall after iowrite*()s -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz ioread8() operations to TPM MMIO addresses can stall the cpu when immediately following a sequence of iowrite*()'s to the same region. 
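A common cure for posted-write stalls, and a plausible reading of this fix, is to pair each MMIO write with a read-back so the write buffer drains at a well-defined point instead of stalling a later, unrelated read. A kernel-side sketch of that shape; the helper names and the choice of TPM_ACCESS(0) as the flush register are assumptions for illustration, not necessarily the patch's exact code:

  /* assumed helpers, for illustration only */
  static inline void tpm_tis_flush(void __iomem *iobase)
  {
          ioread8(iobase + TPM_ACCESS(0));
  }

  static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr)
  {
          iowrite8(b, iobase + addr);
          tpm_tis_flush(iobase);   /* read back: completes the posted write */
  }

  static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
  {
          iowrite32(b, iobase + addr);
          tpm_tis_flush(iobase);
  }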
diff --git a/debian/patches-rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/debian/patches-rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch index 86c5aa614..b446a373d 100644 --- a/debian/patches-rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch +++ b/debian/patches-rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Mon, 11 Apr 2016 16:55:02 +0200 Subject: [PATCH] tty: serial: 8250: don't take the trylock during oops -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz An oops with irqs off (panic() from irqsafe hrtimer like the watchdog timer) will lead to a lockdep warning on each invocation and as such @@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c -@@ -3221,10 +3221,8 @@ void serial8250_console_write(struct uar +@@ -3239,10 +3239,8 @@ void serial8250_console_write(struct uar serial8250_rpm_get(up); diff --git a/debian/patches-rt/tty-serial-pl011-warning-about-uninitialized.patch b/debian/patches-rt/tty-serial-pl011-warning-about-uninitialized.patch new file mode 100644 index 000000000..0e15df39a --- /dev/null +++ b/debian/patches-rt/tty-serial-pl011-warning-about-uninitialized.patch @@ -0,0 +1,38 @@ +From: Kurt Kanzenbach +Date: Mon, 24 Sep 2018 10:29:01 +0200 +Subject: [PATCH] tty: serial: pl011: explicitly initialize the flags variable +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz + +Silence the following gcc warning: + +drivers/tty/serial/amba-pl011.c: In function ‘pl011_console_write’: +./include/linux/spinlock.h:260:3: warning: ‘flags’ may be used uninitialized in this function [-Wmaybe-uninitialized] + _raw_spin_unlock_irqrestore(lock, flags); \ + ^~~~~~~~~~~~~~~~~~~~~~~~~~~ +drivers/tty/serial/amba-pl011.c:2214:16: note: ‘flags’ was declared here + unsigned long flags; + ^~~~~ + +The code is correct. Thus, initializing flags to zero doesn't change the +behavior and resolves the warning. 
+ +Signed-off-by: Kurt Kanzenbach +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/tty/serial/amba-pl011.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -2211,7 +2211,7 @@ pl011_console_write(struct console *co, + { + struct uart_amba_port *uap = amba_ports[co->index]; + unsigned int old_cr = 0, new_cr; +- unsigned long flags; ++ unsigned long flags = 0; + int locked = 1; + + clk_enable(uap->clk); diff --git a/debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch index e9a251bbc..6e4e3b4fe 100644 --- a/debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch +++ b/debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch @@ -1,7 +1,7 @@ Subject: net: Remove preemption disabling in netif_rx() From: Priyanka Jain Date: Thu, 17 May 2012 09:35:11 +0530 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz 1)enqueue_to_backlog() (called from netif_rx) should be bind to a particluar CPU. This can be achieved by @@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -4210,7 +4210,7 @@ static int netif_rx_internal(struct sk_b +@@ -4465,7 +4465,7 @@ static int netif_rx_internal(struct sk_b struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; @@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); -@@ -4220,14 +4220,14 @@ static int netif_rx_internal(struct sk_b +@@ -4475,14 +4475,14 @@ static int netif_rx_internal(struct sk_b ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); diff --git a/debian/patches-rt/usb-do-not-disable-interrupts-in-giveback.patch b/debian/patches-rt/usb-do-not-disable-interrupts-in-giveback.patch index 592898613..70c9a0e04 100644 --- a/debian/patches-rt/usb-do-not-disable-interrupts-in-giveback.patch +++ b/debian/patches-rt/usb-do-not-disable-interrupts-in-giveback.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Fri, 8 Nov 2013 17:34:54 +0100 Subject: usb: do no disable interrupts in giveback -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet context") the USB code disables interrupts before invoking the complete diff --git a/debian/patches-rt/wait.h-include-atomic.h.patch b/debian/patches-rt/wait.h-include-atomic.h.patch index 695b6ba36..55cb392ca 100644 --- a/debian/patches-rt/wait.h-include-atomic.h.patch +++ b/debian/patches-rt/wait.h-include-atomic.h.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Mon, 28 Oct 2013 12:19:57 +0100 Subject: wait.h: include atomic.h -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz | CC init/main.o |In file included from include/linux/mmzone.h:9:0, diff --git a/debian/patches-rt/watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch b/debian/patches-rt/watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch new file mode 100644 index 000000000..b3f61f5f9 
--- /dev/null
+++ b/debian/patches-rt/watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch
@@ -0,0 +1,75 @@
+From: Julia Cartwright
+Date: Fri, 28 Sep 2018 21:03:51 +0000
+Subject: [PATCH] watchdog: prevent deferral of watchdogd wakeup on RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
+
+When PREEMPT_RT_FULL is enabled, all hrtimer expiry functions are
+deferred for execution into the context of ktimersoftd unless otherwise
+annotated.
+
+Deferring the expiry of the hrtimer used by the watchdog core, however,
+is a waste, as the callback does nothing but queue a kthread work item
+and wakeup watchdogd.
+
+It's worse than that, too: the deferral through ktimersoftd also means
+that for correct behavior a user must adjust the scheduling parameters
+of both watchdogd _and_ ktimersoftd, which is unnecessary and has other
+side effects (like causing unrelated expiry functions to execute at
+potentially elevated priority).
+
+Instead, mark the hrtimer used by the watchdog core as being _HARD to
+allow its execution directly from hardirq context. The work done in
+this expiry function is well-bounded and minimal.
+
+A user still must adjust the scheduling parameters of the watchdogd
+to be correct w.r.t. their application needs.
+
+Cc: Guenter Roeck
+Reported-and-tested-by: Steffen Trumtrar
+Reported-by: Tim Sander
+Signed-off-by: Julia Cartwright
+Acked-by: Guenter Roeck
+[bigeasy: use only HRTIMER_MODE_REL_HARD]
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/watchdog/watchdog_dev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -147,7 +147,7 @@ static inline void watchdog_update_worke
+ 		ktime_t t = watchdog_next_keepalive(wdd);
+ 
+ 		if (t > 0)
+-			hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL);
++			hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL_HARD);
+ 	} else {
+ 		hrtimer_cancel(&wd_data->timer);
+ 	}
+@@ -166,7 +166,7 @@ static int __watchdog_ping(struct watchd
+ 	if (ktime_after(earliest_keepalive, now)) {
+ 		hrtimer_start(&wd_data->timer,
+ 			      ktime_sub(earliest_keepalive, now),
+-			      HRTIMER_MODE_REL);
++			      HRTIMER_MODE_REL_HARD);
+ 		return 0;
+ 	}
+ 
+@@ -945,7 +945,7 @@ static int watchdog_cdev_register(struct
+ 		return -ENODEV;
+ 
+ 	kthread_init_work(&wd_data->work, watchdog_ping_work);
+-	hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++	hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+ 	wd_data->timer.function = watchdog_timer_expired;
+ 
+ 	if (wdd->id == 0) {
+@@ -992,7 +992,7 @@ static int watchdog_cdev_register(struct
+ 	__module_get(wdd->ops->owner);
+ 	kref_get(&wd_data->kref);
+ 	if (handle_boot_enabled)
+-		hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
++		hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL_HARD);
+ 	else
+ 		pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
+ 			wdd->id);
diff --git a/debian/patches-rt/work-queue-work-around-irqsafe-timer-optimization.patch b/debian/patches-rt/work-queue-work-around-irqsafe-timer-optimization.patch
index 7b11a595c..025a3281f 100644
--- a/debian/patches-rt/work-queue-work-around-irqsafe-timer-optimization.patch
+++ b/debian/patches-rt/work-queue-work-around-irqsafe-timer-optimization.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner
 Date: Mon, 01 Jul 2013 11:02:42 +0200
 Subject: workqueue: Prevent workqueue versus ata-piix livelock
-Origin:
https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz An Intel i7 system regularly detected rcu_preempt stalls after the kernel was upgraded from 3.6-rt to 3.8-rt. When the stall happened, disk I/O was no diff --git a/debian/patches-rt/work-simple-Simple-work-queue-implemenation.patch b/debian/patches-rt/work-simple-Simple-work-queue-implemenation.patch index 0cfde959d..470df6ec9 100644 --- a/debian/patches-rt/work-simple-Simple-work-queue-implemenation.patch +++ b/debian/patches-rt/work-simple-Simple-work-queue-implemenation.patch @@ -1,7 +1,7 @@ From: Daniel Wagner Date: Fri, 11 Jul 2014 15:26:11 +0200 Subject: work-simple: Simple work queue implemenation -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Provides a framework for enqueuing callbacks from irq context PREEMPT_RT_FULL safe. The callbacks are executed in kthread context. @@ -52,7 +52,7 @@ Signed-off-by: Daniel Wagner -obj-y += wait.o wait_bit.o swait.o completion.o +obj-y += wait.o wait_bit.o swait.o swork.o completion.o - obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o + obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o --- /dev/null +++ b/kernel/sched/swork.c @@ -105,8 +105,8 @@ Signed-off-by: Daniel Wagner + struct sworker *worker = arg; + + for (;;) { -+ swait_event_interruptible(worker->wq, -+ swork_readable(worker)); ++ swait_event_interruptible_exclusive(worker->wq, ++ swork_readable(worker)); + if (kthread_should_stop()) + break; + @@ -176,7 +176,7 @@ Signed-off-by: Daniel Wagner + list_add_tail(&sev->item, &glob_worker->events); + raw_spin_unlock_irqrestore(&glob_worker->lock, flags); + -+ swake_up(&glob_worker->wq); ++ swake_up_one(&glob_worker->wq); + return true; +} +EXPORT_SYMBOL_GPL(swork_queue); diff --git a/debian/patches-rt/work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch b/debian/patches-rt/work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch index 5fef466a0..f3e9b219a 100644 --- a/debian/patches-rt/work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch +++ b/debian/patches-rt/work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Mon, 10 Sep 2018 18:00:31 +0200 Subject: [PATCH] work-simple: drop a shit statement in SWORK_EVENT_PENDING -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Dan Carpenter reported | smatch warnings: diff --git a/debian/patches-rt/workqueue-distangle-from-rq-lock.patch b/debian/patches-rt/workqueue-distangle-from-rq-lock.patch index 160598852..01f0c74aa 100644 --- a/debian/patches-rt/workqueue-distangle-from-rq-lock.patch +++ b/debian/patches-rt/workqueue-distangle-from-rq-lock.patch @@ -22,7 +22,7 @@ Cc: Jens Axboe Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de Signed-off-by: Thomas Gleixner -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz [bigeasy: preempt_disable() around wq_worker_sleeping() by Daniel Bristot de Oliveira] 
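Pieced together from the kernel/sched/core.c hunks in this and the following patch, the relocated hook ends up with roughly the shape below. This is a simplified reconstruction for orientation, not a verbatim copy of the patched source:

  static inline void sched_submit_work(struct task_struct *tsk)
  {
          if (!tsk->state || tsk_is_pi_blocked(tsk))
                  return;

          /* a worker is about to sleep: let the workqueue core wake a
           * replacement so per-CPU concurrency is maintained; this now
           * runs in preemptible task context instead of under the
           * rq lock inside __schedule() */
          if (tsk->flags & PF_WQ_WORKER) {
                  preempt_disable();
                  wq_worker_sleeping(tsk);
                  preempt_enable_no_resched();
          }

          /* flush plugged IO before sleeping to avoid deadlocks */
          if (blk_needs_flush_plug(tsk))
                  blk_schedule_flush_plug(tsk);
  }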
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -1722,10 +1722,6 @@ static inline void ttwu_activate(struct +@@ -1703,10 +1703,6 @@ static inline void ttwu_activate(struct { activate_task(rq, p, en_flags); p->on_rq = TASK_ON_RQ_QUEUED; @@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior } /* -@@ -2166,56 +2162,6 @@ try_to_wake_up(struct task_struct *p, un +@@ -2143,56 +2139,6 @@ try_to_wake_up(struct task_struct *p, un } /** @@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior * wake_up_process - Wake up a specific process * @p: The process to be woken up. * -@@ -3541,21 +3487,6 @@ static void __sched notrace __schedule(b +@@ -3519,21 +3465,6 @@ static void __sched notrace __schedule(b atomic_inc(&rq->nr_iowait); delayacct_blkio_start(); } @@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior } switch_count = &prev->nvcsw; } -@@ -3615,6 +3546,20 @@ static inline void sched_submit_work(str +@@ -3593,6 +3524,20 @@ static inline void sched_submit_work(str { if (!tsk->state || tsk_is_pi_blocked(tsk)) return; @@ -146,7 +146,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. -@@ -3623,6 +3568,12 @@ static inline void sched_submit_work(str +@@ -3601,6 +3546,12 @@ static inline void sched_submit_work(str blk_schedule_flush_plug(tsk); } @@ -159,7 +159,7 @@ Signed-off-by: Sebastian Andrzej Siewior asmlinkage __visible void __sched schedule(void) { struct task_struct *tsk = current; -@@ -3633,6 +3584,7 @@ asmlinkage __visible void __sched schedu +@@ -3611,6 +3562,7 @@ asmlinkage __visible void __sched schedu __schedule(false); sched_preempt_enable_no_resched(); } while (need_resched()); diff --git a/debian/patches-rt/workqueue-prevent-deadlock-stall.patch b/debian/patches-rt/workqueue-prevent-deadlock-stall.patch index 9b209d6c2..836c5996e 100644 --- a/debian/patches-rt/workqueue-prevent-deadlock-stall.patch +++ b/debian/patches-rt/workqueue-prevent-deadlock-stall.patch @@ -1,7 +1,7 @@ Subject: workqueue: Prevent deadlock/stall on RT From: Thomas Gleixner Date: Fri, 27 Jun 2014 16:24:52 +0200 (CEST) -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Austin reported a XFS deadlock/stall on RT where scheduled work gets never exececuted and tasks are waiting for each other for ever. @@ -44,7 +44,7 @@ Cc: Steven Rostedt --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -3590,9 +3590,8 @@ void __noreturn do_task_dead(void) +@@ -3568,9 +3568,8 @@ void __noreturn do_task_dead(void) static inline void sched_submit_work(struct task_struct *tsk) { @@ -55,7 +55,7 @@ Cc: Steven Rostedt /* * If a worker went to sleep, notify and ask workqueue whether * it wants to wake up a task to maintain concurrency. 
-@@ -3606,6 +3605,9 @@ static inline void sched_submit_work(str +@@ -3584,6 +3583,9 @@ static inline void sched_submit_work(str preempt_enable_no_resched(); } diff --git a/debian/patches-rt/workqueue-use-locallock.patch b/debian/patches-rt/workqueue-use-locallock.patch index 0f4806a28..b09a75ae3 100644 --- a/debian/patches-rt/workqueue-use-locallock.patch +++ b/debian/patches-rt/workqueue-use-locallock.patch @@ -1,7 +1,7 @@ Subject: workqueue: Use local irq lock instead of irq disable regions From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:42:26 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Use a local_irq_lock as a replacement for irq off regions. We keep the semantic of irq-off in regard to the pool->lock and remain preemptible. @@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner } /** -@@ -2984,7 +2999,7 @@ static bool __cancel_work_timer(struct w +@@ -2999,7 +3014,7 @@ static bool __cancel_work_timer(struct w /* tell other tasks trying to grab @work to back off */ mark_work_canceling(work); @@ -155,7 +155,7 @@ Signed-off-by: Thomas Gleixner /* * This allows canceling during early boot. We know that @work -@@ -3045,10 +3060,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); +@@ -3060,10 +3075,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); */ bool flush_delayed_work(struct delayed_work *dwork) { @@ -168,7 +168,7 @@ Signed-off-by: Thomas Gleixner return flush_work(&dwork->work); } EXPORT_SYMBOL(flush_delayed_work); -@@ -3086,7 +3101,7 @@ static bool __cancel_work(struct work_st +@@ -3101,7 +3116,7 @@ static bool __cancel_work(struct work_st return false; set_work_pool_and_clear_pending(work, get_work_pool_id(work)); diff --git a/debian/patches-rt/workqueue-use-rcu.patch b/debian/patches-rt/workqueue-use-rcu.patch index 4441f0443..15d726d46 100644 --- a/debian/patches-rt/workqueue-use-rcu.patch +++ b/debian/patches-rt/workqueue-use-rcu.patch @@ -1,7 +1,7 @@ Subject: workqueue: Use normal rcu From: Thomas Gleixner Date: Wed, 24 Jul 2013 15:26:54 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz There is no need for sched_rcu. The undocumented reason why sched_rcu is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by @@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner } /** -@@ -2851,14 +2855,14 @@ static bool start_flush_work(struct work +@@ -2855,14 +2859,14 @@ static bool start_flush_work(struct work might_sleep(); @@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner /* see the comment in try_to_grab_pending() with the same code */ pwq = get_work_pwq(work); if (pwq) { -@@ -2889,10 +2893,11 @@ static bool start_flush_work(struct work +@@ -2894,10 +2898,11 @@ static bool start_flush_work(struct work lock_map_acquire(&pwq->wq->lockdep_map); lock_map_release(&pwq->wq->lockdep_map); } @@ -208,7 +208,7 @@ Signed-off-by: Thomas Gleixner return false; } -@@ -3326,7 +3331,7 @@ static void rcu_free_pool(struct rcu_hea +@@ -3341,7 +3346,7 @@ static void rcu_free_pool(struct rcu_hea * put_unbound_pool - put a worker_pool * @pool: worker_pool to put * @@ -217,7 +217,7 @@ Signed-off-by: Thomas Gleixner * safe manner. get_unbound_pool() calls this function on its failure path * and this function should be able to release pools which went through, * successfully or not, init_worker_pool(). 
-@@ -3380,8 +3385,8 @@ static void put_unbound_pool(struct work +@@ -3395,8 +3400,8 @@ static void put_unbound_pool(struct work del_timer_sync(&pool->idle_timer); del_timer_sync(&pool->mayday_timer); @@ -228,7 +228,7 @@ Signed-off-by: Thomas Gleixner } /** -@@ -3488,14 +3493,14 @@ static void pwq_unbound_release_workfn(s +@@ -3503,14 +3508,14 @@ static void pwq_unbound_release_workfn(s put_unbound_pool(pool); mutex_unlock(&wq_pool_mutex); @@ -245,7 +245,7 @@ Signed-off-by: Thomas Gleixner } /** -@@ -4180,7 +4185,7 @@ void destroy_workqueue(struct workqueue_ +@@ -4195,7 +4200,7 @@ void destroy_workqueue(struct workqueue_ * The base ref is never dropped on per-cpu pwqs. Directly * schedule RCU free. */ @@ -254,7 +254,7 @@ Signed-off-by: Thomas Gleixner } else { /* * We're the sole accessor of @wq at this point. Directly -@@ -4290,7 +4295,8 @@ bool workqueue_congested(int cpu, struct +@@ -4305,7 +4310,8 @@ bool workqueue_congested(int cpu, struct struct pool_workqueue *pwq; bool ret; @@ -264,7 +264,7 @@ Signed-off-by: Thomas Gleixner if (cpu == WORK_CPU_UNBOUND) cpu = smp_processor_id(); -@@ -4301,7 +4307,8 @@ bool workqueue_congested(int cpu, struct +@@ -4316,7 +4322,8 @@ bool workqueue_congested(int cpu, struct pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); ret = !list_empty(&pwq->delayed_works); @@ -274,7 +274,7 @@ Signed-off-by: Thomas Gleixner return ret; } -@@ -4327,15 +4334,15 @@ unsigned int work_busy(struct work_struc +@@ -4342,15 +4349,15 @@ unsigned int work_busy(struct work_struc if (work_pending(work)) ret |= WORK_BUSY_PENDING; @@ -294,7 +294,7 @@ Signed-off-by: Thomas Gleixner return ret; } -@@ -4519,7 +4526,7 @@ void show_workqueue_state(void) +@@ -4534,7 +4541,7 @@ void show_workqueue_state(void) unsigned long flags; int pi; @@ -303,7 +303,7 @@ Signed-off-by: Thomas Gleixner pr_info("Showing busy workqueues and worker pools:\n"); -@@ -4584,7 +4591,7 @@ void show_workqueue_state(void) +@@ -4599,7 +4606,7 @@ void show_workqueue_state(void) touch_nmi_watchdog(); } @@ -312,7 +312,7 @@ Signed-off-by: Thomas Gleixner } /* used to show worker information through /proc/PID/{comm,stat,status} */ -@@ -4971,16 +4978,16 @@ bool freeze_workqueues_busy(void) +@@ -4986,16 +4993,16 @@ bool freeze_workqueues_busy(void) * nr_active is monotonically decreasing. It's safe * to peek without lock. 
*/ @@ -332,7 +332,7 @@ Signed-off-by: Thomas Gleixner } out_unlock: mutex_unlock(&wq_pool_mutex); -@@ -5175,7 +5182,8 @@ static ssize_t wq_pool_ids_show(struct d +@@ -5190,7 +5197,8 @@ static ssize_t wq_pool_ids_show(struct d const char *delim = ""; int node, written = 0; @@ -342,7 +342,7 @@ Signed-off-by: Thomas Gleixner for_each_node(node) { written += scnprintf(buf + written, PAGE_SIZE - written, "%s%d:%d", delim, node, -@@ -5183,7 +5191,8 @@ static ssize_t wq_pool_ids_show(struct d +@@ -5198,7 +5206,8 @@ static ssize_t wq_pool_ids_show(struct d delim = " "; } written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); diff --git a/debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch b/debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch index a1ca156c1..e5e2dec9f 100644 --- a/debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch +++ b/debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch @@ -1,7 +1,7 @@ Subject: x86: crypto: Reduce preempt disabled regions From: Peter Zijlstra Date: Mon, 14 Nov 2011 18:19:27 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz Restrict the preempt disabled regions to the actual floating point operations and enable preemption for the administrative actions. diff --git a/debian/patches-rt/x86-efi-drop-task_lock-from-efi_switch_mm.patch b/debian/patches-rt/x86-efi-drop-task_lock-from-efi_switch_mm.patch index c40150476..c79386792 100644 --- a/debian/patches-rt/x86-efi-drop-task_lock-from-efi_switch_mm.patch +++ b/debian/patches-rt/x86-efi-drop-task_lock-from-efi_switch_mm.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Tue, 24 Jul 2018 14:48:55 +0200 Subject: [PATCH] x86/efi: drop task_lock() from efi_switch_mm() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz efi_switch_mm() is a wrapper around switch_mm() which saves current's ->active_mm, sets the requests mm as ->active_mm and invokes diff --git a/debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch b/debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch index 6eb5221e2..fe0d35938 100644 --- a/debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch +++ b/debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch @@ -1,7 +1,7 @@ From: Sebastian Andrzej Siewior Date: Mon, 11 Mar 2013 17:09:55 +0100 Subject: x86/highmem: Add a "already used pte" check -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz This is a copy from kmap_atomic_prot(). 
diff --git a/debian/patches-rt/x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch b/debian/patches-rt/x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch
index 920336f1a..e3e0b9b11 100644
--- a/debian/patches-rt/x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch
+++ b/debian/patches-rt/x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner
 Date: Tue, 17 Jul 2018 18:25:31 +0200
 Subject: [PATCH] x86/ioapic: Don't let setaffinity unmask threaded EOI
  interrupt too early
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
 
 There is an issue with threaded interrupts which are marked ONESHOT
 and using the fasteoi handler.
diff --git a/debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch b/debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch
index 0b4a2d772..9011690b3 100644
--- a/debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch
+++ b/debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: x86: kvm Require const tsc for RT
 From: Thomas Gleixner
 Date: Sun, 06 Nov 2011 12:26:18 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
 
 Non constant TSC is a nightmare on bare metal already, but with
 virtualization it becomes a complete disaster because the workarounds
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner
 
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -6597,6 +6597,13 @@ int kvm_arch_init(void *opaque)
+@@ -6690,6 +6690,13 @@ int kvm_arch_init(void *opaque)
 		goto out;
 	}
 
diff --git a/debian/patches-rt/x86-preempt-lazy.patch b/debian/patches-rt/x86-preempt-lazy.patch
index b5aaf316c..73baf2dd6 100644
--- a/debian/patches-rt/x86-preempt-lazy.patch
+++ b/debian/patches-rt/x86-preempt-lazy.patch
@@ -1,7 +1,7 @@
 Subject: x86: Support for lazy preemption
 From: Thomas Gleixner
 Date: Thu, 01 Nov 2012 11:03:47 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
 
 Implement the x86 pieces for lazy preempt.
 
@@ -18,12 +18,12 @@ Signed-off-by: Thomas Gleixner
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -178,6 +178,7 @@ config X86
+@@ -180,6 +180,7 @@ config X86
 	select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 +	select HAVE_PREEMPT_LAZY
-	select HAVE_RCU_TABLE_FREE
+	select HAVE_RCU_TABLE_FREE if PARAVIRT
 	select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 --- a/arch/x86/entry/common.c
 +++ b/arch/x86/entry/common.c
@@ -48,16 +48,16 @@ Signed-off-by: Thomas Gleixner
 #ifdef ARCH_RT_DELAYS_SIGNAL_SEND
 --- a/arch/x86/entry/entry_32.S
 +++ b/arch/x86/entry/entry_32.S
-@@ -350,8 +350,25 @@ END(ret_from_exception)
+@@ -764,8 +764,25 @@ END(ret_from_exception)
 ENTRY(resume_kernel)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 .Lneed_resched:
+	# preempt count == 0 + NEED_RS set?
 	cmpl $0, PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
-	jnz restore_all
+	jnz restore_all_kernel
+#else
+	jz test_int_off
+
+	# atleast preempt count == 0 ?
+	cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
@@ -69,15 +69,15 @@ Signed-off-by: Thomas Gleixner
+
+	testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
+	jz restore_all
+test_int_off:
+#endif
 	testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
-	jz restore_all
+	jz restore_all_kernel
 	call preempt_schedule_irq
 --- a/arch/x86/entry/entry_64.S
 +++ b/arch/x86/entry/entry_64.S
-@@ -704,7 +704,23 @@ GLOBAL(swapgs_restore_regs_and_return_to
-	bt $9, EFLAGS(%rsp)	/* were interrupts off? */
+@@ -705,7 +705,23 @@ GLOBAL(swapgs_restore_regs_and_return_to
+	btl $9, EFLAGS(%rsp)	/* were interrupts off? */
 	jnc 1f
 0:	cmpl $0, PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
 	jnz 1f
+#else
+	jz do_preempt_schedule_irq
+
+	# atleast preempt count == 0 ?
+	cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+	jnz 1f
+
+	movq PER_CPU_VAR(current_task), %rcx
+	cmpl $0, TASK_TI_preempt_lazy_count(%rcx)
+	jnz 1f
+
-+	bt $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
++	btl $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
+	jnc 1f
+do_preempt_schedule_irq:
+#endif
diff --git a/debian/patches-rt/x86-signal-delay-calling-signals-on-32bit.patch b/debian/patches-rt/x86-signal-delay-calling-signals-on-32bit.patch
index 5e2e9e2ad..79bf50197 100644
--- a/debian/patches-rt/x86-signal-delay-calling-signals-on-32bit.patch
+++ b/debian/patches-rt/x86-signal-delay-calling-signals-on-32bit.patch
@@ -1,7 +1,7 @@
 From: Yang Shi
 Date: Thu, 10 Dec 2015 10:58:51 -0800
 Subject: x86/signal: delay calling signals on 32bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
 
 When running some ptrace single step tests on x86-32 machine, the
 below problem is triggered:
diff --git a/debian/patches-rt/x86-stackprot-no-random-on-rt.patch b/debian/patches-rt/x86-stackprot-no-random-on-rt.patch
index 64ab72689..50644cd3b 100644
--- a/debian/patches-rt/x86-stackprot-no-random-on-rt.patch
+++ b/debian/patches-rt/x86-stackprot-no-random-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner
 Date: Thu, 16 Dec 2010 14:25:18 +0100
 Subject: x86: stackprotector: Avoid random pool on rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
 
 CPU bringup calls into the random pool to initialize the stack
 canary. During boot that works nicely even on RT as the might sleep
diff --git a/debian/patches-rt/x86-use-gen-rwsem-spinlocks-rt.patch b/debian/patches-rt/x86-use-gen-rwsem-spinlocks-rt.patch
index 516aa5ce3..5ca41fcc6 100644
--- a/debian/patches-rt/x86-use-gen-rwsem-spinlocks-rt.patch
+++ b/debian/patches-rt/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner
 Date: Sun, 26 Jul 2009 02:21:32 +0200
 Subject: x86: Use generic rwsem_spinlocks on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
 
 Simplifies the separation of anon_rw_semaphores and rw_semaphores
 for -rt.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -262,8 +262,11 @@ config ARCH_MAY_HAVE_PC_FDC
+@@ -264,8 +264,11 @@ config ARCH_MAY_HAVE_PC_FDC
 	def_bool y
 	depends on ISA_DMA_API
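The entry_32.S/entry_64.S hunks in the x86-preempt-lazy patch above encode a single decision; rendered as C for readability (preempt_lazy_count and TIF_NEED_RESCHED_LAZY exist only with the RT series applied, and this helper is illustrative, not a kernel function):

    #include <linux/preempt.h>
    #include <linux/sched.h>
    #include <linux/thread_info.h>

    static bool irq_return_should_preempt(void)
    {
            /* Regular path: no preempt_count held and NEED_RESCHED set.
             * On x86 both facts live in the __preempt_count per-cpu word,
             * which is why the asm gets away with a single cmpl. */
            if (!preempt_count() && need_resched())
                    return true;

    #ifdef CONFIG_PREEMPT_LAZY
            /* Lazy path: count is zero, no regular resched request, no
             * preempt_lazy_count reference held, and the lazy flag set. */
            if (!preempt_count() && !need_resched() &&
                !current_thread_info()->preempt_lazy_count &&
                test_thread_flag(TIF_NEED_RESCHED_LAZY))
                    return true;
    #endif
            return false;
    }

In both branches the assembly additionally checks that the interrupted context had interrupts enabled (the X86_EFLAGS_IF and bit-9 EFLAGS tests) before calling preempt_schedule_irq().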
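For the x86-kvm-require-const-tsc-for-rt patch, the hunk above shows only the relocated insertion point in kvm_arch_init(); the seven added lines themselves are not part of this diff. Based on the patch description, they amount to something like the following — a reconstruction, wrapped in a hypothetical helper so it stands alone:

    #include <asm/cpufeature.h>
    #include <linux/errno.h>
    #include <linux/printk.h>

    /* Refuse to initialize KVM on an RT kernel whose TSC is not
     * constant: the usual TSC workarounds are unusable there once
     * virtualization is involved. */
    static int kvm_rt_tsc_check(void)
    {
    #ifdef CONFIG_PREEMPT_RT_FULL
            if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
                    pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n");
                    return -EOPNOTSUPP;
            }
    #endif
            return 0;
    }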