linux/debian/patches-rt/0023-EXP-rcu-Revert-expedit...

From 826426c0daabf1e601bbe81b30be496da4248e89 Mon Sep 17 00:00:00 2001
Message-Id: <826426c0daabf1e601bbe81b30be496da4248e89.1601675151.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: "Paul E. McKenney" <paulmck@linux.ibm.com>
Date: Mon, 29 Oct 2018 11:53:01 +0100
Subject: [PATCH 023/333] EXP rcu: Revert expedited GP parallelization
 cleverness
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

(Commit 258ba8e089db23f760139266c232f01bad73f85c from linux-rcu)

This commit reverts a series of commits starting with fcc635436501 ("rcu:
Make expedited GPs handle CPU 0 being offline") and its successors, thus
queueing each rcu_node structure's expedited grace-period initialization
work on the first CPU of that rcu_node structure.
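
For illustration, the queueing step in sync_rcu_exp_select_cpus() after
this revert reduces to the fragment sketched below. This is only a sketch
excerpted from the hunks in this patch, not a standalone build unit; rnp,
rcu_par_gp_wq and sync_rcu_exp_select_node_cpus() are the 4.19 RCU-tree
definitions assumed here.

	/*
	 * Illustrative fragment (not part of the applied diff): queue each
	 * leaf rcu_node structure's expedited grace-period initialization
	 * work directly on the first CPU of that rcu_node (rnp->grplo),
	 * instead of searching cpu_online_mask for an online CPU in the
	 * node's range as the removed lines did.
	 */
	INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
	rnp->exp_need_flush = true;
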
Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/rcu/tree_exp.h | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 0b2c2ad69629..a0486414edb4 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -472,7 +472,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 				     smp_call_func_t func)
 {
-	int cpu;
 	struct rcu_node *rnp;
 
 	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -494,13 +493,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 			continue;
 		}
 		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
-		preempt_disable();
-		cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
-		/* If all offline, queue the work on an unbound CPU. */
-		if (unlikely(cpu > rnp->grphi))
-			cpu = WORK_CPU_UNBOUND;
-		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
-		preempt_enable();
+		queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
 		rnp->exp_need_flush = true;
 	}
 
--
2.17.1