96 lines
3.1 KiB
Diff
From: Thomas Gleixner <tglx@linutronix.de>
|
|
Date: Thu, 13 Apr 2017 10:20:23 +0200
|
|
Subject: [PATCH 13/13] crypto: N2 - Replace racy task affinity logic
|
|
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.9-rt7.tar.xz
|
|
|
|
spu_queue_register() needs to invoke setup functions on a particular
|
|
CPU. This is achieved by temporarily setting the affinity of the
|
|
calling user space thread to the requested CPU and reset it to the original
|
|
affinity afterwards.
|
|
|
|
That's racy vs. CPU hotplug and concurrent affinity settings for that
|
|
thread resulting in code executing on the wrong CPU and overwriting the
|
|
new affinity setting.
|
|
|
|
Replace it by using work_on_cpu_safe() which guarantees to run the code on
|
|
the requested CPU or to fail in case the CPU is offline.
|
|
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
|
|
Acked-by: "David S. Miller" <davem@davemloft.net>
|
|
Cc: Fenghua Yu <fenghua.yu@intel.com>
|
|
Cc: Tony Luck <tony.luck@intel.com>
|
|
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
|
|
Cc: Peter Zijlstra <peterz@infradead.org>
|
|
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
|
|
Cc: Sebastian Siewior <bigeasy@linutronix.de>
|
|
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
|
|
Cc: Viresh Kumar <viresh.kumar@linaro.org>
|
|
Cc: linux-crypto@vger.kernel.org
|
|
Cc: Michael Ellerman <mpe@ellerman.id.au>
|
|
Cc: Tejun Heo <tj@kernel.org>
|
|
Cc: Len Brown <lenb@kernel.org>
|
|
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704131019420.2408@nanos
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
---
|
|
drivers/crypto/n2_core.c | 31 ++++++++++++++++---------------
|
|
1 file changed, 16 insertions(+), 15 deletions(-)
|
|
|
|
--- a/drivers/crypto/n2_core.c
|
|
+++ b/drivers/crypto/n2_core.c
|
|
@@ -65,6 +65,11 @@ struct spu_queue {
|
|
struct list_head list;
|
|
};
|
|
|
|
+struct spu_qreg {
|
|
+ struct spu_queue *queue;
|
|
+ unsigned long type;
|
|
+};
|
|
+
|
|
static struct spu_queue **cpu_to_cwq;
|
|
static struct spu_queue **cpu_to_mau;
|
|
|
|
@@ -1631,31 +1636,27 @@ static void queue_cache_destroy(void)
|
|
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
|
|
}
|
|
|
|
-static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
|
|
+static long spu_queue_register_workfn(void *arg)
|
|
{
|
|
- cpumask_var_t old_allowed;
|
|
+ struct spu_qreg *qr = arg;
|
|
+ struct spu_queue *p = qr->queue;
|
|
+ unsigned long q_type = qr->type;
|
|
unsigned long hv_ret;
|
|
|
|
- if (cpumask_empty(&p->sharing))
|
|
- return -EINVAL;
|
|
-
|
|
- if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
|
|
- return -ENOMEM;
|
|
-
|
|
- cpumask_copy(old_allowed, &current->cpus_allowed);
|
|
-
|
|
- set_cpus_allowed_ptr(current, &p->sharing);
|
|
-
|
|
hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
|
|
CWQ_NUM_ENTRIES, &p->qhandle);
|
|
if (!hv_ret)
|
|
sun4v_ncs_sethead_marker(p->qhandle, 0);
|
|
|
|
- set_cpus_allowed_ptr(current, old_allowed);
|
|
+ return hv_ret ? -EINVAL : 0;
|
|
+}
|
|
|
|
- free_cpumask_var(old_allowed);
|
|
+static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
|
|
+{
|
|
+ int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
|
|
+ struct spu_qreg qr = { .queue = p, .type = q_type };
|
|
|
|
- return (hv_ret ? -EINVAL : 0);
|
|
+ return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
|
|
}
|
|
|
|
static int spu_queue_setup(struct spu_queue *p)
|