Subject: x86: crypto: Reduce preempt disabled regions
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 14 Nov 2011 18:19:27 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.10-rt8.tar.xz

Restrict the preempt-disabled regions to the actual floating point
operations and enable preemption for the administrative actions.

This is necessary on RT to avoid kfree and other operations being
called with preemption disabled.

Reported-and-tested-by: Carsten Emde <cbe@osadl.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/crypto/aesni-intel_glue.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -434,14 +434,14 @@ static int ecb_encrypt(struct skcipher_r
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -456,14 +456,14 @@ static int ecb_decrypt(struct skcipher_r
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -478,14 +478,14 @@ static int cbc_encrypt(struct skcipher_r
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -500,14 +500,14 @@ static int cbc_decrypt(struct skcipher_r
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -557,18 +557,20 @@ static int ctr_crypt(struct skcipher_req
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+		kernel_fpu_begin();
 		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 				  nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
 	if (walk.nbytes) {
+		kernel_fpu_begin();
 		ctr_crypt_final(ctx, &walk);
+		kernel_fpu_end();
 		err = skcipher_walk_done(&walk, 0);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
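
The same shape applies to any skcipher walk on RT: take kernel_fpu_begin()/
kernel_fpu_end() only around the SIMD call itself, and leave
skcipher_walk_done(), which may unmap pages or kfree intermediate buffers,
in preemptible context. Below is a minimal illustrative sketch of the
resulting loop, not part of the patch; it assumes kernel context, and
my_simd_transform() is a hypothetical stand-in for an assembly helper such
as aesni_ecb_enc(), while the remaining calls are real kernel APIs.

/*
 * Illustrative sketch only (assumes kernel context, aesni-intel_glue.c
 * style). my_simd_transform() is a hypothetical stand-in for an
 * assembly helper such as aesni_ecb_enc().
 */
static int my_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	void *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();	/* preemption off only for the SIMD work */
		my_simd_transform(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();	/* preemptible again for the bookkeeping */
		nbytes &= AES_BLOCK_SIZE - 1;
		/* may unmap/kfree; must run preemptible on RT */
		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}

The trade-off is one begin/end pair per walked chunk instead of one per
request, in exchange for preempt-off sections bounded by a single SIMD
transform.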