Subject: x86: crypto: Reduce preempt disabled regions
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 14 Nov 2011 18:19:27 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.6-rt9.tar.xz

Restrict the preempt disabled regions to the actual floating point
operations and enable preemption for the administrative actions.

This is necessary on RT to avoid that kfree and other operations are
called with preemption disabled.

Reported-and-tested-by: Carsten Emde <cbe@osadl.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: stable-rt@vger.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/crypto/aesni-intel_glue.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -252,14 +252,14 @@ static int ecb_encrypt(struct blkcipher_
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-			      nbytes & AES_BLOCK_MASK);
+				nbytes & AES_BLOCK_MASK);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -276,14 +276,14 @@ static int ecb_decrypt(struct blkcipher_
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -300,14 +300,14 @@ static int cbc_encrypt(struct blkcipher_
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -324,14 +324,14 @@ static int cbc_decrypt(struct blkcipher_
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -364,18 +364,20 @@ static int ctr_crypt(struct blkcipher_de
 	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+		kernel_fpu_begin();
 		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 	if (walk.nbytes) {
+		kernel_fpu_begin();
 		ctr_crypt_final(ctx, &walk);
+		kernel_fpu_end();
 		err = blkcipher_walk_done(desc, &walk, 0);
 	}
-	kernel_fpu_end();
 
 	return err;
 }