sched/preempt, powerpc: Disable preemption in enable_kernel_altivec() explicitly
enable_kernel_altivec() has to be called with disabled preemption. Let's
make this explicit, to prepare for pagefault_disable() not touching
preemption anymore.

Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: bigeasy@linutronix.de
Cc: borntraeger@de.ibm.com
Cc: daniel.vetter@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: hocko@suse.cz
Cc: hughd@google.com
Cc: mst@redhat.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: schwidefsky@de.ibm.com
Cc: yang.shi@windriver.com
Link: http://lkml.kernel.org/r/1431359540-32227-14-git-send-email-dahi@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2f09b227ee
commit 5f76eea88d
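The patch repeats one calling discipline across all callers: take preempt_disable() explicitly before pagefault_disable() and enable_kernel_altivec(), and drop it again after pagefault_enable(), so the AltiVec register state can no longer be lost to preemption once pagefault_disable() stops implying preempt_disable(). A minimal sketch of that discipline (illustrative only, not a function from this patch):

	/* Sketch of the bracketing the diff below introduces everywhere. */
	preempt_disable();        /* keep the task from being preempted/migrated  */
	pagefault_disable();      /* a page fault could schedule and clobber VMX  */
	enable_kernel_altivec();  /* kernel may now use the VMX/AltiVec registers */

	/* ... AltiVec work ... */

	pagefault_enable();
	preempt_enable();

The hunks below apply this same bracketing to the powerpc VMX usercopy helpers and to the p8_aes*/p8_ghash crypto handlers.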
@@ -27,11 +27,11 @@ int enter_vmx_usercopy(void)
 	if (in_interrupt())
 		return 0;
 
-	/* This acts as preempt_disable() as well and will make
-	 * enable_kernel_altivec(). We need to disable page faults
-	 * as they can call schedule and thus make us lose the VMX
-	 * context. So on page faults, we just fail which will cause
-	 * a fallback to the normal non-vmx copy.
+	preempt_disable();
+	/*
+	 * We need to disable page faults as they can call schedule and
+	 * thus make us lose the VMX context. So on page faults, we just
+	 * fail which will cause a fallback to the normal non-vmx copy.
 	 */
 	pagefault_disable();
 
@@ -47,6 +47,7 @@ int enter_vmx_usercopy(void)
 int exit_vmx_usercopy(void)
 {
 	pagefault_enable();
+	preempt_enable();
 	return 0;
 }
 
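For context (not part of this diff): enter_vmx_usercopy() and exit_vmx_usercopy() are the helpers that the powerpc VMX-accelerated copy routines bracket their work with, so the preempt_enable() added above ends the non-preemptible region that enter_vmx_usercopy() now opens. A hedged sketch of how a caller pairs them, assuming enter_vmx_usercopy() returns nonzero once the VMX context is secured (the real callers are assembly copy loops, shown here as C for illustration):

	/* Illustrative caller only. */
	if (enter_vmx_usercopy()) {
		/* preemption and page faults are off, VMX is usable */
		/* ... VMX-accelerated copy ... */
		exit_vmx_usercopy();	/* pagefault_enable() + preempt_enable() */
	} else {
		/* ... plain, non-VMX copy ... */
	}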
@@ -78,12 +78,14 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
 	int ret;
 	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
+	preempt_disable();
 	pagefault_disable();
 	enable_kernel_altivec();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
 	pagefault_enable();
+	preempt_enable();
 
 	ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
 	return ret;
 }
@@ -95,10 +97,12 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	if (in_interrupt()) {
 		crypto_cipher_encrypt_one(ctx->fallback, dst, src);
 	} else {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		aes_p8_encrypt(src, dst, &ctx->enc_key);
 		pagefault_enable();
+		preempt_enable();
 	}
 }
 
@@ -109,10 +113,12 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	if (in_interrupt()) {
 		crypto_cipher_decrypt_one(ctx->fallback, dst, src);
 	} else {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		aes_p8_decrypt(src, dst, &ctx->dec_key);
 		pagefault_enable();
+		preempt_enable();
 	}
 }
 
@@ -79,11 +79,13 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
 	int ret;
 	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
 
+	preempt_disable();
 	pagefault_disable();
 	enable_kernel_altivec();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
 	pagefault_enable();
+	preempt_enable();
 
 	ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
 	return ret;
@@ -106,6 +108,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 	if (in_interrupt()) {
 		ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
 	} else {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 
@@ -119,6 +122,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 		}
 
 		pagefault_enable();
+		preempt_enable();
 	}
 
 	return ret;
@@ -141,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 	if (in_interrupt()) {
 		ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
 	} else {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 
@@ -154,6 +159,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 		}
 
 		pagefault_enable();
+		preempt_enable();
 	}
 
 	return ret;
@@ -114,11 +114,13 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
 	if (keylen != GHASH_KEY_LEN)
 		return -EINVAL;
 
+	preempt_disable();
 	pagefault_disable();
 	enable_kernel_altivec();
 	enable_kernel_fp();
 	gcm_init_p8(ctx->htable, (const u64 *) key);
 	pagefault_enable();
+	preempt_enable();
 	return crypto_shash_setkey(ctx->fallback, key, keylen);
 }
 
@@ -140,23 +142,27 @@ static int p8_ghash_update(struct shash_desc *desc,
 		}
 		memcpy(dctx->buffer + dctx->bytes, src,
 			GHASH_DIGEST_SIZE - dctx->bytes);
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		enable_kernel_fp();
 		gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
 			GHASH_DIGEST_SIZE);
 		pagefault_enable();
+		preempt_enable();
 		src += GHASH_DIGEST_SIZE - dctx->bytes;
 		srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
 		dctx->bytes = 0;
 	}
 	len = srclen & ~(GHASH_DIGEST_SIZE - 1);
 	if (len) {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		enable_kernel_fp();
 		gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
 		pagefault_enable();
+		preempt_enable();
 		src += len;
 		srclen -= len;
 	}
@@ -180,12 +186,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
 	if (dctx->bytes) {
 		for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
 			dctx->buffer[i] = 0;
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		enable_kernel_fp();
 		gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
 			GHASH_DIGEST_SIZE);
 		pagefault_enable();
+		preempt_enable();
 		dctx->bytes = 0;
 	}
 	memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);