Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:

 - Check for the right CPU feature bit in sm4-ce on arm64.

 - Fix scatterwalk WARN_ON in aes-gcm-ce on arm64.

 - Fix unaligned fault in aesni on x86.

 - Fix potential NULL pointer dereference on exit in chtls.

 - Fix DMA mapping direction for RSA in caam.

 - Fix error path return value for xts setkey in caam.

 - Fix address endianness when DMA unmapping in caam.

 - Fix sleep-in-atomic in vmx.

 - Fix command corruption when queue is full in cavium/nitrox.

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: cavium/nitrox - fix for command corruption in queue full case with backlog submissions.
  crypto: vmx - Fix sleep-in-atomic bugs
  crypto: arm64/aes-gcm-ce - fix scatterwalk API violation
  crypto: aesni - Use unaligned loads from gcm_context_data
  crypto: chtls - fix null dereference chtls_free_uld()
  crypto: arm64/sm4-ce - check for the right CPU feature bit
  crypto: caam - fix DMA mapping direction for RSA forms 2 & 3
  crypto: caam/qi - fix error path in xts setkey
  crypto: caam/jr - fix descriptor DMA unmapping
Committed by Linus Torvalds, 2018-08-29 13:38:39 -07:00
commit b4df50de6a
13 changed files with 144 additions and 106 deletions

View File

@@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req)
         __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
         put_unaligned_be32(2, iv + GCM_IV_SIZE);
-        while (walk.nbytes >= AES_BLOCK_SIZE) {
+        while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
             int blocks = walk.nbytes / AES_BLOCK_SIZE;
             u8 *dst = walk.dst.virt.addr;
             u8 *src = walk.src.virt.addr;
@@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req)
                             NULL);
             err = skcipher_walk_done(&walk,
-                                     walk.nbytes % AES_BLOCK_SIZE);
+                                     walk.nbytes % (2 * AES_BLOCK_SIZE));
         }
-        if (walk.nbytes)
+        if (walk.nbytes) {
             __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
                                 nrounds);
+            if (walk.nbytes > AES_BLOCK_SIZE) {
+                crypto_inc(iv, AES_BLOCK_SIZE);
+                __aes_arm64_encrypt(ctx->aes_key.key_enc,
+                                    ks + AES_BLOCK_SIZE, iv,
+                                    nrounds);
+            }
+        }
     }
     /* handle the tail */
@@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req)
         __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
         put_unaligned_be32(2, iv + GCM_IV_SIZE);
-        while (walk.nbytes >= AES_BLOCK_SIZE) {
+        while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
             int blocks = walk.nbytes / AES_BLOCK_SIZE;
             u8 *dst = walk.dst.virt.addr;
             u8 *src = walk.src.virt.addr;
@@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req)
             } while (--blocks > 0);
             err = skcipher_walk_done(&walk,
-                                     walk.nbytes % AES_BLOCK_SIZE);
+                                     walk.nbytes % (2 * AES_BLOCK_SIZE));
         }
-        if (walk.nbytes)
+        if (walk.nbytes) {
+            if (walk.nbytes > AES_BLOCK_SIZE) {
+                u8 *iv2 = iv + AES_BLOCK_SIZE;
+                memcpy(iv2, iv, AES_BLOCK_SIZE);
+                crypto_inc(iv2, AES_BLOCK_SIZE);
+                __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
+                                    iv2, nrounds);
+            }
             __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
                                 nrounds);
+        }
     }
     /* handle the tail */
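Note on the change above: the skcipher walk may hand a driver any number of bytes per step, so a driver that processes fixed-size chunks has to consume only whole chunks per iteration and give the remainder back through skcipher_walk_done(). A minimal sketch of that loop shape, assuming a hypothetical SIMD routine process_blocks() and a chunk size CHUNK (here 2 * AES_BLOCK_SIZE); this is an illustration of the contract, not the driver code:

    while (walk.nbytes >= CHUNK) {
            /* consume only whole chunks in this pass */
            unsigned int n = walk.nbytes - (walk.nbytes % CHUNK);

            process_blocks(walk.dst.virt.addr, walk.src.virt.addr, n);

            /* hand the unprocessed remainder back to the walk */
            err = skcipher_walk_done(&walk, walk.nbytes - n);
    }
    /* walk.nbytes < CHUNK here: the final, possibly partial chunk is handled separately */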

View File

@@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void)
     crypto_unregister_alg(&sm4_ce_alg);
 }
-module_cpu_feature_match(SM3, sm4_ce_mod_init);
+module_cpu_feature_match(SM4, sm4_ce_mod_init);
 module_exit(sm4_ce_mod_fini);
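For reference, module_cpu_feature_match() both generates the module alias used for auto-loading and gates the init routine on the named HWCAP, so the feature must be the one the module's instructions actually require; matching on SM3 tied the SM4 driver to the wrong capability bit. A minimal sketch of the macro's use (hypothetical module, not the sm4-ce glue itself):

    #include <linux/module.h>
    #include <linux/cpufeature.h>

    static int __init sm4_demo_init(void)
    {
            /* Only reached on CPUs whose HWCAPs advertise SM4 support. */
            return 0;
    }

    static void __exit sm4_demo_exit(void)
    {
    }

    module_cpu_feature_match(SM4, sm4_demo_init);
    module_exit(sm4_demo_exit);
    MODULE_LICENSE("GPL");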

View File

@@ -223,34 +223,34 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
        pcmpeqd  TWOONE(%rip), \TMP2
        pand     POLY(%rip), \TMP2
        pxor     \TMP2, \TMP3
-       movdqa   \TMP3, HashKey(%arg2)
+       movdqu   \TMP3, HashKey(%arg2)
        movdqa   \TMP3, \TMP5
        pshufd   $78, \TMP3, \TMP1
        pxor     \TMP3, \TMP1
-       movdqa   \TMP1, HashKey_k(%arg2)
+       movdqu   \TMP1, HashKey_k(%arg2)
        GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
 # TMP5 = HashKey^2<<1 (mod poly)
-       movdqa   \TMP5, HashKey_2(%arg2)
+       movdqu   \TMP5, HashKey_2(%arg2)
 # HashKey_2 = HashKey^2<<1 (mod poly)
        pshufd   $78, \TMP5, \TMP1
        pxor     \TMP5, \TMP1
-       movdqa   \TMP1, HashKey_2_k(%arg2)
+       movdqu   \TMP1, HashKey_2_k(%arg2)
        GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
 # TMP5 = HashKey^3<<1 (mod poly)
-       movdqa   \TMP5, HashKey_3(%arg2)
+       movdqu   \TMP5, HashKey_3(%arg2)
        pshufd   $78, \TMP5, \TMP1
        pxor     \TMP5, \TMP1
-       movdqa   \TMP1, HashKey_3_k(%arg2)
+       movdqu   \TMP1, HashKey_3_k(%arg2)
        GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
 # TMP5 = HashKey^3<<1 (mod poly)
-       movdqa   \TMP5, HashKey_4(%arg2)
+       movdqu   \TMP5, HashKey_4(%arg2)
        pshufd   $78, \TMP5, \TMP1
        pxor     \TMP5, \TMP1
-       movdqa   \TMP1, HashKey_4_k(%arg2)
+       movdqu   \TMP1, HashKey_4_k(%arg2)
 .endm
 # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding.
@@ -271,7 +271,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
        movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
        PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
-       movdqa HashKey(%arg2), %xmm13
+       movdqu HashKey(%arg2), %xmm13
        CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
        %xmm4, %xmm5, %xmm6
@@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        pshufd    $78, \XMM5, \TMP6
        pxor      \XMM5, \TMP6
        paddd     ONE(%rip), \XMM0              # INCR CNT
-       movdqa    HashKey_4(%arg2), \TMP5
+       movdqu    HashKey_4(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP4            # TMP4 = a1*b1
        movdqa    \XMM0, \XMM1
        paddd     ONE(%rip), \XMM0              # INCR CNT
@@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        pxor      (%arg1), \XMM2
        pxor      (%arg1), \XMM3
        pxor      (%arg1), \XMM4
-       movdqa    HashKey_4_k(%arg2), \TMP5
+       movdqu    HashKey_4_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP6            # TMP6 = (a1+a0)*(b1+b0)
        movaps    0x10(%arg1), \TMP1
        AESENC    \TMP1, \XMM1                  # Round 1
@@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM6, \TMP1
        pshufd    $78, \XMM6, \TMP2
        pxor      \XMM6, \TMP2
-       movdqa    HashKey_3(%arg2), \TMP5
+       movdqu    HashKey_3(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1            # TMP1 = a1 * b1
        movaps    0x30(%arg1), \TMP3
        AESENC    \TMP3, \XMM1                  # Round 3
@@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        AESENC    \TMP3, \XMM2
        AESENC    \TMP3, \XMM3
        AESENC    \TMP3, \XMM4
-       movdqa    HashKey_3_k(%arg2), \TMP5
+       movdqu    HashKey_3_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2            # TMP2 = (a1+a0)*(b1+b0)
        movaps    0x50(%arg1), \TMP3
        AESENC    \TMP3, \XMM1                  # Round 5
@@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM7, \TMP1
        pshufd    $78, \XMM7, \TMP2
        pxor      \XMM7, \TMP2
-       movdqa    HashKey_2(%arg2), \TMP5
+       movdqu    HashKey_2(%arg2), \TMP5
        # Multiply TMP5 * HashKey using karatsuba
@@ -1074,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        AESENC    \TMP3, \XMM2
        AESENC    \TMP3, \XMM3
        AESENC    \TMP3, \XMM4
-       movdqa    HashKey_2_k(%arg2), \TMP5
+       movdqu    HashKey_2_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2            # TMP2 = (a1+a0)*(b1+b0)
        movaps    0x80(%arg1), \TMP3
        AESENC    \TMP3, \XMM1                  # Round 8
@@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM8, \TMP1
        pshufd    $78, \XMM8, \TMP2
        pxor      \XMM8, \TMP2
-       movdqa    HashKey(%arg2), \TMP5
+       movdqu    HashKey(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1            # TMP1 = a1*b1
        movaps    0x90(%arg1), \TMP3
        AESENC    \TMP3, \XMM1                  # Round 9
@@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@:
        AESENCLAST \TMP3, \XMM2
        AESENCLAST \TMP3, \XMM3
        AESENCLAST \TMP3, \XMM4
-       movdqa    HashKey_k(%arg2), \TMP5
+       movdqu    HashKey_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2            # TMP2 = (a1+a0)*(b1+b0)
        movdqu    (%arg4,%r11,1), \TMP3
        pxor      \TMP3, \XMM1                  # Ciphertext/Plaintext XOR EK
@@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        pshufd    $78, \XMM5, \TMP6
        pxor      \XMM5, \TMP6
        paddd     ONE(%rip), \XMM0              # INCR CNT
-       movdqa    HashKey_4(%arg2), \TMP5
+       movdqu    HashKey_4(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP4            # TMP4 = a1*b1
        movdqa    \XMM0, \XMM1
        paddd     ONE(%rip), \XMM0              # INCR CNT
@@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        pxor      (%arg1), \XMM2
        pxor      (%arg1), \XMM3
        pxor      (%arg1), \XMM4
-       movdqa    HashKey_4_k(%arg2), \TMP5
+       movdqu    HashKey_4_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP6            # TMP6 = (a1+a0)*(b1+b0)
        movaps    0x10(%arg1), \TMP1
        AESENC    \TMP1, \XMM1                  # Round 1
@@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM6, \TMP1
        pshufd    $78, \XMM6, \TMP2
        pxor      \XMM6, \TMP2
-       movdqa    HashKey_3(%arg2), \TMP5
+       movdqu    HashKey_3(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1            # TMP1 = a1 * b1
        movaps    0x30(%arg1), \TMP3
        AESENC    \TMP3, \XMM1                  # Round 3
@@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        AESENC    \TMP3, \XMM2
        AESENC    \TMP3, \XMM3
        AESENC    \TMP3, \XMM4
-       movdqa    HashKey_3_k(%arg2), \TMP5
+       movdqu    HashKey_3_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2            # TMP2 = (a1+a0)*(b1+b0)
        movaps    0x50(%arg1), \TMP3
        AESENC    \TMP3, \XMM1                  # Round 5
@@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM7, \TMP1
        pshufd    $78, \XMM7, \TMP2
        pxor      \XMM7, \TMP2
-       movdqa    HashKey_2(%arg2), \TMP5
+       movdqu    HashKey_2(%arg2), \TMP5
        # Multiply TMP5 * HashKey using karatsuba
@@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        AESENC    \TMP3, \XMM2
        AESENC    \TMP3, \XMM3
        AESENC    \TMP3, \XMM4
-       movdqa    HashKey_2_k(%arg2), \TMP5
+       movdqu    HashKey_2_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2            # TMP2 = (a1+a0)*(b1+b0)
        movaps    0x80(%arg1), \TMP3
        AESENC    \TMP3, \XMM1                  # Round 8
@@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM8, \TMP1
        pshufd    $78, \XMM8, \TMP2
        pxor      \XMM8, \TMP2
-       movdqa    HashKey(%arg2), \TMP5
+       movdqu    HashKey(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1            # TMP1 = a1*b1
        movaps    0x90(%arg1), \TMP3
        AESENC    \TMP3, \XMM1                  # Round 9
@@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@:
        AESENCLAST \TMP3, \XMM2
        AESENCLAST \TMP3, \XMM3
        AESENCLAST \TMP3, \XMM4
-       movdqa    HashKey_k(%arg2), \TMP5
+       movdqu    HashKey_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2            # TMP2 = (a1+a0)*(b1+b0)
        movdqu    (%arg4,%r11,1), \TMP3
        pxor      \TMP3, \XMM1                  # Ciphertext/Plaintext XOR EK
@@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
        movdqa    \XMM1, \TMP6
        pshufd    $78, \XMM1, \TMP2
        pxor      \XMM1, \TMP2
-       movdqa    HashKey_4(%arg2), \TMP5
+       movdqu    HashKey_4(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP6            # TMP6 = a1*b1
        PCLMULQDQ 0x00, \TMP5, \XMM1            # XMM1 = a0*b0
-       movdqa    HashKey_4_k(%arg2), \TMP4
+       movdqu    HashKey_4_k(%arg2), \TMP4
        PCLMULQDQ 0x00, \TMP4, \TMP2            # TMP2 = (a1+a0)*(b1+b0)
        movdqa    \XMM1, \XMMDst
        movdqa    \TMP2, \XMM1                  # result in TMP6, XMMDst, XMM1
@@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
        movdqa    \XMM2, \TMP1
        pshufd    $78, \XMM2, \TMP2
        pxor      \XMM2, \TMP2
-       movdqa    HashKey_3(%arg2), \TMP5
+       movdqu    HashKey_3(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1            # TMP1 = a1*b1
        PCLMULQDQ 0x00, \TMP5, \XMM2            # XMM2 = a0*b0
-       movdqa    HashKey_3_k(%arg2), \TMP4
+       movdqu    HashKey_3_k(%arg2), \TMP4
        PCLMULQDQ 0x00, \TMP4, \TMP2            # TMP2 = (a1+a0)*(b1+b0)
        pxor      \TMP1, \TMP6
        pxor      \XMM2, \XMMDst
@@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
        movdqa    \XMM3, \TMP1
        pshufd    $78, \XMM3, \TMP2
        pxor      \XMM3, \TMP2
-       movdqa    HashKey_2(%arg2), \TMP5
+       movdqu    HashKey_2(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1            # TMP1 = a1*b1
        PCLMULQDQ 0x00, \TMP5, \XMM3            # XMM3 = a0*b0
-       movdqa    HashKey_2_k(%arg2), \TMP4
+       movdqu    HashKey_2_k(%arg2), \TMP4
        PCLMULQDQ 0x00, \TMP4, \TMP2            # TMP2 = (a1+a0)*(b1+b0)
        pxor      \TMP1, \TMP6
        pxor      \XMM3, \XMMDst
@@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
        movdqa    \XMM4, \TMP1
        pshufd    $78, \XMM4, \TMP2
        pxor      \XMM4, \TMP2
-       movdqa    HashKey(%arg2), \TMP5
+       movdqu    HashKey(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1            # TMP1 = a1*b1
        PCLMULQDQ 0x00, \TMP5, \XMM4            # XMM4 = a0*b0
-       movdqa    HashKey_k(%arg2), \TMP4
+       movdqu    HashKey_k(%arg2), \TMP4
        PCLMULQDQ 0x00, \TMP4, \TMP2            # TMP2 = (a1+a0)*(b1+b0)
        pxor      \TMP1, \TMP6
        pxor      \XMM4, \XMMDst
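The reason movdqu is needed throughout: movdqa requires its memory operand to be 16-byte aligned and faults otherwise, while movdqu accepts any alignment, and the gcm_context_data structure the HashKey fields live in sits on the caller's stack with no 16-byte alignment guarantee. The C-level analogue of the same rule, as a small hedged sketch (hypothetical helper, not part of the aesni code):

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* Read a 64-bit value from a buffer with no alignment guarantee.
     * get_unaligned() is always safe; dereferencing a misaligned
     * u64 pointer directly may fault on some architectures.
     */
    static u64 read_counter(const void *p)
    {
            return get_unaligned((const u64 *)p);
    }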

View File

@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
     int ret = 0;
     if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-        crypto_ablkcipher_set_flags(ablkcipher,
-                                    CRYPTO_TFM_RES_BAD_KEY_LEN);
         dev_err(jrdev, "key size mismatch\n");
-        return -EINVAL;
+        goto badkey;
     }
     ctx->cdata.keylen = keylen;
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
     return ret;
 badkey:
     crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-    return 0;
+    return -EINVAL;
 }
 /*
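The convention the error path should follow: a setkey handler that rejects a key sets CRYPTO_TFM_RES_BAD_KEY_LEN on the transform and returns -EINVAL; returning 0 would tell the crypto core the key was accepted. A minimal sketch of that shape for the same-era ablkcipher interface (hypothetical handler, key programming omitted):

    #include <linux/crypto.h>
    #include <crypto/aes.h>

    static int demo_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                               unsigned int keylen)
    {
            if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE)
                    goto badkey;

            /* ... program the two half-keys into the context ... */
            return 0;

    badkey:
            crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
            return -EINVAL;
    }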

View File

@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
     dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
     dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
     dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
-    dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-    dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+    dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+    dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
     dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
     dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
     dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
-    dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-    dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+    dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+    dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 /* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
         goto unmap_p;
     }
-    pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+    pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
     if (dma_mapping_error(dev, pdb->tmp1_dma)) {
         dev_err(dev, "Unable to map RSA tmp1 memory\n");
         goto unmap_q;
     }
-    pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+    pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
     if (dma_mapping_error(dev, pdb->tmp2_dma)) {
         dev_err(dev, "Unable to map RSA tmp2 memory\n");
         goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
     return 0;
 unmap_tmp1:
-    dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+    dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_q:
     dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
 unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
         goto unmap_dq;
     }
-    pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+    pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
     if (dma_mapping_error(dev, pdb->tmp1_dma)) {
         dev_err(dev, "Unable to map RSA tmp1 memory\n");
         goto unmap_qinv;
     }
-    pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+    pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
     if (dma_mapping_error(dev, pdb->tmp2_dma)) {
         dev_err(dev, "Unable to map RSA tmp2 memory\n");
         goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
     return 0;
 unmap_tmp1:
-    dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+    dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_qinv:
     dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
 unmap_dq:
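The underlying rule: the direction passed to the DMA API has to describe how the device uses the buffer, and the unmap call must use the same size and direction as the map. The tmp1/tmp2 buffers here are scratch space the accelerator writes as well as reads, hence DMA_BIDIRECTIONAL on both sides. A minimal sketch of the pattern (hypothetical device and buffer):

    #include <linux/dma-mapping.h>

    static int demo_dma_roundtrip(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            /* Device both reads and writes this scratch buffer. */
            handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... submit to the device and wait for completion ... */

            /* Unmap with the same size and direction used at map time. */
            dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);
            return 0;
    }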

View File

@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
         BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
         /* Unmap just-run descriptor so we can post-process */
-        dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+        dma_unmap_single(dev,
+                         caam_dma_to_cpu(jrp->outring[hw_idx].desc),
                          jrp->entinfo[sw_idx].desc_size,
                          DMA_TO_DEVICE);
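The point of the wrapper: the output-ring entry stores the descriptor address in the accelerator's byte order, so it has to be converted to CPU byte order (caam_dma_to_cpu() in this driver) before being handed to dma_unmap_single(). The same rule for a hypothetical little-endian device ring, as a sketch (not the caam structures):

    #include <linux/dma-mapping.h>
    #include <linux/types.h>

    struct demo_ring_entry {
            __le64 desc;    /* DMA address in device byte order */
            __le32 status;
    };

    static void demo_unmap(struct device *dev, struct demo_ring_entry *ent,
                           size_t len)
    {
            /* Convert to CPU byte order before using the value. */
            dma_unmap_single(dev, le64_to_cpu(ent->desc), len, DMA_TO_DEVICE);
    }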

View File

@@ -35,6 +35,7 @@ struct nitrox_cmdq {
     /* requests in backlog queues */
     atomic_t backlog_count;
+    int write_idx;
     /* command size 32B/64B */
     u8 instr_size;
     u8 qno;
@@ -87,7 +88,7 @@ struct nitrox_bh {
     struct bh_data *slc;
 };
-/* NITROX-5 driver state */
+/* NITROX-V driver state */
 #define NITROX_UCODE_LOADED 0
 #define NITROX_READY 1

View File

@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
     cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
     cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
     cmdq->qsize = (qsize + PKT_IN_ALIGN);
+    cmdq->write_idx = 0;
     spin_lock_init(&cmdq->response_lock);
     spin_lock_init(&cmdq->cmdq_lock);

View File

@@ -42,6 +42,16 @@
  *   Invalid flag options in AES-CCM IV.
  */
+static inline int incr_index(int index, int count, int max)
+{
+    if ((index + count) >= max)
+        index = index + count - max;
+    else
+        index += count;
+
+    return index;
+}
+
 /**
  * dma_free_sglist - unmap and free the sg lists.
  * @ndev: N5 device
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr,
                           struct nitrox_cmdq *cmdq)
 {
     struct nitrox_device *ndev = sr->ndev;
-    union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
-    u64 offset;
+    int idx;
     u8 *ent;
     spin_lock_bh(&cmdq->cmdq_lock);
-    /* get the next write offset */
-    offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
-    pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
+    idx = cmdq->write_idx;
     /* copy the instruction */
-    ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
+    ent = cmdq->head + (idx * cmdq->instr_size);
     memcpy(ent, &sr->instr, cmdq->instr_size);
-    /* flush the command queue updates */
-    dma_wmb();
-    sr->tstamp = jiffies;
     atomic_set(&sr->status, REQ_POSTED);
     response_list_add(sr, cmdq);
+    sr->tstamp = jiffies;
+
+    /* flush the command queue updates */
+    dma_wmb();
     /* Ring doorbell with count 1 */
     writeq(1, cmdq->dbell_csr_addr);
     /* orders the doorbell rings */
     mmiowb();
+    cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
+
     spin_unlock_bh(&cmdq->cmdq_lock);
 }
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
     struct nitrox_softreq *sr, *tmp;
     int ret = 0;
+    if (!atomic_read(&cmdq->backlog_count))
+        return 0;
+
     spin_lock_bh(&cmdq->backlog_lock);
     list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
         /* submit until space available */
         if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
-            ret = -EBUSY;
+            ret = -ENOSPC;
             break;
         }
         /* delete from backlog list */
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
 {
     struct nitrox_cmdq *cmdq = sr->cmdq;
     struct nitrox_device *ndev = sr->ndev;
-    int ret = -EBUSY;
+    /* try to post backlog requests */
+    post_backlog_cmds(cmdq);
     if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
         if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-            return -EAGAIN;
+            return -ENOSPC;
+        /* add to backlog list */
         backlog_list_add(sr, cmdq);
-    } else {
-        ret = post_backlog_cmds(cmdq);
-        if (ret) {
-            backlog_list_add(sr, cmdq);
-            return ret;
-        }
-        post_se_instr(sr, cmdq);
-        ret = -EINPROGRESS;
+        return -EBUSY;
     }
-    return ret;
+    post_se_instr(sr, cmdq);
+    return -EINPROGRESS;
 }
 /**
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
      */
     sr->instr.fdata[0] = *((u64 *)&req->gph);
     sr->instr.fdata[1] = 0;
-    /* flush the soft_req changes before posting the cmd */
-    wmb();
     ret = nitrox_enqueue_request(sr);
-    if (ret == -EAGAIN)
+    if (ret == -ENOSPC)
         goto send_fail;
     return ret;
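Two things change above: the driver now keeps its own software write index, wrapped by incr_index() at the queue length, instead of re-reading the hardware doorbell offset while earlier commands may still be in flight, and a full queue is reported as -ENOSPC while -EBUSY is reserved for requests accepted onto the backlog (the usual meaning for CRYPTO_TFM_REQ_MAY_BACKLOG submitters). A small usage sketch of the index helper introduced by the patch (illustrative values only):

    static void demo_ring_index(void)
    {
            const int qlen = 4;
            int idx = 3;

            idx = incr_index(idx, 1, qlen);   /* 3 + 1 wraps to 0 */
            idx = incr_index(idx, 2, qlen);   /* 0 + 2 == 2       */
    }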

View File

@@ -96,6 +96,10 @@ enum csk_flags {
     CSK_CONN_INLINE,    /* Connection on HW */
 };
+enum chtls_cdev_state {
+    CHTLS_CDEV_STATE_UP = 1
+};
+
 struct listen_ctx {
     struct sock *lsk;
     struct chtls_dev *cdev;
@@ -146,6 +150,7 @@ struct chtls_dev {
     unsigned int send_page_order;
     int max_host_sndbuf;
     struct key_map kmap;
+    unsigned int cdev_state;
 };
 struct chtls_hws {

View File

@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
     tlsdev->hash = chtls_create_hash;
     tlsdev->unhash = chtls_destroy_hash;
     tls_register_device(&cdev->tlsdev);
+    cdev->cdev_state = CHTLS_CDEV_STATE_UP;
 }
 static void chtls_unregister_dev(struct chtls_dev *cdev)
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void)
     struct chtls_dev *cdev, *tmp;
     mutex_lock(&cdev_mutex);
-    list_for_each_entry_safe(cdev, tmp, &cdev_list, list)
-        chtls_free_uld(cdev);
+    list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
+        if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
+            chtls_free_uld(cdev);
+    }
     mutex_unlock(&cdev_mutex);
 }
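The pattern being introduced: record that a device finished registration and have the bulk-teardown path skip anything that never got that far, so a partially initialised entry cannot be dereferenced. A minimal sketch of the shape (hypothetical types, not the chtls structures):

    #include <linux/list.h>
    #include <linux/slab.h>

    enum demo_state { DEMO_STATE_UP = 1 };

    struct demo_dev {
            struct list_head list;
            unsigned int state;
    };

    static void demo_free_all(struct list_head *head)
    {
            struct demo_dev *d, *tmp;

            list_for_each_entry_safe(d, tmp, head, list) {
                    /* Only tear down devices that completed registration. */
                    if (d->state == DEMO_STATE_UP) {
                            list_del(&d->list);
                            kfree(d);
                    }
            }
    }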

View File

@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
         ret = crypto_skcipher_encrypt(req);
         skcipher_request_zero(req);
     } else {
-        preempt_disable();
-        pagefault_disable();
-        enable_kernel_vsx();
         blkcipher_walk_init(&walk, dst, src, nbytes);
         ret = blkcipher_walk_virt(desc, &walk);
         while ((nbytes = walk.nbytes)) {
+            preempt_disable();
+            pagefault_disable();
+            enable_kernel_vsx();
             aes_p8_cbc_encrypt(walk.src.virt.addr,
                                walk.dst.virt.addr,
                                nbytes & AES_BLOCK_MASK,
                                &ctx->enc_key, walk.iv, 1);
+            disable_kernel_vsx();
+            pagefault_enable();
+            preempt_enable();
             nbytes &= AES_BLOCK_SIZE - 1;
             ret = blkcipher_walk_done(desc, &walk, nbytes);
         }
-        disable_kernel_vsx();
-        pagefault_enable();
-        preempt_enable();
     }
     return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
         ret = crypto_skcipher_decrypt(req);
         skcipher_request_zero(req);
     } else {
-        preempt_disable();
-        pagefault_disable();
-        enable_kernel_vsx();
         blkcipher_walk_init(&walk, dst, src, nbytes);
         ret = blkcipher_walk_virt(desc, &walk);
         while ((nbytes = walk.nbytes)) {
+            preempt_disable();
+            pagefault_disable();
+            enable_kernel_vsx();
             aes_p8_cbc_encrypt(walk.src.virt.addr,
                                walk.dst.virt.addr,
                                nbytes & AES_BLOCK_MASK,
                                &ctx->dec_key, walk.iv, 0);
+            disable_kernel_vsx();
+            pagefault_enable();
+            preempt_enable();
             nbytes &= AES_BLOCK_SIZE - 1;
             ret = blkcipher_walk_done(desc, &walk, nbytes);
         }
-        disable_kernel_vsx();
-        pagefault_enable();
-        preempt_enable();
     }
     return ret;

View File

@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
         ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
         skcipher_request_zero(req);
     } else {
+        blkcipher_walk_init(&walk, dst, src, nbytes);
+        ret = blkcipher_walk_virt(desc, &walk);
         preempt_disable();
         pagefault_disable();
         enable_kernel_vsx();
-        blkcipher_walk_init(&walk, dst, src, nbytes);
-        ret = blkcipher_walk_virt(desc, &walk);
         iv = walk.iv;
         memset(tweak, 0, AES_BLOCK_SIZE);
         aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
+        disable_kernel_vsx();
+        pagefault_enable();
+        preempt_enable();
         while ((nbytes = walk.nbytes)) {
+            preempt_disable();
+            pagefault_disable();
+            enable_kernel_vsx();
             if (enc)
                 aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                    nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
             else
                 aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                    nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+            disable_kernel_vsx();
+            pagefault_enable();
+            preempt_enable();
             nbytes &= AES_BLOCK_SIZE - 1;
             ret = blkcipher_walk_done(desc, &walk, nbytes);
         }
-        disable_kernel_vsx();
-        pagefault_enable();
-        preempt_enable();
     }
     return ret;
 }
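What both vmx hunks implement: enable_kernel_vsx() together with preempt_disable()/pagefault_disable() opens a non-preemptible window, and the blkcipher walk helpers may sleep, so the window has to be opened and closed around each call into the VSX assembly inside the loop rather than around the whole walk. A condensed sketch of the corrected shape (vsx_crypt_step() is a placeholder for the assembly routine; walk, desc, ret and nbytes are assumed set up as in the functions above):

    while ((nbytes = walk.nbytes)) {
            preempt_disable();
            pagefault_disable();
            enable_kernel_vsx();

            vsx_crypt_step(walk.src.virt.addr, walk.dst.virt.addr,
                           nbytes & AES_BLOCK_MASK);

            disable_kernel_vsx();
            pagefault_enable();
            preempt_enable();

            /* May sleep, so it must run outside the VSX window above. */
            ret = blkcipher_walk_done(desc, &walk,
                                      nbytes & (AES_BLOCK_SIZE - 1));
    }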